Column           Type    Value range
query            string  9 to 3.4k chars
document         string  9 to 87.4k chars
metadata         dict
negatives        list    4 to 101 items
negative_scores  list    4 to 101 items
document_score   string  3 to 10 chars
document_rank    string  102 distinct values
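Rows with this schema can be inspected with the Hugging Face `datasets` library. A minimal sketch, assuming the dataset is published on the Hub; the identifier `org/code-search-negatives` is a hypothetical placeholder, since the real dataset name does not appear in this dump:

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the real one.
ds = load_dataset("org/code-search-negatives", split="train")

row = ds[0]
print(row["query"])                  # natural-language query (9 to 3.4k chars)
print(row["document"][:200])         # matched code snippet (the positive)
print(len(row["negatives"]))         # 4 to 101 hard-negative snippets
print(len(row["negative_scores"]))   # one score per negative
print(row["metadata"]["objective"])  # declares how training pairs are formed
```

The sample row reproduced below follows exactly this layout.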
query:
Test the popxl mnist with replication example

document:
def test_documentation_popxl_mnist_replication_train(self):
    filename = "mnist_rts.py --replication-factor 2"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_rand(self):\n assert len(self._mnist.random()[:5]) == 5\n pass", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def test_documentation_popxl_mnist(self):\n filename = \"mnist.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) 
is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def test_readme_minimal():\n # Data sampler that generates balanced batches from MNIST dataset\n sampler = TFDatasetMultiShotMemorySampler(\n dataset_name='mnist',\n classes_per_batch=10\n )\n\n # Build a Similarity model using standard Keras layers\n inputs = layers.Input(shape=(28, 28, 1))\n x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n x = layers.Conv2D(64, 3, activation='relu')(x)\n x = layers.Flatten()(x)\n x = layers.Dense(64, activation='relu')(x)\n outputs = MetricEmbedding(64)(x)\n\n # Build a specialized Similarity model\n model = SimilarityModel(inputs, outputs)\n\n # Train Similarity model using contrastive loss\n model.compile('adam', loss=MultiSimilarityLoss())\n model.fit(sampler, epochs=5)\n\n # Index 100 embedded MNIST examples to make them searchable\n sx, sy = sampler.get_slice(0, 100)\n model.index(x=sx, y=sy, data=sx)\n\n # Find the top 5 most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! 
don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def pick_data(ns, digits):\n f = gzip.open('data/mnist.pkl.gz', 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n images, labels = train_set\n\n originals = []; \n shapes = []; \n true_labels = [];\n i = 0\n for n, d in zip(ns, digits):\n # picking n elements with digit d\n x = np.where(labels==d)[0]\n idx = np.random.choice(x, n, replace=False)\n imgs = images[idx]\n originals.append(imgs)\n contours = [mnistshape.get_shape2(im.reshape((28,28)), n=30, s=5, ir=2)\n for im in imgs]\n shapes.append(contours)\n true_labels.append([i]*n)\n i += 1\n originals = np.concatenate(originals)\n true_labels = np.concatenate(true_labels)\n \n new_shapes = []\n for cluster in shapes:\n for shape in cluster:\n new_shapes.append(shape)\n new_shapes = np.array(new_shapes)\n\n # return shuffled data\n idx = range(len(originals))\n np.random.shuffle(idx)\n return originals[idx], new_shapes[idx], true_labels[idx]", "def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. 
Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def replicateExample(self):\n\n C, E = self.getAtomStrainMatches(matches = 5000)\n self.removeByAtomStrain(keep = 5000)\n r = self.getAtomStrainRatio(const = C, exp = E)\n self.indexSortInterfaces(index = np.argsort(r))", "def create_mnistm(X: Any) -> Any:\n\n bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n rand = np.random.RandomState(42)\n train_files = []\n\n with tarfile.open(bst_path, \"r\") as bsr_file:\n for name in bsr_file.getnames():\n if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n train_files.append(name)\n\n print(\"Loading BSR training images\")\n background_data = []\n for name in train_files:\n try:\n fp = bsr_file.extractfile(name)\n bg_img = skimage.io.imread(fp)\n background_data.append(bg_img)\n except:\n continue\n\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n for i in range(X.shape[0]):\n if i % 1000 == 0:\n print(\"Processing example\", i)\n\n bg_img = rand.choice(background_data)\n d = mnist_to_img(X[i])\n d = compose_image(d, bg_img)\n X_[i] = d\n\n return X_", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = 
os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized", "def create_mnistm(X):\r\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\r\n for i in range(X.shape[0]):\r\n bg_img = rand.choice(background_data)\r\n d = mnist_to_img(X[i])\r\n d = compose_image(d, bg_img)\r\n X_[i] = d\r\n return X_", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def test_generate_nb_testing(self):\n pass", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test_generate_nb(self):\n pass", "def mnist_noniid(dataset, num_users):\n # num_shards, num_imgs = 2*num_users, int(dataset.data.size()[0]/2/num_users) # choose two number from a set with num_shards, each client has 2*num_imgs images\n # idx_shard = [i for i in range(num_shards)]\n # dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n # idxs = np.arange(dataset.data.size()[0])\n # labels = dataset.train_labels.numpy()\n #\n # # sort labels\n # idxs_labels = np.vstack((idxs, labels))\n # idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]\n # idxs = idxs_labels[0,:]\n #\n # # divide and assign\n # for i in range(num_users):\n # rand_set = set(np.random.choice(idx_shard, 2, replace=False))\n # idx_shard = list(set(idx_shard) - rand_set)\n # for rand in rand_set:\n # dict_users[i] = np.concatenate((dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)\n # return dict_users\n\n label_list = dataset.targets.numpy()\n minLabel = min(label_list)\n numLabels = 
len(dataset.classes)\n\n dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n for i in range(0, len(label_list)):\n tmp_target_node = int((label_list[i] - minLabel) % num_users)\n if num_users > numLabels:\n tmpMinIndex = 0\n tmpMinVal = math.inf\n for n in range(0, num_users):\n if (n) % numLabels == tmp_target_node and len(dict_users[n]) < tmpMinVal:\n tmpMinVal = len(dict_users[n])\n tmpMinIndex = n\n tmp_target_node = tmpMinIndex\n dict_users[tmp_target_node] = np.concatenate((dict_users[tmp_target_node], [i]), axis=0)\n return dict_users", "def mnist(path=None):\r\n url = 'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def example7(n):\n return mvmt.randomize(tile, n)", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) 
if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels", "def test_star():\n test_path = tempfile.mkdtemp()\n x_train, metadata = star(test_path)\n try:\n assert x_train.shape == (5748, 8)\n except:\n shutil.rmtree(test_path)\n raise()", "def get_mini_samples():\n if GLOBALS['project_root']=='':\n print('please initialize project_root in GLOBALS first')\n return None\n data_path = os.path.join(GLOBALS['project_root'], 'data/MNIST/')\n pickle_path = os.path.join(data_path, 'mnist_mini_samples.pickle')\n if os.path.exists(pickle_path):\n with open(pickle_path, 'rb') as f:\n mini_samples = pickle.load(f)\n else:\n mnist = get_mnist()\n mini_samples = mnist.train.next_batch(50)\n with open(pickle_path, 'wb') as f:\n pickle.dump(mini_samples, f, pickle.HIGHEST_PROTOCOL)\n\n return mini_samples", "def codeepneat_mnist_example(_):\n # Set standard configuration specific to TFNE but not the neuroevolution process\n logging_level = logging.INFO\n config_file_path = './codeepneat_mnist_example_config.cfg'\n backup_dir_path = './tfne_state_backups/'\n max_generations = 20\n max_fitness = None\n\n # Read in optionally supplied flags, changing the just set standard configuration\n if flags.FLAGS.logging_level is not None:\n logging_level = flags.FLAGS.logging_level\n if flags.FLAGS.config_file is not None:\n config_file_path = flags.FLAGS.config_file\n if flags.FLAGS.backup_dir is not None:\n backup_dir_path = flags.FLAGS.backup_dir\n if flags.FLAGS.max_generations is not None:\n max_generations = flags.FLAGS.max_generations\n if flags.FLAGS.max_fitness is not None:\n max_fitness = flags.FLAGS.max_fitness\n\n # Set logging, parse config\n logging.set_verbosity(logging_level)\n config = tfne.parse_configuration(config_file_path)\n\n # Initialize the environment and the specific NE algorithm\n environment = tfne.environments.MNISTEnvironment(weight_training=True, config=config, verbosity=logging_level)\n ne_algorithm = tfne.algorithms.CoDeepNEAT(config)\n\n # Initialize evolution engine and supply config as well as initialized NE algorithm and evaluation environment.\n engine = tfne.EvolutionEngine(ne_algorithm=ne_algorithm,\n environment=environment,\n backup_dir_path=backup_dir_path,\n max_generations=max_generations,\n max_fitness=max_fitness)\n\n # Start training process, returning the best genome when training ends\n best_genome = engine.train()\n print(\"Best genome returned by evolution:\\n\")\n print(best_genome)\n\n # Increase epoch count in environment for a final training of the best genome. 
Train the genome and then replay it.\n print(\"Training best genome for 200 epochs...\\n\")\n environment.epochs = 20\n environment.eval_genome_fitness(best_genome)\n environment.replay_genome(best_genome)\n\n # Serialize and save genotype and Tensorflow model to demonstrate serialization\n best_genome.save_genotype(save_dir_path='./best_genome_genotype/')\n best_genome.save_model(file_path='./best_genome_model/')", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def __init__(self):\n\n TEST_RATIO = 0.05\n mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None)\n idxs = np.arange(mnist_trainset.train_data.size(0))\n np.random.shuffle(idxs)\n\n #print(torch.min(mnist_trainset.train_labels), torch.max(mnist_trainset.train_labels))\n #print(mnist_trainset.train_labels.size())\n \n # reshape input data to (1, 784) and normalize to range [0., 1.]\n self.train_data = torch.reshape(\n mnist_trainset.train_data[idxs].float(), (-1,1,28,28))/255.\n self.data_size = self.train_data.size(0)\n self.train_len = self.train_data.size(0)\n self.train_label = torch.Tensor([1]).float() # since there is only one class - 'real' image\n\n print('Train images -- {}'.format(self.train_data.size()))", "def test_setting_state_parallel(self):\n no_replicates = 25\n\n replicate(experiment, no_replicates, parallel=True, no_processes=2)\n for i in range(no_replicates):\n self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])\n self.assertEqual(state[SUBSTATE_KEY_PATTERN % i]['result'], \"bla\")", "def load_mnist(path, kind='train'):\n '''ref: http://yann.lecun.com/exdb/mnist/ '''\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n # check the offical doc to know how to extract the content\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000801(2049) magic number (MSB first)\n 0004 32 bit integer 60000 number of items\n 0008 unsigned byte ?? label\n 0009 unsigned byte ?? label\n ........\n xxxx unsigned byte ?? label\n The labels values are 0 to 9.\n '''\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000803(2051) magic number\n 0004 32 bit integer 60000 number of images\n 0008 32 bit integer 28 number of rows\n 0012 32 bit integer 28 number of columns\n 0016 unsigned byte ?? pixel\n 0017 unsigned byte ?? pixel\n ........\n xxxx unsigned byte ?? pixel\n Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means background (white), 255 means foreground (black).\n '''\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def test_machine_learning():", "def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"Y\"][\"with\"].add(\"j\")\n ex.vary[\"Y\"][\"along\"] = 0\n ex.vary[\"Z\"][\"with\"].update([\"rep\", \"j\"])\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], cmds)\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", idx * m * n, \"X_%d\" % idx], cmds)\n\n self.assertIn([\n \"dmalloc\", \"Y\", lensumrange * m * m + (lensumrange - 1) * m\n ], cmds)\n idx = random.randint(0, lensumrange - 1)\n self.assertIn([\"doffset\", \"Y\", idx * m, \"Y_%d\" % idx], cmds)\n\n self.assertIn([\"cmalloc\", \"Z\", nreps * lensumrange * n * n], cmds)\n idxrep = random.randint(0, nreps - 1)\n idxrange = random.randint(0, lensumrange - 1)\n self.assertIn([\"coffset\", \"Z\",\n (idxrep * lensumrange + idxrange) * n * n,\n \"Z_%d_%d\" % (idxrep, idxrange)], cmds)", "def get_mnist_mlp():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 64\n epochs = 4\n input_shape = (784,)\n\n # Get the data.\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.reshape(60000, 784)\n x_test = x_test.reshape(10000, 784)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def test_mnir_image():\n # Initiate the sunglint correction class\n g = deglint.GlintCorr(odc_meta_file, sub_product)\n\n # ---------------------- #\n # NIR subtraction #\n # ---------------------- #\n mnir_xarrlist = g.glint_subtraction(\n vis_bands=[\"3\"],\n corr_band=\"6\",\n water_val=5,\n )\n\n sungc_band = mnir_xarrlist[0].lmbadj_green.values # 3D array\n\n # path to expected sunglint corrected output from NIR subtraction\n exp_sungc_band = (\n data_path\n / \"MINUS_NIR\"\n / \"ga_ls8c_lmbadj_3-2-0_091086_2014-11-06_final_band03-deglint-600m.tif\"\n )\n\n # ensure that all valid sungint corrected pixels match expected\n with rasterio.open(exp_sungc_band, \"r\") as exp_sungc_ds:\n urd_band = urd(sungc_band[0, :, :], exp_sungc_ds.read(1), exp_sungc_ds.nodata)\n assert urd_band.max() < 0.001", "def test_predictor():", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)", "def get_mnist():\n from keras.datasets import mnist\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n num_classes = 10\n # the 
data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def images_example(path='train_images.pickle'):\n patch_size = (8, 8)\n\n with open('train_images.pickle', 'rb') as f:\n train_pictures = pickle.load(f)\n\n patches = sample_patches(train_pictures, psize=patch_size, n=20000)\n\n plt.figure()\n plt.imshow(train_pictures[0])\n plt.title(\"Picture Example\")\n\n plt.figure()\n for i in range(4):\n plt.subplot(2, 2, i + 1)\n plt.imshow(patches[:, i].reshape(patch_size), cmap='gray')\n plt.title(\"Patch Example\")\n plt.show()", "def make_rmnist(n=10):\n td, vd, ts = load_data()\n indices = range(50000)\n random.shuffle(indices)\n values = [(j, td[1][j]) for j in indices]\n indices_subset = [[v[0] for v in values if v[1] == j][:n]\n for j in range(10)]\n flattened_indices = [i for sub in indices_subset for i in sub]\n random.shuffle(flattened_indices)\n td0_prime = [td[0][j] for j in flattened_indices]\n td1_prime = [td[1][j] for j in flattened_indices]\n td_prime = (td0_prime, td1_prime)\n\n train_data = td_prime[0]\n train_labels = td_prime[1]\n val_data = vd[0]\n val_labels = vd[1]\n test_data = ts[0]\n test_labels = ts[1]\n\n fname = 'data/rmnist_'+str(n)\n np.savez(fname,\n train_data = train_data,\n train_labels = train_labels,\n val_data = val_data,\n val_labels = val_labels,\n test_data = test_data,\n test_labels = test_labels)", "def test_getting_state_parallel(self):\n no_replicates = 25\n replicate(experiment2, no_replicates, parallel=True, no_processes=2)\n for i in range(no_replicates):\n self.assertNotIn(SUBSTATE_KEY_PATTERN % i + '.result', state)", "def qc_sample_mip(args):\n clarity_epp.qc.sample.set_mip_data_ready(lims, args.process_id)", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n 
temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def test_ijones1_out(self):\r\n current = main('test_ijones/ijones1.in', 'test_ijones/ijones1.out')\r\n\r\n self.assertEqual(current, 5)", "def load_mnist_digits_bg(batch_size = 128):\r\n\r\n def relabel_letter_class(class_idx):\r\n excluded_letters_idx = [6,8,11,14,16]\r\n if class_idx in excluded_letters_idx:\r\n return None\r\n if class_idx >= 10:\r\n return 10\r\n\r\n\r\n\r\n background_train = torchvision.datasets.EMNIST(root='./data',\r\n train=True,\r\n download=True,\r\n split = 'letters',\r\n transform = emnist_img_transform,\r\n target_transform = relabel_letter_class)\r\n\r\n\r\n\r\n background_test = torchvision.datasets.EMNIST(root='./data',\r\n train=False,\r\n download=True,\r\n split = 'letters',\r\n transform = emnist_img_transform,\r\n target_transform = relabel_letter_class)\r\n\r\n\r\n mnist_train = torchvision.datasets.EMNIST(root='./data',\r\n train=True,\r\n download=True,\r\n split = 'mnist',\r\n transform = emnist_img_transform)\r\n\r\n\r\n\r\n mnist_test = torchvision.datasets.EMNIST(root='./data',\r\n train=False,\r\n download=True,\r\n split = 'mnist',\r\n transform = emnist_img_transform)\r\n\r\n # Discard unwanted letters from the background data\r\n background_train = discard_none_targets(background_train)\r\n background_test = discard_none_targets(background_test)\r\n\r\n # merge background data and digits data into a new data set\r\n train_ds = ConcatDataset([mnist_train,background_train])\r\n test_ds = ConcatDataset([mnist_test,background_test])\r\n\r\n\r\n # create data loaders and shuffle everything...\r\n train_dl = torch.utils.data.DataLoader(train_ds,\r\n batch_size=batch_size,\r\n shuffle=True)\r\n\r\n test_dl = torch.utils.data.DataLoader(test_ds,\r\n batch_size=batch_size,\r\n shuffle=True)\r\n\r\n return train_dl,test_dl", "def generate_nmnist_dataset(initial_size, input_dir, num_spikes, step_factor):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n images = prepare_n_mnist(filename, True, num_spikes, step_factor)\n if num_images + len(images) >= image_dataset.size:\n image_dataset = np.resize(image_dataset,\n (num_images + len(images)) * 2)\n add_images_to_dataset(image_dataset, images, num_images, i, 28, 28)\n num_images += len(images)\n\n return image_dataset[0:num_images]", "def use_mnist_model(self):\n\n\t\t# load the model\n\t\tnumber_recognizer_MNIST = load_model('models/MNIST_digits_recognition.h5', compile=False)\n\n\t\t# create empty ndarray\n\t\tnumbers_mnist = 
np.ones(shape=(self.sudoku_size, self.sudoku_size))\n\n\t\tpics = deepcopy(self.list_of_number_pictures)\n\t\tfor i in range(self.sudoku_size):\n\t\t\tfor j in range(self.sudoku_size):\n\t\t\t\tpics[i][j] = self.preprocess_cell(pics[i][j], mnist=True, resize=True, clean_remains=True)\n\t\t\t\tif self.empty_cells[i][j] != 0:\n\t\t\t\t\tnumbers_mnist[i][j] = np.argmax(number_recognizer_MNIST.predict([[pics[i][j].reshape(28,28,1)]]))\n\n\t\treturn numbers_mnist", "def test_fsnps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = fsnps(test_path)\n try:\n assert x_train.shape == (432, 10)\n except:\n shutil.rmtree(test_path)\n raise()", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def import_mnist():\n\turl_mnist = \"http://deeplearning.net/data/mnist/mnist.pkl.gz\"\n\tfile_name = \"mnist.pkl.gz\"\n\twork_directory = \"mnist\"\n\tfile_path = maybe_download(url=url_mnist, file_name=file_name, work_directory=work_directory)\n\n\timport pickle\n\twith gzip.open(file_path,'rb') as ff :\n\t\tu = pickle._Unpickler( ff )\n\t\tu.encoding = 'latin1'\n\t\ttrain, val, test = u.load()\n\t\ttrainX = np.array(train[0])\n\t\ttrainY = np.reshape(train[1], [50000, 1])\n\t\tvalX = np.array(val[0])\n\t\tvalY = np.reshape(val[1], [10000, 1])\n\t\ttestX = np.array(test[0])\n\t\ttestY = np.reshape(test[1], [10000, 1])\n\t\ttrainX = np.concatenate((trainX, valX), axis = 0)\n\t\ttrainY = np.concatenate((trainY, valY), axis = 0)\n\treturn trainX, trainY, testX, testY", "def test_train():\n set_seed(42) # Noqa\n transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n mnist_train = MNIST(\"./\", download=True, train=False, transform=transform)\n model = SimpleNet()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)\n criterion = nn.CrossEntropyLoss()\n\n train_loader = DataLoader(mnist_train, batch_size=64, shuffle=True,\n num_workers=0)\n loss, accuracy = train(model, optimizer, criterion, train_loader,\n imshape=(-1, 28*28))\n\n assert type(loss) == torch.Tensor\n assert type(accuracy) == np.float64\n assert len(loss.shape) == 0", "def get_data(numbers):\r\n numbers = numbers\r\n n_classes = len(numbers)\r\n z = zipfile.ZipFile('lab3/mnist.pkl.zip', 'r')\r\n k = z.extract('mnist.pkl') # Извлечь файл из архива\r\n with open(k, 'rb') as f:\r\n train_set, _, test_set = pickle.load(f, encoding=\"bytes\")\r\n x_train = train_set[0]\r\n x_test = test_set[0]\r\n x_train[x_train >= 0.5] = 1\r\n x_train[x_train < 0.5] = 0\r\n x_test[x_test >= 0.5] = 1\r\n x_test[x_test < 0.5] = 0\r\n y_train = train_set[1]\r\n y_test = test_set[1]\r\n idx_train = [[np.where(y_train == i)] for i in numbers]\r\n idx_test = [[np.where(y_test == i)] for i in numbers]\r\n idx_x_train = [x_train[idx_train[i][0]] for i in range(len(idx_train))]\r\n idx_x_test = [x_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n idx_y_test = 
[y_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n x_train_new = shuffle(np.concatenate(idx_x_train))\r\n x_test_new = shuffle(np.concatenate(idx_x_test))\r\n y_test_new = shuffle(np.concatenate(idx_y_test))\r\n return x_train_new, x_test_new, y_test_new, numbers, n_classes", "def test_get_random_voxels(self):\n n_vox = 100\n rand_vox = reduce.get_random_voxels(self.dataset, n_vox)\n n_studies = self.dataset.image_table.data.shape[1]\n self.assertEqual(rand_vox.shape, (n_vox, n_studies))", "def run_experiments(data_set=\"\",compact=2,exp_name=\"\",x=\"\"):\n cwd = os.getcwd()\n results_path = cwd+'/data/geometric/'+exp_name+'_'\n dataset_path = cwd+'/data/geometric/'+data_set+'_'\n compact = int(compact)\n if \"m\" in x:\n # make new pxy\n pxy, Xdata, groups = gen_geometric_pxy()\n np.save(dataset_path+'Xdata',Xdata)\n np.save(dataset_path+'groups',groups)\n np.save(dataset_path+'pxy',pxy)\n else:\n # load existing pxy\n pxy = np.load(dataset_path+'pxy.npy')\n if \"r\" in x: # regular experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n if \"ip\" in x: # initialization experiments - positive p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n if \"in\" in x: # initialization experiments - negative p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n 
metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n if \"c\" in x: # convergence tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n if \"z\" in x: # zeroL tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_zeroLtol(pxy,compact)\n 
metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n if \"b\" in x: # trying proposed optimal beta\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n return 0", "def train_mnist():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = MNIST_TRAIN(path=Config.video_folder)\r\n model = LSAMNIST(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='mnist.txt')\r\n helper.train_one_class_classification()", "def test_X_train_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_train.equals(atom.mnb.X_train)\n assert check_scaling(atom.lr.X_train)", "def test_10_test_model(self, example):\n res = example.calc_model()\n print(example.trips_ij)\n total_trips_target = example.persons_gi.sum()\n total_trips_actual = example.trips_ij.sum()\n np.testing.assert_almost_equal(total_trips_target, total_trips_actual)", "def test_X_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X.equals(atom.mnb.X)\n assert check_scaling(atom.lr.X)", "def test_metric_learning(smote_class):\n nn_params = {'metric': 'precomputed',\n 'metric_learning_method': 'ITML'}\n X, y = smote_class(nn_params=nn_params).sample(dataset['data'],\n dataset['target'])\n\n assert np.unique(y).shape[0] == 2\n assert X.shape[0] > 0", "def test_dataset_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.dataset.equals(atom.mnb.dataset)\n assert check_scaling(atom.lr.dataset)", "def test_mnist_valid():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_valid_fast'\n else:\n yaml_file = 'mnist_valid'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n 
mode.check_py_code = old_value", "def test_patches(args, model, device): \n if args.val_patches:\n test_path = config.data_path + '/val/original/'\n elif args.test_patches:\n test_path = config.data_path + '/test/original/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))]) // config.batch_size * config.batch_size\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n for start in range(0, test_image_num, config.batch_size):\n end = min(start + config.batch_size, test_image_num)\n if args.val_patches:\n test_original, test_style = load_test_dataset_patches('val', config.data_path, start, end,\n config.height * config.width * config.channels) \n elif args.test_patches:\n test_original, test_style = load_test_dataset_patches('test', config.data_path, start, end,\n config.height * config.width * config.channels)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(-1, config.height, config.width, config.channels).permute(0, 3, 1, 2).to(device)\n y_real = y_real.view(-1, config.height, config.width, config.channels).permute(0, 3, 1, 2).to(device)\n\n y_fake = model.gen_g(x)\n\n # Calculate PSNR & SSIM scores\n score_psnr += psnr(y_fake, y_real) * config.batch_size\n\n y_fake_np = y_fake.detach().cpu().numpy().transpose(0, 2, 3, 1)\n y_real_np = y_real.cpu().numpy().transpose(0, 2, 3, 1)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += (temp_ssim * config.batch_size)\n\n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim * config.batch_size\n\n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5) * config.batch_size\n print('PSNR & SSIM scores of {} images are calculated.'.format(end))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def generate_nmnist_continuous_dataset(initial_size, input_dir):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n image = prepare_n_mnist_continuous(filename, False, False)\n if num_images + 1 >= image_dataset.size:\n image_dataset = np.resize(image_dataset, (num_images * 2))\n add_images_to_dataset(image_dataset, image, num_images, i, 28, 28)\n num_images += 1\n\n return image_dataset[0:num_images]", "def __init__(self, xspan, yspan, pop_size):\r\n # width of seed on the x-axis\r\n self.xspan = xspan \r\n # height of seed on the y-axis\r\n self.yspan = yspan \r\n # initial seed of zeros, to be modified later\r\n self.cells = np.zeros((xspan, yspan), dtype=np.int) \r\n # initial history of zeros\r\n self.history = np.zeros(pop_size, dtype=np.float) \r\n # initial similarities of zeros\r\n self.similarities = np.zeros(pop_size, dtype=np.float) \r\n # position of seed in the population 
array, to be modified later\r\n self.address = 0", "def wild_test(img, mod):\n img = cv2.imread(img)\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img_resize = cv2.resize(img_gray, (28, 28))\n img_resize = img_resize.reshape((1, 28, 28))\n print (\"Image size\", img_resize.shape)\n # it is ugly, you can make this much better\n data = np.asarray([img_resize]*100)\n test_iter = mx.io.NDArrayIter(data, None, 100)\n prob = mod.predict(test_iter)\n print (\"The prediction is :\", np.argmax(prob.asnumpy()[0]))", "def compare_n_neurons(self, title: str, model: SVMclassifier, neurons:list=list(range(5, 101, 5))):\n macro = []\n micro = []\n weighted = []\n macro_r = []\n micro_r = []\n weighted_r = []\n\n for n in neurons:\n\n # normal labels\n d = Data(self.populations, self.path + str(n))\n d.split_trial_wise()\n d.use_SMOTE()\n X, x, Y, y = d.get_data()\n model.set_data(X, x, Y, y)\n model.train()\n mi, ma, weigth = model.predict()\n macro.append(ma)\n micro.append(mi)\n weighted.append(weigth)\n \n # Randomized labels\n d = Data(self.populations, self.path + str(n))\n d.split_trial_wise()\n d.shuffle_labels()\n d.use_SMOTE()\n X, x, Y, y = d.get_data()\n model.set_data(X, x, Y, y)\n model.train()\n mi, ma, weigth = model.predict()\n macro_r.append(ma)\n micro_r.append(mi)\n weighted_r.append(weigth)\n\n plt.plot(neurons,macro, marker = 'o', color='#f70d1a', label=\"Macro F1\")\n plt.plot(neurons,micro, marker = 'x', color='#08088A', label=\"Micro F1\")\n plt.plot(neurons,weighted, marker = '+', color='#FFBF00', label=\"weighted F1\")\n plt.plot(neurons,macro_r, marker = 'o', color='#f70d1a', label=\"Macro F1 (random)\", linestyle = '--')\n plt.plot(neurons,micro_r, marker = 'x', color='#08088A', label=\"Micro F1 (random)\", linestyle = '--')\n plt.plot(neurons,weighted_r, marker = '+', color='#FFBF00', label=\"weighted F1 (random)\", linestyle = '--')\n plt.xlabel(\"#Neurons\")\n plt.xticks(neurons)\n plt.ylabel(\"F1-Scores\")\n plt.ylim([0, 1])\n plt.legend(loc=\"upper left\")\n plt.title(title)\n plt.show()", "def test_n_iris(self):\r\n n = NeuronNetwork(1,\r\n [3],\r\n [[[0.2,0.2,0.2,0.2]]*3],\r\n [[-1.0,-1.0,-1.0]],learningRate=0.3)\r\n print(n)\r\n \r\n data = load_iris()\r\n\r\n inputs = data.data\r\n target = []\r\n for x in data.target:\r\n empty = [0,0,0]\r\n empty[x] = 1\r\n target.append(empty)\r\n \r\n n.train(inputs, target, 2000, 10*60)\r\n print(n)\r\n\r\n total = 0\r\n error = 0\r\n for i, x in enumerate(target, 0):\r\n out = n.feed_forward(inputs[i])\r\n if i < 50:\r\n error += self.mse(out, [1,0,0])\r\n if np.argmax(out) == 0:\r\n total +=1\r\n print(i, out, 1)\r\n elif i >= 50 and i < 100:\r\n error += self.mse(out, [0,1,0])\r\n if np.argmax(out) == 1:\r\n total +=1\r\n print(i, out, 2)\r\n elif i >= 100 and i < 150:\r\n error += self.mse(out, [0,0,1])\r\n if np.argmax(out) == 2:\r\n total +=1\r\n print(i, out, 3)\r\n\r\n print(f'MSE: {error/150}, RMSE:{math.sqrt(error/150)}')\r\n print(f'Accuracy:{total/len(target)}')", "def fetch_multimnist_image(label):\n dataset = MultiMNIST('./data', train=False, download=True,\n transform=transforms.ToTensor(),\n target_transform=charlist_tensor)\n images = dataset.test_data\n labels = dataset.test_labels\n n_rows = len(images)\n\n images = []\n for i in xrange(n_rows):\n image = images[i]\n text = labels[i]\n if tensor_to_string(text.squeeze(0)) == label:\n images.append(image)\n\n if len(images) == 0:\n sys.exit('No images with label (%s) found.' 
% label)\n\n images = torch.cat(images).cpu().numpy()\n ix = np.random.choice(np.arange(images.shape[0]))\n image = images[ix]\n image = torch.from_numpy(image).float() \n image = image.unsqueeze(0)\n return Variable(image, volatile=True)", "def test_random_multi_image():\n\n shap.image_plot([np.random.randn(3, 20, 20) for i in range(3)], np.random.randn(3, 20, 20), show=False)", "def testSample(self):\n val_idx = np.load(self.validation_set)\n val_sampler = SubsetRandomSampler(val_idx)\n pose_dataset = PoseDataset(self.dataset_path)\n val_loader = DataLoader(dataset=pose_dataset, batch_size=1,\\\n sampler=val_sampler)\n for i in range(5):\n data_iter = iter(val_loader)\n skel_2d, skel_z = next(data_iter)\n\n # inference\n skel_2d = skel_2d.to(self.device)\n z_out = self.net(skel_2d)\n\n # show\n skel_2d = skel_2d.cpu().numpy()\n skel_2d = skel_2d.reshape((2, -1), order='F') # [(x,y) x n_joint]\n z_out = z_out.detach().cpu().numpy()\n z_out = z_out.reshape(-1)\n z_gt = skel_z.numpy().reshape(-1)\n self.show_skeletons(skel_2d, z_out, z_gt)", "def prepare_n_mnist(filename, is_filter, num_spikes, step_factor=1):\n td = ev.read_dataset(filename)\n # td.show_td(100)\n td.data = stabilize(td)\n td.data = td.extract_roi([3, 3], [28, 28], True)\n images = make_td_images(td, num_spikes, step_factor)\n\n if is_filter:\n images = ndimage.median_filter(images, 3)\n\n # for image in images:\n # cv2.imshow('img', image)\n # cv2.waitKey(70)\n return images", "def test_multi_experiment_nan():\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_script = get_functionality_script(output_directory=tmp_dir,\n number_of_iter=2,\n experiment_repeats=2,\n number_nan_repeats=2)\n exp_builder = ExperimentBuilder(yaml_script)\n # This should run correctly and not raise errors\n exp_builder.run_experiments()", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_on_all(self) -> None:\n x_test, y_test = self.mnist.test.images, 
self.mnist.test.labels\n N = self.mnist.test.num_examples\n\n # I have replaced all -1 with self.mb_size to be sure about exact shapes of all layers.\n assert N % self.mb_size == 0,\\\n \"Sorry, mb_size must divide the number of images in test set\"\n\n results = np.array([0., 0.])\n for batch_no in range(N // self.mb_size):\n beg = batch_no * self.mb_size\n end = min(N, (batch_no + 1) * self.mb_size)\n len_batch = end - beg\n batch_results = np.array(self.test_on_batch(x_test[beg:end], y_test[beg:end]))\n results += batch_results * len_batch\n results /= N\n self.logger.info(\"(Test(final): Loss: {0[0]}, accuracy: {0[1]}\".format(results))", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def test_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.test.equals(atom.mnb.test)\n assert check_scaling(atom.lr.test)", "def __init__(self, sim_size=20, test_size=0.25):\n super().__init__(sim_size, test_size)\n self.setting = 'n neighbors'\n self.leaf_size = 30\n self.p = 2\n self.metric = 'minkowski'", "def load_mnist(path='mnist/mnist.npz'):\n\n with np.load(path) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n x_train = x_train.astype(np.float32) / 255.\n y_train = y_train.astype(np.int32)\n x_test = x_test.astype(np.float32) / 255.\n y_test = y_test.astype(np.int32)\n \n return (x_train, y_train), (x_test, y_test)", "def main():\n # Import or download the mnist data, from target file path.\n mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n # Train and test model.\n train(mnist)", "def load_mnist_dataset(shape=(-1,784)):\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(shape)\n # data = data.reshape(-1, 1, 28, 28) # for lasagne\n # data = data.reshape(-1, 28, 28, 1) # for tensorflow\n # data = data.reshape(-1, 784) # for tensorflow\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n ## you may want to change the path\n data_dir = '' #os.getcwd() + '/lasagne_tutorial/'\n # 
print('data_dir > %s' % data_dir)\n\n    X_train = load_mnist_images(data_dir+'train-images-idx3-ubyte.gz')\n    y_train = load_mnist_labels(data_dir+'train-labels-idx1-ubyte.gz')\n    X_test = load_mnist_images(data_dir+'t10k-images-idx3-ubyte.gz')\n    y_test = load_mnist_labels(data_dir+'t10k-labels-idx1-ubyte.gz')\n\n    # We reserve the last 10000 training examples for validation.\n    X_train, X_val = X_train[:-10000], X_train[-10000:]\n    y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n    ## you may want to plot one example\n    # print('X_train[0][0] >', X_train[0][0].shape, type(X_train[0][0]))    # for lasagne\n    # print('X_train[0] >', X_train[0].shape, type(X_train[0]))    # for tensorflow\n    # # exit()\n    # # [[..],[..]] (28, 28) numpy.ndarray\n    # # plt.imshow only supports (28, 28), not (1, 28, 28), hence the [0][0]\n    # fig = plt.figure()\n    # #plotwindow = fig.add_subplot(111)\n    # # plt.imshow(X_train[0][0], cmap='gray')      # for lasagne (-1, 1, 28, 28)\n    # plt.imshow(X_train[0].reshape(28,28), cmap='gray')    # for tensorflow (-1, 28, 28, 1)\n    # plt.title('A training image')\n    # plt.show()\n\n    # We just return all the arrays in order, as expected in main().\n    # (It doesn't matter how we do this as long as we can read them again.)\n    return X_train, y_train, X_val, y_val, X_test, y_test", "def __init__(self, sim_size=20, test_size=0.25):\n        super().__init__(sim_size, test_size)\n        self.setting = 'n neighbors'", "def create_mnist_dataset(mode='train', num_samples=2, batch_size=2):\n    mnist_path = '/home/workspace/mindspore_dataset/mnist'\n    num_parallel_workers = 1\n\n    # define dataset\n    mnist_ds = ds.MnistDataset(os.path.join(mnist_path, mode), num_samples=num_samples, shuffle=False)\n\n    resize_height, resize_width = 32, 32\n\n    # define map operations\n    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Bilinear mode\n    rescale_nml_op = CV.Rescale(1 / 0.3081, -1 * 0.1307 / 0.3081)\n    rescale_op = CV.Rescale(1.0 / 255.0, shift=0.0)\n    hwc2chw_op = CV.HWC2CHW()\n    type_cast_op = C.TypeCast(mstype.int32)\n\n    # apply map operations on images\n    mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n    mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n    mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n    mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n    mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n\n    # apply DatasetOps\n    mnist_ds = mnist_ds.batch(batch_size=batch_size, drop_remainder=True)\n\n    return mnist_ds", "def test_mlp():\r\n    datasets = gen_data()\r\n\r\n    train_set_x, train_set_y = datasets[0]\r\n    valid_set_x, valid_set_y = datasets[1]\r\n    test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n    batch_size = 100    # size of the minibatch\r\n\r\n    # compute number of minibatches for training, validation and testing\r\n    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n    ######################\r\n    # BUILD ACTUAL MODEL #\r\n    ######################\r\n    #print '... 
building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def __init__(self, data_dir, input_seq_length=10, target_seq_length=10, as_binary=False):\n assert input_seq_length + target_seq_length <= 20, \"The maximum total test sequence length is 20.\"\n \n try:\n filepath = light.utils.data.download(MNIST_TEST_URL, data_dir)\n print(\"Loading MNIST test set from numpy-array. This might take a while...\")\n data = np.load(filepath)\n data = np.float32(data)\n except:\n print 'Please set the correct path to the dataset. 
Might be caused by a download error.'\n sys.exit()\n\n # introduce channel dimension\n data = np.expand_dims(data, axis=4)\n \n # use value scale [0,1]\n data = data / 255.0 \n \n if as_binary:\n self._data = light.utils.data.as_binary(data)\n else:\n self._data = data\n \n dataset_size = data.shape[0]\n self._row = 0\n \n super(MovingMNISTTestDataset, self).__init__(data_dir, dataset_size, input_shape=[input_seq_length, 64, 64, 1],\n target_shape=[target_seq_length, 64, 64, 1])", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def test_sampling(self):\n dim = Fidelity(\"epoch\", 1, 2)\n assert dim.sample() == [2]\n dim = Fidelity(\"epoch\", 1, 5)\n assert dim.sample() == [5]\n dim = Fidelity(\"epoch\", 1, 5)\n assert dim.sample(4) == [5] * 4", "def test_clone_scenario(self):\n pass", "def test_Gaussian_NB_estimators():" ]
[ "0.664523", "0.6479431", "0.6476196", "0.64748347", "0.64239633", "0.64103997", "0.6363367", "0.6277", "0.62729543", "0.61764646", "0.60306984", "0.596759", "0.5963632", "0.5930057", "0.58732194", "0.58112574", "0.58030224", "0.5776231", "0.57753116", "0.56806064", "0.5629193", "0.5615691", "0.5602601", "0.55982304", "0.55744094", "0.5548192", "0.55394477", "0.55367374", "0.5528279", "0.5498519", "0.5495525", "0.54662037", "0.54486316", "0.54254085", "0.54196984", "0.54130906", "0.54093075", "0.5399506", "0.53477764", "0.5339651", "0.5331946", "0.53286535", "0.5318302", "0.5313433", "0.53078204", "0.5307298", "0.5300769", "0.52974904", "0.52954614", "0.52939063", "0.52703404", "0.5266537", "0.52552783", "0.52483034", "0.52422434", "0.5238574", "0.5230442", "0.52290446", "0.5222727", "0.52132416", "0.5209902", "0.52093846", "0.5207585", "0.52057695", "0.52010214", "0.51991594", "0.5190039", "0.51871353", "0.5181615", "0.51772016", "0.51718724", "0.5166818", "0.5162099", "0.5160019", "0.51568127", "0.51519144", "0.5149172", "0.5148296", "0.51322716", "0.5131214", "0.5130828", "0.51291835", "0.5128477", "0.5127354", "0.51227313", "0.51213235", "0.5120069", "0.5112548", "0.5110818", "0.5101778", "0.51012594", "0.50981873", "0.50956833", "0.5093646", "0.50928605", "0.5092029", "0.50904316", "0.5083519", "0.50777215", "0.50775456" ]
0.7531117
0
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train(self):
    filename = "mnist_rts.py --replication-factor 2 --rts"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_rts_train_test(self):\n        filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n    mndata = MNIST(MNIST_PATH)\n    test_ims, test_labels = mndata.load_testing()\n    test_X = np.array(test_ims).T\n    test_y = np.array(test_labels).T\n    return test_X, test_y", "def test_keras_mnist():\n    data = fetch(\"mnist\")\n    check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def main():\n\n    os.system(\"rm -rf images; mkdir images\")\n\n    if (len(sys.argv) > 1):\n        N = int(sys.argv[1])\n    else:\n        N = 10\n\n    x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n    for i in range(N):\n        r,c = random.randint(6,12), random.randint(6,12)\n        g = np.zeros(r*c)\n        for j in range(r*c):\n            if (random.random() < 0.15):\n                g[j] = 1\n        g = g.reshape((r,c))\n        g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n        img = np.zeros((28*r,28*c), dtype=\"uint8\")\n        for x in range(r):\n            for y in range(c):\n                if (g[x,y] == 1):\n                    n = random.randint(0, x_test.shape[0])\n                    im = x_test[n]\n                    img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n        \n        Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def test_documentation_popxl_mnist(self):\n        filename = \"mnist.py\"\n        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_mnist(args):\n    # type: () -> None\n\n    # Build dataset and model\n    dataset = MNIST(path=args.path)\n    model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n    # Set up result helper and perform test\n    helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n    helper.test_one_class_classification()", "def test_documentation_popxl_mnist_replication_train(self):\n        filename = \"mnist_rts.py --replication-factor 2\"\n        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_keras_mnist_return_X_y():\n    X, y = fetch(\"mnist\", return_X_y=True)\n    assert X.shape == (70000, 28 * 28)\n    assert y.shape == (70000,)", "def test_show_examples():\n    skip_if_no_matplotlib()\n    skip_if_no_data()\n    with open('temp.yaml', 'w') as f:\n        f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n    which_set: 'train'\n}\n\"\"\")\n    show_examples('temp.yaml', 28, 28, out='garbage.png')\n    os.remove('temp.yaml')", "def test_dataset():\n    X,Y = get_MNIST_training_normalized()\n    digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n    digits_test = []\n    for example in itertools.islice(X,30):\n        digits_test.append(sum(example[1:100]))\n        assert(example.shape == (28*28,))\n\n    show_as_image(X[0,:], 28, 28)\n    print digits_test\n    print digits_test_truth\n    assert(digits_test_truth == digits_test)\n    assert(X.shape == (60000, 28*28))\n    assert(Y.shape == (60000,))\n    return \"Dziala :)\"", "def test_rand(self):\n        assert len(self._mnist.random()[:5]) == 5\n        pass", "def test_neuron(self):\r\n        # create a 1-D list (horizontal, the inputs).\r\n        Z = [1, 2, 3]\r\n        # create a 1-D list (vertical, the network weights).\r\n        W = [10, 20, 30]\r\n        # Initialize the neuron and get the value it takes given W * Z\r\n        # X(k) = W * Z\r\n        result = rhonn(W, Z).predict()\r\n        # Check the result \r\n        self.assertEqual(result, 140)", "def create_mnistm(X: Any) -> Any:\n\n    bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n    rand = np.random.RandomState(42)\n    train_files = []\n\n    with 
tarfile.open(bst_path, \"r\") as bsr_file:\n        for name in bsr_file.getnames():\n            if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n                train_files.append(name)\n\n        # extract the backgrounds while the archive is still open\n        print(\"Loading BSR training images\")\n        background_data = []\n        for name in train_files:\n            try:\n                fp = bsr_file.extractfile(name)\n                bg_img = skimage.io.imread(fp)\n                background_data.append(bg_img)\n            except:\n                continue\n\n    X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n    for i in range(X.shape[0]):\n        if i % 1000 == 0:\n            print(\"Processing example\", i)\n\n        bg_img = rand.choice(background_data)\n        d = mnist_to_img(X[i])\n        d = compose_image(d, bg_img)\n        X_[i] = d\n\n    return X_", "def test_load_data(self):\n        assert len(self._mnist.get()) == 10\n        assert self._mnist.get()[0].label == 7\n        pass", "def test_get_mnist_data(self):\n        # TODO: Remove once get_mnist_data(...) is fixed.\n        pass\n        # mnist = get_mnist_data()\n        # self.assertEqual(len(mnist.data), 60000)\n        # self.assertEqual(len(mnist.labels), 60000)", "def test_predictor():", "def test_run_sim():\n    rnd = rand.Arrivals(31, 40)\n    sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_run_sim_1():\n    rnd = rand.Arrivals(36, 41)\n    sim.run_sim(3, 2, 5, 6, 22, rnd)", "def MNIST_experiment():\n    tsetlin_machine = TsetlinMachine(number_clauses=1000,\n                                     number_action_states=1000,\n                                     precision=3.0,\n                                     threshold=10)\n\n    X, y, val_X, val_y = MNIST()\n\n    tsetlin_machine.fit(X, y, val_X, val_y, 300)\n    print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n    print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def run(prefix):\n  # run_tests.assert_folder_is_empty(prefix=prefix)\n  xrs_good,xrs_poor,f_obs,r_free_flags = run_tests.setup_helix_example()\n  # pdb_inp = os.path.join(qr_unit_tests,\"data_files\",\"2lvr.pdb\")\n  r = run_tests.run_cmd(prefix,\n    args = [\"restraints=cctbx\",\"mode=gtest\",\"g_scan=20\",\"g_mode=1\"],\n    pdb_name = 'm00_poor.pdb', mtz_name='')\n  assert os.path.isfile('1-20.npy')", "def test_machine_learning():", "def try4():\n    path = '/Users/mayankkejriwal/git-projects/bioExperiments/tsne_python/'\n    mnist = path+'mnist2500_X.txt'\n    X = numpy.loadtxt(mnist)\n    labels = numpy.loadtxt(path+\"mnist2500_labels.txt\")\n    Y = tsne.tsne(X, 2, 50, 20.0)\n    pylab.scatter(Y[:,0], Y[:,1], 20, labels)\n    pylab.show()", "def main():\n    # Import or download the mnist data, from target file path.\n    mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n    # Train and test model.\n    train(mnist)", "def test_X_test_property():\n    atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n    atom.run([\"MNB\", \"LR\"])\n    assert atom.X_test.equals(atom.mnb.X_test)\n    assert check_scaling(atom.lr.X_test)", "def test_readme_minimal():\n    # Data sampler that generates balanced batches from MNIST dataset\n    sampler = TFDatasetMultiShotMemorySampler(\n        dataset_name='mnist',\n        classes_per_batch=10\n    )\n\n    # Build a Similarity model using standard Keras layers\n    inputs = layers.Input(shape=(28, 28, 1))\n    x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n    x = layers.Conv2D(64, 3, activation='relu')(x)\n    x = layers.Flatten()(x)\n    x = layers.Dense(64, activation='relu')(x)\n    outputs = MetricEmbedding(64)(x)\n\n    # Build a specialized Similarity model\n    model = SimilarityModel(inputs, outputs)\n\n    # Train Similarity model using contrastive loss\n    model.compile('adam', loss=MultiSimilarityLoss())\n    model.fit(sampler, epochs=5)\n\n    # Index 100 embedded MNIST examples to make them searchable\n    sx, sy = sampler.get_slice(0, 100)\n    model.index(x=sx, y=sy, data=sx)\n\n    # Find the top 5 
most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def test_active_inference_SPM_1b(self):", "def main(): \n symbolic_sample()\n print 'Done.'", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)", "def main():\n ex = Experiment(SEED)\n ex.main()", "def test_star():\n test_path = tempfile.mkdtemp()\n x_train, metadata = star(test_path)\n try:\n assert x_train.shape == (5748, 8)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def run_experiments(data_set=\"\",compact=2,exp_name=\"\",x=\"\"):\n cwd = os.getcwd()\n results_path = cwd+'/data/geometric/'+exp_name+'_'\n dataset_path = cwd+'/data/geometric/'+data_set+'_'\n compact = int(compact)\n if \"m\" in x:\n # make new pxy\n pxy, Xdata, groups = gen_geometric_pxy()\n np.save(dataset_path+'Xdata',Xdata)\n np.save(dataset_path+'groups',groups)\n np.save(dataset_path+'pxy',pxy)\n else:\n # load existing pxy\n pxy = np.load(dataset_path+'pxy.npy')\n if \"r\" in x: # regular experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, 
distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n if \"ip\" in x: # initialization experiments - positive p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n if \"in\" in x: # initialization experiments - negative p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n if \"c\" in x: # convergence tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n 
metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n if \"z\" in x: # zeroL tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n if \"b\" in x: # trying proposed optimal beta\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n return 0", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def mnist(path=None):\r\n url = 
'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def test_mlp():\r\n datasets = gen_data()\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n batch_size = 100 # size of the minibatch\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n #print '... 
building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 
'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def testvis(layers='first'):\n\n\tfrom scipy.io import loadmat\n\tfrom setup import NeuralNetwork as nnsetup\n\n\tresult = loadmat(\"goodmatx.mat\")\n\tw1 = result['v1']\n\tw0 = result['v0']\n\tx,y = result['train_x'], result['train_y']\n\n\t# result = loadmat(\"ducky.mat\")\n\t# x = result['train_x']\n\t# y = result['train_y']\n\n\tsize = [x.shape[1], 1000, y.shape[1]]\n\n\tnn = nnsetup([size[0],size[1],size[0]],output='sigm')\n\n\tnn.W[0] = w0\n\tnn.W[1] = w1\n\t\n\tfor i in range(50):\n\t\tvisualize( nn, x, k = 3000+i*4, layers=layers, mode='save' )", "def test_generate_nb_testing(self):\n pass", "def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n 
#test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def test_genx(nsd, backend):\n # NCOLS of data:\n # 2 - test kernel only\n # 3 - test kernel and chi2 calculation\n # 4 - test resolution smearing and chi2 calculation\n\n test_name, slabs, data = nsd\n\n kernel_test(slabs, data, backend)", "def import_mnist():\n\turl_mnist = \"http://deeplearning.net/data/mnist/mnist.pkl.gz\"\n\tfile_name = \"mnist.pkl.gz\"\n\twork_directory = \"mnist\"\n\tfile_path = maybe_download(url=url_mnist, file_name=file_name, work_directory=work_directory)\n\n\timport pickle\n\twith gzip.open(file_path,'rb') as ff :\n\t\tu = pickle._Unpickler( ff )\n\t\tu.encoding = 'latin1'\n\t\ttrain, val, test = u.load()\n\t\ttrainX = np.array(train[0])\n\t\ttrainY = np.reshape(train[1], [50000, 1])\n\t\tvalX = np.array(val[0])\n\t\tvalY = np.reshape(val[1], [10000, 1])\n\t\ttestX = np.array(test[0])\n\t\ttestY = np.reshape(test[1], [10000, 1])\n\t\ttrainX = np.concatenate((trainX, valX), axis = 0)\n\t\ttrainY = np.concatenate((trainY, valY), axis = 0)\n\treturn trainX, trainY, testX, testY", "def test_fsnps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = fsnps(test_path)\n try:\n assert x_train.shape == (432, 10)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_1():\n constr = dict()\n constr['maxfun'] = np.random.randint(1, 5 + 1)\n\n get_random_init(constr)\n simulate('test.trempy.ini')\n estimate('test.trempy.ini')", "def 
test_documentation_popxl_addition_variable(self):\n        filename = \"tensor_addition.py\"\n        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_10_test_model(self, example):\n        res = example.calc_model()\n        print(example.trips_ij)\n        total_trips_target = example.persons_gi.sum()\n        total_trips_actual = example.trips_ij.sum()\n        np.testing.assert_almost_equal(total_trips_target, total_trips_actual)", "def main():\n\n    dataset = ConvMNIST(64)\n    print(dataset.get_train().x.shape)\n\n\n    inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n    targets = Value(type=tf.int64, shape=(None), cls = 10)\n    learning_rate = 0.0001\n\n    fc_hidden = [1024, 500]\n    c_h = [\n        (3, 3, 1, 32),\n        (3, 3, 32, 64)\n    ]\n    conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n    config = Config(inputs, targets, conv_hidden, learning_rate)\n\n    network = ConvNetworkBuilder(config)\n    hidden = FFConvHiddenBuilder()\n    _ = network.build_network(hidden)\n\n\n    train_config = TrainerConfig(\n        epochs = EPOCHS, display_after = DISPLAY_STEP, \n        keep_prob = KEEP_PROB,checkpoint_path=None, \n        summary_path=None\n    )\n\n    trainer = Trainer(network, train_config)\n    trainer.train(dataset)", "def main(_):\n  description = xm.ExperimentDescription(\n      'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n  experiment = build_experiment()\n  xm.launch_experiment(description, experiment)", "def run_example(num_points_to_sample=1000, verbose=True, **kwargs):\n\n    exp = Experiment([[1, 52], [0, 6], [1, 52]])  # 2D experiment, we build a tensor product domain\n    # Bootstrap with some known or already sampled point(s)\n    exp.historical_data.append_sample_points([\n        SamplePoint([26, 2, 46], get_fitness([26, 2, 35]), 0.5),  # Iterables of the form [point, f_val, f_var] are also allowed\n        ])\n    # Sample num_points_to_sample points\n    for i in range(num_points_to_sample):\n        # Use MOE to determine what is the point with highest Expected Improvement to use next\n        next_point_to_sample = map(round, gp_next_points(exp, **kwargs)[0])  # in [A, X, B] form, rounded integers\n        value_of_next_point = get_fitness(next_point_to_sample)\n\n        if verbose:\n            if in_results(next_point_to_sample):\n                print '***', \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point), '***'\n            else:\n                print \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point)\n\n        bank[i,0:3] = next_point_to_sample\n        bank[i,3] = value_of_next_point\n        # Add the information about the point to the experiment historical data to inform the GP\n        exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)])  # We can add some noise", "def MNIST_data():\n\n    # Fetch the numpy matrices with the digits\n    # images[i,j,k] <=> pixel (j,k) of the i-th image in the dataset\n    images, labels = get_MNIST_dataset(range(10), \"training\") #the first argument is\n\n    # a) Number of examples and data dimensions\n    print \"Raw training data dimensions \", images.shape\n    print \"Labels dimensions \",labels.shape\n\n    # b) How many 2s are there?\n    print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n    # c) What is the average image of a 2? (Averaging all the matrices that are 2s)\n\n    #1. Grab all the twos, a neat way of indexing\n    print labels == 2\n    only_2 = images[labels == 2, :, :]\n    print \"Checking number of 2s \", only_2.shape\n\n    #2. TODO: Averaging (matrix.mean may come in handy)\n\n    #3. 
TODO: plot the averaged digit (see pl.imshow)\n\n    # d) Finally - reshape the last digit into a 1-dimensional vector\n    vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n    print \"Vectorized last digit \", vectorized", "def test_menhinick(self):\n        self.assertEqual(menhinick(self.TestData), 9/sqrt(22))", "def test_svm():\n    backend = BasicAer.get_backend('statevector_simulator')\n    random_seed = r.randint(1, 10598)\n\n    quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n    # iris\n    pres = \"Test pour le data set Iris (facile, classique)\"\n    test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n    # breast cancer\n    pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n    test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n    # digits (it's long so be careful)\n    #pres = \"Test pour le data set Digits (difficile, classique)\"\n    #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n    # wine\n    pres = \"Test pour le data set Wine (moyen, classique)\"\n    test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n    # gaussian\n    pres = \"Test pour des données gaussiennes (moyen, classique)\"\n    for _ in range(1):\n        print(\"\\n\")\n        print(\"New iteration\")\n        test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n        print(\"\\n\")\n\n    # small adn strings\n    pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n    test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)", "def test(ndigit, elambda, showSamples, showConfusion):\n    Data, Label = getData()\n    trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n    trainX_mean = np.mean(trainX, axis=0)\n    trainX_new = trainX - trainX_mean\n    eigenvectors = getEigenVectors(trainX_new, elambda)\n    trainX_eigen = trainX_new.dot(eigenvectors)\n    testX_new = testX - trainX_mean\n    testX_eigen = testX_new.dot(eigenvectors)\n    testO = []\n    if showSamples:\n        correct_samples = []\n        correct_samples_nearest = []\n        correct_samples_eigen = []\n        correct_samples_nearest_eigen = []\n        correct_samples_labels = []\n        correct_samples_predictions = []\n        wrong_samples = []\n        wrong_samples_nearest = []\n        wrong_samples_eigen = []\n        wrong_samples_nearest_eigen = []\n        wrong_samples_labels = []\n        wrong_samples_predictions = []\n    if showConfusion:\n        conf = np.zeros((ndigit, ndigit))\n    for i in xrange(testX_eigen.shape[0]):\n        t = testX_eigen[i]\n        j = getNearestSampleIndex(t, trainX_eigen)\n        p = int(trainY[j])\n        y = int(testY[i])\n        if showConfusion:\n            conf[p, y] += 1\n        if showSamples:\n            if p == y:\n                if len(correct_samples) < y + 1:\n                    correct_samples.append(testX[i])\n                    correct_samples_nearest.append(trainX[j])\n                    correct_samples_eigen.append(testX_eigen[i])\n                    correct_samples_nearest_eigen.append(trainX_eigen[j])\n                    correct_samples_labels.append(y)\n                    correct_samples_predictions.append(p)\n            else:\n                if len(wrong_samples) < y + 1:\n                    wrong_samples.append(testX[i])\n                    wrong_samples_nearest.append(trainX[j])\n                    wrong_samples_eigen.append(testX_eigen[i])\n                    wrong_samples_nearest_eigen.append(trainX_eigen[j])\n                    wrong_samples_labels.append(y)\n                    wrong_samples_predictions.append(p)\n        testO.append(p)\n    testO = np.array(testO)\n    train0 = []\n    for i in xrange(trainX_eigen.shape[0]):\n        t = trainX_eigen[i]\n        j = getNearestSampleIndex(t, trainX_eigen)\n        min_class = trainY[j]\n        train0.append(min_class)\n    train0 = np.array(train0)\n    print \"for digits = %d lambda = %.2f train = %.6f test = %.6f \" % (\n        ndigit, elambda, (train0 == 
trainY).mean(), (testO == testY).mean())\n if showConfusion:\n print conf\n if showSamples:\n displaySamples(correct_samples_labels, correct_samples_predictions,\n correct_samples, correct_samples_nearest,\n correct_samples_eigen, correct_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Correct')\n displaySamples(wrong_samples_labels, wrong_samples_predictions,\n wrong_samples, wrong_samples_nearest,\n wrong_samples_eigen, wrong_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Wrong')", "def test_generate_nb(self):\n pass", "def main():\n training_data, validation_data, test_data = mnist.load()\n\n model = nn.NeuralNetwork([784, 100, 10], learning_rate=0.01, batch_size=50)\n\n model_training = training.EarlyStoppingRegularization(model,\n training_data,\n validation_data,\n test_data,\n max_steps_without_progression=2)\n result = model_training.train()\n\n result.save('models/mnist')", "def main():\n\n NUM_TRAIN = noise.init_train_thresh\n NUM_TEST = 20\n XDIM = 1\n\n # Train the emulator\n x_train = np.random.uniform(size=(NUM_TRAIN, XDIM))\n y_train = np.array([noise(x) for x in x_train])\n\n # Output error estimates\n noise.output_err = True\n\n # Get values from the trained emulator\n x_emu = np.random.uniform(size=(NUM_TEST, XDIM))\n\n y_emu = np.zeros_like(x_emu)\n y_err = np.zeros_like(x_emu)\n\n for i, x in enumerate(x_emu):\n val, err = noise(x)\n y_emu[i] = val\n y_err[i] = err\n\n # Plot the results\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.scatter(x_train[:, 0], y_train, marker=\"+\", label=\"training values\")\n ax.errorbar(\n x_emu,\n y_emu[:, 0],\n yerr=y_err.flatten(),\n linestyle=\"None\",\n marker=\"o\",\n capsize=3,\n label=\"emulator\",\n color=\"red\",\n )\n\n ax.legend()\n\n # `__file__` is undefined when running in sphinx\n try:\n fig.savefig(__file__ + \".png\")\n except NameError:\n pass", "def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)", "def test_mlp(learning_rate=.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=150,\n dataset='mnist.pkl.gz', batch_size=20, n_hidden=100):\n #Note - transfer is used to check whether test_mlp is running for the first time with new weights or second time with transferred weights\n #Transfer is initialized to be false.\n #a transfer in the if statement will run the code for the Letters data set first and Numbers data set second.\n #(Not transfer) will run the code for the Numbers data set first and Letters data set second. 
\n\n #CHANGE FLAG - edit order datasets are run in and dataset name\n if(transfer):\n #datasets = load_data(dataset)\n f = open('HSFNums.p','rb')\n datasets = pickle.load(f)\n\n else:\n #datasets = getHSF()\n f = open('HSFLetters2.p','rb')\n datasets = pickle.load(f)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n f.close()\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n #total size of valid data is printed\n print 'This is the vector size of the inputs' #\n print train_set_x.get_value(borrow=True).shape #\n print n_train_batches #\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\n\n #Data reduction\n if(transfer):\n train_set_x = train_set_x[0:int(1.0*n_train_batches*batch_size),:]\n train_set_y = train_set_y[0:int(1.0*n_train_batches*batch_size)]\n\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... building the model'\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n x = T.matrix('x') # the data is presented as rasterized images\n y = T.ivector('y') # the labels are presented as 1D vector of\n # [int] labels\n\n rng = numpy.random.RandomState(1234)\n\n # construct the MLP class\n #problem is you can't pass weights through here, b/c of gradient descent\n #algorithms use these parameters\n\n #Numbers have 10 classifications, Letters have 26 classifications.\n #transfer is initialized as false, so depending on which dataset should be run first, edit this\n #CHANGE FLAG - edit the order the network trains in and the number of outputs (n_out)\n if(transfer):\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=28 * 28,\n n_hidden=n_hidden,\n n_out=10\n )\n else:\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=28 * 28,\n n_hidden=n_hidden,\n n_out=26\n )\n\n # the cost we minimize during training is the negative log likelihood of\n # the model plus the regularization terms (L1 and L2); cost is expressed\n # here symbolically\n cost = (\n classifier.negative_log_likelihood(y)\n + L1_reg * classifier.L1\n + L2_reg * classifier.L2_sqr\n )\n\n # compiling a Theano function that computes the mistakes that are made\n # by the model on a minibatch\n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * batch_size:(index + 1) * batch_size],\n y: test_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * batch_size:(index + 1) * batch_size],\n y: valid_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n\n # compute the gradient of cost with respect to theta (stored in params)\n # the resulting gradients will be stored in a list gparams\n gparams = [T.grad(cost, param) for param in classifier.params]\n\n # specify how to update the parameters of the model as a list of\n # (variable, update expression) pairs\n\n # given two lists of the same length, A = [a1, a2, a3, a4] and\n # B = [b1, b2, b3, b4], zip generates a list C of same size, where each\n # element is a pair formed from the two lists :\n # C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]\n updates = [\n (param, param - learning_rate * gparam)\n for param, gparam in zip(classifier.params, 
gparams)\n ]\n\n # compiling a Theano function `train_model` that returns the cost, but\n # in the same time updates the parameter of the model based on the rules\n # defined in `updates`\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #CHANGE FLAG - edit based on the order the network rusn in and the input file name\n inputSize=100 #number of input images sampled from next dataset for transfer calculations\n if(not transfer):\n #f2 = open('HSFLetters2.p','rb')\n #f2 can be changed based on whether letters should be transferred to numbers or v.c.\n f2 = open('HSFNums.p','rb')\n datasetsTransfer = pickle.load(f2)\n train_set_x2, train_set_y2 = datasetsTransfer[0]\n inputs=train_set_x2.get_value(borrow=True) #inputs\n f2.close()\n \n ###############\n # TRAIN MODEL #\n ###############\n print '... training'\n\n # early-stopping parameters\n patience = 10000 # look as this many examples regardless\n patience_increase = 2 # wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience / 2)\n # go through this many\n # minibatches before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_validation_loss = numpy.inf\n best_iter = 0\n test_score = 0.\n start_time = timeit.default_timer()\n\n epoch = 0\n done_looping = False\n\n\n #opening files to print validation error to\n if(not transfer):\n outFile = open('out.txt','w')\n else:\n outFile = open('outTransfer.txt','w')\n\n\n #Inserted code for printing out validation after randomization\n validation_losses = [validate_model(i) for i\n in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n outFile.write(str(this_validation_loss*100)) #printing the error out to the file, turned to string b/c still using write function\n outFile.write('\\n')\n\n\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in xrange(n_train_batches):\n\n minibatch_avg_cost = train_model(minibatch_index)\n # iteration number\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute zero-one loss on validation set\n validation_losses = [validate_model(i) for i\n in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n outFile.write(str(this_validation_loss*100)) #printing the error out to the file, turned to string b/c still using write function\n outFile.write('\\n')\n print(\n 'epoch %i, minibatch %i/%i, validation error %f %%' %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n #improve patience if loss improvement is good enough\n if (\n this_validation_loss < best_validation_loss *\n improvement_threshold\n ):\n patience = max(patience, iter * patience_increase)\n\n best_validation_loss = this_validation_loss\n best_iter = iter\n\n # test it on the test set\n test_losses = [test_model(i) for i\n in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print((' epoch %i, minibatch %i/%i, test error of '\n 'best model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n test_score * 
100.))\n\n if patience <= iter:\n done_looping = True\n break\n #closing file\n outFile.close()\n end_time = timeit.default_timer()\n print(('Optimization complete. Best validation score of %f %% '\n 'obtained at iteration %i, with test performance %f %%') %\n (best_validation_loss * 100., best_iter + 1, test_score * 100.))\n print >> sys.stderr, ('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.))\n\n\n\n #Goal of block: Calculate hidden node activations and find which weights to transfer\n # Create global theano shared variable for the weights to transfer\n if(not transfer):\n \n #Set threshold to determine bounds for activated nodes - Weights leading to activated nodes with absolute values >= threshold\n #will be copied over. Other weights are re-initialized.\n threshold = 0.0\n n_in = 28*28\n #inputs are passed from the train_set_x above\n hidden1W = classifier.hiddenLayer.W.get_value()\n hidden1Wcopy = hidden1W\n #Making a copy of the first hidden layer of weights to be used in calculations for second hidden lyaer of weights\n aveList = []\n #aveList represents the average hidden node activations for layer 1\n print 'starting transfer calculations'\n for i in range(0,n_hidden):\n x = 0\n for j in range(0,inputSize):\n #Design choice to use absolute value b/c a positive activation and a negative activation were both considered important\n x += abs(numpy.tanh(numpy.tensordot(inputs[j,:],hidden1W[:,i],axes=1)))\n aveList.append(x/inputSize)\n\n print 'ending calculation'\n\n count = 0\n for i in range(0,n_hidden):\n \n if(aveList[i] < threshold):\n #If the activation is below the threshold, then the weights corresponding leading to that hidden node will be reinitialized\n hidden1W[:,i] = numpy.asarray(\n rng.uniform(\n low=-numpy.sqrt(6. / (n_in + n_hidden)),\n high=numpy.sqrt(6. / (n_in + n_hidden)),\n size=(n_in,1)\n ),\n dtype=theano.config.floatX\n ).flatten()\n else:\n count+=1\n print 'A total number of ' + str(count) + ' H1 nodes passed the threshold'\n \n #saving count of hidden nodes\n outFile3 = open('transfer.txt','w')\n outFile3.write(str(count))\n outFile3.write('\\n')\n\n\n\n hidden1Act = numpy.zeros((1,n_hidden))\n #Making a dummy hidden layer variable to edit\n\n #now for the next hidden layer :)\n hidden2W = classifier.hiddenLayer2.W.get_value()\n aveList = []\n #aveList here represents the average hidden node activations for layer 2\n print 'starting next hidden layer calculation'\n for i in range(0,n_hidden):\n x = 0\n for j in range(0,inputSize):\n for k in range(0,n_hidden):\n hidden1Act[0][k] = numpy.tanh(numpy.tensordot(inputs[j,:],hidden1Wcopy[:,k],axes=1))\n x += abs(numpy.tanh(numpy.tensordot(hidden1Act[0,:],hidden2W[:,i],axes=1)))\n aveList.append(x/inputSize)\n print 'ending hidden 2 calculation'\n count = 0\n for i in range(0,n_hidden):\n if(aveList[i] < threshold):\n hidden2W[:,i] = numpy.asarray(\n rng.uniform(\n low=-numpy.sqrt(6. / (n_hidden + n_hidden)),\n high=numpy.sqrt(6. / (n_hidden + n_hidden)),\n size = (n_hidden,1)\n ),\n dtype=theano.config.floatX\n ).flatten()\n else:\n count += 1\n print 'A total number of ' + str(count) + ' H2 nodes passed the threshold'\n\n outFile3.write(str(count))\n outFile3.close()\n\n\n #3 global variables exist. 
tensor and tensor2 variables are the global theano shared variables for the weights.\n #During the next run, the MLP will be initialized with these weights thereby transferring the weights from this run.\n global transfer\n transfer = True\n global tensor\n global tensor2\n tensor = theano.shared(value=hidden1W,name = 'W', borrow=True)\n tensor2 = theano.shared(value = hidden2W, name = 'tensor2', borrow=True)\n\n test_mlp() \n else:\n print 'Thank you for running this transfer program'\n print 'Below are descriptions of files that have been created'\n print 'out.txt - validation error while training'\n print 'outTransfer.txt - validation error while training after transfer learning'\n print 'transfer.txt - number of hidden nodes transferred in each layer'", "def utest_SGD_Test():\n model_fname = \"../work/model\"\n # test binary classification.\n if False:\n #test_fname = \"../work/train.bz2\"\n test_fname = \"../work/rcv1_test.binary.bz2\"\n if True:\n test_fname = \"../work/iris_multi.train\"\n test_logreg(model_fname,test_fname,prob=True,acc=True)\n pass", "def main():\n # Call testing function\n testMinivan()", "def test1():\n for test in pkl.load(open(TEST_RESOURCES_DIR / \"regression_vault.pkl\", \"rb\"))[:5]:\n init_dict, rslt = test\n np.testing.assert_array_equal(run_regression_test(init_dict), rslt)", "def get_results():\r\n #Get python results\r\n import mnist_nn\r\n import mnist_nn_gpu\r\n mnist_nn.save_results()\r\n mnist_nn_gpu.save_results()\r\n\r\n #Get cpp results\r\n import subprocess\r\n subprocess.call(['c++//./run.sh'])", "def test_mnir_image():\n # Initiate the sunglint correction class\n g = deglint.GlintCorr(odc_meta_file, sub_product)\n\n # ---------------------- #\n # NIR subtraction #\n # ---------------------- #\n mnir_xarrlist = g.glint_subtraction(\n vis_bands=[\"3\"],\n corr_band=\"6\",\n water_val=5,\n )\n\n sungc_band = mnir_xarrlist[0].lmbadj_green.values # 3D array\n\n # path to expected sunglint corrected output from NIR subtraction\n exp_sungc_band = (\n data_path\n / \"MINUS_NIR\"\n / \"ga_ls8c_lmbadj_3-2-0_091086_2014-11-06_final_band03-deglint-600m.tif\"\n )\n\n # ensure that all valid sungint corrected pixels match expected\n with rasterio.open(exp_sungc_band, \"r\") as exp_sungc_ds:\n urd_band = urd(sungc_band[0, :, :], exp_sungc_ds.read(1), exp_sungc_ds.nodata)\n assert urd_band.max() < 0.001", "def load_data(m=5000, n=100, path='D:/file/vscode/py/data/mnist.npz'):\r\n f = np.load(path)\r\n x_train, y_train = f['x_train'], f['y_train']\r\n\r\n x_test, y_test = f['x_test'], f['y_test']\r\n\r\n f.close()\r\n return (x_train, y_train), (x_test, y_test)", "def test_imsim():\n import yaml\n import astropy.units as u\n import matplotlib.pyplot as plt\n from tqdm import tqdm\n # Need these for `eval` below\n from numpy import array\n import coord\n\n with open(DATA_DIR / \"wcs_466749.yaml\", 'r') as f:\n wcss = yaml.safe_load(f)\n\n cmds = {}\n with open(DATA_DIR / \"phosim_cat_466749.txt\", 'r') as f:\n for line in f:\n k, v = line.split()\n try:\n v = int(v)\n except ValueError:\n try:\n v = float(v)\n except ValueError:\n pass\n cmds[k] = v\n\n # Values below (and others) from phosim_cat_466749.txt\n rc = cmds['rightascension']\n dc = cmds['declination']\n boresight = galsim.CelestialCoord(\n rc*galsim.degrees,\n dc*galsim.degrees\n )\n obstime = Time(cmds['mjd'], format='mjd', scale='tai')\n obstime -= 15*u.s\n band = \"ugrizy\"[cmds['filter']]\n wavelength_dict = dict(\n u=365.49,\n g=480.03,\n r=622.20,\n i=754.06,\n z=868.21,\n y=991.66\n )\n 
wavelength = wavelength_dict[band]\n camera = imsim.get_camera()\n\n rotTelPos = cmds['rottelpos'] * galsim.degrees\n telescope = imsim.load_telescope(f\"LSST_{band}.yaml\", rotTelPos=rotTelPos)\n # Ambient conditions\n # These are a guess.\n temperature = 293.\n pressure = 69.0\n H2O_pressure = 1.0\n\n # Start by constructing a refractionless factory, which we can use to\n # cross-check some of the other values in the phosim cmd file.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=0.0,\n H2O_pressure=H2O_pressure\n )\n\n aob, zob, hob, dob, rob, eo = factory._ICRF_to_observed(\n boresight.ra.rad, boresight.dec.rad, all=True\n )\n np.testing.assert_allclose(\n np.rad2deg(aob)*3600, cmds['azimuth']*3600,\n rtol=0, atol=2.0\n )\n np.testing.assert_allclose(\n (90-np.rad2deg(zob))*3600, cmds['altitude']*3600,\n rtol=0, atol=6.0,\n )\n q = factory.q * galsim.radians\n rotSkyPos = rotTelPos - q\n # Hmmm.. Seems like we ought to be able to do better than 30 arcsec on the\n # rotator? Maybe this is defined at a different point in time? Doesn't seem\n # to affect the final WCS much though.\n np.testing.assert_allclose(\n rotSkyPos.deg*3600, cmds['rotskypos']*3600,\n rtol=0, atol=30.0,\n )\n\n # We accidentally simulated DC2 with the camera rotated 180 degrees too far.\n # That includes the regression test data here. So to fix the WCS code, but\n # still use the same regression data, we need to add 180 degrees here. Just\n # rotate the camera by another 180 degrees\n telescope = telescope.withLocallyRotatedOptic(\n \"LSSTCamera\", batoid.RotZ(np.deg2rad(180))\n )\n\n # For actual WCS check, we use a factory that _does_ know about refraction.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=pressure,\n H2O_pressure=H2O_pressure\n )\n\n do_plot = False\n my_centers = []\n imsim_centers = []\n if do_plot:\n _, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 12))\n i = 0\n r1 = []\n d1 = []\n r2 = []\n d2 = []\n rng = np.random.default_rng(1234)\n for k, v in tqdm(wcss.items()):\n name = k[18:25].replace('-', '_')\n det = camera[name]\n cpix = det.getCenter(cameraGeom.PIXELS)\n\n wcs = factory.getWCS(det, order=2)\n wcs1 = eval(v)\n # Need to adjust ab parameters to new GalSim convention\n wcs1.ab[0,1,0] = 1.0\n wcs1.ab[1,0,1] = 1.0\n\n my_centers.append(wcs.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n imsim_centers.append(wcs1.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n\n corners = det.getCorners(cameraGeom.PIXELS)\n xs = np.array([corner.x for corner in corners])\n ys = np.array([corner.y for corner in corners])\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n if i == 0:\n labels = ['batoid', 'PhoSim']\n else:\n labels = [None]*2\n if do_plot:\n ax.plot(ra1, dec1, c='r', label=labels[0])\n ax.plot(ra2, dec2, c='b', label=labels[1])\n\n # add corners to ra/dec check lists\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n # Add some random points as well\n xs = rng.uniform(0, 4000, 100)\n ys = rng.uniform(0, 4000, 100)\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n i += 1\n\n if do_plot:\n ax.legend()\n xlim = ax.get_xlim()\n ax.set_xlim(xlim[1], xlim[0])\n plt.show()\n\n dist = sphere_dist(r1, d1, r2, d2)\n print(\"sphere 
dist mean, max, std\")\n print(\n np.rad2deg(np.mean(dist))*3600,\n np.rad2deg(np.max(dist))*3600,\n np.rad2deg(np.std(dist))*3600,\n )\n np.testing.assert_array_less(\n np.rad2deg(np.mean(dist))*3600,\n 5.0\n )\n if do_plot:\n plt.hist(np.rad2deg(dist)*3600, bins=100)\n plt.show()\n\n if do_plot:\n r1 = np.array([c.ra.rad for c in my_centers])\n d1 = np.array([c.dec.rad for c in my_centers])\n r2 = np.array([c.ra.rad for c in imsim_centers])\n d2 = np.array([c.dec.rad for c in imsim_centers])\n cd = np.cos(np.deg2rad(cmds['declination']))\n q = plt.quiver(r1, d1, np.rad2deg(r1-r2)*3600*cd, np.rad2deg(d1-d2)*3600)\n plt.quiverkey(q, 0.5, 1.1, 5.0, \"5 arcsec\", labelpos='E')\n plt.show()", "def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, x5d))\n test_set = np.vstack((x45d, train_set))\n recalled_set = hop.recall(test_set)\n for i in range(test_set.shape[0]):\n show_tested(test_set[i], recalled_set[i])", "def pick_data(ns, digits):\n f = gzip.open('data/mnist.pkl.gz', 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n images, labels = train_set\n\n originals = []; \n shapes = []; \n true_labels = [];\n i = 0\n for n, d in zip(ns, digits):\n # picking n elements with digit d\n x = np.where(labels==d)[0]\n idx = np.random.choice(x, n, replace=False)\n imgs = images[idx]\n originals.append(imgs)\n contours = [mnistshape.get_shape2(im.reshape((28,28)), n=30, s=5, ir=2)\n for im in imgs]\n shapes.append(contours)\n true_labels.append([i]*n)\n i += 1\n originals = np.concatenate(originals)\n true_labels = np.concatenate(true_labels)\n \n new_shapes = []\n for cluster in shapes:\n for shape in cluster:\n new_shapes.append(shape)\n new_shapes = np.array(new_shapes)\n\n # return shuffled data\n idx = range(len(originals))\n np.random.shuffle(idx)\n return originals[idx], new_shapes[idx], true_labels[idx]", "def test_X_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X.equals(atom.mnb.X)\n assert check_scaling(atom.lr.X)", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n 
y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def get_mini_samples():\n if GLOBALS['project_root']=='':\n print('please initialize project_root in GLOBALS first')\n return None\n data_path = os.path.join(GLOBALS['project_root'], 'data/MNIST/')\n pickle_path = os.path.join(data_path, 'mnist_mini_samples.pickle')\n if os.path.exists(pickle_path):\n with open(pickle_path, 'rb') as f:\n mini_samples = pickle.load(f)\n else:\n mnist = get_mnist()\n mini_samples = mnist.train.next_batch(50)\n with open(pickle_path, 'wb') as f:\n pickle.dump(mini_samples, f, pickle.HIGHEST_PROTOCOL)\n\n return mini_samples", "def codeepneat_mnist_example(_):\n # Set standard configuration specific to TFNE but not the neuroevolution process\n logging_level = logging.INFO\n config_file_path = './codeepneat_mnist_example_config.cfg'\n backup_dir_path = './tfne_state_backups/'\n max_generations = 20\n max_fitness = None\n\n # Read in optionally supplied flags, changing the just set standard configuration\n if flags.FLAGS.logging_level is not None:\n logging_level = flags.FLAGS.logging_level\n if flags.FLAGS.config_file is not None:\n config_file_path = flags.FLAGS.config_file\n if flags.FLAGS.backup_dir is not None:\n backup_dir_path = flags.FLAGS.backup_dir\n if flags.FLAGS.max_generations is not None:\n max_generations = flags.FLAGS.max_generations\n if flags.FLAGS.max_fitness is not None:\n max_fitness = flags.FLAGS.max_fitness\n\n # Set logging, parse config\n logging.set_verbosity(logging_level)\n config = tfne.parse_configuration(config_file_path)\n\n # Initialize the environment and the specific NE algorithm\n environment = tfne.environments.MNISTEnvironment(weight_training=True, config=config, verbosity=logging_level)\n ne_algorithm = tfne.algorithms.CoDeepNEAT(config)\n\n # Initialize evolution engine and supply config as well as initialized NE algorithm and evaluation environment.\n engine = tfne.EvolutionEngine(ne_algorithm=ne_algorithm,\n environment=environment,\n backup_dir_path=backup_dir_path,\n max_generations=max_generations,\n max_fitness=max_fitness)\n\n # Start training process, returning the best genome when training ends\n best_genome = engine.train()\n print(\"Best genome returned by evolution:\\n\")\n print(best_genome)\n\n # Increase epoch count in environment for a final training of the best genome. 
Train the genome and then replay it.\n print(\"Training best genome for 200 epochs...\\n\")\n environment.epochs = 20\n environment.eval_genome_fitness(best_genome)\n environment.replay_genome(best_genome)\n\n # Serialize and save genotype and Tensorflow model to demonstrate serialization\n best_genome.save_genotype(save_dir_path='./best_genome_genotype/')\n best_genome.save_model(file_path='./best_genome_model/')", "def test_Gaussian_NB_estimators():", "def fixture_sim():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tsim = read.load_sim(EXAMPLE_FILE_FOLDER)\n\treturn sim", "def _test_examples(self):\n checks = [\n (\n \"ex5_line-of-sight_solution\",\n [r\"RAJA sequential\", r\"RAJA OpenMP\", r\"result -- PASS\"],\n ),\n (\n \"ex6_stencil-offset-layout_solution\",\n [r\"RAJA Views \\(permuted\\)\", r\"result -- PASS\"],\n ),\n (\n \"ex8_tiled-matrix-transpose_solution\",\n [r\"parallel top inner loop\", r\"collapsed inner loops\", r\"result -- PASS\"],\n ),\n (\"kernel-dynamic-tile\", [r\"Running index\", r\"(24,24)\"]),\n (\"plugin-example\", [r\"Launching host kernel for the 10 time\"]),\n (\"tut_batched-matrix-multiply\", [r\"result -- PASS\"]),\n (\"wave-eqn\", [r\"Max Error = 2\", r\"Evolved solution to time\"]),\n ]\n for exe, expected in checks:\n reason = \"test: checking output of {0} for {1}\".format(exe, expected)\n self.run_test(\n exe,\n [],\n expected,\n installed=False,\n purpose=reason,\n skip_missing=True,\n work_dir=self._extra_tests_path,\n )", "def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def test_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.test.equals(atom.mnb.test)\n assert check_scaling(atom.lr.test)", "def train_mnist():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = MNIST_TRAIN(path=Config.video_folder)\r\n model = LSAMNIST(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='mnist.txt')\r\n helper.train_one_class_classification()", "def run_tests(): \n \n\n nextdata = [[21, 61, 42, 30], [33,45, 18, 29]]\n\n for xval, yval, snum, expect in nextdata:\n\n pmachine = PMachine()\n pmachine.serial_number = snum\n pmachine.run2_completion()\n result = pmachine.calc_square_total(xval, yval, showsquare=True)\n assert result == expect\n print(\"Got value {}={} as expected\".format(result, expect))", "def test_wf_ndst_8(plugin):\n wf = Workflow(name=\"wf_ndst_8\", input_spec=[\"x\", \"y\"])\n wf.add(add2(name=\"add2x\", x=wf.lzin.x).split(\"x\"))\n wf.add(add2(name=\"add2y\", x=wf.lzin.y).split(\"x\"))\n wf.add(\n multiply(name=\"mult\", x=wf.add2x.lzout.out, y=wf.add2y.lzout.out).combine(\n \"add2y.x\"\n )\n )\n wf.inputs.x = [1, 2, 3]\n wf.inputs.y = [11, 12]\n\n wf.set_output([(\"out\", wf.mult.lzout.out)])\n wf.plugin = plugin\n\n with Submitter(plugin=plugin) as sub:\n sub.run(wf)\n\n # checking the results\n while not wf.done:\n sleep(1)\n results = wf.result()\n\n assert len(results.output.out) == 3\n assert results.output.out[0] == [39, 42]\n assert results.output.out[1] == [52, 56]\n assert results.output.out[2] == [65, 70]", "def RunTest():\n #800nm\n RunData(g.glob('testdata/15*.fits'), out='test800nm')\n forwardModelJointFit(g.glob('testdata/15*.fits'), out='test800nmJoint',
wavelength='800nm')\n _plotDifferenceIndividualVsJoined(individuals='results/test800nm?.pkl', joined='results/test800nmJoint.pkl',\n title='800nm')\n #700nm\n RunData(g.glob('testdata/17*.fits'), out='test700nm')\n forwardModelJointFit(g.glob('testdata/17*.fits'), out='test700nmJoint', wavelength='700nm')\n _plotDifferenceIndividualVsJoined(individuals='results/test700nm?.pkl', joined='results/test700nmJoint.pkl',\n title='700nm')", "def make_rmnist(n=10):\n td, vd, ts = load_data()\n indices = range(50000)\n random.shuffle(indices)\n values = [(j, td[1][j]) for j in indices]\n indices_subset = [[v[0] for v in values if v[1] == j][:n]\n for j in range(10)]\n flattened_indices = [i for sub in indices_subset for i in sub]\n random.shuffle(flattened_indices)\n td0_prime = [td[0][j] for j in flattened_indices]\n td1_prime = [td[1][j] for j in flattened_indices]\n td_prime = (td0_prime, td1_prime)\n\n train_data = td_prime[0]\n train_labels = td_prime[1]\n val_data = vd[0]\n val_labels = vd[1]\n test_data = ts[0]\n test_labels = ts[1]\n\n fname = 'data/rmnist_'+str(n)\n np.savez(fname,\n train_data = train_data,\n train_labels = train_labels,\n val_data = val_data,\n val_labels = val_labels,\n test_data = test_data,\n test_labels = test_labels)", "def get_data(numbers):\r\n numbers = numbers\r\n n_classes = len(numbers)\r\n z = zipfile.ZipFile('lab3/mnist.pkl.zip', 'r')\r\n k = z.extract('mnist.pkl') # Извлечь файл из архива\r\n with open(k, 'rb') as f:\r\n train_set, _, test_set = pickle.load(f, encoding=\"bytes\")\r\n x_train = train_set[0]\r\n x_test = test_set[0]\r\n x_train[x_train >= 0.5] = 1\r\n x_train[x_train < 0.5] = 0\r\n x_test[x_test >= 0.5] = 1\r\n x_test[x_test < 0.5] = 0\r\n y_train = train_set[1]\r\n y_test = test_set[1]\r\n idx_train = [[np.where(y_train == i)] for i in numbers]\r\n idx_test = [[np.where(y_test == i)] for i in numbers]\r\n idx_x_train = [x_train[idx_train[i][0]] for i in range(len(idx_train))]\r\n idx_x_test = [x_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n idx_y_test = [y_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n x_train_new = shuffle(np.concatenate(idx_x_train))\r\n x_test_new = shuffle(np.concatenate(idx_x_test))\r\n y_test_new = shuffle(np.concatenate(idx_y_test))\r\n return x_train_new, x_test_new, y_test_new, numbers, n_classes", "def test_synth_tr():\n test_path = tempfile.mkdtemp()\n x_train, metadata = synth_tr(test_path)\n try:\n assert x_train.shape == (250, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def dummy(args):\n\n task_ids = {'1': LossTypes.mse, '2': LossTypes.mse, '3': LossTypes.cross_entropy}\n input_dimension = 5000 # Dimensionality of each training set\n num_inputs_train = 750\n num_inputs_validate = 100\n num_inputs_test = 150\n\n # Training set\n x_train = np.random.random((num_inputs_train, input_dimension))\n y_train = {}\n\n # Validation set\n x_validate = np.random.random((num_inputs_validate, input_dimension))\n y_validate = {}\n\n # Testing set\n x_test = np.random.random((num_inputs_test, input_dimension))\n y_test = {}\n\n for task_id, loss_type in task_ids.iteritems():\n if loss_type is LossTypes.mse:\n y_train[task_id] = np.random.random((num_inputs_train, 1))\n y_validate[task_id] = np.random.random((num_inputs_validate, 1))\n y_test[task_id] = np.random.random((num_inputs_test, 1))\n elif loss_type is LossTypes.cross_entropy:\n # Training labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_train).reshape(1, num_inputs_train)\n 
y_train[task_id] = convert_to_one_hot(labels)\n\n # Validation labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_validate).reshape(1, num_inputs_validate)\n y_validate[task_id] = convert_to_one_hot(labels)\n\n # Testing labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_test).reshape(1, num_inputs_test)\n y_test[task_id] = convert_to_one_hot(labels)\n\n exp = Experiment(expt_name=\"synthetic\", task_ids=task_ids, x_train=x_train, x_validate=x_validate,\n x_test=x_test, y_train=y_train, y_validate=y_validate, y_test=y_test,\n model_class=LowLevelSharingModel, learning_rate=args.learning_rate,\n batch_size=args.batch_size, num_epochs=args.num_epochs)\n exp.initialize_network()\n exp.train()\n sys.stderr.write(\"Training complete. Logs, outputs, and model saved in \" + os.getcwd())", "def test_compute_glycemic_load(self):\n pass", "def run_example(num_points_to_sample=20, verbose=True, **kwargs):\n exp = Experiment([[0, 2], [0, 4]]) # 2D experiment, we build a tensor product domain\n # Bootstrap with some known or already sampled point(s)\n exp.historical_data.append_sample_points([\n SamplePoint([0, 0], function_to_minimize([0, 0]), 0.05), # Iterables of the form [point, f_val, f_var] are also allowed\n ])\n\n # Sample num_points_to_sample points\n for _ in range(num_points_to_sample):\n # Use MOE to determine what is the point with highest Expected Improvement to use next\n next_point_to_sample = gp_next_points(exp, **kwargs)[0] # By default we only ask for one point\n # Sample the point from our objective function, we can replace this with any function\n value_of_next_point = function_to_minimize(next_point_to_sample)\n\n if verbose:\n print \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point)\n\n # Add the information about the point to the experiment historical data to inform the GP\n exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)]) # We can add some noise", "def Main():\n\n\n Matrice = [[0 for col in range(tailleM)] for row in range(tailleM)]\n result = 0\n tour = 1\n while result==0: \n \n temp = MiniMaxDecision(copy.deepcopy(Matrice), tailleM,0,0,copy.deepcopy(tour))\n Matrice = Result(Matrice,temp[1],temp[2],1)\n printMat(Matrice, tailleM)\n result = TerminalTest(Matrice)\n tour +=1\n userinput(Matrice, tailleM)\n printMat(Matrice, tailleM)\n result = TerminalTest(Matrice)\n tour +=1\n \n printMat(Matrice, tailleM)", "def gen_simple_test():\n count = 1\n mdict = {\n 'operating_frequency': 3e8,\n 'sample_rate': 8e3,\n 'signal': [1] * 5,\n 'origin_pos': [1000, 0, 0],\n 'dest_pos': [300, 200, 50],\n 'origin_vel': [0] * 3,\n 'dest_vel': [0] * 3,\n }\n io.savemat('{}{}_input'.format(tests_path, count), mdict)", "def get_mnist():\n from keras.datasets import mnist\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n 
input_shape = (img_rows, img_cols, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def run(gens, version):\n pop = neat.population.Population(CONFIG)\n stats = neat.statistics.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.reporting.StdOutReporter(True))\n\n global DYNAMIC_PARAMS\n DYNAMIC_PARAMS = params(version)\n\n winner = pop.run(eval_fitness, gens)\n print(f\"es_hyperneat_xor_{VERSION_TEXT} done\")\n return winner, stats", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def wild_test(img, mod):\n img = cv2.imread(img)\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img_resize = cv2.resize(img_gray, (28, 28))\n img_resize = img_resize.reshape((1, 28, 28))\n print (\"Image size\", img_resize.shape)\n # it is ugly, you can make this much better\n data = np.asarray([img_resize]*100)\n test_iter = mx.io.NDArrayIter(data, None, 100)\n prob = mod.predict(test_iter)\n print (\"The prediction is :\", np.argmax(prob.asnumpy()[0]))" ]
[ "0.69393843", "0.6572477", "0.6475336", "0.6470662", "0.63731503", "0.6351866", "0.62574834", "0.62303245", "0.6214079", "0.61291355", "0.6029285", "0.6016668", "0.601599", "0.6014156", "0.59936774", "0.5969918", "0.595324", "0.5940546", "0.59249955", "0.5917552", "0.58708", "0.5852096", "0.5836193", "0.5835081", "0.5788991", "0.57672447", "0.5757812", "0.5727704", "0.5723906", "0.5711445", "0.5688242", "0.56738627", "0.5672134", "0.5652877", "0.56421626", "0.56303865", "0.5606894", "0.56036067", "0.55937445", "0.5585313", "0.55843043", "0.55813795", "0.5576688", "0.5561642", "0.55504686", "0.5543815", "0.554308", "0.55323386", "0.55269194", "0.5525398", "0.5521008", "0.55181575", "0.5500016", "0.5488591", "0.5487649", "0.5484237", "0.5480651", "0.54791033", "0.5477469", "0.54745686", "0.546711", "0.5454658", "0.54501647", "0.54461527", "0.5436104", "0.54346323", "0.54334223", "0.54282826", "0.5427783", "0.5422552", "0.5419721", "0.54114324", "0.54087865", "0.5408049", "0.5406579", "0.5406004", "0.5403019", "0.5391363", "0.5386287", "0.53811055", "0.5380097", "0.5379187", "0.5372314", "0.5369747", "0.5365387", "0.53617287", "0.53574705", "0.53547317", "0.53533447", "0.53461796", "0.53391784", "0.53378093", "0.53325003", "0.5332289", "0.53311956", "0.5330723", "0.53290445", "0.53279996", "0.5322562", "0.53122485" ]
0.6889731
1
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train_test(self):
    filename = "mnist_rts.py --replication-factor 2 --rts --test"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def test_documentation_popxl_mnist(self):\n filename = \"mnist.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"", "def test_rand(self):\n assert len(self._mnist.random()[:5]) == 5\n pass", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def create_mnistm(X: Any) -> Any:\n\n bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n rand = np.random.RandomState(42)\n train_files = []\n\n with 
tarfile.open(bst_path, \"r\") as bsr_file:\n for name in bsr_file.getnames():\n if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n train_files.append(name)\n\n print(\"Loading BSR training images\")\n background_data = []\n for name in train_files:\n try:\n fp = bsr_file.extractfile(name)\n bg_img = skimage.io.imread(fp)\n background_data.append(bg_img)\n except:\n continue\n\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n for i in range(X.shape[0]):\n if i % 1000 == 0:\n print(\"Processing example\", i)\n\n bg_img = rand.choice(background_data)\n d = mnist_to_img(X[i])\n d = compose_image(d, bg_img)\n X_[i] = d\n\n return X_", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_predictor():", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def run(prefix):\n # run_tests.assert_folder_is_empty(prefix=prefix)\n xrs_good,xrs_poor,f_obs,r_free_flags = run_tests.setup_helix_example()\n # pdb_inp = os.path.join(qr_unit_tests,\"data_files\",\"2lvr.pdb\")\n r = run_tests.run_cmd(prefix,\n args = [\"restraints=cctbx\",\"mode=gtest\",\"g_scan=20\",\"g_mode=1\"],\n pdb_name = 'm00_poor.pdb', mtz_name='')\n assert os.path.isfile('1-20.npy')", "def test_machine_learning():", "def try4():\n path = '/Users/mayankkejriwal/git-projects/bioExperiments/tsne_python/'\n mnist = path+'mnist2500_X.txt'\n X = numpy.loadtxt(mnist)\n labels = numpy.loadtxt(path+\"mnist2500_labels.txt\")\n Y = tsne.tsne(X, 2, 50, 20.0)\n pylab.scatter(Y[:,0], Y[:,1], 20, labels)\n pylab.show()", "def main():\n # Import or download the mnist data, from target file path.\n mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n # Train and test model.\n train(mnist)", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test_readme_minimal():\n # Data sampler that generates balanced batches from MNIST dataset\n sampler = TFDatasetMultiShotMemorySampler(\n dataset_name='mnist',\n classes_per_batch=10\n )\n\n # Build a Similarity model using standard Keras layers\n inputs = layers.Input(shape=(28, 28, 1))\n x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n x = layers.Conv2D(64, 3, activation='relu')(x)\n x = layers.Flatten()(x)\n x = layers.Dense(64, activation='relu')(x)\n outputs = MetricEmbedding(64)(x)\n\n # Build a specialized Similarity model\n model = SimilarityModel(inputs, outputs)\n\n # Train Similarity model using contrastive loss\n model.compile('adam', loss=MultiSimilarityLoss())\n model.fit(sampler, epochs=5)\n\n # Index 100 embedded MNIST examples to make them searchable\n sx, sy = sampler.get_slice(0, 100)\n model.index(x=sx, y=sy, data=sx)\n\n # Find the top 5 
most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def test_active_inference_SPM_1b(self):", "def main(): \n symbolic_sample()\n print 'Done.'", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)", "def main():\n ex = Experiment(SEED)\n ex.main()", "def test_star():\n test_path = tempfile.mkdtemp()\n x_train, metadata = star(test_path)\n try:\n assert x_train.shape == (5748, 8)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def run_experiments(data_set=\"\",compact=2,exp_name=\"\",x=\"\"):\n cwd = os.getcwd()\n results_path = cwd+'/data/geometric/'+exp_name+'_'\n dataset_path = cwd+'/data/geometric/'+data_set+'_'\n compact = int(compact)\n if \"m\" in x:\n # make new pxy\n pxy, Xdata, groups = gen_geometric_pxy()\n np.save(dataset_path+'Xdata',Xdata)\n np.save(dataset_path+'groups',groups)\n np.save(dataset_path+'pxy',pxy)\n else:\n # load existing pxy\n pxy = np.load(dataset_path+'pxy.npy')\n if \"r\" in x: # regular experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, 
distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n if \"ip\" in x: # initialization experiments - positive p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n if \"in\" in x: # initialization experiments - negative p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n if \"c\" in x: # convergence tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n 
metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n if \"z\" in x: # zeroL tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n if \"b\" in x: # trying proposed optimal beta\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n return 0", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def mnist(path=None):\r\n url = 
'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def test_mlp():\r\n datasets = gen_data()\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n batch_size = 100 # size of the minibatch\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n #print '...
building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 
'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def testvis(layers='first'):\n\n\tfrom scipy.io import loadmat\n\tfrom setup import NeuralNetwork as nnsetup\n\n\tresult = loadmat(\"goodmatx.mat\")\n\tw1 = result['v1']\n\tw0 = result['v0']\n\tx,y = result['train_x'], result['train_y']\n\n\t# result = loadmat(\"ducky.mat\")\n\t# x = result['train_x']\n\t# y = result['train_y']\n\n\tsize = [x.shape[1], 1000, y.shape[1]]\n\n\tnn = nnsetup([size[0],size[1],size[0]],output='sigm')\n\n\tnn.W[0] = w0\n\tnn.W[1] = w1\n\t\n\tfor i in range(50):\n\t\tvisualize( nn, x, k = 3000+i*4, layers=layers, mode='save' )", "def test_generate_nb_testing(self):\n pass", "def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n 
#test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def test_genx(nsd, backend):\n # NCOLS of data:\n # 2 - test kernel only\n # 3 - test kernel and chi2 calculation\n # 4 - test resolution smearing and chi2 calculation\n\n test_name, slabs, data = nsd\n\n kernel_test(slabs, data, backend)", "def import_mnist():\n\turl_mnist = \"http://deeplearning.net/data/mnist/mnist.pkl.gz\"\n\tfile_name = \"mnist.pkl.gz\"\n\twork_directory = \"mnist\"\n\tfile_path = maybe_download(url=url_mnist, file_name=file_name, work_directory=work_directory)\n\n\timport pickle\n\twith gzip.open(file_path,'rb') as ff :\n\t\tu = pickle._Unpickler( ff )\n\t\tu.encoding = 'latin1'\n\t\ttrain, val, test = u.load()\n\t\ttrainX = np.array(train[0])\n\t\ttrainY = np.reshape(train[1], [50000, 1])\n\t\tvalX = np.array(val[0])\n\t\tvalY = np.reshape(val[1], [10000, 1])\n\t\ttestX = np.array(test[0])\n\t\ttestY = np.reshape(test[1], [10000, 1])\n\t\ttrainX = np.concatenate((trainX, valX), axis = 0)\n\t\ttrainY = np.concatenate((trainY, valY), axis = 0)\n\treturn trainX, trainY, testX, testY", "def test_fsnps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = fsnps(test_path)\n try:\n assert x_train.shape == (432, 10)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_1():\n constr = dict()\n constr['maxfun'] = np.random.randint(1, 5 + 1)\n\n get_random_init(constr)\n simulate('test.trempy.ini')\n estimate('test.trempy.ini')", "def 
test_documentation_popxl_addition_variable(self):\n filename = \"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_10_test_model(self, example):\n res = example.calc_model()\n print(example.trips_ij)\n total_trips_target = example.persons_gi.sum()\n total_trips_actual = example.trips_ij.sum()\n np.testing.assert_almost_equal(total_trips_target, total_trips_actual)", "def main():\n\n dataset = ConvMNIST(64)\n print(dataset.get_train().x.shape)\n\n\n inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n targets = Value(type=tf.int64, shape=(None), cls = 10)\n learning_rate = 0.0001\n\n fc_hidden = [1024, 500]\n c_h = [\n (3, 3, 1, 32),\n (3, 3, 32, 64)\n ]\n conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n config = Config(inputs, targets, conv_hidden, learning_rate)\n\n network = ConvNetworkBuilder(config)\n hidden = FFConvHiddenBuilder()\n _ = network.build_network(hidden)\n\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n\n trainer = Trainer(network, train_config)\n trainer.train(dataset)", "def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def run_example(num_points_to_sample=1000, verbose=True, **kwargs):\n\n exp = Experiment([[1, 52], [0, 6], [1, 52]]) # 2D experiment, we build a tensor product domain\n # Bootstrap with some known or already sampled point(s)\n exp.historical_data.append_sample_points([\n SamplePoint([26, 2, 46], get_fitness([26, 2, 35]), 0.5), # Iterables of the form [point, f_val, f_var] are also allowed\n ])\n # Sample num_points_to_sample points\n for i in range(num_points_to_sample):\n # Use MOE to determine what is the point with highest Expected Improvement to use next\n next_point_to_sample = map(round, gp_next_points(exp, **kwargs)[0]) # in [A, X, B] form, rounded integers\n value_of_next_point = get_fitness(next_point_to_sample)\n\n if verbose:\n if in_results(next_point_to_sample):\n print '***', \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point), '***'\n else:\n print \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point)\n\n bank[i,0:3] = next_point_to_sample\n bank[i,3] = value_of_next_point\n # Add the information about the point to the experiment historical data to inform the GP\n exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)]) # We can add some noise", "def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. 
TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized", "def test_menhinick(self):\n self.assertEqual(menhinick(self.TestData), 9/sqrt(22))", "def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = \"Test pour le data set Iris (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = \"Test pour le data set Digits (difficile, classique)\"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = \"Test pour le data set Wine (moyen, classique)\"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)", "def test(ndigit, elambda, showSamples, showConfusion):\n Data, Label = getData()\n trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n trainX_mean = np.mean(trainX, axis=0)\n trainX_new = trainX - trainX_mean\n eigenvectors = getEigenVectors(trainX_new, elambda)\n trainX_eigen = trainX_new.dot(eigenvectors)\n testX_new = testX - trainX_mean\n testX_eigen = testX_new.dot(eigenvectors)\n testO = []\n if showSamples:\n correct_samples = []\n correct_samples_nearest = []\n correct_samples_eigen = []\n correct_samples_nearest_eigen = []\n correct_samples_labels = []\n correct_samples_predictions = []\n wrong_samples = []\n wrong_samples_nearest = []\n wrong_samples_eigen = []\n wrong_samples_nearest_eigen = []\n wrong_samples_labels = []\n wrong_samples_predictions = []\n if showConfusion:\n conf = np.zeros((ndigit, ndigit))\n for i in xrange(testX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n p = int(trainY[j])\n y = int(testY[i])\n if showConfusion:\n conf[p, y] += 1\n if showSamples:\n if p == y:\n if len(correct_samples) < y + 1:\n correct_samples.append(testX[i])\n correct_samples_nearest.append(trainX[j])\n correct_samples_eigen.append(testX_eigen[i])\n correct_samples_nearest_eigen.append(trainX_eigen[j])\n correct_samples_labels.append(y)\n correct_samples_predictions.append(p)\n else:\n if len(wrong_samples) < y + 1:\n wrong_samples.append(testX[i])\n wrong_samples_nearest.append(trainX[j])\n wrong_samples_eigen.append(testX_eigen[i])\n wrong_samples_nearest_eigen.append(trainX_eigen[j])\n wrong_samples_labels.append(y)\n wrong_samples_predictions.append(p)\n testO.append(p)\n testO = np.array(testO)\n train0 = []\n for i in xrange(trainX_eigen.shape[0]):\n t = trainX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n train0.append(min_class)\n train0 = np.array(train0)\n print \"for digits = %d lambda = %.2f train = %.6f test = %.6f \" % (\n ndigit, elambda, (train0 == 
trainY).mean(), (testO == testY).mean())\n if showConfusion:\n print conf\n if showSamples:\n displaySamples(correct_samples_labels, correct_samples_predictions,\n correct_samples, correct_samples_nearest,\n correct_samples_eigen, correct_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Correct')\n displaySamples(wrong_samples_labels, wrong_samples_predictions,\n wrong_samples, wrong_samples_nearest,\n wrong_samples_eigen, wrong_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Wrong')", "def test_generate_nb(self):\n pass", "def main():\n training_data, validation_data, test_data = mnist.load()\n\n model = nn.NeuralNetwork([784, 100, 10], learning_rate=0.01, batch_size=50)\n\n model_training = training.EarlyStoppingRegularization(model,\n training_data,\n validation_data,\n test_data,\n max_steps_without_progression=2)\n result = model_training.train()\n\n result.save('models/mnist')", "def main():\n\n NUM_TRAIN = noise.init_train_thresh\n NUM_TEST = 20\n XDIM = 1\n\n # Train the emulator\n x_train = np.random.uniform(size=(NUM_TRAIN, XDIM))\n y_train = np.array([noise(x) for x in x_train])\n\n # Output error estimates\n noise.output_err = True\n\n # Get values from the trained emulator\n x_emu = np.random.uniform(size=(NUM_TEST, XDIM))\n\n y_emu = np.zeros_like(x_emu)\n y_err = np.zeros_like(x_emu)\n\n for i, x in enumerate(x_emu):\n val, err = noise(x)\n y_emu[i] = val\n y_err[i] = err\n\n # Plot the results\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.scatter(x_train[:, 0], y_train, marker=\"+\", label=\"training values\")\n ax.errorbar(\n x_emu,\n y_emu[:, 0],\n yerr=y_err.flatten(),\n linestyle=\"None\",\n marker=\"o\",\n capsize=3,\n label=\"emulator\",\n color=\"red\",\n )\n\n ax.legend()\n\n # `__file__` is undefined when running in sphinx\n try:\n fig.savefig(__file__ + \".png\")\n except NameError:\n pass", "def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)", "def test_mlp(learning_rate=.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=150,\n dataset='mnist.pkl.gz', batch_size=20, n_hidden=100):\n #Note - transfer is used to check whether test_mlp is running for the first time with new weights or second time with transferred weights\n #Transfer is initialized to be false.\n #a transfer in the if statement will run the code for the Letters data set first and Numbers data set second.\n #(Not transfer) will run the code for the Numbers data set first and Letters data set second. 
\n\n #CHANGE FLAG - edit order datasets are run in and dataset name\n if(transfer):\n #datasets = load_data(dataset)\n f = open('HSFNums.p','rb')\n datasets = pickle.load(f)\n\n else:\n #datasets = getHSF()\n f = open('HSFLetters2.p','rb')\n datasets = pickle.load(f)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n f.close()\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n #total size of valid data is printed\n print 'This is the vector size of the inputs' #\n print train_set_x.get_value(borrow=True).shape #\n print n_train_batches #\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\n\n #Data reduction\n if(transfer):\n train_set_x = train_set_x[0:int(1.0*n_train_batches*batch_size),:]\n train_set_y = train_set_y[0:int(1.0*n_train_batches*batch_size)]\n\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... building the model'\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n x = T.matrix('x') # the data is presented as rasterized images\n y = T.ivector('y') # the labels are presented as 1D vector of\n # [int] labels\n\n rng = numpy.random.RandomState(1234)\n\n # construct the MLP class\n #problem is you can't pass weights through here, b/c of gradient descent\n #algorithms use these parameters\n\n #Numbers have 10 classifications, Letters have 26 classifications.\n #transfer is initialized as false, so depending on which dataset should be run first, edit this\n #CHANGE FLAG - edit the order the network trains in and the number of outputs (n_out)\n if(transfer):\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=28 * 28,\n n_hidden=n_hidden,\n n_out=10\n )\n else:\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=28 * 28,\n n_hidden=n_hidden,\n n_out=26\n )\n\n # the cost we minimize during training is the negative log likelihood of\n # the model plus the regularization terms (L1 and L2); cost is expressed\n # here symbolically\n cost = (\n classifier.negative_log_likelihood(y)\n + L1_reg * classifier.L1\n + L2_reg * classifier.L2_sqr\n )\n\n # compiling a Theano function that computes the mistakes that are made\n # by the model on a minibatch\n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * batch_size:(index + 1) * batch_size],\n y: test_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * batch_size:(index + 1) * batch_size],\n y: valid_set_y[index * batch_size:(index + 1) * batch_size]\n }\n )\n\n # compute the gradient of cost with respect to theta (stored in params)\n # the resulting gradients will be stored in a list gparams\n gparams = [T.grad(cost, param) for param in classifier.params]\n\n # specify how to update the parameters of the model as a list of\n # (variable, update expression) pairs\n\n # given two lists of the same length, A = [a1, a2, a3, a4] and\n # B = [b1, b2, b3, b4], zip generates a list C of same size, where each\n # element is a pair formed from the two lists :\n # C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]\n updates = [\n (param, param - learning_rate * gparam)\n for param, gparam in zip(classifier.params, 
gparams)\n ]\n\n # compiling a Theano function `train_model` that returns the cost, but\n # in the same time updates the parameter of the model based on the rules\n # defined in `updates`\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #CHANGE FLAG - edit based on the order the network rusn in and the input file name\n inputSize=100 #number of input images sampled from next dataset for transfer calculations\n if(not transfer):\n #f2 = open('HSFLetters2.p','rb')\n #f2 can be changed based on whether letters should be transferred to numbers or v.c.\n f2 = open('HSFNums.p','rb')\n datasetsTransfer = pickle.load(f2)\n train_set_x2, train_set_y2 = datasetsTransfer[0]\n inputs=train_set_x2.get_value(borrow=True) #inputs\n f2.close()\n \n ###############\n # TRAIN MODEL #\n ###############\n print '... training'\n\n # early-stopping parameters\n patience = 10000 # look as this many examples regardless\n patience_increase = 2 # wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience / 2)\n # go through this many\n # minibatches before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_validation_loss = numpy.inf\n best_iter = 0\n test_score = 0.\n start_time = timeit.default_timer()\n\n epoch = 0\n done_looping = False\n\n\n #opening files to print validation error to\n if(not transfer):\n outFile = open('out.txt','w')\n else:\n outFile = open('outTransfer.txt','w')\n\n\n #Inserted code for printing out validation after randomization\n validation_losses = [validate_model(i) for i\n in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n outFile.write(str(this_validation_loss*100)) #printing the error out to the file, turned to string b/c still using write function\n outFile.write('\\n')\n\n\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in xrange(n_train_batches):\n\n minibatch_avg_cost = train_model(minibatch_index)\n # iteration number\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute zero-one loss on validation set\n validation_losses = [validate_model(i) for i\n in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n outFile.write(str(this_validation_loss*100)) #printing the error out to the file, turned to string b/c still using write function\n outFile.write('\\n')\n print(\n 'epoch %i, minibatch %i/%i, validation error %f %%' %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n #improve patience if loss improvement is good enough\n if (\n this_validation_loss < best_validation_loss *\n improvement_threshold\n ):\n patience = max(patience, iter * patience_increase)\n\n best_validation_loss = this_validation_loss\n best_iter = iter\n\n # test it on the test set\n test_losses = [test_model(i) for i\n in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print((' epoch %i, minibatch %i/%i, test error of '\n 'best model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n test_score * 
100.))\n\n if patience <= iter:\n done_looping = True\n break\n #closing file\n outFile.close()\n end_time = timeit.default_timer()\n print(('Optimization complete. Best validation score of %f %% '\n 'obtained at iteration %i, with test performance %f %%') %\n (best_validation_loss * 100., best_iter + 1, test_score * 100.))\n print >> sys.stderr, ('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.))\n\n\n\n #Goal of block: Calculate hidden node activations and find which weights to transfer\n # Create global theano shared variable for the weights to transfer\n if(not transfer):\n \n #Set threshold to determine bounds for activated nodes - Weights leading to activated nodes with absolute values >= threshold\n #will be copied over. Other weights are re-initialized.\n threshold = 0.0\n n_in = 28*28\n #inputs are passed from the train_set_x above\n hidden1W = classifier.hiddenLayer.W.get_value()\n hidden1Wcopy = hidden1W\n #Making a copy of the first hidden layer of weights to be used in calculations for second hidden lyaer of weights\n aveList = []\n #aveList represents the average hidden node activations for layer 1\n print 'starting transfer calculations'\n for i in range(0,n_hidden):\n x = 0\n for j in range(0,inputSize):\n #Design choice to use absolute value b/c a positive activation and a negative activation were both considered important\n x += abs(numpy.tanh(numpy.tensordot(inputs[j,:],hidden1W[:,i],axes=1)))\n aveList.append(x/inputSize)\n\n print 'ending calculation'\n\n count = 0\n for i in range(0,n_hidden):\n \n if(aveList[i] < threshold):\n #If the activation is below the threshold, then the weights corresponding leading to that hidden node will be reinitialized\n hidden1W[:,i] = numpy.asarray(\n rng.uniform(\n low=-numpy.sqrt(6. / (n_in + n_hidden)),\n high=numpy.sqrt(6. / (n_in + n_hidden)),\n size=(n_in,1)\n ),\n dtype=theano.config.floatX\n ).flatten()\n else:\n count+=1\n print 'A total number of ' + str(count) + ' H1 nodes passed the threshold'\n \n #saving count of hidden nodes\n outFile3 = open('transfer.txt','w')\n outFile3.write(str(count))\n outFile3.write('\\n')\n\n\n\n hidden1Act = numpy.zeros((1,n_hidden))\n #Making a dummy hidden layer variable to edit\n\n #now for the next hidden layer :)\n hidden2W = classifier.hiddenLayer2.W.get_value()\n aveList = []\n #aveList here represents the average hidden node activations for layer 2\n print 'starting next hidden layer calculation'\n for i in range(0,n_hidden):\n x = 0\n for j in range(0,inputSize):\n for k in range(0,n_hidden):\n hidden1Act[0][k] = numpy.tanh(numpy.tensordot(inputs[j,:],hidden1Wcopy[:,k],axes=1))\n x += abs(numpy.tanh(numpy.tensordot(hidden1Act[0,:],hidden2W[:,i],axes=1)))\n aveList.append(x/inputSize)\n print 'ending hidden 2 calculation'\n count = 0\n for i in range(0,n_hidden):\n if(aveList[i] < threshold):\n hidden2W[:,i] = numpy.asarray(\n rng.uniform(\n low=-numpy.sqrt(6. / (n_hidden + n_hidden)),\n high=numpy.sqrt(6. / (n_hidden + n_hidden)),\n size = (n_hidden,1)\n ),\n dtype=theano.config.floatX\n ).flatten()\n else:\n count += 1\n print 'A total number of ' + str(count) + ' H2 nodes passed the threshold'\n\n outFile3.write(str(count))\n outFile3.close()\n\n\n #3 global variables exist. 
tensor and tensor2 variables are the global theano shared variables for the weights.\n #During the next run, the MLP will be initialized with these weights thereby transferring the weights from this run.\n global transfer\n transfer = True\n global tensor\n global tensor2\n tensor = theano.shared(value=hidden1W,name = 'W', borrow=True)\n tensor2 = theano.shared(value = hidden2W, name = 'tensor2', borrow=True)\n\n test_mlp() \n else:\n print 'Thank you for running this transfer program'\n print 'Below are descriptions of files that have been created'\n print 'out.txt - validation error while training'\n print 'outTransfer.txt - validation error while training after transfer learning'\n print 'transfer.txt - number of hidden nodes transferred in each layer'", "def utest_SGD_Test():\n model_fname = \"../work/model\"\n # test binary classification.\n if False:\n #test_fname = \"../work/train.bz2\"\n test_fname = \"../work/rcv1_test.binary.bz2\"\n if True:\n test_fname = \"../work/iris_multi.train\"\n test_logreg(model_fname,test_fname,prob=True,acc=True)\n pass", "def main():\n # Call testing function\n testMinivan()", "def test1():\n for test in pkl.load(open(TEST_RESOURCES_DIR / \"regression_vault.pkl\", \"rb\"))[:5]:\n init_dict, rslt = test\n np.testing.assert_array_equal(run_regression_test(init_dict), rslt)", "def get_results():\r\n #Get python results\r\n import mnist_nn\r\n import mnist_nn_gpu\r\n mnist_nn.save_results()\r\n mnist_nn_gpu.save_results()\r\n\r\n #Get cpp results\r\n import subprocess\r\n subprocess.call(['c++//./run.sh'])", "def test_mnir_image():\n # Initiate the sunglint correction class\n g = deglint.GlintCorr(odc_meta_file, sub_product)\n\n # ---------------------- #\n # NIR subtraction #\n # ---------------------- #\n mnir_xarrlist = g.glint_subtraction(\n vis_bands=[\"3\"],\n corr_band=\"6\",\n water_val=5,\n )\n\n sungc_band = mnir_xarrlist[0].lmbadj_green.values # 3D array\n\n # path to expected sunglint corrected output from NIR subtraction\n exp_sungc_band = (\n data_path\n / \"MINUS_NIR\"\n / \"ga_ls8c_lmbadj_3-2-0_091086_2014-11-06_final_band03-deglint-600m.tif\"\n )\n\n # ensure that all valid sungint corrected pixels match expected\n with rasterio.open(exp_sungc_band, \"r\") as exp_sungc_ds:\n urd_band = urd(sungc_band[0, :, :], exp_sungc_ds.read(1), exp_sungc_ds.nodata)\n assert urd_band.max() < 0.001", "def load_data(m=5000, n=100, path='D:/file/vscode/py/data/mnist.npz'):\r\n f = np.load(path)\r\n x_train, y_train = f['x_train'], f['y_train']\r\n\r\n x_test, y_test = f['x_test'], f['y_test']\r\n\r\n f.close()\r\n return (x_train, y_train), (x_test, y_test)", "def test_imsim():\n import yaml\n import astropy.units as u\n import matplotlib.pyplot as plt\n from tqdm import tqdm\n # Need these for `eval` below\n from numpy import array\n import coord\n\n with open(DATA_DIR / \"wcs_466749.yaml\", 'r') as f:\n wcss = yaml.safe_load(f)\n\n cmds = {}\n with open(DATA_DIR / \"phosim_cat_466749.txt\", 'r') as f:\n for line in f:\n k, v = line.split()\n try:\n v = int(v)\n except ValueError:\n try:\n v = float(v)\n except ValueError:\n pass\n cmds[k] = v\n\n # Values below (and others) from phosim_cat_466749.txt\n rc = cmds['rightascension']\n dc = cmds['declination']\n boresight = galsim.CelestialCoord(\n rc*galsim.degrees,\n dc*galsim.degrees\n )\n obstime = Time(cmds['mjd'], format='mjd', scale='tai')\n obstime -= 15*u.s\n band = \"ugrizy\"[cmds['filter']]\n wavelength_dict = dict(\n u=365.49,\n g=480.03,\n r=622.20,\n i=754.06,\n z=868.21,\n y=991.66\n )\n 
wavelength = wavelength_dict[band]\n camera = imsim.get_camera()\n\n rotTelPos = cmds['rottelpos'] * galsim.degrees\n telescope = imsim.load_telescope(f\"LSST_{band}.yaml\", rotTelPos=rotTelPos)\n # Ambient conditions\n # These are a guess.\n temperature = 293.\n pressure = 69.0\n H2O_pressure = 1.0\n\n # Start by constructing a refractionless factory, which we can use to\n # cross-check some of the other values in the phosim cmd file.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=0.0,\n H2O_pressure=H2O_pressure\n )\n\n aob, zob, hob, dob, rob, eo = factory._ICRF_to_observed(\n boresight.ra.rad, boresight.dec.rad, all=True\n )\n np.testing.assert_allclose(\n np.rad2deg(aob)*3600, cmds['azimuth']*3600,\n rtol=0, atol=2.0\n )\n np.testing.assert_allclose(\n (90-np.rad2deg(zob))*3600, cmds['altitude']*3600,\n rtol=0, atol=6.0,\n )\n q = factory.q * galsim.radians\n rotSkyPos = rotTelPos - q\n # Hmmm.. Seems like we ought to be able to do better than 30 arcsec on the\n # rotator? Maybe this is defined at a different point in time? Doesn't seem\n # to affect the final WCS much though.\n np.testing.assert_allclose(\n rotSkyPos.deg*3600, cmds['rotskypos']*3600,\n rtol=0, atol=30.0,\n )\n\n # We accidentally simulated DC2 with the camera rotated 180 degrees too far.\n # That includes the regression test data here. So to fix the WCS code, but\n # still use the same regression data, we need to add 180 degrees here. Just\n # rotate the camera by another 180 degrees\n telescope = telescope.withLocallyRotatedOptic(\n \"LSSTCamera\", batoid.RotZ(np.deg2rad(180))\n )\n\n # For actual WCS check, we use a factory that _does_ know about refraction.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=pressure,\n H2O_pressure=H2O_pressure\n )\n\n do_plot = False\n my_centers = []\n imsim_centers = []\n if do_plot:\n _, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 12))\n i = 0\n r1 = []\n d1 = []\n r2 = []\n d2 = []\n rng = np.random.default_rng(1234)\n for k, v in tqdm(wcss.items()):\n name = k[18:25].replace('-', '_')\n det = camera[name]\n cpix = det.getCenter(cameraGeom.PIXELS)\n\n wcs = factory.getWCS(det, order=2)\n wcs1 = eval(v)\n # Need to adjust ab parameters to new GalSim convention\n wcs1.ab[0,1,0] = 1.0\n wcs1.ab[1,0,1] = 1.0\n\n my_centers.append(wcs.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n imsim_centers.append(wcs1.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n\n corners = det.getCorners(cameraGeom.PIXELS)\n xs = np.array([corner.x for corner in corners])\n ys = np.array([corner.y for corner in corners])\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n if i == 0:\n labels = ['batoid', 'PhoSim']\n else:\n labels = [None]*2\n if do_plot:\n ax.plot(ra1, dec1, c='r', label=labels[0])\n ax.plot(ra2, dec2, c='b', label=labels[1])\n\n # add corners to ra/dec check lists\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n # Add some random points as well\n xs = rng.uniform(0, 4000, 100)\n ys = rng.uniform(0, 4000, 100)\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n i += 1\n\n if do_plot:\n ax.legend()\n xlim = ax.get_xlim()\n ax.set_xlim(xlim[1], xlim[0])\n plt.show()\n\n dist = sphere_dist(r1, d1, r2, d2)\n print(\"sphere 
dist mean, max, std\")\n print(\n np.rad2deg(np.mean(dist))*3600,\n np.rad2deg(np.max(dist))*3600,\n np.rad2deg(np.std(dist))*3600,\n )\n np.testing.assert_array_less(\n np.rad2deg(np.mean(dist))*3600,\n 5.0\n )\n if do_plot:\n plt.hist(np.rad2deg(dist)*3600, bins=100)\n plt.show()\n\n if do_plot:\n r1 = np.array([c.ra.rad for c in my_centers])\n d1 = np.array([c.dec.rad for c in my_centers])\n r2 = np.array([c.ra.rad for c in imsim_centers])\n d2 = np.array([c.dec.rad for c in imsim_centers])\n cd = np.cos(np.deg2rad(cmds['declination']))\n q = plt.quiver(r1, d1, np.rad2deg(r1-r2)*3600*cd, np.rad2deg(d1-d2)*3600)\n plt.quiverkey(q, 0.5, 1.1, 5.0, \"5 arcsec\", labelpos='E')\n plt.show()", "def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, x5d))\n test_set = np.vstack((x45d, train_set))\n recalled_set = hop.recall(test_set)\n for i in range(test_set.shape[0]):\n show_tested(test_set[i], recalled_set[i])", "def pick_data(ns, digits):\n f = gzip.open('data/mnist.pkl.gz', 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n images, labels = train_set\n\n originals = []; \n shapes = []; \n true_labels = [];\n i = 0\n for n, d in zip(ns, digits):\n # picking n elements with digit d\n x = np.where(labels==d)[0]\n idx = np.random.choice(x, n, replace=False)\n imgs = images[idx]\n originals.append(imgs)\n contours = [mnistshape.get_shape2(im.reshape((28,28)), n=30, s=5, ir=2)\n for im in imgs]\n shapes.append(contours)\n true_labels.append([i]*n)\n i += 1\n originals = np.concatenate(originals)\n true_labels = np.concatenate(true_labels)\n \n new_shapes = []\n for cluster in shapes:\n for shape in cluster:\n new_shapes.append(shape)\n new_shapes = np.array(new_shapes)\n\n # return shuffled data\n idx = range(len(originals))\n np.random.shuffle(idx)\n return originals[idx], new_shapes[idx], true_labels[idx]", "def test_X_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X.equals(atom.mnb.X)\n assert check_scaling(atom.lr.X)", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n 
y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def get_mini_samples():\n if GLOBALS['project_root']=='':\n print('please initialize project_root in GLOBALS first')\n return None\n data_path = os.path.join(GLOBALS['project_root'], 'data/MNIST/')\n pickle_path = os.path.join(data_path, 'mnist_mini_samples.pickle')\n if os.path.exists(pickle_path):\n with open(pickle_path, 'rb') as f:\n mini_samples = pickle.load(f)\n else:\n mnist = get_mnist()\n mini_samples = mnist.train.next_batch(50)\n with open(pickle_path, 'wb') as f:\n pickle.dump(mini_samples, f, pickle.HIGHEST_PROTOCOL)\n\n return mini_samples", "def codeepneat_mnist_example(_):\n # Set standard configuration specific to TFNE but not the neuroevolution process\n logging_level = logging.INFO\n config_file_path = './codeepneat_mnist_example_config.cfg'\n backup_dir_path = './tfne_state_backups/'\n max_generations = 20\n max_fitness = None\n\n # Read in optionally supplied flags, changing the just set standard configuration\n if flags.FLAGS.logging_level is not None:\n logging_level = flags.FLAGS.logging_level\n if flags.FLAGS.config_file is not None:\n config_file_path = flags.FLAGS.config_file\n if flags.FLAGS.backup_dir is not None:\n backup_dir_path = flags.FLAGS.backup_dir\n if flags.FLAGS.max_generations is not None:\n max_generations = flags.FLAGS.max_generations\n if flags.FLAGS.max_fitness is not None:\n max_fitness = flags.FLAGS.max_fitness\n\n # Set logging, parse config\n logging.set_verbosity(logging_level)\n config = tfne.parse_configuration(config_file_path)\n\n # Initialize the environment and the specific NE algorithm\n environment = tfne.environments.MNISTEnvironment(weight_training=True, config=config, verbosity=logging_level)\n ne_algorithm = tfne.algorithms.CoDeepNEAT(config)\n\n # Initialize evolution engine and supply config as well as initialized NE algorithm and evaluation environment.\n engine = tfne.EvolutionEngine(ne_algorithm=ne_algorithm,\n environment=environment,\n backup_dir_path=backup_dir_path,\n max_generations=max_generations,\n max_fitness=max_fitness)\n\n # Start training process, returning the best genome when training ends\n best_genome = engine.train()\n print(\"Best genome returned by evolution:\\n\")\n print(best_genome)\n\n # Increase epoch count in environment for a final training of the best genome. 
Train the genome and then replay it.\n print(\"Training best genome for 200 epochs...\\n\")\n environment.epochs = 20\n environment.eval_genome_fitness(best_genome)\n environment.replay_genome(best_genome)\n\n # Serialize and save genotype and Tensorflow model to demonstrate serialization\n best_genome.save_genotype(save_dir_path='./best_genome_genotype/')\n best_genome.save_model(file_path='./best_genome_model/')", "def test_Gaussian_NB_estimators():", "def fixture_sim():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tsim = read.load_sim(EXAMPLE_FILE_FOLDER)\n\treturn sim", "def _test_examples(self):\n checks = [\n (\n \"ex5_line-of-sight_solution\",\n [r\"RAJA sequential\", r\"RAJA OpenMP\", r\"result -- PASS\"],\n ),\n (\n \"ex6_stencil-offset-layout_solution\",\n [r\"RAJA Views \\(permuted\\)\", r\"result -- PASS\"],\n ),\n (\n \"ex8_tiled-matrix-transpose_solution\",\n [r\"parallel top inner loop\", r\"collapsed inner loops\", r\"result -- PASS\"],\n ),\n (\"kernel-dynamic-tile\", [r\"Running index\", r\"(24,24)\"]),\n (\"plugin-example\", [r\"Launching host kernel for the 10 time\"]),\n (\"tut_batched-matrix-multiply\", [r\"result -- PASS\"]),\n (\"wave-eqn\", [r\"Max Error = 2\", r\"Evolved solution to time\"]),\n ]\n for exe, expected in checks:\n reason = \"test: checking output of {0} for {1}\".format(exe, expected)\n self.run_test(\n exe,\n [],\n expected,\n installed=False,\n purpose=reason,\n skip_missing=True,\n work_dir=self._extra_tests_path,\n )", "def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def test_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.test.equals(atom.mnb.test)\n assert check_scaling(atom.lr.test)", "def train_mnist():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = MNIST_TRAIN(path=Config.video_folder)\r\n model = LSAMNIST(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='mnist.txt')\r\n helper.train_one_class_classification()", "def run_tests(): \n \n\n nextdata = [[21, 61, 42, 30], [33,45, 18, 29]]\n\n for xval, yval, snum, expect in nextdata:\n\n pmachine = PMachine()\n pmachine.serial_number = snum\n pmachine.run2_completion()\n result = pmachine.calc_square_total(xval, yval, showsquare=True)\n assert result == expect\n print(\"Got value {}={} as expected\".format(result, expect))", "def test_wf_ndst_8(plugin):\n wf = Workflow(name=\"wf_ndst_8\", input_spec=[\"x\", \"y\"])\n wf.add(add2(name=\"add2x\", x=wf.lzin.x).split(\"x\"))\n wf.add(add2(name=\"add2y\", x=wf.lzin.y).split(\"x\"))\n wf.add(\n multiply(name=\"mult\", x=wf.add2x.lzout.out, y=wf.add2y.lzout.out).combine(\n \"add2y.x\"\n )\n )\n wf.inputs.x = [1, 2, 3]\n wf.inputs.y = [11, 12]\n\n wf.set_output([(\"out\", wf.mult.lzout.out)])\n wf.plugin = plugin\n\n with Submitter(plugin=plugin) as sub:\n sub.run(wf)\n\n # checking the results\n while not wf.done:\n sleep(1)\n results = wf.result()\n\n assert len(results.output.out) == 3\n assert results.output.out[0] == [39, 42]\n assert results.output.out[1] == [52, 56]\n assert results.output.out[2] == [65, 70]", "def RunTest():\n #800nm\n RunData(g.glob('testdata/15*.fits'), out='test800nm')\n forwardModelJointFit(g.glob('testdata/15*.fits'), out='test800nmJoint', 
wavelength='800nm')\n _plotDifferenceIndividualVsJoined(individuals='results/test800nm?.pkl', joined='results/test800nmJoint.pkl',\n title='800nm')\n #700nm\n RunData(g.glob('testdata/17*.fits'), out='test700nm')\n forwardModelJointFit(g.glob('testdata/17*.fits'), out='test700nmJoint', wavelength='700nm')\n _plotDifferenceIndividualVsJoined(individuals='results/test700nm?.pkl', joined='results/test700nmJoint.pkl',\n title='700nm')", "def make_rmnist(n=10):\n td, vd, ts = load_data()\n indices = range(50000)\n random.shuffle(indices)\n values = [(j, td[1][j]) for j in indices]\n indices_subset = [[v[0] for v in values if v[1] == j][:n]\n for j in range(10)]\n flattened_indices = [i for sub in indices_subset for i in sub]\n random.shuffle(flattened_indices)\n td0_prime = [td[0][j] for j in flattened_indices]\n td1_prime = [td[1][j] for j in flattened_indices]\n td_prime = (td0_prime, td1_prime)\n\n train_data = td_prime[0]\n train_labels = td_prime[1]\n val_data = vd[0]\n val_labels = vd[1]\n test_data = ts[0]\n test_labels = ts[1]\n\n fname = 'data/rmnist_'+str(n)\n np.savez(fname,\n train_data = train_data,\n train_labels = train_labels,\n val_data = val_data,\n val_labels = val_labels,\n test_data = test_data,\n test_labels = test_labels)", "def get_data(numbers):\r\n numbers = numbers\r\n n_classes = len(numbers)\r\n z = zipfile.ZipFile('lab3/mnist.pkl.zip', 'r')\r\n k = z.extract('mnist.pkl') # Извлечь файл из архива\r\n with open(k, 'rb') as f:\r\n train_set, _, test_set = pickle.load(f, encoding=\"bytes\")\r\n x_train = train_set[0]\r\n x_test = test_set[0]\r\n x_train[x_train >= 0.5] = 1\r\n x_train[x_train < 0.5] = 0\r\n x_test[x_test >= 0.5] = 1\r\n x_test[x_test < 0.5] = 0\r\n y_train = train_set[1]\r\n y_test = test_set[1]\r\n idx_train = [[np.where(y_train == i)] for i in numbers]\r\n idx_test = [[np.where(y_test == i)] for i in numbers]\r\n idx_x_train = [x_train[idx_train[i][0]] for i in range(len(idx_train))]\r\n idx_x_test = [x_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n idx_y_test = [y_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n x_train_new = shuffle(np.concatenate(idx_x_train))\r\n x_test_new = shuffle(np.concatenate(idx_x_test))\r\n y_test_new = shuffle(np.concatenate(idx_y_test))\r\n return x_train_new, x_test_new, y_test_new, numbers, n_classes", "def test_synth_tr():\n test_path = tempfile.mkdtemp()\n x_train, metadata = synth_tr(test_path)\n try:\n assert x_train.shape == (250, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def dummy(args):\n\n task_ids = {'1': LossTypes.mse, '2': LossTypes.mse, '3': LossTypes.cross_entropy}\n input_dimension = 5000 # Dimensionality of each training set\n num_inputs_train = 750\n num_inputs_validate = 100\n num_inputs_test = 150\n\n # Training set\n x_train = np.random.random((num_inputs_train, input_dimension))\n y_train = {}\n\n # Validation set\n x_validate = np.random.random((num_inputs_validate, input_dimension))\n y_validate = {}\n\n # Testing set\n x_test = np.random.random((num_inputs_test, input_dimension))\n y_test = {}\n\n for task_id, loss_type in task_ids.iteritems():\n if loss_type is LossTypes.mse:\n y_train[task_id] = np.random.random((num_inputs_train, 1))\n y_validate[task_id] = np.random.random((num_inputs_validate, 1))\n y_test[task_id] = np.random.random((num_inputs_test, 1))\n elif loss_type is LossTypes.cross_entropy:\n # Training labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_train).reshape(1, num_inputs_train)\n 
y_train[task_id] = convert_to_one_hot(labels)\n\n # Validation labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_validate).reshape(1, num_inputs_validate)\n y_validate[task_id] = convert_to_one_hot(labels)\n\n # Testing labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_test).reshape(1, num_inputs_test)\n y_test[task_id] = convert_to_one_hot(labels)\n\n exp = Experiment(expt_name=\"synthetic\", task_ids=task_ids, x_train=x_train, x_validate=x_validate,\n x_test=x_test, y_train=y_train, y_validate=y_validate, y_test=y_test,\n model_class=LowLevelSharingModel, learning_rate=args.learning_rate,\n batch_size=args.batch_size, num_epochs=args.num_epochs)\n exp.initialize_network()\n exp.train()\n sys.stderr.write(\"Training complete. Logs, outputs, and model saved in \" + os.getcwd())", "def test_compute_glycemic_load(self):\n pass", "def run_example(num_points_to_sample=20, verbose=True, **kwargs):\n exp = Experiment([[0, 2], [0, 4]]) # 2D experiment, we build a tensor product domain\n # Bootstrap with some known or already sampled point(s)\n exp.historical_data.append_sample_points([\n SamplePoint([0, 0], function_to_minimize([0, 0]), 0.05), # Iterables of the form [point, f_val, f_var] are also allowed\n ])\n\n # Sample num_points_to_sample points\n for _ in range(num_points_to_sample):\n # Use MOE to determine what is the point with highest Expected Improvement to use next\n next_point_to_sample = gp_next_points(exp, **kwargs)[0] # By default we only ask for one point\n # Sample the point from our objective function, we can replace this with any function\n value_of_next_point = function_to_minimize(next_point_to_sample)\n\n if verbose:\n print \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point)\n\n # Add the information about the point to the experiment historical data to inform the GP\n exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)]) # We can add some noise", "def Main():\n\n\n Matrice = [[0 for col in range(tailleM)] for row in range(tailleM)]\n result = 0\n tour = 1\n while result==0: \n \n temp = MiniMaxDecision(copy.deepcopy(Matrice), tailleM,0,0,copy.deepcopy(tour))\n Matrice = Result(Matrice,temp[1],temp[2],1)\n printMat(Matrice, tailleM)\n result = TerminalTest(Matrice)\n tour +=1\n userinput(Matrice, tailleM)\n printMat(Matrice, tailleM)\n result = TerminalTest(Matrice)\n tour +=1\n \n printMat(Matrice, tailleM)", "def gen_simple_test():\n count = 1\n mdict = {\n 'operating_frequency': 3e8,\n 'sample_rate': 8e3,\n 'signal': [1] * 5,\n 'origin_pos': [1000, 0, 0],\n 'dest_pos': [300, 200, 50],\n 'origin_vel': [0] * 3,\n 'dest_vel': [0] * 3,\n }\n io.savemat('{}{}_input'.format(tests_path, count), mdict)", "def get_mnist():\n from keras.datasets import mnist\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n 
input_shape = (img_rows, img_cols, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def run(gens, version):\n pop = neat.population.Population(CONFIG)\n stats = neat.statistics.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.reporting.StdOutReporter(True))\n\n global DYNAMIC_PARAMS\n DYNAMIC_PARAMS = params(version)\n\n winner = pop.run(eval_fitness, gens)\n print(f\"es_hyperneat_xor_{VERSION_TEXT} done\")\n return winner, stats", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def wild_test(img, mod):\n img = cv2.imread(img)\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img_resize = cv2.resize(img_gray, (28, 28))\n img_resize = img_resize.reshape((1, 28, 28))\n print (\"Image size\", img_resize.shape)\n # it is ugly, you can make this much better\n data = np.asarray([img_resize]*100)\n test_iter = mx.io.NDArrayIter(data, None, 100)\n prob = mod.predict(test_iter)\n print (\"The prediction is :\", np.argmax(prob.asnumpy()[0]))" ]
[ "0.6889731", "0.6572477", "0.6475336", "0.6470662", "0.63731503", "0.6351866", "0.62574834", "0.62303245", "0.6214079", "0.61291355", "0.6029285", "0.6016668", "0.601599", "0.6014156", "0.59936774", "0.5969918", "0.595324", "0.5940546", "0.59249955", "0.5917552", "0.58708", "0.5852096", "0.5836193", "0.5835081", "0.5788991", "0.57672447", "0.5757812", "0.5727704", "0.5723906", "0.5711445", "0.5688242", "0.56738627", "0.5672134", "0.5652877", "0.56421626", "0.56303865", "0.5606894", "0.56036067", "0.55937445", "0.5585313", "0.55843043", "0.55813795", "0.5576688", "0.5561642", "0.55504686", "0.5543815", "0.554308", "0.55323386", "0.55269194", "0.5525398", "0.5521008", "0.55181575", "0.5500016", "0.5488591", "0.5487649", "0.5484237", "0.5480651", "0.54791033", "0.5477469", "0.54745686", "0.546711", "0.5454658", "0.54501647", "0.54461527", "0.5436104", "0.54346323", "0.54334223", "0.54282826", "0.5427783", "0.5422552", "0.5419721", "0.54114324", "0.54087865", "0.5408049", "0.5406579", "0.5406004", "0.5403019", "0.5391363", "0.5386287", "0.53811055", "0.5380097", "0.5379187", "0.5372314", "0.5369747", "0.5365387", "0.53617287", "0.53574705", "0.53547317", "0.53533447", "0.53461796", "0.53391784", "0.53378093", "0.53325003", "0.5332289", "0.53311956", "0.5330723", "0.53290445", "0.53279996", "0.5322562", "0.53122485" ]
0.69393843
0
MigrateListingResponse a model defined in Swagger
def __init__(self, errors=None, inventory_item_group_key=None, inventory_items=None, listing_id=None, marketplace_id=None, status_code=None, warnings=None): # noqa: E501 # noqa: E501 self._errors = None self._inventory_item_group_key = None self._inventory_items = None self._listing_id = None self._marketplace_id = None self._status_code = None self._warnings = None self.discriminator = None if errors is not None: self.errors = errors if inventory_item_group_key is not None: self.inventory_item_group_key = inventory_item_group_key if inventory_items is not None: self.inventory_items = inventory_items if listing_id is not None: self.listing_id = listing_id if marketplace_id is not None: self.marketplace_id = marketplace_id if status_code is not None: self.status_code = status_code if warnings is not None: self.warnings = warnings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_response_paginated(paginator: PaginationBase, op: Operation) -> None:\n status_code, item_schema = _find_collection_response(op)\n\n # Switching schema to Output schema\n try:\n new_name = f\"Paged{item_schema.__name__}\"\n except AttributeError:\n new_name = f\"Paged{str(item_schema).replace('.', '_')}\" # typing.Any case\n\n new_schema = type(\n new_name,\n (paginator.Output,),\n {\n \"__annotations__\": {paginator.items_attribute: List[item_schema]}, # type: ignore\n },\n ) # typing: ignore\n\n response = op._create_response_model(new_schema)\n\n # Changing response model to newly created one\n op.response_models[status_code] = response", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(MigrateListingResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def to_representation(self, instance):\n # Load the paginated descendant features\n if instance is None:\n # This happens when OPTIONS is called from browsable API\n return None\n self.add_sources(instance)\n\n ret = OrderedDict()\n fields = self._readable_fields\n\n for field in fields:\n attribute = field.get_attribute(instance)\n assert attribute is not None, (\n 'field.get_attribute return None for instance %s, field %s'\n % (instance, field))\n field_ret = field.to_representation(attribute)\n if isinstance(field, ListSerializer):\n # Wrap lists of related resources in a ReturnList, so that the\n # renderer has access to the serializer\n field_ret = ReturnList(field_ret, serializer=field)\n ret[field.field_name] = field_ret\n\n return ReturnDict(ret, serializer=self)", "def specialist_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)", "def _list(api_list_class, arg_namespace, **extra):\n if arg_namespace.starting_point:\n ordering_field = (arg_namespace.ordering or '').lstrip('-')\n if ordering_field in ('', 'datetime_uploaded', 'datetime_created'):\n arg_namespace.starting_point = parser.parse(\n arg_namespace.starting_point)\n\n items = api_list_class(\n starting_point=arg_namespace.starting_point,\n ordering=arg_namespace.ordering,\n limit=arg_namespace.limit,\n request_limit=arg_namespace.request_limit,\n **extra\n )\n items.constructor = lambda x: x\n\n try:\n pprint(list(items))\n except ValueError as e:\n print(e)", "def list(self, request):\n queryset = self.get_queryset()\n serializer = self.serializer_class(queryset, many=True)\n return Response(serializer.data)", "def docter_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)", "def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return 
Collection(db).process(request,response,rules)", "def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)", "def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def _create_response_model(self, data):\n pass", "def process_resource_listing_api(self, resources, listing_api, context):\n pass", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_defects_responder(self):\n pass", "def list(self, request):\n a_viewset = [\n 'uses actions (list,create,retreive,update,partial_update)',\n 'Automatically maps to URLs using routers',\n 'provides more functionality with less code',\n ]\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "def fake_generic_listing(object_list, object_type):\n\n return {\n 'data': object_list,\n 'has_more': False,\n 'object': 'list',\n 'total_count': len(object_list),\n 'url': '/v1/{}s'.format(object_type),\n }", "def test_list_format(self) -> None:\n r = self.perform_request('list', False)\n self.assert_json_schema(r.json(), self.get_list_schema())", "def index():\n return make_json_response(ENDPOINT_LIST)", "def list(self, request):\n\n a_viewset = [\n 'Uses action (list, create, reteieve, update, partial_update)',\n 'Automatically maps the urls using routers',\n 'provide more functionality with less code',\n ]\n\n return Response({'message': 'Hello', 'a_viewset': a_viewset})", "def list(self, request, *args, **kwargs):\n\n queryset = self.filter_queryset(self.get_queryset())\n\n page = request.query_params.get('page', 1)\n paginator = Paginator(queryset, 8)\n\n try:\n queryset = paginator.page(page)\n\n except PageNotAnInteger:\n queryset = paginator.page(1)\n\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n page = int(page)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response({'items': serializer.data, 'page': page, 'pages': paginator.num_pages})", "def _list(self, **kwargs):\n\n return self._make_request(**kwargs)", "def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in 
out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n @doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator", "def list(self, request, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n\n page = self.paginate_queryset(queryset)\n\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n else:\n serializer = self.get_serializer(queryset, many=True)\n\n data = serializer.data\n\n \"\"\"\n Determine the response type based on the request.\n a) For HTTP requests (e.g. via the browsable API) return a DRF response\n b) For AJAX requests, simply return a JSON rendered response.\n\n Note: b) is about 100x quicker than a), because the DRF framework adds a lot of cruft\n \"\"\"\n\n if page is not None:\n return self.get_paginated_response(data)\n elif request.is_ajax():\n return JsonResponse(data, safe=False)\n else:\n return Response(data)", "def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def expose_models(app, HOST=\"localhost\", PORT=5000, API_PREFIX=\"/api\"):\n\n api = SAFRSAPI(app, host=HOST, port=PORT)\n api.expose_object(models.Category)\n api.expose_object(models.CustomerCustomerDemo)\n api.expose_object(models.OrderDetail)\n api.expose_object(models.Order)\n api.expose_object(models.Customer)\n api.expose_object(models.CustomerDemographic)\n api.expose_object(models.EmployeeAudit)\n api.expose_object(models.EmployeeTerritory)\n api.expose_object(models.Employee)\n api.expose_object(models.Product)\n api.expose_object(models.Region)\n api.expose_object(models.Shipper)\n api.expose_object(models.Supplier)\n api.expose_object(models.Territory)\n return api", "def list(self, **kwargs):\n data, self.endpoint = self.data_endpoint(kwargs)\n r = super(Resource, self).list(**data)\n\n # Change display settings and data format for human consumption\n self.configure_display(r)\n return r", "def _instantiateResource(self, res):\n\n\n try:\n pagination = vsdModels.Pagination(**res)\n pagination.validate() #will fail if it doesno't have totalCount\n return pagination\n except:\n resourcetype, oid = self.getResourceTypeAndId(res['selfUrl'])\n if resourcetype == 'objects':\n return vsdModels.APIObject._create(res)\n #e.g FolderLinks\n model = vsdModels.resourceTypes[resourcetype](**res)\n return model", "def _collection_from_response(data):\n return Collection(uuid=UUID(data['uuid']), title=data['title'])", "def _create_response_detail(self, request, serializer):\n def build_item(source):\n \"\"\"build time data\"\"\"\n return dict(id=source['id'],\n uuid=source['uuid'],\n creation_time=source['creation_time'],\n version=source['version'])\n if self._short_response(request):\n data = serializer.data\n if isinstance(data, (list)):\n detail = [build_item(item) for item in data]\n else:\n detail = build_item(data)\n else:\n detail = serializer.data\n return detail", "def list(self, 
request):\n a_viewset = [\n 'Uses actions (list, create, retrieve, update, partial_update)',\n 'Automatically maps to URLs using Routers',\n 'Provides more functionality with less code',\n ]\n\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "def list(self,request,*args,**kwargs):\n response=super(ListAPIView,self).list(request,*args,**kwargs)\n #add applied_filters to the response which is set when filter_queryset method is called\n response=self.addAppliedFilters(response)\n #fetch data from the related views\n return self.fetch_related(request,response,*args,**kwargs)", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def make_response(self):\n params = {\n 'tweet.fields': 'created_at,public_metrics,entities',\n 'expansions': 'author_id',\n 'user.fields': 'description'\n }\n return self.response_limit(params)", "def __init__(self, *args, **kwargs):\n super(BasePaginationSerializer, self).__init__(*args, **kwargs)\n results_field = self.results_field\n\n try:\n object_serializer = self.Meta.object_serializer_class\n except AttributeError:\n object_serializer = DefaultObjectSerializer\n\n self.fields[results_field] = serializers.ListSerializer(\n child=object_serializer(),\n source='object_list'\n )", "def list(self):\n response = self.client.get_json(URL_MAPPING)\n response.success = response.status_code == 200\n return response", "def test_get_model_list():\n with app.test_client() as c:\n response = c.get('/REST/api/v1.0/model_list') \n assert response.status_code == 201", "def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)", "def _bld_resp(self, status=200, entry_or_list=None):\n resp = pvm_adp.Response('meth', 'path', status, 'reason', {})\n resp.entry = None\n resp.feed = None\n if entry_or_list is None:\n resp.feed = pvm_ent.Feed({}, [])\n else:\n if isinstance(entry_or_list, list):\n resp.feed = pvm_ent.Feed({}, entry_or_list)\n else:\n resp.entry = entry_or_list\n return resp", "def _openapi_json(self):\n # We don't use Flask.jsonify here as it would sort the keys\n # alphabetically while we want to preserve the order.\n from pprint import pprint\n pprint(self.to_dict())\n return current_app.response_class(json.dumps(self.to_dict(), indent=4),\n mimetype='application/json')", "def list_percelen_adapter(obj, request):\n return {\n 'id': obj.id\n }", "def list(cls) -> t.Iterable[SDict]:\n endpoint: t.Optional[str] = cls.endpoint\n\n def process_field(v):\n if isinstance(v, dict):\n return json.dumps(v, indent=True)\n return v\n\n while endpoint:\n r = Resource(endpoint=endpoint)\n items = r.get()\n # filter the items, ordering as needed\n for x in items.results:\n yield {k: process_field(x[k]) for k in cls.list_fields if k in x}\n endpoint = items.next if items.next else None", "def response_helper(self, response, **kwargs):\n self.resolve_schema(response)\n if \"headers\" in response:\n for header in response[\"headers\"].values():\n self.resolve_schema(header)\n return response", "def response(schema):\n def _response(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n if issubclass(schema, BaseModel):\n has_root = True if '__root__' in schema.__fields__ else False\n function_res = function(*args, **kwargs)\n\n if not function_res:\n if has_root is True:\n return jsonify([])\n return jsonify({})\n\n if type(function_res) == 
list:\n res = schema.parse_obj(function_res)\n else:\n res = schema.from_orm(function_res)\n\n res = res.dict()\n\n if has_root is True:\n return jsonify(res['__root__'])\n\n return jsonify(res)\n elif isinstance(schema, dict):\n return jsonify(schema)\n else:\n raise CustomException('invalid response type', code=400)\n\n return wrapper\n return _response", "def get_rest_list(request):\n if request.method == \"GET\":\n rest_list = Package.objects.order_by('-location')\n serializer = PackageSerializer(rest_list, many=True)\n return JsonResponse(serializer.data, safe=False)", "def get_list(self, request, **kwargs):\n # :TODO modify top_level_serializer or pass a list with self as\n # argument?\n registry = {getattr(self._meta , 'resource_name'): self}\n content = serializers.top_level_serializer(registry)\n response = HttpResponse(\n content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response", "def get_list(self, request, **kwargs):\n # :TODO modify top_level_serializer or pass a list with self as\n # argument?\n registry = {getattr(self._meta , 'resource_name'): self}\n content = serializers.top_level_serializer(registry)\n response = HttpResponse(\n content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response", "def listings_list():\n\n inputs = Listing.convert_inputs(request.args)\n form = ListingSearchForm(data=inputs)\n if form.validate():\n listings = Listing.find_all(inputs)\n serialized = [listing.serialize(\n isDetailed=False\n ) for listing in listings]\n return (jsonify(listings=serialized), 200)\n else:\n return (jsonify(errors=[\"Bad request\"]), 400)", "def test_get_hyperflex_server_model_list(self):\n pass", "def show_list():\n\n response = []\n docs = SUPERHEROES.stream()\n for doc in docs:\n response.append(doc.to_dict())\n return jsonify(response), 201", "def get_list(self, request, **kwargs):\n # TODO: Uncached for now. 
Invalidation that works for everyone may be\n # impossible.\n objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))\n sorted_objects = self.apply_sorting(objects, options=request.GET)\n\n paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(),\n limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)\n to_be_serialized = paginator.page()\n\n # Dehydrate the bundles in preparation for serialization.\n bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]\n to_be_serialized['objects'] = [self.full_dehydrate(bundle) for bundle in bundles]\n to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)\n\n self.add_shit_to_meta(request, to_be_serialized)\n\n return self.create_response(request, to_be_serialized)", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def show_listings(offset):\n items = Item.query.filter(Item.status == \"listed\").order_by(desc(Item.date_listed)).offset(offset).limit(LIMIT).all()\n 
return jsonify(data=[item.serialize for item in items])\n #return render_template('primary_user_interface.html', items=items)", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_defects_responder_spaces(self):\n pass", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def api_root(request, format=None):\n\n return Response({\n 'entities': reverse('entity-list', request=request),\n 'budgets': reverse('budget-list', request=request),\n 'actuals': reverse('actual-list', request=request),\n })", "def get_links(response: GenericResponse, endpoint: Endpoint, field: str) -> Sequence[Link]:\n responses = endpoint.definition.resolved[\"responses\"]\n if str(response.status_code) in responses:\n response_definition = responses[str(response.status_code)]\n else:\n response_definition = responses.get(\"default\", {})\n links = response_definition.get(field, {})\n return [Link.from_definition(name, definition, endpoint) for name, definition in links.items()]", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def obj_from_response(self, response):\n\n obj = self.model()\n serializer = self.get_serializer()\n field_data = serializer.deserialize(to_unicode(response.content))\n obj.update_fields(field_data)\n obj._full_url = response.url\n\n return obj", "def handle_filter_operations_response(self, response):\n\n if self.resource['operation'] == 'find':\n self.resource['metadata']['Results'] = \\\n [response[i] for i in range(response.count())]\n\n elif self.resource['operation'] == 'update_one':\n self.resource['metadata']['matched_count'] = \\\n response.matched_count\n self.resource['metadata']['modified_count'] = \\\n response.modified_count\n\n elif self.resource['operation'] == 'delete_many':\n self.resource['metadata']['deleted_count'] = \\\n response.deleted_count", "def user_view_list_data():\n video = VideoFactory()\n collection = video.collection\n moira_list = factories.MoiraListFactory()\n collection.view_lists.set([moira_list])\n return SimpleNamespace(video=video, moira_list=moira_list, collection=collection)", "def paginated_handling(self) -> global___Snippet.PaginatedResponseHandling:", "def list_response(wrapped):\n\n @wraps(wrapped)\n def decorated(request, *args, **kwargs):\n number_of_items = None\n offset = None\n if 'numberofitems' in request['args']:\n if request['args']['numberofitems'].isnumeric():\n number_of_items = int(request['args']['numberofitems'])\n else:\n raise UserException(ERROR_NUMERIC_REQUIRED % 'numberOfItems')\n if 'offset' in request['args']:\n if request['args']['offset'].isnumeric():\n offset = int(request['args']['offset'])\n else:\n raise UserException(ERROR_NUMERIC_REQUIRED % 'offset')\n\n if number_of_items is not None and offset is not None:\n return wrapped(request, number_of_items, offset)\n elif number_of_items is not None:\n return wrapped(request, number_of_items)\n elif offset is not None:\n return wrapped(request, offset=offset)\n else:\n return wrapped(request)\n\n return decorated", "def to_listing_dict(self) -> dict:\n data = super().to_listing_dict()\n return data", "def list(cls, client, spec, first_level=False):\n response = 
client.get_paged(cls.LIST.format(**spec))\n\n key = cls.LIST_KEY\n set_header = cls.set_header\n is_dir = cls.STRUCT is not None\n for headers in response:\n name = headers[key]\n yield name, cls(client, spec, set_header(headers), name), is_dir", "def get(self):\n\n try:\n args = api_parameters.parse_args()\n limit = args.get(\"limit\")\n controller = self.controller()\n schema = self.schema(many=True)\n raw_data = controller.get_list(**args)\n\n if limit:\n items = raw_data.items\n items = schema.dump(items)\n data = ResponseHandler.get_section_paginate(raw_data, items)\n\n else:\n data = schema.dump(raw_data)\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def get(self, request):\n self.queryset = self.queryset.all()\n serializer = self.serializer_class(self.queryset, many=True)\n page = self.paginate_queryset(self.queryset)\n if page is not None:\n serializer = self.serializer_class(page, many=True)\n # removing body and comments to improve on transfer performance\n for article in serializer.data:\n article.pop(\"body\")\n return self.get_paginated_response(serializer.data)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def test_array_model(self):\n from petstore_api.model import animal_farm, animal\n endpoint = self.api.array_model\n assert endpoint.openapi_types['body'] == (animal_farm.AnimalFarm,)\n assert endpoint.settings['response_type'] == (animal_farm.AnimalFarm,)\n\n # serialization + deserialization works\n with patch.object(RESTClientObject, 'request') as mock_method:\n cat = animal.Animal(class_name=\"Cat\", color=\"black\")\n body = animal_farm.AnimalFarm([cat])\n json_data = [{\"className\": \"Cat\", \"color\": \"black\"}]\n mock_method.return_value = self.mock_response(json_data)\n\n response = endpoint(body=body)\n self.assert_request_called_with(mock_method, 'http://petstore.swagger.io:80/v2/fake/refs/arraymodel', json_data)\n\n assert isinstance(response, animal_farm.AnimalFarm)\n assert response == body", "def list(self, request):\n\n viewset_list = [\n 'User\\'s action (list,create,retrieve ,update , partial_update)',\n 'Automatically maps to the urls using Routers.',\n 'Provides more functionality with less code.',\n ]\n\n return Response({'message':'Hello From ViewSet' , 'viewset':viewset_list})", "def data(self):\n ret = super(serializers.ListSerializer, self).data\n return ReturnDict(ret, serializer=self)", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_document_types_responder(self):\n pass", "def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _handle_embedded_models(list_of_embedded_models,\n error,\n model,\n instance,\n success):\n embedded_models = instance_list_to_dict(list_of_embedded_models)\n\n return success(\n model=model,\n embedded_models=embedded_models)", "def create_resultado(self, data):\n return StatusList(**data)", "def ListModelVersions(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list(self, request, *args, **kwargs):\n\n queryset = self.filter_queryset(self.get_queryset())\n\n page = self.paginate_queryset(queryset)\n data = dict()\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n data = self.build_list_data(serializer=serializer,\n 
username=request.user.username,\n add_pagination=True)\n # @TODO: somewhat inefficient usage of pagination, review\n response = self.get_paginated_response(serializer.data)\n for attr in ('count', 'next', 'previous'):\n data['header']['pagination'][attr] = response.data[attr]\n else:\n serializer = self.get_serializer(queryset, many=True)\n data = self.build_list_data(serializer=serializer, username=request.user.username)\n\n return Response(data=data, status=status.HTTP_200_OK)", "def to_representation(self, data):\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n\n iterable = data.all() if isinstance(data, models.Manager) else data\n \n if self.parent is None:\n post = [\n self.child.to_representation(item) for item in iterable\n ]\n normalized_dict = OrderedDict()\n normalized_dict[self.child.Meta.model_name] = ReturnList(post, serializer=self)\n result = [normalized_dict]\n for normalized_item in self.child.Meta.normalized_fields:\n if normalized_item in self.instancelist_dict:\n normalized_dict[normalized_item] = \\\n ReturnList(self.make_normalized_item_list(normalized_item), serializer=self)\n return result\n\n if self.field_name in self.child.Meta.normalized_fields:\n result = [ item.id for item in iterable ]\n parent = self.root\n if not self.field_name in parent.instancelist_dict:\n parent.instancelist_dict[self.field_name] = []\n parent.instance_repr_dict[self.field_name] = self.child._readable_fields\n parent.instancelist_dict[self.field_name] = \\\n list(set(parent.instancelist_dict[self.field_name]) | set(iterable))\n else:\n result = [\n self.child.to_representation(item) for item in iterable\n ]\n\n return result", "def parse_listing(self, raw_output):\n return output_parser.listing(raw_output)", "def bentity_list(request, format='csv'):\n \n \n bentities = Bentity.objects.all().order_by('bentity')\n \n \n if format == 'csv':\n # Serislize CSV for API\n return CSVResponse(\n [{'bentity_id': b.gid, 'bentity_name': b.bentity} for b in bentities],\n ('bentity_id', 'bentity_name') )\n \n else:\n # Serialize JSON for bentity-list widget\n json_objects = [{\n 'key': b.gid,\n 'display': b.bentity,\n } for b in bentities]\n \n return JSONResponse({'bentities' : json_objects})", "def walk_list_api(request):\n\n if request.method == 'GET':\n walks = Walk.objects.filter(user=request.user).order_by(\"-date\")\n serializer = WalkSerializer(walks, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = WalkSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def list(self, request, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n\n page = self.paginate_queryset(queryset)\n\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n else:\n serializer = self.get_serializer(queryset, many=True)\n\n data = serializer.data\n\n # Attempt to add extra context information to the historical data\n for item in data:\n deltas = item['deltas']\n\n if not deltas:\n deltas = {}\n\n # Add part detail\n if 'part' in deltas:\n try:\n part = Part.objects.get(pk=deltas['part'])\n serializer = PartBriefSerializer(part)\n deltas['part_detail'] = serializer.data\n except Exception:\n pass\n\n # Add location detail\n if 'location' in deltas:\n try:\n location = 
StockLocation.objects.get(pk=deltas['location'])\n serializer = StockSerializers.LocationSerializer(location)\n deltas['location_detail'] = serializer.data\n except Exception:\n pass\n\n # Add stockitem detail\n if 'stockitem' in deltas:\n try:\n stockitem = StockItem.objects.get(pk=deltas['stockitem'])\n serializer = StockSerializers.StockItemSerializer(stockitem)\n deltas['stockitem_detail'] = serializer.data\n except Exception:\n pass\n\n # Add customer detail\n if 'customer' in deltas:\n try:\n customer = Company.objects.get(pk=deltas['customer'])\n serializer = CompanySerializer(customer)\n deltas['customer_detail'] = serializer.data\n except Exception:\n pass\n\n # Add PurchaseOrder detail\n if 'purchaseorder' in deltas:\n try:\n order = PurchaseOrder.objects.get(pk=deltas['purchaseorder'])\n serializer = PurchaseOrderSerializer(order)\n deltas['purchaseorder_detail'] = serializer.data\n except Exception:\n pass\n\n # Add SalesOrder detail\n if 'salesorder' in deltas:\n try:\n order = SalesOrder.objects.get(pk=deltas['salesorder'])\n serializer = SalesOrderSerializer(order)\n deltas['salesorder_detail'] = serializer.data\n except Exception:\n pass\n\n # Add ReturnOrder detail\n if 'returnorder' in deltas:\n try:\n order = ReturnOrder.objects.get(pk=deltas['returnorder'])\n serializer = ReturnOrderSerializer(order)\n deltas['returnorder_detail'] = serializer.data\n except Exception:\n pass\n\n # Add BuildOrder detail\n if 'buildorder' in deltas:\n try:\n order = Build.objects.get(pk=deltas['buildorder'])\n serializer = BuildSerializer(order)\n deltas['buildorder_detail'] = serializer.data\n except Exception:\n pass\n\n if page is not None:\n return self.get_paginated_response(data)\n if request.is_ajax():\n return JsonResponse(data, safe=False)\n else:\n return Response(data)", "def api_root(request, format=None):\n\n return Response(OrderedDict([\n #('class', reverse(\"class-list\", request=request, format=format)),\n ]))", "def test_can_list(self):\n post_req = self.post_json(\n 'users',\n {\n \"data\": {\n \"type\": \"user\",\n \"attributes\": {\n \"uid\": \"fdb07ea6bf687872\",\n \"social\": \"PINK\",\n \"name\": \"Артур Горбунов\"\n },\n }\n }\n )\n self.assertEqual(post_req.status_code, 201)\n self.assertEqual(post_req.json.get('id'), 1)\n\n post_req = self.post_json(\n 'adverts',\n {\n \"data\": {\n \"relationships\": {\n \"author\": {\n \"data\": {\n \"type\": \"user\",\n \"id\": \"1\"\n }\n }\n },\n \"type\": \"advert\",\n \"attributes\": {\n \"title\": \"Aves\",\n \"description\": \"Институциализация формирует диссон\\\n ансный постиндустриализм, о чем будет подробнее ска\\\n зано ниже. Полифонический роман приводит современн\\\n ый контрапункт. М.М.Бахтин понимал тот факт, что д\\\n рама диссонирует феномен толпы, подчеркивает прези\\\n дент. 
Понятие тоталитаризма ограничивает антрополо\\\n гический пастиш.\"\n }\n }\n }\n )\n self.assertEqual(post_req.status_code, 201)\n self.assertEqual(post_req.json.get('id'), 1)", "def test_list_renders_instance_correctly(self):\n # Make a service to render\n service = self.project.services.create(name = 'service1', category = self.category)\n # In order to render the links correctly there must be a request in the context\n request = APIRequestFactory().post('/')\n serializer = ServiceListSerializer(service, context = dict(request = request))\n # Check that the right keys are present\n self.assertCountEqual(serializer.data.keys(), {'id', 'name', 'requirements', 'category', 'project', '_links'})", "def serialize_response(response: Response) -> List[Dict[str, Any]]:\n keys = [\"id\", \"title\", \"likes_count\"]\n return [{f: resp.get(f) for f in keys} for resp in response.body]", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ExportResponseMetadata, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def api_endpoints(self):\n # This result appears to be a PaginatedList, but objects in the list don't\n # have IDs and can't be retrieved on their own, and it doesn't accept normal\n # pagination properties, so we're converting this to a list of strings.\n if not hasattr(self, \"_api_endpoints\"):\n results = self._client.get(\n \"{}/api-endpoints\".format(LKECluster.api_endpoint), model=self\n )\n\n self._api_endpoints = [MappedObject(**c) for c in results[\"data\"]]\n\n return self._api_endpoints", "def to_api_data(self):\n raise NotImplementedError()", "def index(self, req):\n return self._get_models(req, is_detail=False)", "def get_specs(self, prefix='', status=200, **kwargs):\n return self.get_json('{0}/swagger.json'.format(prefix), status=status, **kwargs)", "def list(self, request, *args, **kwargs):\n query = self.filter_queryset(self.get_queryset())\n if isinstance(query, ErrorResponse):\n return query\n default_limit = DEFAULT_LIST_LIMIT\n limit, offset, range_errors = self.get_offset_limit_params(default_limit)\n if range_errors:\n return ErrorResponse(data=range_errors)\n\n self.object_list = get_object_list(offset, limit, query)\n\n # Default is to allow empty querysets. This can be altered by setting\n # `.allow_empty = False`, to raise 404 errors on empty querysets.\n if not self.allow_empty and not self.object_list:\n warnings.warn(\n 'The `allow_empty` parameter is due to be deprecated. 
'\n 'To use `allow_empty=False` style behavior, You should override '\n '`get_queryset()` and explicitly raise a 404 on empty querysets.',\n PendingDeprecationWarning\n )\n class_name = self.__class__.__name__\n error_msg = self.empty_error % {'class_name': class_name}\n raise Http404(error_msg)\n\n # the pagination is not supported, use offset and limit\n serializer = self.get_serializer(self.object_list, many=True)\n return self.generate_list_response(query, self.object_list, serializer, offset, limit)", "def detail(self, req):\n return self._get_models(req, is_detail=True)", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_pessoa': 'int',\n 'id_cartao': 'int',\n 'id_bandeira': 'int',\n 'id_tipo_cartao': 'int',\n 'numero_cartao': 'str',\n 'nome_plastico': 'str',\n 'cvv2': 'str',\n 'data_geracao': 'str',\n 'data_validade': 'str',\n 'cpf': 'str',\n 'tipo_portador': 'str',\n 'trilha1': 'str',\n 'trilha2': 'str',\n 'trilha_cvv1': 'str',\n 'trilha_cvv2': 'str',\n 'flag_virtual': 'int',\n 'nome_bandeira': 'str',\n 'flag_titular': 'int',\n 'sequencial_cartao': 'int',\n 'id_status': 'int',\n 'descricao_status_cartao': 'str',\n 'data_status': 'str',\n 'id_estagio': 'int',\n 'descricao_estagio': 'str',\n 'data_estagio': 'str',\n 'numero_bin': 'str',\n 'id_produto': 'int',\n 'descricao_produto': 'str',\n 'id_status_conta': 'int',\n 'descricao_status_conta': 'int',\n 'data_embossing': 'str',\n 'codigo_desbloqueio': 'str',\n 'nome_pessoa': 'str',\n 'tipo_pessoa': 'str',\n 'data_nascimento': 'str',\n 'id_endereco': 'int',\n 'id_tipo_endereco': 'int',\n 'descricao_tipo_endereco': 'str',\n 'cep': 'str',\n 'logradouro': 'str',\n 'numero_endereco': 'str',\n 'complemento_endereco': 'str',\n 'bairro': 'str',\n 'cidade': 'str',\n 'uf': 'str',\n 'pais': 'str',\n 'senha_criptografada': 'str',\n 'icvv': 'str',\n 'id_status_impressao': 'int'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_pessoa': 'idPessoa',\n 'id_cartao': 'idCartao',\n 'id_bandeira': 'idBandeira',\n 'id_tipo_cartao': 'idTipoCartao',\n 'numero_cartao': 'numeroCartao',\n 'nome_plastico': 'nomePlastico',\n 'cvv2': 'cvv2',\n 'data_geracao': 'dataGeracao',\n 'data_validade': 'dataValidade',\n 'cpf': 'cpf',\n 'tipo_portador': 'tipoPortador',\n 'trilha1': 'trilha1',\n 'trilha2': 'trilha2',\n 'trilha_cvv1': 'trilhaCVV1',\n 'trilha_cvv2': 'trilhaCVV2',\n 'flag_virtual': 'flagVirtual',\n 'nome_bandeira': 'nomeBandeira',\n 'flag_titular': 'flagTitular',\n 'sequencial_cartao': 'sequencialCartao',\n 'id_status': 'idStatus',\n 'descricao_status_cartao': 'descricaoStatusCartao',\n 'data_status': 'dataStatus',\n 'id_estagio': 'idEstagio',\n 'descricao_estagio': 'descricaoEstagio',\n 'data_estagio': 'dataEstagio',\n 'numero_bin': 'numeroBin',\n 'id_produto': 'idProduto',\n 'descricao_produto': 'descricaoProduto',\n 'id_status_conta': 'idStatusConta',\n 'descricao_status_conta': 'descricaoStatusConta',\n 'data_embossing': 'dataEmbossing',\n 'codigo_desbloqueio': 'codigoDesbloqueio',\n 'nome_pessoa': 'nomePessoa',\n 'tipo_pessoa': 'tipoPessoa',\n 'data_nascimento': 'dataNascimento',\n 'id_endereco': 'idEndereco',\n 'id_tipo_endereco': 'idTipoEndereco',\n 'descricao_tipo_endereco': 'descricaoTipoEndereco',\n 'cep': 'cep',\n 'logradouro': 'logradouro',\n 'numero_endereco': 'numeroEndereco',\n 'complemento_endereco': 'complementoEndereco',\n 'bairro': 'bairro',\n 'cidade': 'cidade',\n 'uf': 'uf',\n 'pais': 'pais',\n 'senha_criptografada': 'senhaCriptografada',\n 'icvv': 'icvv',\n 'id_status_impressao': 'idStatusImpressao'\n 
}\n\n self._id_conta = None\n self._id_pessoa = None\n self._id_cartao = None\n self._id_bandeira = None\n self._id_tipo_cartao = None\n self._numero_cartao = None\n self._nome_plastico = None\n self._cvv2 = None\n self._data_geracao = None\n self._data_validade = None\n self._cpf = None\n self._tipo_portador = None\n self._trilha1 = None\n self._trilha2 = None\n self._trilha_cvv1 = None\n self._trilha_cvv2 = None\n self._flag_virtual = None\n self._nome_bandeira = None\n self._flag_titular = None\n self._sequencial_cartao = None\n self._id_status = None\n self._descricao_status_cartao = None\n self._data_status = None\n self._id_estagio = None\n self._descricao_estagio = None\n self._data_estagio = None\n self._numero_bin = None\n self._id_produto = None\n self._descricao_produto = None\n self._id_status_conta = None\n self._descricao_status_conta = None\n self._data_embossing = None\n self._codigo_desbloqueio = None\n self._nome_pessoa = None\n self._tipo_pessoa = None\n self._data_nascimento = None\n self._id_endereco = None\n self._id_tipo_endereco = None\n self._descricao_tipo_endereco = None\n self._cep = None\n self._logradouro = None\n self._numero_endereco = None\n self._complemento_endereco = None\n self._bairro = None\n self._cidade = None\n self._uf = None\n self._pais = None\n self._senha_criptografada = None\n self._icvv = None\n self._id_status_impressao = None", "def view_all(self): # -> Collection[TEntityDto]:\n raise NotImplementedError()" ]
[ "0.6088739", "0.60347897", "0.5947383", "0.5903535", "0.5835085", "0.5733381", "0.5700714", "0.56890315", "0.563407", "0.563407", "0.563407", "0.56306905", "0.56293565", "0.5624523", "0.55867976", "0.5500636", "0.5498303", "0.5498303", "0.54847574", "0.54818034", "0.5460438", "0.54419786", "0.5348818", "0.5347111", "0.5345752", "0.534434", "0.5310411", "0.53001857", "0.52914774", "0.52912104", "0.5280774", "0.52583236", "0.5257979", "0.52571875", "0.52392083", "0.52129227", "0.5210649", "0.5196996", "0.5189758", "0.5187816", "0.5186734", "0.5185618", "0.51806307", "0.5167542", "0.51569796", "0.5145322", "0.5144345", "0.5137947", "0.51378465", "0.5137271", "0.5137271", "0.51323134", "0.5115998", "0.50937814", "0.50904083", "0.5088654", "0.50865906", "0.50747365", "0.50747365", "0.5068164", "0.5066899", "0.50587356", "0.50565165", "0.5048074", "0.5047712", "0.5047272", "0.5043997", "0.5041861", "0.5032046", "0.50226223", "0.50138766", "0.5012292", "0.5002149", "0.5002149", "0.49983314", "0.49939588", "0.49921715", "0.49779925", "0.49773797", "0.4974694", "0.49721172", "0.49584058", "0.49566555", "0.49543318", "0.49456152", "0.494046", "0.49390453", "0.493879", "0.4938186", "0.49331868", "0.49269113", "0.49205443", "0.49191925", "0.4911868", "0.49074402", "0.48932514", "0.4887645", "0.4885941", "0.488588", "0.48824954", "0.48798007" ]
0.0
-1
Sets the errors of this MigrateListingResponse.
def errors(self, errors):
    self._errors = errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors(self, errors):\n \n self._errors = errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def errors(self):\n return self._errors", "def errors(self):\n return self.__errors", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def error(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.ERROR))", "def errors(self):\n raise NotImplementedError", "def getErrorsList(self):\n return self.__errors", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def errors(self):\n return self._properties.get(\"errors\")", "def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def validation_errors(self):\n return self._validation_errors", "def getErrors(self):\n return self.errors", "def mark_error(self):\r\n self.status = ERROR", "def mark_failed(self):\n self.status = self.FAILED\n self.traceback = self._format_traceback()\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "def set_error(self, name, value):\n self.errors[name] = value", "def errors(self) -> List[Error]:", "def __set_errors_json(self, error_count_by_operation, errors_by_operation):\n message = \"{0} error/s reported.\".format(error_count_by_operation)\n log_file_path = self.logger.file_logger.log_file_path\n message += \" The latest {0} error/s are shared in detail. To view all errors, review this log file on the machine: {1}\".format(len(errors_by_operation), log_file_path) if error_count_by_operation > 0 else \"\"\n return {\n \"code\": Constants.PatchOperationTopLevelErrorCode.SUCCESS if error_count_by_operation == 0 else Constants.PatchOperationTopLevelErrorCode.ERROR,\n \"details\": errors_by_operation,\n \"message\": message\n }", "def error_data(self):\n\n if not self.__settings:\n return []\n\n return self.__transaction_errors", "def set_error(self, index: int) -> None:\n ...", "def set_limit(self, errors):\n self.limit = errors", "def Errors(self):\n return self._get_attribute('errors')", "def errors_summary(self, errors_summary):\n\n self._errors_summary = errors_summary", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def pin_errors(self):\n for m in range(self.stage_width_list[-1]):\n error, _ = rqrmilib.calculate_submodel_error(self._get_native_object(), self.probe, len(self)-1, m)\n if error < 0: error = 0\n self.error_list[m] = int(error)\n self.rqrmi_state_changed = True\n return self.error_list", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def error_count(self, error_count):\n\n self._error_count = error_count", "def setError(self,err):\n self.error = err", "def errors(self) -> List[Error]:\n return self._errors_files + list(self._errors.values())", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except 
IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "def check_set_errors(self):\n response = self.read()\n return [] if response == \"\" else [response]", "def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report", "def mark_failed(self):\r\n self.require_item()\r\n\r\n url = '{0}/mark_failed'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n\r\n return request, parsers.parse_empty", "def add_error(self, field, message):\n add_list_value(self.errors, field, message)", "def setError(self, index, error):\n\t\tself.membersWithErrors[index][1] = error", "def errors(self):\n\n dict = {\"Stellar Mass Error\":[self.st_masserr1,self.st_masserr2],\n \"Stellar Radius Error\":[self.st_raderr1,self.st_raderr2]}\n\n return dict", "def set_error(self, error):\n self._set_sub_text('error', text=str(error))\n return self", "def error(self) -> list:\n return self.__err", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def _initialize_error_dictionaries(self):\n for task_id in self.task_ids.keys():\n self.training_errors[task_id] = []\n self.validation_errors[task_id] = []", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def process_error_response(self, resources, resource, api, operation,\n error_response, context):\n pass", "def GetAll(self):\n return self._errors.copy()", "def set_failed(self, exception):\n self.logger.info(\"status: FAILED\")\n self._callback('on_failed', exception)\n return self.update_response(self.encoder.encode_failed(exception))", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def errors_preview(self) -> Sequence['outputs.ValidationErrorResponse']:\n 
return pulumi.get(self, \"errors_preview\")", "def errorResponse(self):\n return self._errorResponse", "def parse_error(self):\n\n # Check the table_parse_error flag\n return self.__table_parse_error", "def failure(self, validation_failure):\n \n self.request.response.status_int = 400\n return validation_failure.error.asdict()", "def markFailed(self, streamerList):\n fileList = []\n for streamer in streamerList:\n fileList.append( File(id = streamer['id'],\n lfn = streamer['lfn']) )\n self.subscription.failFiles(fileList)\n\n return", "def error_false(self):\n self.errors = self.errors[0:-1]\n if not self.errors:\n self.update_info()", "async def test_txn_list_with_validator_error(self):\n self.stream.preset_response(self.status.INTERNAL_ERROR)\n response = await self.get_assert_status('/transactions', 500)\n\n self.assert_has_valid_error(response, 10)", "def validate_collection_response(self, response):\n\n self.validate_response(response)\n if response.status_code not in self.model._meta['valid_get_status']:\n raise InvalidStatusError(\n self.model._meta['valid_get_status'], response\n )", "def errors(self) -> List[Error]:\n # May have inherited errors with a different path.\n for error in self._errors.values():\n error.path = self.path\n if self.is_removed: # Mark all of our errors as non-persistant.\n error.is_persistant = False\n return list(self._errors.values())", "def setErrorMessage(self, errorMessage):\n self._errorMessage = errorMessage", "def setup_error_data(self, field_setup, error_names, is_table=False, col_tag='span', single_col_tag=''):\n backup_fieldset_fields = [\n ('first', 'second'),\n 'billing_address_1',\n ('billing_city', 'billing_country_area', 'billing_postcode'),\n 'last',\n ]\n field_setup = field_setup or backup_fieldset_fields\n error_names = set(error_names or flatten(field_setup))\n col_count = max([1 if isinstance(ea, str) else len(ea) for ea in field_setup])\n error_txt = \"This is a {} test error. 
\"\n row_info = []\n for row in field_setup:\n if isinstance(row, str):\n row = [row]\n multi_col_row = len(row) > 1\n if is_table:\n cur_tag = 'td'\n error_settings = (cur_tag, multi_col_row, col_count, True, True)\n attr = ' colspan=\"{}\"'.format(2 if multi_col_row else 2 * col_count)\n else:\n cur_tag = col_tag if multi_col_row else single_col_tag\n error_settings = (cur_tag, multi_col_row, col_count, False, False)\n attr = ''\n error_list = [error_txt.format(name) if name in error_names else '' for name in row]\n columns = [{'errors': ea} for ea in error_list]\n expected = [err if not cur_tag else self.form._html_tag(cur_tag, err, attr) for err in error_list]\n if all(ea == '' for ea in error_list):\n expected = []\n actual = self.form.get_error_data(columns, error_settings)\n row_summary = {'expected': expected, 'actual': actual, 'field_names': row, 'settings': error_settings}\n row_summary['columns'] = columns\n row_info.append(row_summary)\n return row_info", "def path_link_errors(self):\n return self._path_link_errors", "def errors_and_warnings(self, errors_and_warnings):\n\n self._errors_and_warnings = errors_and_warnings", "def set_error_page(self, html):\n return self.manager.set_error_page(self, html)", "def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()", "def set_error(cls, ekindataset, dp, col):\n if ekindataset[col][dp]['var'] == '':\n return\n if not ekindataset[col][dp].has_key('error'):\n ekindataset[col][dp]['error'] = 0\n return", "def error_details(self, error_details):\n self._error_details = error_details", "def getErrors(self) -> java.util.Collection:\n ...", "def _load_error_urls(self):\n if self.errorurls is None:\n self.errorurls = set()\n errorurlsfile = osp.join(self.basepath, 'errors.csv')\n if os.path.exists(errorurlsfile):\n reader = unicode_csv_reader(open(errorurlsfile))\n self.errorurls = set(rows[0] for rows in reader)", "def errors_preview(self) -> pulumi.Output[Sequence['outputs.ValidationErrorResponse']]:\n return pulumi.get(self, \"errors_preview\")", "def enrich_errors(self, run):\n return run", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def error(self, message):\n if 'PhysicalResourceId' not in self.response:\n self.response['PhysicalResourceId'] = self.response['LogicalResourceId']\n self.response['Status'] = 'FAILED'\n self.response['Reason'] = message\n self._send_response(self.response)", "def get_errors(self, response: response_domain_model.Response, question_code: str) -> Sequence['ValidationError']:\n ...", "def show_errors(self):\n\n if self.errors:\n print('Clean error in:')\n for file in self.errors:\n print(' %s' % file)", "def sm_error_handler(self, errors):\n try:\n yield\n except Exception as e:\n if issubclass(e.__class__, ManagerError) or \\\n issubclass(e.__class__, ManagerFatalError) or \\\n isinstance(e, ConnectionError) or \\\n xmlrpclib.ProtocolError or \\\n xmlrpclib.Fault:\n\n errors.append(repr(e))\n elif isinstance(e, socket.error):\n errors.append(repr(e))\n errors.append(\"Please make sure the server port is open.\")\n else:\n raise e", "def error(self, exception=None):\n self._error = exception", "def get_errors(self, obj):\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True, context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None", "def set_error_handling(value):\n for cls in 
list_kg_classes() + list_embedded_metadata_classes():\n cls.set_error_handling(value)", "def error(self) -> 'outputs.StatusResponse':\n return pulumi.get(self, \"error\")", "def init_matches_errors(self) -> None:\n\n self.matches = set()\n self.ignored = set()\n self.errors = set()", "def error(self, error):\n\n self._error = error", "def error(self, error):\n\n self._error = error", "def error(self, error):\n\n self._error = error", "def set_fetch_values_exception(cls):\n cls._exception = True", "def failed(self, message=None):\n doc = {self.STATE: self.STATE_FAILED}\n\n if message:\n doc.update({self.ERROR_MESSAGE: message})\n\n self.update(doc)", "def handle_validation_error(self, error, bundle_errors):\n \n error_str = six.text_type(error)\n error_msg = self.help.format(error_msg=error_str) if self.help else error_str\n msg = {self.name: error_msg}\n\n if bundle_errors:\n return error, msg\n flask_restful.abort(400, message=msg)", "def device_stats_collection_failure_reason(self, device_stats_collection_failure_reason):\n\n self._device_stats_collection_failure_reason = device_stats_collection_failure_reason", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def check_response_errors(self, resp):\n return True", "def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return None", "def add_error_tables(self, error_tables):\n self.error_distribution = error_tables", "def _redirectErrors(self, other):\n other.getErrorRaisedEventManager().add_listener(self._errorRedirection)", "def AsJson(self):\n\n return json.dumps(self._errors)", "def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def _error_response(self):\r\n response_dict = {'success': False, 'version': 1}\r\n self.send_response(\r\n 400, content=json.dumps(response_dict),\r\n headers={'Content-type': 'application/json'}\r\n )", "def _parse_store_error(self, response):\n default_msg = \"Failure working with the Store: [{}] {!r}\".format(\n response.status_code, response.content\n )\n try:\n error_data = response.json()\n except ValueError:\n return default_msg\n\n try:\n error_info = [(error[\"message\"], error[\"code\"]) for error in error_data[\"error-list\"]]\n except (KeyError, TypeError):\n return default_msg\n\n if not error_info:\n return default_msg\n\n messages = []\n for msg, code in error_info:\n if code:\n msg += \" [code: {}]\".format(code)\n messages.append(msg)\n return \"Store failure! 
\" + \"; \".join(messages)", "def error_count(self):\n return len(self.errors)", "def _setErrorNodes(self, errorNodes):\n self._errorNodes = errorNodes", "def getNumErrors(self):\n return _libsbml.XMLErrorLog_getNumErrors(self)", "def set_error_page(self, loadbalancer, html):\n return loadbalancer.set_error_page(html)", "def get_errors(self):\n return {'loss': self.loss.data[0]}", "def failure(self, error):\n \n self.request.response.status_int = 400\n return None" ]
[ "0.6449945", "0.586166", "0.5679023", "0.56335896", "0.5626413", "0.56153715", "0.56153715", "0.55020136", "0.54732645", "0.5438516", "0.5403028", "0.53804886", "0.5344446", "0.53443104", "0.532449", "0.5315556", "0.5290784", "0.52629125", "0.52527165", "0.52467006", "0.5245283", "0.52259946", "0.52225095", "0.52133447", "0.518861", "0.5150667", "0.51299673", "0.5128954", "0.5104128", "0.505946", "0.50590473", "0.50573266", "0.5054114", "0.50464505", "0.50458425", "0.49863064", "0.49822056", "0.49654248", "0.4959583", "0.4956376", "0.4910352", "0.48974535", "0.48857415", "0.487305", "0.48729587", "0.48123288", "0.48075458", "0.47938704", "0.47851428", "0.47738868", "0.4772601", "0.47519332", "0.47477052", "0.47441545", "0.47322607", "0.47322306", "0.4725571", "0.47144973", "0.4713668", "0.46954563", "0.46933037", "0.46921185", "0.46864015", "0.46714172", "0.4667938", "0.46670586", "0.46626896", "0.46482602", "0.46462175", "0.46373948", "0.46356156", "0.46286345", "0.46283725", "0.46218342", "0.46089533", "0.4595176", "0.45943004", "0.45939693", "0.45898846", "0.45898846", "0.45898846", "0.45891532", "0.4582701", "0.45780897", "0.45756504", "0.4573232", "0.45531848", "0.4550619", "0.4547035", "0.45470035", "0.45405206", "0.45374164", "0.4531224", "0.45186865", "0.45131537", "0.45025826", "0.45023954", "0.44900247", "0.4486402", "0.4485403" ]
0.63594306
1
Sets the inventory_item_group_key of this MigrateListingResponse.
def inventory_item_group_key(self, inventory_item_group_key):
    self._inventory_item_group_key = inventory_item_group_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def with_group_key(self, group_key):\n self.group_key = group_key\n return self", "def add_inventory_group(self, key):\n host_dict = {'hosts': [], 'vars': {}}\n self.inventory[key] = host_dict\n return", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id", "def item_group_href(self, item_group_href):\n\n self._item_group_href = item_group_href", "def group_identifier(self, group_identifier):\n\n self._group_identifier = group_identifier", "def set_group(self, group):\n self._group = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def item_group_type(self, item_group_type):\n\n self._item_group_type = item_group_type", "def group(self, group):\n self._group = group", "def set_group(self, group: str) -> None:\n self.group = group", "def set_group(self, id_: str, player: str, group: list):\n self._groups[id_] = {\n 'player': player,\n 'group': group\n }", "def setitem_key_value(self):\n raise NotImplementedError", "def set(self, name_group, key, value):\n self.psettings.beginGroup(name_group)\n self.psettings.setValue(key, value)\n self.closeGroup()", "def instance_group(self, instance_group):\n if instance_group is None:\n raise ValueError(\"Invalid value for `instance_group`, must not be `None`\")\n\n self._instance_group = instance_group", "def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group", "def inventory_reference_id(self, inventory_reference_id):\n\n self._inventory_reference_id = inventory_reference_id", "def signing_group_id(self, signing_group_id):\n\n self._signing_group_id = signing_group_id", "def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self", "def group_oid(self, group_oid):\n\n self._group_oid = group_oid", "def signing_group_name(self, signing_group_name):\n\n self._signing_group_name = signing_group_name", "async def async_set_multiroom_group(self, multiroom_group):\n self._multiroom_group = multiroom_group", "def setGroup(self, group):\n\t\tself.config.GROUP = group", "def price_group(self, price_group: str):\n\n self._price_group = price_group", "def group(self, val):\n self.set_property(\"Group\", val)", "def group(self, group):\n self.proxy_group = group\n return self", "def setGroupId(self, groupId):\n internals.blpapi_ServiceRegistrationOptions_setGroupId(\n self.__handle, groupId.encode('utf-8'))\n # NOTE: we should convert groupId to bytes here because\n # otherwise we'll get an error in SWIG wrapper.", "def source_group_id(self, source_group_id):\n\n self._source_group_id = source_group_id", "def partition_key(self, partition_key):\n\n self._partition_key = partition_key", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def inventory(self, inventory):\n\n self._inventory = inventory", "def 
error_group_id(self, error_group_id):\n\n self._error_group_id = error_group_id", "def apply(self, droplet, inventory):\n\n value = droplet.get(self.group_by)\n ip = droplet[\"ip_address\"]\n\n if value is None:\n return None\n\n if self.group_match:\n m = self.group_match.match(value)\n if m is None:\n return\n if self.group_name:\n group_name = self.group_name.format(*m.groups())\n else:\n group_name = value\n else:\n group_name = (self.group_name or \"{0}\").format(value)\n\n inventory.setdefault(group_name, []).append(ip)", "def set_group(self, address, group):\n self.groups[address] = group", "def inventory_items(self, inventory_items):\n\n self._inventory_items = inventory_items", "def set_one(self, name_group, key, value):\n self.set(name_group, key, value)\n for item in self.get_all_childname(name_group):\n if item != key:\n self.set(name_group, item, False)", "def namespace_group_num(self, namespace_group_num):\n\n self._namespace_group_num = namespace_group_num", "def destination_group_id(self, destination_group_id):\n\n self._destination_group_id = destination_group_id", "def receiver_group(self, receiver_group):\n\n self._receiver_group = receiver_group", "def key(self, key):\n\n self._key = key", "def key(self, key):\n\n self._key = key", "def inventory_id(self, inventory_id):\n if inventory_id is None:\n raise ValueError(\"Invalid value for `inventory_id`, must not be `None`\") # noqa: E501\n\n self._inventory_id = inventory_id", "def featuregroup_id(self, featuregroup_id):\n\n self._featuregroup_id = featuregroup_id", "def __setitem__(self, key, item):\n if key>=len(self.trained_rqrmi):\n raise KeyError('Stage index invalid')\n self.trained_rqrmi[key]=item\n self.rqrmi_state_changed=True", "async def handle_set_group(self, match: Match[str], payload: str) -> None:\n groupid = match.group(1)\n\n try:\n group = self._bridge.groups[groupid]\n state = GroupSetState(**json.loads(payload))\n LOGGER.info(f\"Updating group {group.name}\")\n await group.set_action(**state.dict())\n except IndexError:\n LOGGER.warning(f\"Unknown group id: {groupid}\")\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")", "def gen_inventory(self):\n if isinstance(self.resource, list):\n self.my_add_group(self.resource, 'default_group')\n elif isinstance(self.resource, dict):\n for groupname, hosts_and_vars in self.resource.iteritems():\n self.my_add_group(hosts_and_vars.get(\"hosts\"), groupname, hosts_and_vars.get(\"vars\"))", "def grouping(self, grouping):\n\n self._grouping = grouping", "def set_add_dispute_line_item_group(self, line_item_group):\n self.set_value_into_input_field(self.add_dispute_line_item_group_textbox_locator, line_item_group)", "def set_group_name(self, name):\n self.groupname = name", "def set_item(self, key, value):\n # TODO: Add self.prefix\n self.table.putValue(key, value)", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def __init__(__self__, *,\n group_arn: pulumi.Input[str]):\n pulumi.set(__self__, \"group_arn\", group_arn)", "def join_group(self, group: InterphoneGroup) -> None:\n self._group = group\n group.register(self)", "def key(self, key):\n self._key = key", "def key(self, key):\n self._key = key", "def consistency_group(self, 
consistency_group):\n\n self._consistency_group = consistency_group", "def consistency_group(self, consistency_group):\n\n self._consistency_group = consistency_group", "def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()", "def item_location(self, item_location):\n\n self._item_location = item_location", "def key_id(self, key_id):\n\n self._key_id = key_id", "def consistency_group_num(self, consistency_group_num):\n\n self._consistency_group_num = consistency_group_num", "def group_id(self, group_id: int):\n if group_id is None:\n raise ValueError(\"Invalid value for `group_id`, must not be `None`\") # noqa: E501\n\n self._group_id = group_id", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def upsert_group(self,\n group, # type: Group\n *options, # type: UpsertGroupOptions\n **kwargs # type: Any\n ):\n # This endpoint accepts application/x-www-form-urlencoded and requires the data be sent as form data.\n # The name/id should not be included in the form data.\n # Roles should be a comma separated list of strings.\n # If, only if, the role contains a bucket name then the rolename should be suffixed\n # with[<bucket_name>] e.g. bucket_full_access[default],security_admin.\n\n final_args = forward_args(kwargs, *options)\n final_args.update({k: v for k, v in group.as_dict.items() if k in {\n 'roles', 'description', 'ldap_group_reference'}})\n self._admin_bucket.group_upsert(group.name, **final_args)", "def setGatingGroup(self, channel, group, unitCode=0):\n resp = self.XAPCommand('GRPSEL', channel, group, unitCode=unitCode)\n return resp", "def update_by_key(\n self,\n key: str,\n version: int,\n actions: typing.List[CustomerGroupUpdateAction],\n *,\n expand: OptionalListStr = None,\n force_update: bool = False,\n ) -> CustomerGroup:\n params = self._serialize_params({\"expand\": expand}, _CustomerGroupUpdateSchema)\n update_action = CustomerGroupUpdate(version=version, actions=actions)\n return self._client._post(\n endpoint=f\"customer-groups/key={key}\",\n params=params,\n data_object=update_action,\n response_class=CustomerGroup,\n force_update=force_update,\n )", "def set_key(self, key):\n self.key = key", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def error_group_name(self, error_group_name):\n\n self._error_group_name = error_group_name", "def set_aggregation_group_id(self, event, matchgroups):\n id = str(self.id)\n for field in self.use_fields_for_id:\n field = field.strip()\n id = id + str(event[field])\n \n attributes = matchgroups\n for i in attributes:\n id = id + i + attributes[i]\n event[\"group_id\"] = self.hash(id)", "def add_grouping_key(self, column):\n self.obj_payload[\"keys\"].append({\"column\":column})", "def 
consistency_group_snapshot_num(self, consistency_group_snapshot_num):\n\n self._consistency_group_snapshot_num = consistency_group_snapshot_num", "def test_convert_to_existing_group(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance_registry')\n assert 'glance_api' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance_registry'].has_host('localhost')\n assert inventoryloader.groups['glance_registry'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_registry'].vars", "def group_id(self, group_id):\n # type: (string_types) -> None\n\n if group_id is not None:\n if not isinstance(group_id, string_types):\n raise TypeError(\"Invalid type for `group_id`, type has to be `string_types`\")\n\n self._group_id = group_id", "def fill_ingredient(self, ingredient: str, quantity: int) -> None:\n self.inventory_availability[ingredient] = quantity", "def set_key_id(self, key_id=''):\n self.key_id = key_id", "def set_group_name(self, name):\n params = [('groupname', name, 'cdata')]\n\n self.get(COMMAND_UIC, 'SetGroupName', params)", "async def statset_apikey(self, key):\n self._set_api_key(key)\n await self.bot.say(\"API key successfully set.\")", "def SetModifierKey(self, modifierKey):\r\n\r\n self._modifierKey = modifierKey", "def set_selected_group(self, group_id):\n self.contact_list = self.contacts_by_group_list[group_id - 1][1][1]\n\n\t# Return the contact list so far", "def set_APIKey(self, value):\n super(EntityOverviewInputSet, self)._set_input('APIKey', value)", "async def set_volume(self, group_id: int, level: int) -> None:\n if not self.VOLUME_MIN <= level <= self.VOLUME_MAX:\n raise ValueError(f'Level must be between {self.VOLUME_MIN} and {self.VOLUME_MAX}')\n\n await self._api.call('group', 'set_volume', gid=group_id, level=level)", "def add_group(self, key):\n grp = GroupExplorator(self, key)\n self._grps[key] = grp\n return grp", "def set_slicer_key(self, slicer_key):\n self._slicerKey = slicer_key\n\n return", "def regroup(self, serial, group):\n api_page = \"/configuration/object/ap_regroup\"\n url = \"{}{}?{}&UIDARUBA={}\".format(\n self.base_url,\n api_page,\n self.config_path,\n self.uidaruba)\n\n obj_dict = {'serial-num': serial, 'new-group': group}\n obj_json = json.loads(json.dumps(obj_dict))\n\n resp = self.post(url, obj_json)\n\n print(resp.status_code)\n print(resp.text)", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def __init__(self, volgroup, mvip, username, password):\n for key, value in volgroup.items():\n setattr(self, key, value)\n self.ID = volgroup[\"volumeAccessGroupID\"]\n self.mvip = mvip\n self.username = username\n self.password = password", "def packaging_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"packaging_group_id\")", "def tracking_item_id(self, tracking_item_id):\n\n self._tracking_item_id = tracking_item_id", "def group_id(self):\n return self._group_id", "def group_id(self):\n return self._group_id" ]
[ "0.61608106", "0.5747907", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.54646176", "0.5375903", "0.53397626", "0.53107274", "0.53079605", "0.53079605", "0.53079605", "0.5276046", "0.5275524", "0.5234625", "0.51415646", "0.5023584", "0.50226706", "0.49493527", "0.49384618", "0.49356917", "0.49240038", "0.48625207", "0.47992325", "0.4719829", "0.47021872", "0.4699198", "0.46450278", "0.46191293", "0.46135053", "0.46121255", "0.46108988", "0.46107686", "0.45853764", "0.45850533", "0.45802486", "0.45790902", "0.45506784", "0.45494723", "0.4545563", "0.45425448", "0.45308846", "0.45213985", "0.4515926", "0.4515926", "0.45140123", "0.45098993", "0.44863394", "0.44856063", "0.44777143", "0.44584316", "0.4454474", "0.44423458", "0.4438467", "0.4437099", "0.4437099", "0.4437099", "0.44363546", "0.44336367", "0.44318962", "0.44318962", "0.4430696", "0.4430696", "0.4430217", "0.4414444", "0.44076332", "0.4404656", "0.44020608", "0.4399629", "0.43876782", "0.43857706", "0.43761134", "0.43674266", "0.43372548", "0.43199164", "0.43089807", "0.4290914", "0.42759335", "0.42650044", "0.42644623", "0.42397276", "0.4235914", "0.4224403", "0.4215062", "0.42128304", "0.4212694", "0.4211963", "0.42106667", "0.41983747", "0.4196058", "0.41886696", "0.4185769", "0.4179292", "0.41668162", "0.41418493", "0.41363478", "0.41363478" ]
0.8215857
0
Sets the inventory_items of this MigrateListingResponse.
def inventory_items(self, inventory_items):

    self._inventory_items = inventory_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inventory(self, inventory):\n\n self._inventory = inventory", "def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id", "def items(self, items: List[InlineResponse200Items]):\n if items is None:\n raise ValueError(\"Invalid value for `items`, must not be `None`\") # noqa: E501\n\n self._items = items", "def inventory(self):\n data = self.client.inventory(self.creds, self.transaction, self.environment)\n return list(data) if isinstance(data, set) else data", "def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)", "def set_ingredients(self, ingredients: [Ingredient]):\n self.ingredients = ingredients", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "def inventory(self):\n return self._inventory", "def inventory_id(self, inventory_id):\n if inventory_id is None:\n raise ValueError(\"Invalid value for `inventory_id`, must not be `None`\") # noqa: E501\n\n self._inventory_id = inventory_id", "def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)", "def update(self):\n try:\n data = self.api.get_inventory(self.site_id)\n inventory = data[\"Inventory\"]\n except KeyError:\n _LOGGER.error(\"Missing inventory data, skipping update\")\n return\n except (ConnectTimeout, HTTPError):\n _LOGGER.error(\"Could not retrieve data, skipping update\")\n return\n\n self.data = {}\n self.attributes = {}\n\n for key, value in inventory.items():\n self.data[key] = len(value)\n self.attributes[key] = {key: value}\n\n _LOGGER.debug(\"Updated SolarEdge inventory: %s, %s\", self.data, self.attributes)", "def get_users_inventory_with_http_info(self, **kwargs):\n\n all_params = ['inactive', 'size', 'page', 'filter_item_name', 'filter_item_id', 'filter_username', 'filter_group', 'filter_date']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_users_inventory\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'inactive' in params:\n query_params.append(('inactive', params['inactive']))\n if 'size' in params:\n query_params.append(('size', params['size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n if 'filter_item_name' in params:\n query_params.append(('filter_item_name', params['filter_item_name']))\n if 'filter_item_id' in params:\n query_params.append(('filter_item_id', params['filter_item_id']))\n if 'filter_username' in params:\n query_params.append(('filter_username', params['filter_username']))\n if 'filter_group' in params:\n query_params.append(('filter_group', params['filter_group']))\n if 'filter_date' in params:\n query_params.append(('filter_date', params['filter_date']))\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n 
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/inventories', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PageResourceUserInventoryResource',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def populate_variants(self, inventory=None):\n self.variants = list()\n\n option_combos = self.generate_option_combos()\n\n for combo in option_combos:\n self.variants.append(Variant(\n self.style_number,\n option_combo=combo,\n inventory=inventory))", "def set_all(self, value):\n self.__items = value", "def __init__(self, items: List[InlineResponse200Items]=None): # noqa: E501\n self.swagger_types = {\n 'items': List[InlineResponse200Items]\n }\n\n self.attribute_map = {\n 'items': 'items'\n }\n self._items = items", "def setitems(self, items):\n self.clear()\n # FIXME: this allows you to pass in an OrderedDict as well :-)\n self.update(items)", "def get_items_from(self, inventory=False):\n # if no outer inventory is provided, assume own inventory is needed\n if not inventory:\n inventory = self.inventory\n # get items normally\n items_ = MetaBeing.get_items_from(self, inventory)\n # return items in question\n return items_", "def add_item_to_user_inventory_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'user_inventory_add_request']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_item_to_user_inventory\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `add_item_to_user_inventory`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'user_inventory_add_request' in params:\n body_params = params['user_inventory_add_request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/users/{id}/inventory', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InvoiceResource',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def inventory_reference_id(self, inventory_reference_id):\n\n self._inventory_reference_id = 
inventory_reference_id", "async def update_items(self):\n items = self.steam_api.get_game_items()\n\n with open(\"Dota/items.json\", 'w') as f:\n json.dump(items, f, ensure_ascii=True, indent=4)", "async def list_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n start_date = request.args[\"start_date\"][0]\n end_date = request.args[\"end_date\"][0]\n inventory = model.list_inventory(hotel_id, start_date, end_date)\n if inventory == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"inventory\": inventory})", "def add_to_inventory(self, item_to_add_to_inventory):\n raise NotImplementedError(\"Subclasses define what adding to the inventory entails\")", "def update(self):\n inventoryJson = self.__agent__.getInventoryJson()\n itemsLeft = len(inventoryJson) != 0\n itemTypesInObservation = []\n itemsAdded = []\n itemsDeleted = []\n\n # Loop over all item types in the observation\n while (itemsLeft):\n itemType = inventoryJson[0][\"type\"]\n itemTypesInObservation.append(itemType)\n numOfItemInObs = inventoryJson[0][\"quantity\"]\n\n if itemType not in self.__inventory__: # Add an array of ids for this item type if it was never discovered\n self.__inventory__[itemType] = []\n numOfItemInInv = len(self.__inventory__[itemType])\n\n for i in range(1, len(inventoryJson)): # Loop over remaining items, and for each item of matching type, add to counter\n if inventoryJson[i][\"type\"] == itemType:\n numOfItemInObs += inventoryJson[i][\"quantity\"]\n inventoryJson = [item for item in inventoryJson if item[\"type\"] != itemType] # Remove all of those inventory items\n \n if numOfItemInObs > numOfItemInInv: # Add more items with unique id of this type to inventory\n for i in range(numOfItemInInv, numOfItemInObs):\n newItem = self.addItem(itemType)\n itemsAdded.append(newItem)\n elif numOfItemInObs < numOfItemInInv: # Remove some items of this type from inventory\n for i in range(numOfItemInObs, numOfItemInInv):\n if len(self.__inventory__[itemType]) > 0:\n lostItem = self.__inventory__[itemType].pop(0)\n itemsDeleted.append(lostItem)\n\n # Only perform another iteration if there are more items of different types that we have not yet checked\n if len(inventoryJson) == 0:\n itemsLeft = False\n \n # For any items in the inventory that was not in the observation, set the quantity to 0\n for itemType in self.__inventory__:\n if itemType not in itemTypesInObservation:\n self.__inventory__[itemType].clear()\n\n return (itemsAdded, itemsDeleted)", "def set_invocation_metadata(self, items: Tuple[Tuple[str, str]]):\n self._invocation_metadata = items", "def SetItems(self, items: Union[Iterable, dict]):\n if not items:\n return\n if isinstance(items, dict):\n items = [[key, str(value)] for key, value in items.items()]\n if self._sorted:\n items = sorted(items, key=lambda x: x[1])\n self._items = [key for key, _ in items]\n super().SetItems([value for _, value in items])\n else:\n if self._sorted:\n self._items = tuple(sorted(items))\n else:\n self._items = tuple(items)\n super().SetItems([str(v) for v in self._items])\n self.SetSelection(0)", "async def update(self) -> None:\n data = await self._state.http.get_user_inventory(self.owner.id64, self.game.app_id, self.game.context_id)\n self._update(data)", "def equip_items(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/EquipItems/\"))", 
"def items(self, items: List[RadioStation]):\n if items is None:\n raise ValueError(\"Invalid value for `items`, must not be `None`\") # noqa: E501\n\n self._items = items", "def Inventory(cls):\r\n l = ServerSet()\r\n rs = cls.find()\r\n for server in rs:\r\n l.append(server)\r\n return l", "def items(self) -> 'ItemsView[str, str]':\n return _EntityFixupItems(self)", "def fill_ingredient(self, ingredient: str, quantity: int) -> None:\n self.inventory_availability[ingredient] = quantity", "def ansible_inventory(self):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n path_lib = u'%s/library/beehive/' % (self.ansible_path)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity, \n module=path_lib)\n res = runner.get_inventory()\n resp = []\n for k,v in res.items():\n resp.append({u'group':k, u'hosts':u', '.join(v)})\n self.logger.debug(u'Ansible inventory nodes: %s' % res)\n self.result(resp, headers=[u'group', u'hosts'])", "def item_types(self, item_types):\n\n self._item_types = item_types", "def test_update_inventory(self):\n pass", "def FillInventoryServicePropertiesDuringEscrow(self, entity, request):\n return", "def __init__(self, errors=None, inventory_item_group_key=None, inventory_items=None, listing_id=None, marketplace_id=None, status_code=None, warnings=None): # noqa: E501 # noqa: E501\n self._errors = None\n self._inventory_item_group_key = None\n self._inventory_items = None\n self._listing_id = None\n self._marketplace_id = None\n self._status_code = None\n self._warnings = None\n self.discriminator = None\n if errors is not None:\n self.errors = errors\n if inventory_item_group_key is not None:\n self.inventory_item_group_key = inventory_item_group_key\n if inventory_items is not None:\n self.inventory_items = inventory_items\n if listing_id is not None:\n self.listing_id = listing_id\n if marketplace_id is not None:\n self.marketplace_id = marketplace_id\n if status_code is not None:\n self.status_code = status_code\n if warnings is not None:\n self.warnings = warnings", "def item_tag(self, name_parts: ResourceIdentifier, *values: Union[ResourceIdentifier, JsonObject], replace: bool = None):\n self.tag(name_parts, 'items', *values, replace=replace)", "def print_inventory(self):\r\n for item in self._inventory:\r\n print(item, '\\n')", "def preset_items(self):\r\n\r\n raise NotImplementedError", "def _clone_with_inventory(self, inventory):\n clone = copy.copy(self)\n clone._inventory = inventory\n\n return clone", "def addItems(c, items):\n\t\tcontainer.containersToSave[c['id_item_container']] = item.inventory.addItems(\n\t\t\titem.inventory.fromStr(c['items']),\n\t\t\titems\n\t\t)", "def get_set_inventory(self, itemid: str)->pd.DataFrame:\n json_inv = self.rc.get_subsets(itemid, ItemType.SET)\n if self.validate_json_set(json_inv):\n inv_list = self._json_inv_to_dict_list(json_inv)\n df = self._inv_dict_list_to_dataframe(inv_list)\n else:\n raise TypeError(\"Bricklink inventory must be a set\")\n return df", "def export_inventory_to_magento(self):\n Location = Pool().get('stock.location')\n\n product_templates = []\n instance = self.instance\n\n locations = Location.search([('type', '=', 'storage')])\n\n for magento_product_template in self.magento_product_templates:\n product_template = magento_product_template.template\n product_templates.append(product_template)\n\n with Transaction().set_context({'locations': map(int, locations)}):\n product_data = {\n 'qty': product_template.quantity,\n 'is_in_stock': '1' if 
product_template.quantity > 0\n else '0',\n }\n\n # Update stock information to magento\n with magento.Inventory(\n instance.url, instance.api_user, instance.api_key\n ) as inventory_api:\n inventory_api.update(\n magento_product_template.magento_id, product_data\n )\n\n return product_templates", "def line_items(self, line_items):\n\n self._line_items = line_items", "def vitamins(self, vitamins: List[RecipeObjectNutrientsCalories]):\n\n self._vitamins = vitamins", "def from_inventory(self, inventory, version=None):\n self.id = inventory.get('id', None)\n if 'versions' not in inventory:\n raise VersionMetadataException(\"No versions object in inventory\")\n if version is None:\n if 'head' not in inventory:\n raise VersionMetadataException(\"No head version specified in inventory\")\n version = inventory['head']\n # Now find version metadata\n if version not in inventory['versions']:\n raise VersionMetadataException(\"No version block for %s in inventory\")\n inv_version = inventory['versions'][version]\n self.version = version\n if 'created' in inv_version:\n self.created = inv_version['created']\n if 'message' in inv_version:\n self.message = inv_version['message']\n if 'user' in inv_version:\n if 'name' in inv_version['user']:\n self.name = inv_version['user']['name']\n if 'address' in inv_version['user']:\n self.address = inv_version['user']['address']", "def items(self) -> List[InlineResponse200Items]:\n return self._items", "def do_inventory(self, arg):\r\n\r\n if len(inventory) == 0:\r\n print('Inventory:\\n (nothing)')\r\n return\r\n\r\n # first get a count of each distinct item in the inventory\r\n itemCount = {}\r\n for item in inventory:\r\n if item in itemCount.keys():\r\n itemCount[item] += 1\r\n else:\r\n itemCount[item] = 1\r\n\r\n # get a list of inventory items with duplicates removed:\r\n print('Inventory:')\r\n for item in set(inventory):\r\n if itemCount[item] > 1:\r\n print(' %s (%s)' % (item, itemCount[item]))\r\n else:\r\n print(' ' + item)", "def get_inventory(self):\n from noc.inv.models.object import Object\n\n return list(Object.objects.filter(data__management__managed_object=self.id))", "def item_location(self, item_location):\n\n self._item_location = item_location", "def invoice_ids(self, invoice_ids):\n\n self._invoice_ids = invoice_ids", "def SetExpansionStateOfChildren(self, listOfExpandedItems, item):\n\n for child in self.GetItemChildren(item):\n self.SetExpansionStateOfItem(listOfExpandedItems, child)", "def display_inventory(self) -> None:\n\n print(\"Your current inventory includes:\\n\" + \" | \".join(self.player.inventory))", "def get_item_inventory(self, item):\n return [item_data for item_data in self.inventory if item_data['item_name'] == item]", "def update_game_items(self):\n _save_dict_to_file(self.get_game_items(), \"items.json\")", "def inventory_item_group_key(self, inventory_item_group_key):\n\n self._inventory_item_group_key = inventory_item_group_key", "def getInventory(self):\n return str(self.inventory)", "def _stash_items(sender, **kwargs):\n json_values = kwargs[\"json_values\"]\n stash = kwargs[\"stash\"]\n\n if \"items\" not in json_values:\n return\n\n json_items = json_values[\"items\"]\n\n stash[\"updated_items\"] = []\n stash[\"new_items\"] = []\n\n # create the items\n for item in json_items:\n # put the item in either new or updated items\n if \"id\" in item:\n stash[\"updated_items\"].append(ItemStash(item))\n else:\n stash[\"new_items\"].append(ItemStash(item))", "def restore_inventory(self):\n if config.get(\"aws\", 
\"s3_bucket\"):\n loaded_archives = self.load_archives_from_s3()\n\n with glacier_shelve() as d:\n archives = {}\n for a in loaded_archives:\n print a\n archives[a[\"filename\"]] = a[\"archive_id\"]\n d[\"archives\"] = archives\n else:\n raise Exception(\"You must set s3_bucket in order to backup/restore inventory to/from S3.\")", "def post(self, request, format=None):\n inventory_id = request.data.get('inventory_id', None)\n\n user_id = None\n if hasattr(request, 'user'):\n user_id = request.user.id\n\n if inventory_id is not None and user_id is not None:\n try:\n inventory_item = self.nv_models.Inventory.objects.get(\n pk=inventory_id)\n except self.nv_models.Inventory.DoesNotExist:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n try:\n user = self.nv_models.ExtendedUser.objects.get(pk=user_id)\n except self.nv_models.ExtendedUser.DoesNotExist:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n try:\n new_data = {}\n new_data['inventory'] = inventory_item\n new_data['customer'] = user\n new_order = self.nv_models.InventoryOrder(**new_data)\n new_order.save()\n\n return Response(self.InventoryOrderSerializer(new_order).data)\n except Exception as e:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)", "def import_catalog_items(\n self,\n request: import_.ImportCatalogItemsRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation.Operation:\n # Create or coerce a protobuf request object.\n\n request = import_.ImportCatalogItemsRequest(request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.import_catalog_items,\n default_timeout=None,\n client_info=_client_info,\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)\n\n # Wrap the response in an operation future.\n response = operation.from_gapic(\n response,\n self._transport.operations_client,\n import_.ImportCatalogItemsResponse,\n metadata_type=import_.ImportMetadata,\n )\n\n # Done; return the response.\n return response", "def yaml_inventory(self):\n inventory_file = 'inventory_file'\n with open(inventory_file, 'w') as invfile:\n yaml.dump(self.inventory_dict, invfile, default_flow_style=False, sort_keys=False)", "def add_revert_data(self, revert_items: list):\n if not isinstance(revert_items, (list, tuple)):\n return\n \n for item in revert_items:\n self.update_request.append('revert_items', item)", "def addToInventory(modList, item):\r\n modList.append(item)", "def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return", "def setItems(self, items):\n self._nsObject.removeAllItems()\n for item in items:\n if isinstance(item, NSMenuItem):\n menu = self._nsObject.menu()\n menu.addItem_(item)\n else:\n self._nsObject.addItemWithTitle_(item)", "def set_vectors(self, vecs):\n self.vecs = vecs[:]", "def _configure(self):\n Values._configure(self)\n self.values = [self.inventory.one, self.inventory.two]\n return", "def get_items(self):\r\n item_list = []\r\n for item in self._inventory:\r\n item_list.append(item._name)\r\n return item_list", "def virtual_machines(self, virtual_machines):\n\n self._virtual_machines = virtual_machines", "def get_inventory(self):\n raise NotImplementedError(\"Subclasses define what returning the inventory entails\")", "def set(self, episodes):\n 
self.episode_set = episodes", "def setContents(self, item):\n if item == None:\n self.pot.a(None, 0)\n else:\n self.pot.a(CraftMagicNumbers.getItem(item.getItemType()), item.getData())\n # PAIL: rename", "def items(items_json_folder, locations_json):\n output_filepath = os.path.join(\n current_app.config['CDS_MIGRATOR_KIT_LOGS_PATH'],\n 'items_{0}.json'\n )\n\n with open(locations_json, 'r') as fp_locations:\n locations = json.load(fp_locations)\n internal_locations = locations['internal_locations']\n\n total_import_records = 0\n total_migrated_records = 0\n _files = glob.glob(os.path.join(items_json_folder, \"*.json\"))\n for i, items_json in enumerate(_files):\n _log = \"Importing #{0} file\".format(i)\n logger.info(_log)\n click.secho(_log, fg='yellow')\n\n with open(items_json, 'r') as fp_items:\n items = json.load(fp_items)\n total_import_records += len(items)\n\n records = ItemsMigrator(items, internal_locations).migrate()\n total_migrated_records += len(records)\n\n with open(output_filepath.format(i), 'w') as fp:\n json.dump(records, fp, indent=2)\n\n _log = \"Total number of migrated records: {0}/{1}\".format(\n total_migrated_records, total_import_records)\n logger.info(_log)\n\n click.secho(_log, fg='green')", "def set_all_item_field_value(**kwargs):\n\n # Filters\n filters = {\n 'disabled': 0,\n 'name': ('like', '%{0}%'.format(kwargs['keyword']))\n }\n\n # Get all Item fields based from the filters\n items = frappe.get_all('Item', filters=filters, fields=['name'])\n\n # Counters\n cur_index = 1\n max_index = len(items)\n\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"\n print \"Setting all Item {0} field to the value {1}.\".format(kwargs['field'], kwargs['value'])\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"\n\n for item in items:\n print \"Processing item {0}/{1}...\".format(cur_index, max_index)\n\n frappe.db.set_value('Item', item.name, kwargs['field'], kwargs['value'])\n\n cur_index = cur_index + 1\n\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"\n print \"Done setting {0} items.\".format(max_index)\n print \"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\"", "def show_inventory(self):\n\t\tclear_screen()\n\n\t\tprint(\"# INVENTORY #\\n\")\n\t\tprint(\"Weapon{:.>15} \".format(self.inventory['Weapon']))\n\t\tprint(\"Clothing{:.>13} \".format(self.inventory['Clothing']))\n\t\tprint(\"Items{:.>16} \".format(self.inventory['Items']))\n\n\t\tpress_enter()", "def inventory(env):\n envs = environments()\n check_env(env, envs)\n\n headers = [] # a list of fact descriptions to go\n # in the table header\n fact_names = [] # a list of inventory fact names\n fact_data = {} # a multidimensional dict for node and\n # fact data\n\n # load the list of items/facts we want in our inventory\n try:\n inv_facts = app.config['INVENTORY_FACTS']\n except KeyError:\n inv_facts = [('Hostname', 'fqdn'),\n ('IP Address', 'ipaddress'),\n ('OS', 'lsbdistdescription'),\n ('Architecture', 'hardwaremodel'),\n ('Kernel Version', 'kernelrelease')]\n\n # generate a list of descriptions and a list of fact names\n # from the list of tuples inv_facts.\n for desc, name in inv_facts:\n headers.append(desc)\n fact_names.append(name)\n\n query = AndOperator()\n fact_query = OrOperator()\n fact_query.add([EqualsOperator(\"name\", name) for name in fact_names])\n\n if env != '*':\n query.add(EqualsOperator(\"environment\", env))\n\n query.add(fact_query)\n\n # get all the facts from PuppetDB\n facts = puppetdb.facts(query=query)\n\n for fact in facts:\n if fact.node not in fact_data:\n fact_data[fact.node] = {}\n\n 
fact_data[fact.node][fact.name] = fact.value\n\n return Response(stream_with_context(\n stream_template(\n 'inventory.html',\n headers=headers,\n fact_names=fact_names,\n fact_data=fact_data,\n envs=envs,\n current_env=env\n )))", "async def add_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n room_inventory = request.args[\"room_inventory\"][0]\n model.add_inventory(hotel_id, room_type, room_inventory)\n return json({\"success\": True})", "def virtual_volumes(self, virtual_volumes):\n\n self._virtual_volumes = virtual_volumes", "def replenish(self, amount: int):\n self._inventory += amount", "def inventory(self):\n\n #when the item list is 0 , print out having no items \n if len(self.items) == 0:\n \n print('The player has no items')\n\n #if not, print out the item list \n else:\n print(self.items)", "def change_product_qty(self):\n Inventory = self.env['stock.inventory']\n\n\n for wizard in self:\n product = wizard.product_id.with_context(location=wizard.location_id.id, lot_id=wizard.lot_id.id)\n line_data = wizard._action_start_line()\n\n\n if wizard.product_id.id and wizard.lot_id.id:\n inventory_filter = 'none'\n elif wizard.product_id.id:\n inventory_filter = 'product'\n else:\n inventory_filter = 'none'\n inventory = Inventory.create({\n 'name': _('INV: %s') % tools.ustr(wizard.product_id.display_name),\n 'filter': inventory_filter,\n 'product_id': wizard.product_id.id,\n 'location_id': wizard.location_id.id,\n 'lot_id': wizard.lot_id.id,\n 'line_ids': [(0, 0, line_data)],\n })\n inventory.action_done()\n return {'type': 'ir.actions.act_window_close'}", "def update(self):\n # convert the text list of item identifiers into a list of parsed identifiers\n item_identifiers = filter(None, self.packing_list.list_items.replace('\\r', '').split('\\n'))\n # loop through list of parsed identifiers\n for item_identifier in item_identifiers:\n # 1. get the 'item' instance for this identifier and update it (e.g. SubjectRequisition, Aliquot)\n # 2. 
create a 'packing_list_item' instance related to this packing_list\n for item_model in self.packing_list.item_models:\n try:\n try:\n item = item_model.objects.get(specimen_identifier=item_identifier)\n optional_attrs = {'panel': item.panel, 'item_priority': item.priority}\n except FieldError:\n item = item_model.objects.get(aliquot_identifier=item_identifier)\n optional_attrs = {}\n user = self.user or item.user_modified\n self._update_item(item, user)\n self._create_or_update_packinglistitem(\n item_identifier,\n item,\n user,\n optional_attrs=optional_attrs)\n except item_model.DoesNotExist:\n pass", "def get_inventory(self, resources):\n uri = '/api/services/inventory'\n body = {'resources': resources}\n result = self.session.post(uri, body=body)\n return result", "def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory full\n return 2\n self.ItemList.append(item)\n return 0", "def parse_inventory_item(record):\n if not isinstance(record, dict):\n raise Exception\n\n item = models.InventoryItem()\n modifier_map = {}\n\n for key in record:\n # handles parsing of item id field name to id\n if key == 'item id':\n record['id'] = record[key]\n del record[key]\n key = 'id'\n\n # handles removal of currency symbol from value\n value = str(record[key]).replace('$', '')\n\n if value is None:\n continue\n\n # set model property for non-modifier keys; modifier keys state stored in modifier_map\n if key in ITEM_SANITISATION:\n value = ITEM_SANITISATION[key](value)\n setattr(item, validate.enforce_key_consistency(key), value)\n elif 'modifier_' in key:\n modifier_map = __create_update_price_modifiers(key, record[key], modifier_map)\n else:\n setattr(item, validate.enforce_key_consistency(key), record[key])\n\n # updates modifiers when all keys have been parsed\n modifiers = [modifier_map[key] for key in modifier_map]\n setattr(item, validate.enforce_key_consistency('modifiers'), modifiers)\n\n return item", "def setArmor(self, armor):\n self.av = armor", "def update_list(self):\n Asset.update_list(self, uri_keys=('ems_sys', 'list'))", "def update_one_set_inventory(set_num):\n set_inv = reapi.pull_set_inventory(set_num)", "def inventory():\n try:\n check50.run(run_command).stdin(\"INVENTORY\").stdout(\"Your inventory is empty.\")\n except check50.Failure as error:\n raise check50.Failure(f\"Let the player know they have no items.\\n {error}\")\n check = check50.run(run_command)\n moves = [\"IN\", \"TAKE keys\", \"INVENTORY\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS: a set of keys\")", "def update_matsubs_inventory(scs_matsubs_inventory, matsubs_library_rel_path, readonly=False):\n matsubs_library_filepath = _path_utils.get_abs_path(matsubs_library_rel_path)\n if matsubs_library_filepath:\n matsubs_container = _sii.get_data_from_file(matsubs_library_filepath)\n if matsubs_container:\n\n # CLEAR INVENTORY\n scs_matsubs_inventory.clear()\n\n # ADD \"NONE\" ITEM IN INVENTORY\n matsubs_item = scs_matsubs_inventory.add()\n matsubs_item.name = \"None\"\n matsubs_item.item_id = 'none'\n matsubs_item.item_description = \"No Material Substance\"\n\n # ADD ALL THE OTHER ITEMS FROM CONTAINER INTO INVENTORY\n for item in matsubs_container:\n if item.type == 'game_substance':\n if item.id.startswith('.'):\n if 'name' in item.props:\n matsubs_name = item.props['name']\n else:\n continue\n matsubs_item = scs_matsubs_inventory.add()\n matsubs_item.name = matsubs_name\n matsubs_item.item_id = item.id[1:]\n # 
matsubs_item.item_description = \"\"\n\n if not readonly:\n update_item_in_file('Paths.MatSubsRelFilePath', matsubs_library_rel_path)", "def gen_inventory(self):\n if isinstance(self.resource, list):\n self.my_add_group(self.resource, 'default_group')\n elif isinstance(self.resource, dict):\n for groupname, hosts_and_vars in self.resource.iteritems():\n self.my_add_group(hosts_and_vars.get(\"hosts\"), groupname, hosts_and_vars.get(\"vars\"))", "def SetExpansionStateOfItem(self, listOfExpandedItems, item):\n \n if self.GetItemIdentity(item) in listOfExpandedItems:\n self._window.Expand(item)\n self.SetExpansionStateOfChildren(listOfExpandedItems, item)\n else:\n self._window.Collapse(item)", "def print_items(self):\n for items in inventory:\n print(f\"- {items.upper()}\")", "def move_inventory(item, from_location, to_location, quantity, user=None):\n\n remove_from = InventoryTransaction.remove_from_inventory(\n item=item, location=from_location, quantity=quantity, user=user)\n \n add_to = InventoryTransaction.add_to_inventory(\n item=item, location=to_location, quantity=quantity, user=user)\n\n remove_from.pair_transaction = add_to\n add_to.pair_transaction = remove_from\n \n return (remove_from, add_to)", "def set_item(self, item):\n self.item = item", "def set_item(self, item):\n self.item = item", "def list_inventory(self):\n\n print('Your inventory contains:')\n #i = 1\n #inv_dict = {}\n for item in self.bag_of_holding:\n if 'casted' not in item.name:\n try:\n print(item.name)\n except:\n pass\n\n #inv_dict[str(i)] = item\n #i += 1\n #return inv_dict", "def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'public':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.add_host(droplet['id'], dest)\n\n self.add_host(droplet['name'], dest)\n\n # groups that are always present\n for group in ('digital_ocean',\n 'region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n self.add_host(group, dest)\n\n # groups that are not always present\n for group in (droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n self.add_host(image, dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n self.add_host(tag, dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info", "def allItems(self):\n items = []\n for itemType in self.__inventory__:\n for item in self.__inventory__[itemType]:\n items.append(item)\n return items" ]
[ "0.64316744", "0.5630393", "0.56029606", "0.53314924", "0.52890193", "0.52213633", "0.5127491", "0.5053964", "0.50323474", "0.50142586", "0.49879345", "0.4986131", "0.48904306", "0.48881936", "0.48744634", "0.4842486", "0.48069787", "0.466679", "0.4665811", "0.46414807", "0.4606507", "0.45951387", "0.4577093", "0.45438474", "0.45054206", "0.45002583", "0.447945", "0.44677284", "0.44659328", "0.4442392", "0.4430345", "0.4418893", "0.44105655", "0.44037482", "0.43988213", "0.43907702", "0.43817294", "0.4363546", "0.43569526", "0.4355752", "0.43556985", "0.43259528", "0.42927852", "0.4290323", "0.42872265", "0.42846796", "0.42763972", "0.42675143", "0.424937", "0.42383298", "0.42371145", "0.4227697", "0.42265093", "0.4225945", "0.4225343", "0.4217313", "0.4217288", "0.42092973", "0.42066756", "0.4204065", "0.41958633", "0.41848633", "0.41839296", "0.41809234", "0.4162579", "0.4156937", "0.41527203", "0.41316885", "0.41244444", "0.41153333", "0.40992093", "0.4091696", "0.40806568", "0.40741584", "0.40655667", "0.40649077", "0.40637055", "0.4052", "0.40481237", "0.4044765", "0.4041554", "0.40400985", "0.40341455", "0.403246", "0.4022064", "0.4013124", "0.4002392", "0.399818", "0.3995161", "0.39915845", "0.39876705", "0.39825237", "0.39811033", "0.39795923", "0.39788747", "0.3977173", "0.3977173", "0.39714578", "0.3971455", "0.39704806" ]
0.78284794
0
Sets the listing_id of this MigrateListingResponse.
def listing_id(self, listing_id):

    self._listing_id = listing_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_listing(request, listing_id):\n listing = get_object_or_404(Listing, pk=listing_id)\n\n listing.delete()\n messages.success(\n request,\n 'Your listing has been removed from the database.')\n\n return redirect(reverse('addlisting'))", "def update(self, amz_listing):\n amz_listing.sku = self.asin\n amz_listing.title = self.title\n amz_listing.brand = self.brand\n amz_listing.model = self.model\n amz_listing.upc = self.upc\n amz_listing.quantity = self.quantity\n amz_listing.url = self.url\n amz_listing.salesrank = self.salesrank\n amz_listing.offers = self.offers\n amz_listing.hasprime = self.prime\n\n # Only update price if price information is provided\n if self._tag.xpath('.//Offers'):\n amz_listing.price = self.price", "def view_and_edit_listing(request, listing_id):\n categories = Category.objects.all()\n listing = get_object_or_404(Listing, pk=listing_id)\n\n if request.method == 'POST':\n editform = AddListingForm(\n request.POST,\n request.FILES,\n instance=listing)\n if editform.is_valid():\n listing.save()\n messages.success(\n request,\n 'Thank you. Your listing has been updated')\n return redirect(reverse('addlisting'))\n else:\n editform = AddListingForm(instance=listing)\n\n context = {\n 'editform': editform,\n 'listing': listing,\n 'categories': categories\n }\n return render(request, 'editlisting.html', context)", "def listing_show(listing_id):\n\n listing = Listing.query.get_or_404(listing_id)\n return (jsonify(listing=listing.serialize(isDetailed=True)), 200)", "def set_id_number(self, id_number):\n self.id_number = id_number", "def set_id(self, id_):\n\n self.id_ = id_", "def sportsbook_id(self, sportsbook_id):\n\n self._sportsbook_id = sportsbook_id", "def setID(self, idf):\n self.id = idf", "def deleteListing(id):\n try:\n # Call delete_one() on listings collection\n db.listings.delete_one({\"_id\": id})\n return redirect(url_for(\"main.landingPage\"))\n except (ValueError):\n # Return custom 500 error page, set status code to 500\n return render_template(\"500.html\"), 500", "def listing_create():\n listing_data = request.json.get(\"listing\")\n form = ListingCreateForm(data=listing_data)\n\n if form.validate():\n listing = Listing.create(form)\n db.session.commit()\n # TODO: reevaluate error with a try and except later\n return (jsonify(listing=listing.serialize(isDetailed=True)), 201)\n else:\n errors = []\n for field in form:\n for error in field.errors:\n errors.append(error)\n return (jsonify(errors=errors), 400)", "def set_id(self, id):\n self.data['id'] = id", "def add_comment(request, listing_id):\n if request.method == \"POST\":\n try:\n listing = Listing.objects.get(pk=listing_id)\n except Listing.DoesNotExist:\n return render(request, \"auctions/errors.html\", {\"error_message\":\n \"something went wrong, the id url argument is not valid\"})\n\n CommentForm = modelform_factory(Comment, exclude=(\"commenter\",\"listing\"))\n # validate and save from the formdata to the database\n form = CommentForm(request.POST)\n try:\n comment = form.save(commit=False)\n comment.commenter = request.user\n comment.listing = listing\n comment.save()\n except:\n # if something went wrong with comment form \n return render(request, \"auctions/errors.html\", {\"error_message\":\n \"something went wrong with the submission of your comment, try again\"})\n\n return redirect(reverse(\"single_listing\", \n args=[listing.title]) +f\"?id={listing.id}\")", "def loan_id(self, loan_id):\n\n self._loan_id = loan_id", "def get(self, request, listing_id, format=None):\n 
try:\n fav_listing = FavoriteListing.objects.get(user_id=request.user.id, listing_id=listing_id)\n except FavoriteListing.DoesNotExist:\n fav_listing = None\n\n response = {}\n\n # this should not be necessary since we have declared above that the person who access this url needs IsAuthenticated permission\n # if self.request.user.is_authenticated:\n response['status'] = 'ok'\n if fav_listing:\n fav_listing.delete()\n response['message'] = 'Listing removed from favorites!'\n else:\n fav_listing = FavoriteListing(user_id=request.user,\n listing_id=Listing.objects.get(id=listing_id))\n fav_listing.save()\n response['message'] = 'Listing added to favorites!'\n # else:\n # response['status'] = 'error'\n # response['message'] = 'You must be authenticated for this.'\n\n return Response(response)", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def recipe_id(self, recipe_id):\n\n self._recipe_id = recipe_id", "def building_id(self, building_id):\n if self.local_vars_configuration.client_side_validation and building_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `building_id`, must not be `None`\") # noqa: E501\n\n self._building_id = building_id", "def instance_id(self, instance_id):\n\n self._instance_id = instance_id", "def feed_id(self, feed_id):\n\n self._feed_id = feed_id", "def id(self, _id):\n self.metadata[\"id\"] = _id", "def set_id(self, id):\n self.__id = id", "def batch_id(self, batch_id):\n\n self._batch_id = batch_id", "def status_id(self, status_id):\n\n self._status_id = status_id", "def id_status(self, id_status):\n self._id_status = id_status", "def close_bid(request, listing_id): \n try:\n listing = Listing.objects.get(pk=listing_id) \n except Listing.DoesNotExist:\n return JsonResponse({\"success\":False})\n\n if request.user == listing.seller:\n listing.isActive = False\n listing.save()\n return JsonResponse({\"success\":True})\n\n return JsonResponse({\"success\":False})", "def update(self, listing):\n q = QSqlQuery()\n\n # Add the product group if it isn't there already\n q.exec_(\"INSERT OR IGNORE INTO ProductGroups(ProductGroupName) VALUES('{}')\".format(listing.productgroup))\n q.exec_(\"SELECT ProductGroupId FROM ProductGroups WHERE ProductGroupName='{}'\".format(listing.productgroup))\n q.first()\n\n productgroupId = q.value(0)\n\n # Get the category association\n q.exec_(\"SELECT CategoryId FROM ProductGroups WHERE ProductGroupId={}\".format(productgroupId))\n q.first()\n\n categoryId = q.value(0)\n\n # Add the merchant name\n merchname = re.sub(r\"'\", \"\\'\\'\", listing.merchant) # SQL uses two single quotes to escape a single quote...\n q.exec_(\"INSERT OR IGNORE INTO Merchants(MerchantName) VALUES('{}')\".format(merchname))\n q.exec_(\"SELECT MerchantId FROM Merchants WHERE MerchantName='{}'\".format(merchname))\n q.first()\n\n merchantId = q.value(0)\n\n tracking = 0\n myprice = 0\n mycost = 0\n fbafees = 0\n monthlyvolume = 0\n\n # Check if the listing has already been added to the database\n q.exec_(\"SELECT * FROM Products WHERE Asin='{}'\".format(listing.asin))\n q.first()\n if q.isValid():\n record = q.record()\n # The listing is already in the database. 
Add it's current values to the observation table\n q.prepare(\"INSERT INTO Observations(Asin, Timestamp, SalesRank, Offers, Prime, Price, MerchantId) \"\n \"VALUES(?, ?, ?, ?, ?, ?, ?)\")\n q.addBindValue(record.value('Asin'))\n q.addBindValue(record.value('Timestamp'))\n q.addBindValue(record.value('SalesRank'))\n q.addBindValue(record.value('Offers'))\n q.addBindValue(record.value('Prime'))\n q.addBindValue(record.value('Price'))\n q.addBindValue(record.value('MerchantId'))\n q.exec_()\n\n # Grab values that we don't want to overwrite\n q.exec_(\"SELECT Tracking, MyPrice, MyCost, FBAFees, MonthlyVolume FROM Products WHERE Asin='{}'\".format(\n listing.asin))\n q.first()\n tracking = q.value(0)\n myprice = q.value(1)\n mycost = q.value(2)\n fbafees = q.value(3)\n monthlyvolume = q.value(4)\n\n # Calculate the CRank\n crank = self.getListingRank(categoryId, listing.salesrank, listing.offers, listing.prime)\n\n # Determine if it is a private label product\n if (fuzz.partial_ratio(listing.merchant.lower(), listing.title.lower()) > 80) or \\\n (fuzz.partial_ratio(listing.merchant.lower(), listing.make.lower()) > 80):\n privatelabel = True\n else:\n privatelabel = False\n\n time = QDateTime.currentDateTimeUtc().toTime_t()\n\n q.prepare(\n 'INSERT OR REPLACE INTO Products(Tracking, CRank, Timestamp, Asin, ProductGroupId, CategoryId, SalesRank, Offers,'\n 'Prime, Price, MerchantId, Title, Url, PrivateLabel, Manufacturer, PartNumber, Weight, ItemLength,'\n 'ItemWidth, ItemHeight, MyPrice, MyCost, FBAFees, MonthlyVolume, UPC) '\n 'VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)')\n\n fields = [tracking, crank, time, listing.asin, productgroupId, categoryId, listing.salesrank, listing.offers,\n listing.prime, listing.price, merchantId, listing.title, listing.url, privatelabel,\n listing.make, listing.model, listing.weight / 100, listing.length / 100,\n listing.width / 100, listing.height / 100, myprice, mycost, fbafees, monthlyvolume, listing.upc]\n\n for field in fields:\n q.addBindValue(field)\n\n q.exec_()\n\n if q.lastError().type() != QSqlError.NoError:\n print('Could not insert record: ' + q.lastError().text())\n\n self.select()", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n 
self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id" ]
[ "0.54151386", "0.50424653", "0.49489492", "0.49403444", "0.454123", "0.4485277", "0.4483447", "0.44773117", "0.44772324", "0.44687784", "0.4437293", "0.44051337", "0.4390934", "0.4387617", "0.43608093", "0.43608093", "0.4329542", "0.430929", "0.4308864", "0.4278942", "0.42528984", "0.42509404", "0.423614", "0.42245173", "0.4179956", "0.41413036", "0.41024202", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386", "0.40986386" ]
0.8304697
0
Sets the marketplace_id of this MigrateListingResponse.
def marketplace_id(self, marketplace_id):
    self._marketplace_id = marketplace_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_AWSMarketplaceId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMarketplaceId', value)", "def registration_marketplace_id(self, registration_marketplace_id):\n\n self._registration_marketplace_id = registration_marketplace_id", "def listing_id(self, listing_id):\n\n self._listing_id = listing_id", "def feed_id(self, feed_id):\n\n self._feed_id = feed_id", "def sportsbook_id(self, sportsbook_id):\n\n self._sportsbook_id = sportsbook_id", "def get_marketplace(self, marketplace_id):\n return MarketplaceResource(self._config).get(marketplace_id)", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def branding_theme_id(self, branding_theme_id):\n\n self._branding_theme_id = branding_theme_id", "def set_AWSMerchantId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMerchantId', value)", "def financial_offer_id(self, financial_offer_id):\n\n self._financial_offer_id = financial_offer_id", "def set_id(self, id_):\n\n self.id_ = id_", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def put_place_by_id(place_id):\n place_obj = storage.get(\"Place\", place_id)\n if place_obj is None:\n abort(404)\n json_obj = request.get_json()\n if not request.json:\n return jsonify(\"Not a JSON\"), 400\n ignore = [\"id\", \"user_id\", \"city_id\", \"created_at\", \"updated_at\"]\n for key, value in json_obj.items():\n if key not in ignore:\n setattr(place_obj, key, value)\n place_obj.save()\n updated_place = place_obj.to_dict()\n return jsonify(updated_place), 200", "def put_place(place_id):\n place = storage.get('Place', place_id)\n if place is None:\n abort(404)\n kwargs = request.get_json()\n if kwargs is None:\n return ('Not a JSON', 400)\n for k, v in kwargs.items():\n setattr(place, k, v)\n place.save()\n return (jsonify(place.to_json()), 200)", "def set_id(self, id):\n self.data['id'] = id", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def market(self, market):\n self._market = market", "def _set_id(self):\n raise NotImplementedError()", "def warehouse_id(self, warehouse_id):\n\n self._warehouse_id = warehouse_id", "def set_available_places_for_run(\n self,\n run_id: str,\n actual_available_places: int,\n listed_available_places: int,\n ) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n UPDATE runs\n SET actual_available_places = ?, listed_available_places = ?\n WHERE run_id = ?\n \"\"\",\n (\n actual_available_places,\n listed_available_places,\n run_id,\n ),\n )\n conn.commit()\n return None", "def update_place(place_id):\n place = storage.get(Place, place_id)\n\n if place is None:\n abort(404)\n\n put_data = request.get_json()\n if not put_data:\n abort(400, 'Not a JSON')\n\n for k, v in put_data.items():\n if k not in ['id', 'user_id', 'city_id', 'created_at',\n 'updated_at']:\n setattr(place, k, v)\n place.save()\n storage.save()\n return make_response(jsonify(place.to_dict()), 200)", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def stock_id(self, stock_id):\n\n 
self._stock_id = stock_id", "def sentence_id(self, sentence_id):\n\n self._sentence_id = sentence_id", "def response_id(self, response_id):\n\n self._response_id = response_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def set_fragment_id(self, fragment_id):\n assert isinstance(fragment_id, str)\n\n if self.chain is not None:\n chk_frag = self.chain.get_fragment(fragment_id)\n if chk_frag is not None or chk_frag != self:\n raise FragmentOverwrite()\n\n self.fragment_id = fragment_id\n\n for atm in self.iter_atoms():\n atm.set_fragment_id(fragment_id)\n\n if self.chain is not None:\n self.chain.sort()", "def id(self, _id):\n self.metadata[\"id\"] = _id", "def button_id(self, button_id):\n\n self._button_id = button_id", "def set_model_id(self, model_id):\n assert isinstance(model_id, int)\n for atm in self.iter_alt_loc():\n atm.model_id = model_id", "def station_id(self, station_id: str):\n\n self._station_id = station_id", "def origin_id(self, origin_id):\n\n self._origin_id = origin_id", "def set_id(self, id):\n self.__id = id", "def set_company_id_value(self, company_id_value):\n self.company_id_value = company_id_value", "def _set_locator_id(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"locator-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"locator_id must be of a type compatible with base=unicode, is_leaf=True, yang_name=\"locator-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True\"\"\")\n self.__locator_id = t\n if hasattr(self, '_set'):\n self._set()", "def setID(self, id):\n self._id = id\n return self.callRemote('setID', id)", "def setid(self):\n return self.__setid", "def site_id(self, site_id):\n\n self._site_id = site_id", "def site_id(self, site_id):\n\n self._site_id = site_id", "def business_id(self, business_id):\n\n self._business_id = business_id", "def place(self, place):\n if self.local_vars_configuration.client_side_validation and place is None: # noqa: E501\n raise ValueError(\"Invalid value for `place`, must not be `None`\") # noqa: E501\n\n self._place = place", "def _set_id(self, value):\n pass", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def 
id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id" ]
[ "0.68738204", "0.60930204", "0.5767067", "0.5177802", "0.5172527", "0.49900728", "0.49834523", "0.49834523", "0.49834523", "0.49834523", "0.49759004", "0.49217004", "0.4853357", "0.48184267", "0.47949788", "0.47403908", "0.4719058", "0.46994156", "0.46824563", "0.4669242", "0.4631705", "0.4620464", "0.460385", "0.45889878", "0.45663267", "0.45663267", "0.4558081", "0.4558081", "0.4558081", "0.45579296", "0.45441967", "0.45415765", "0.4532535", "0.4532535", "0.45239", "0.45181572", "0.45045763", "0.45021886", "0.44987258", "0.44851583", "0.44834772", "0.44630623", "0.44591388", "0.4436774", "0.4433307", "0.44231483", "0.44231483", "0.4414408", "0.44051772", "0.43964174", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542", "0.43903542" ]
0.7607272
0
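A note for orientation: the positive document in the record above is the bare setter of a generated API-client model, so the retrieval query is effectively its docstring. Below is a minimal sketch of the property pattern such a setter normally sits inside — the MigrateListingResponse class shape and the sample value are assumptions for illustration, not given by the source:

class MigrateListingResponse:
    # Hypothetical container, reconstructed only to show where the setter fits.

    def __init__(self):
        self._marketplace_id = None

    @property
    def marketplace_id(self):
        # Getter paired with the setter shown in the record above.
        return self._marketplace_id

    @marketplace_id.setter
    def marketplace_id(self, marketplace_id):
        # Sets the marketplace_id of this MigrateListingResponse.
        self._marketplace_id = marketplace_id


resp = MigrateListingResponse()
resp.marketplace_id = "EBAY_US"  # illustrative value; attribute assignment routes through the setter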
Sets the status_code of this MigrateListingResponse.
def status_code(self, status_code):
    self._status_code = status_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_code(self, status_code):\n allowed_values = [1, 100, 101, 102, 103, 104, 105] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status_code not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status_code` ({0}), must be one of {1}\" # noqa: E501\n .format(status_code, allowed_values)\n )\n\n self._status_code = status_code", "def extract_status(self, status_headers):\n self.status = status_headers.get_statuscode()\n if not self.status:\n self.status = '-'", "def setStatus(self, status):\n self.__status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def status_code(self, status_code):\n allowed_values = [\"DRAFT\", \"IN_PROGRESS\", \"CREATED\", \"COMPLETED\", \"PARTIAL\", \"FAILED\", \"REFUNDED\", \"CANCELLED\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status_code not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status_code` ({0}), must be one of {1}\" # noqa: E501\n .format(status_code, allowed_values)\n )\n\n self._status_code = status_code", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def set_status(self, status: HTTPProxyStatus) -> None:\n self._status = status\n self.update_actor_details(status=self._status)", "def status_code(self):\n return self._status_code", "def status(self, status: str):\n\n self._status = status", "def status(self, status: str):\n\n self._status = status", "def set_status( code ):", "def status(self, status: int):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status", "def status_code(self):\n return int(self.status.split()[1])", "def status_code(self) -> int:\n raise NotImplementedError # pragma: no cover", "def status(self, status):\n self._set_property_(self.STATUS, str(status))", "def status(self, value):\r\n if 
isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')", "def test_status_code(self):\n assert self.list_response.status_code == 200", "def error_code(self, obj, statusCode):\n pass", "def status(self, value):\n if isinstance(value, (long, int)):\n if 100 <= value <= 900:\n status = _RESPONSE_STATUSES.get(value, '')\n if status:\n self._status = '%d %s' % (value, status)\n else:\n self._status = str(value)\n else:\n raise ValueError('Bad response code: %d' % value)\n elif isinstance(value, basestring):\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n if _RE_RESPONSE_STATUS.match(value):\n self._status = value\n else:\n raise ValueError('Bad response code: %d' % value)\n else:\n raise TypeError('Bad type of response code.')", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def get_status_code(self, response):\n if hasattr(response, 'status_int'):\n return response.status_int\n return response.status", "def response_code(self, response_code):\n\n self._response_code = response_code", "def response_code(self, response_code):\n\n self._response_code = response_code", "def get_status_code(self):\n return self.__response.status_code", "def status_code(self):\n return int(self._status[:3])", "def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code", "def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "def status(self, status):\n allowed_values = [\"unknown\", \"failed\", \"done\", \"running\", \"waiting\", \"skipped\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\")\n allowed_values = [\"success\", \"warning\", \"error\", \"pending\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def SetStatus(self, status):\r\n self.status = status", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n if status is not None and len(status) < 1:\n raise ValueError(\"Invalid value for `status`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._status = status", "def status_code(self) -> int:\n return pulumi.get(self, \"status_code\")", "def set_zi_migration_status(self, zi_migration_status):\n self.zi_migration_status = zi_migration_status", "def status_code(self):\r\n return int(self._status[:3])", "def status_id(self, 
status_id):\n\n self._status_id = status_id", "def _add_status_code(runner, return_value):\n if isinstance(return_value, Mapping):\n status_code = return_value.get('statusCode')\n if status_code:\n runner.resource['metadata']['status_code'] = status_code", "def status(self, status):\n allowed_values = [\"loaned\", \"finished\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\".format(status, allowed_values) # noqa: E501\n )\n\n self._status = status", "def setStatus(self, status, details=None):\n self.onStatusSent(None, status)", "def setResponseCode(code, message=None):", "def setStatus(self, newStatus):\n self._status = newStatus", "def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[status.lower()]\n except KeyError:\n self.status = status", "def add_status_code(code):\n def class_decorator(cls):\n cls.status_code = code\n return cls\n return class_decorator", "def status(self, status):\n allowed_values = [\"REQUESTED\", \"CREATE_IN_PROGRESS\", \"AVAILABLE\", \"UPDATE_IN_PROGRESS\", \"UPDATE_REQUESTED\", \"UPDATE_FAILED\", \"CREATE_FAILED\", \"ENABLE_SECURITY_FAILED\", \"PRE_DELETE_IN_PROGRESS\", \"DELETE_IN_PROGRESS\", \"DELETE_FAILED\", \"DELETE_COMPLETED\", \"STOPPED\", \"STOP_REQUESTED\", \"START_REQUESTED\", \"STOP_IN_PROGRESS\", \"START_IN_PROGRESS\", \"START_FAILED\", \"STOP_FAILED\", \"WAIT_FOR_SYNC\", \"MAINTENANCE_MODE_ENABLED\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )", "def status(self, status):\n if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status", "def status(self, status):\n allowed_values = [\"D\", \"P\", \"V\", \"S\", \"M\", \"I\", \"R\", \"C\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n allowed_values = [\"I\", \"A\", \"S\", \"T\", \"D\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status`, must be one of {0}\"\n .format(allowed_values)\n )\n self._status = status", "def setstatus(self, status):\n with self.lock:\n self.status = status", "def code(self):\n\t\treturn self.status_code", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n allowed_values = [\"EXECUTING\", \"SUCCESS\", \"FAILURE\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def status_code(self) -> Optional[int]:\n if self.response is not None:\n return self.response.status_code\n return None", "def status(self, status):\n allowed_values = [\"NEW\", \"ACCEPTED\", \"DECLINED\", \"REJECTED\", \"DELIVERED\", \"EMAILED\", \"COMPLETED\", \"CANCELLED\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for 
`status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n allowed_values = [\"Pending\", \"Running\", \"Success\", \"Failed\", \"Skipped\", \"SuccessWithWarning\", \"Canceled\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def response_status_id(self, response_status_id):\n\n self._response_status_id = response_status_id", "def _set_status(self, action, status):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "def status(self, status):\n if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n status is not None and len(status) < 1):\n raise ValueError(\"Invalid value for `status`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._status = status", "def assertStatusCode(self, response, status):\n self.assertEqual(response.status_code, status)\n return", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN", "def process_status_code(self, status_code):\n LogParser.increment_count(self.status_codes, status_code)", "def status(self, code, content_length=None):", "def status_detail(self, status_detail):\n\n self._status_detail = status_detail", "def status(self, status):\n allowed_values = [\"open\", \"finished\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\".format(status, allowed_values) # noqa: E501\n )\n\n self._status = status", "def status(self, status):\n allowed_values = [1, 2, 3] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\".format(status, allowed_values) # noqa: E501\n )\n\n self._status = status", "def setExecutionStatus(self, return_code):\n if return_code == 0:\n self.execution_status = 'executed'\n else:\n self.execution_status = 'failed'", "def custom_block_response_status_code(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")", "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def status(self, status: str):\n allowed_values = [\"OPEN\", \"WAITING_RESOLUTION\", \"CONFIRMED\", \"CANCELLED\", \"COMPLETED\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def exit_status(self, exit_status):\n\n self._exit_status = exit_status", "def 
status_detail(self, status_detail):\n self._status_detail = status_detail" ]
[ "0.62368333", "0.61369103", "0.59862417", "0.5981169", "0.5981169", "0.5981169", "0.5980721", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.59049493", "0.59049493", "0.59049493", "0.59049493", "0.59049493", "0.59049493", "0.59049493", "0.5897389", "0.58920586", "0.5881357", "0.5881357", "0.5864345", "0.58280146", "0.57727724", "0.57675195", "0.5735527", "0.5714673", "0.5692065", "0.5681176", "0.56718177", "0.5642569", "0.5632915", "0.5599345", "0.5599345", "0.5588259", "0.55691785", "0.55686176", "0.55686176", "0.556826", "0.55549634", "0.55547744", "0.5535361", "0.5527823", "0.5519602", "0.5487687", "0.5471368", "0.54583377", "0.5457941", "0.5452131", "0.5446784", "0.5442185", "0.5435709", "0.5420114", "0.54174054", "0.5411989", "0.54070437", "0.53756326", "0.53736067", "0.5372969", "0.53716654", "0.5366684", "0.5358284", "0.5357374", "0.53432506", "0.53202873", "0.5295178", "0.52946764", "0.5283405", "0.52832067", "0.5271304", "0.5271201", "0.52593786", "0.5250099", "0.52409565", "0.5224009", "0.5220837", "0.5220253", "0.52177155", "0.5207522", "0.5201935", "0.5182303" ]
0.6872091
0
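The negatives gathered for this record are dominated by validating setters, so the recurring shape is worth spelling out once. In this sketch the class name and the allowed status codes are invented for illustration — the source exhibits only the guard pattern itself:

class HypotheticalResponse:
    # Hypothetical model used only to demonstrate the validated-setter pattern.

    def __init__(self):
        self._status_code = None

    @property
    def status_code(self):
        return self._status_code

    @status_code.setter
    def status_code(self, status_code):
        # Mirrors the allowed-values guard seen throughout the negatives above.
        allowed_values = [200, 201, 204, 400, 404, 500]  # assumed, not from the source
        if status_code not in allowed_values:
            raise ValueError(
                "Invalid value for `status_code` ({0}), must be one of {1}".format(
                    status_code, allowed_values
                )
            )
        self._status_code = status_code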
Sets the warnings of this MigrateListingResponse.
def warnings(self, warnings):
    self._warnings = warnings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warnings(self):\n return self.__warnings", "def allow_warnings(self, allow_warnings):\n self._allow_warnings = allow_warnings", "def setwarnings(self, on):\n # diese Funktion macht eigentlich nichts, ist aber wegen der Kombatibilitaet vorhanden\n print(f\"setwarnings: {on}\")", "def warnings(self) -> List[Error]:\n return self._get_warnings()", "def get_warnings(self):\n pass", "def warnings(self) -> List[Error]:", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def warnings(self):\n return self.warning_buffer.warnings", "def warning(self, warning):\n pass", "def _warn(self, warning=None):\r\n debug.err('Warning: %s' % warning)\r\n\r\n if core.FW_conf['settings'].TestRun.ExecutionMode == 'Leader' and warning != None:\r\n executeInFollower(\"self.warn('%s')\" % (warning,))\r\n\r\n if type(warning) != types.ListType:\r\n warning = [warning]\r\n\r\n self.result.addStepWarning(warning)", "def warning(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.WARNING))", "def warning_spoilers(self, warning_spoilers):\n\n self._warning_spoilers = warning_spoilers", "def show_warnings(self):\n for w in self.warnings:\n w()", "def allow_warnings(self):\n return self._allow_warnings", "def set_warning(warning):\n impl.set_warning(**locals())", "async def setwarns(self, ctx, user: discord.Member, warnings: int = None):\r\n server = ctx.message.guild\r\n await self._create_warn(server, user)\r\n dataIO.save_json(self.JSON, self.data)\r\n if not warnings:\r\n del self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings == 0:\r\n del self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings <= 0:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n if warnings >= 5:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] = warnings\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been set to **{}**\".format(user.name, warnings))", "def warnings_active(self) -> List[Error]:", "def warning(self) -> 'outputs.AnyResponse':\n return pulumi.get(self, \"warning\")", "def get_warning(self) -> List[str]:\n return []", "def get_warning(self) -> List[str]:\n return []", "def eval_warnings(self):\n\n # Ensure the minimum number of warnings were raised.\n assert len(self.war) >= len(self.warn_msgs)\n\n # Test the warning messages, ensuring each attribute is present.\n testing.eval_warnings(self.war, self.warn_msgs)\n return", "async def warnings(self, ctx):\n server = ctx.message.server\n server_id = server.id\n if not (server_id in self.warnlist2 and self.warnlist2[server_id]):\n await self.bot.say(\"No users are currently punished.\")\n return\n\n def getmname(mid):\n member = discord.utils.get(server.members, id=mid)\n if member:\n if member.nick:\n return '%s (%s)' % (member.nick, member)\n else:\n return str(member)\n else:\n return '(member not present, id #%d)'\n\n headers = ['Member', 'Warning Number', 'Moderator', 'Reason']\n table = []\n disp_table = []\n now = time.time()\n for member_id, data in 
self.warnlist2[server_id].items():\n\n #if not member_id.isdigit():\n #continue\n print (\"704\")\n member_name = getmname(data['User'])\n warnnum = data['Warning Number']\n punisher_name = getmname(data['Mod'])\n reason = data['Reason']\n table.append((member_name, warnnum, punisher_name, reason))\n\n #for _, name, warnum, mod, reason in sorted(table, key=lambda x: x[0]):\n disp_table.append((member_name, warnnum, punisher_name, reason))\n\n for page in pagify(tabulate(disp_table, headers)):\n await self.bot.say(box(page))", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def set_warning(warningTxt):\r\n if not core.does_item_exist(\"Warning##Warning\"):\r\n with simple.collapsing_header(\"Warning##Warning\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"Warning\", default_value=warningTxt, color=(255, 255, 0, 255))", "def warning(self, msg, *args, **kwargs):\n pass", "def warnings_active(self) -> List[Error]:\n return self._get_warnings(is_active=True)", "def log_check_warnings(self):\n self._log_check_warnings_object(self._info)\n self._log_check_warnings_object(self._tags)\n self._log_check_warnings_object(self._schemes)\n self._log_check_warnings_object(self._paths)\n self._log_check_warnings_object(self._securityDefinitions)\n self._log_check_warnings_object(self._definitions)\n pass", "def add_warning(self, msg):\n self._add_message(msg, self._warnings)", "def set_warning_message(msg):\n set_message(msg, TYPE_WARNING)", "def list_warnings(self):\n lwarn = []\n r = (220,0,0) # Red\n w = (244,234,244) # White\n g = (144,238,144) # Green\n w = (255,255,255) # White\n c = cf.gs.game.character\n ci = c.inventory\n f = ci.sorted_items['food'].amount\n if f > 0 and f < 10:\n lwarn.append(\n {'item':None,'value':'Low food!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif f <= 0:\n lwarn.append(\n {'item':None,'value':'0 food: HP -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n lwarn.append(\n {'item':None,'value':'0 food: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n h = cf.gs.game.character.selected_house\n if h == 'Staying with Friends':\n lwarn.append(\n {'item':None,'value':'No house: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.health == 1:\n lwarn.append(\n {'item':None,'value':'Low health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.health <= 0:\n lwarn.append(\n {'item':None,'value':'0 health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(cf.gs.game.events.inactive_events) == 5:\n lwarn.append(\n {'item':None,'value':'5 events: Activating!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.sanity > 0 and c.sanity < 10:\n lwarn.append(\n {'item':None,'value':'Low sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.sanity <= 0:\n lwarn.append(\n {'item':None,'value':'0 sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n cash = ci.sorted_items['cash'].amount\n if cash > 0 and cash < 4000:\n lwarn.append(\n {'item':None,'value':'Low cash!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif cash <= 0:\n 
lwarn.append(\n {'item':None,'value':'0 cash: Sanity-=1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(lwarn) == 0:\n lwarn.append(\n {'item':None,'value':'Green means go!',\n 'selected_bgcolor':g,'bgcolor':g,'font_size':20})\n return lwarn", "def add_warning(self, message):\n self.warnings.append(message)", "def warnings():\n return THE_LOGGER.warnings", "def warn(self) -> list:\n return self.__wrn", "def warning_types(self, warning_types):\n allowed_values = [\"EmptyTranslation\", \"TrailingPunctuation\", \"Formatting\", \"JoinTags\", \"MissingNumbers\", \"MultipleSpaces\", \"NonConformingTerm\", \"NotConfirmed\", \"TranslationLength\", \"TrailingSpace\", \"UnresolvedComment\", \"EmptyPairTags\", \"InconsistentTranslationTargetSource\", \"InconsistentTranslationSourceTarget\", \"ForbiddenString\", \"SpellCheck\", \"RepeatedWords\", \"InconsistentTagContent\", \"EmptyTagContent\", \"Malformed\", \"ForbiddenTerm\", \"NewerAtLowerLevel\", \"LeadingAndTrailingSpaces\", \"TargetSourceIdentical\", \"SourceOrTargetRegexp\", \"UnmodifiedFuzzyTranslation\", \"Moravia\", \"ExtraNumbers\", \"UnresolvedConversation\", \"NestedTags\"] # noqa: E501\n if not set(warning_types).issubset(set(allowed_values)):\n raise ValueError(\n \"Invalid values for `warning_types` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(warning_types) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._warning_types = warning_types", "def warns(*warnings, **opts):\r\n import warnings as warnings_\r\n\r\n captured = []\r\n old_filters, old_showwarning = warnings_.filters, warnings_.showwarning\r\n warnings_.filters = old_filters[:]\r\n\r\n def showwarning(message, category, *args, **kwargs):\r\n if category not in warnings:\r\n old_showwarning(message, category, *args, **kwargs)\r\n return\r\n captured.append(message)\r\n warnings_.showwarning = showwarning\r\n\r\n for warning in warnings:\r\n warnings_.simplefilter(\"always\", warning)\r\n\r\n try:\r\n yield captured\r\n if opts.get(\"any\", False):\r\n assert captured\r\n else:\r\n assert set(warnings) == set(map(type, captured))\r\n finally:\r\n warnings_.filters = old_filters\r\n warnings_.showwarning = old_showwarning", "def errors_and_warnings(self, errors_and_warnings):\n\n self._errors_and_warnings = errors_and_warnings", "def warning(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['warning']:\n self.print_lines(self.colored(('magenta', 'bold'), lines))", "def warning(self) -> str:\n return pulumi.get(self, \"warning\")", "def warn(self) -> bool:\n return self._warn", "def warning(self, *args, **kwargs):", "def migrate_hidden_warnings_to_notes(apps, schema_editor):\n Infraction = apps.get_model('api', 'Infraction')\n\n for infraction in Infraction.objects.filter(type=\"warning\", hidden=True):\n infraction.type = \"note\"\n infraction.save()", "def warning(self, message):\n return self.log(\"WARNING\", message)", "def ReportWarning(self, warning_message, status=200):\n logging.warning('Reporting warning: %r', warning_message)\n self.response.set_status(status)\n self.response.out.write('%s\\nrequest_id:%s\\n' %\n (warning_message, utils.GetRequestId()))", "def has_warnings(self) -> bool:\n return len(self.warnings) > 0", "async def warnlist(self, ctx, page: int = None):\r\n server = ctx.message.guild\r\n if not page:\r\n page = 1\r\n if page < 0:\r\n await ctx.send(\"Invalid page :no_entry:\")\r\n return\r\n try:\r\n if page > 
math.ceil(len(self.data[str(server.id)][\"user\"]) / 20):\r\n await ctx.send(\"Invalid page :no_entry:\")\r\n return\r\n except:\r\n await ctx.send(\"No one has been warned in this server :no_entry:\")\r\n return\r\n s = await self._list_warns(server, page)\r\n try:\r\n await ctx.send(embed=s)\r\n except:\r\n await ctx.send(\"There are no users with warnings in this server :no_entry:\")", "def add_warnings_and_errors(self, output_data):\n # add the dictionary with warnings and errors\n warnings = self.retrieved.get_object_content(self.node.get_option(\"scheduler_stderr\"))\n # for some reason, errors may be in the stdout, but not the log.lammps\n stdout = self.retrieved.get_object_content(self.node.get_option(\"scheduler_stdout\"))\n errors = [line for line in stdout.splitlines() if line.startswith(\"ERROR\")]\n\n for error in errors:\n self.logger.error(error)\n\n output_data.update({'warnings': warnings})\n output_data.update({'errors': errors})", "def isWarning(self):\n return _libsbml.XMLError_isWarning(self)", "async def warning_list(self, context: Context, user: discord.User):\n warnings_list = await db_manager.get_warnings(user.id, context.guild.id)\n embed = discord.Embed(title=f\"Warnings of {user}\", color=0x9C84EF)\n description = \"\"\n if len(warnings_list) == 0:\n description = \"This user has no warnings.\"\n else:\n for warning in warnings_list:\n description += f\"• Warned by <@{warning[2]}>: **{warning[3]}** (<t:{warning[4]}>) - Warn ID #{warning[5]}\\n\"\n embed.description = description\n await context.send(embed=embed)", "def _enabled_warnings(self):\n with warnings.catch_warnings():\n if self.warnings:\n # if self.warnings is set, use it to filter all the warnings\n warnings.simplefilter(self.warnings)\n # if the filter is 'default' or 'always', special-case the\n # warnings from the deprecated unittest methods to show them\n # no more than once per module, because they can be fairly\n # noisy. 
The -Wd and -Wa flags can be used to bypass this\n # only when self.warnings is None.\n if self.warnings in ['default', 'always']:\n warnings.filterwarnings(\n 'module',\n category=DeprecationWarning,\n message=r'Please use assert\\w+ instead.')\n yield", "def warn(self, msg):\n\n self(msg, WARN)", "def warning ( self , message , *args , **kwargs ) :\n return self.logger.warning ( message , *args , **kwargs )", "def warning(self, message, code=None):\n\n if code is None:\n code = ''\n self._add_message( message, self.WARNING, code=code )\n self.n_warnings += 1", "def set_warning(self, warning):\n if self.log_file_exist(self.file_path_name):\n logging.warning (warning)\n else:\n print \"The log \"+ self.name_log + \"does not exist in the directory\"", "def has_warning(self, value: bool):\n self._has_warning = value", "def warning(self) -> Optional[str]:\n return pulumi.get(self, \"warning\")", "def handle_warning(self, api, command):\n return self.handle_log(api, command, level=logging.WARNING)", "def print_warning_msgs():\n for err in TypeWarning.warnings:\n print err", "def warning(self, _strMessage=\"\"):\n self.edLogging.warning(_strMessage)", "def __init__(self, warning: bool = True):\n super().__init__()\n\n self.warning = warning", "def format_warn(self, *args):\n if self._pretty:\n return self.format_multiline_message(*args, color='magenta', start='[WARN] ', multiline=' ~~ ')\n return self.format_multiline_message(*args)", "def is_warning(self):\n\n return self.severity == AlertSeverity.TOLERABLE", "def notice(self, warning):\n pass", "def test_edit_warnings(self, mock_create, mock_msg_mgr):\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type1.name, self.job_type1.version)\n json_data = {\n 'is_active': False\n }\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'DEPRECATED_RECIPES')", "def warning(self, msg):\n\n self.logger.warning(msg)", "def warning(self) -> Optional[pulumi.Input['AnyArgs']]:\n return pulumi.get(self, \"warning\")", "def record_warning(self, message, keys=None, **kwargs):\n keys = list(keys) if keys is not None else []\n self.warnings.append(dict(message=message, keys=keys, **kwargs))", "def test_sanitize_warnings(self):\n input = {\n \"warnings\": [\n \"! Change will take effect only after switch reboot at line 11\\\\n\\\\n\",\n \"! \\\\nWARNING!\\\\nChanging TCAM profile will cause forwarding agent(s) to exit and restart.\\\\nAll traffic through the forwarding chip managed by the restarting\\\\nforwarding agent will be dropped.\\\\n at line 392\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs\",\n \"concentrators\",\n \"switches\",\n \"bridges\",\n \"etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 2\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs\",\n \"concentrators\",\n \"switches\",\n \"bridges\",\n \"etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 4\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. 
to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 6\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! Interface does not exist. The configuration will not take effect until the module is inserted. at line 2799\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 1247\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\"\n\n ],\n \"warningCount\": 14,\n \"errors\": [\n {\n \"lineNo\": \" 6\",\n \"error\": \"> ruter bgp 1512% Invalid input (at token 0: 'ruter') at line 6\",\n }\n ],\n \"errorCount\": 1,\n }\n expected = {\n \"warnings\": [\n \"! Change will take effect only after switch reboot at line 11\\\\n\\\\n\",\n \"! \\\\nWARNING!\\\\nChanging TCAM profile will cause forwarding agent(s) to exit and restart.\\\\nAll traffic through the forwarding chip managed by the restarting\\\\nforwarding agent will be dropped.\\\\n at line 392\",\n \"! 
portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 2\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 4\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 6\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! Interface does not exist. The configuration will not take effect until the module is inserted. at line 2799\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\",\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 1247\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\
\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\"\n ],\n \"warningCount\": 7,\n \"errors\": [\n {\n \"lineNo\": \" 6\",\n \"error\": \"> ruter bgp 1512% Invalid input (at token 0: 'ruter') at line 6\",\n }\n ],\n \"errorCount\": 1,\n }\n assert self.api.sanitize_warnings(input) == expected", "def create_warning_notes(warnings: List[Text]) -> Text:\n warning_title = template.SUB_SUB_SECTION_TITLE.format(\n content='Warnings'\n )\n return warning_title + create_content_list(warnings)", "def test_sanitize_warnings_skip(self):\n input = {\n \"result\": [\n {\n \"output\": \"enter input line by line; when done enter one or more control-d\\n\\n> spanning-tree portfast\\n! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. at line 2\\nCopy completed successfully.\\n\",\n \"messages\": [\"Copy completed successfully.\"],\n },\n {\n \"output\": \"! Command: show session-configuration named capiVerify-2002-f8a137cac96e11ed89be020000000000\\n! device: tp-avd-leaf2 (vEOS-lab, EOS-4.29.1F)\\n!\\n! boot system flash:/vEOS-lab-4.29.1F.swi\\n!\\nno aaa root\\n!\\ntransceiver qsfp default-mode 4x10G\\n!\\nservice routing protocols model ribd\\n!\\nspanning-tree mode mstp\\n!\\ninterface Ethernet1\\n spanning-tree portfast\\n!\\ninterface Ethernet2\\n!\\ninterface Ethernet3\\n!\\ninterface Ethernet4\\n!\\ninterface Ethernet5\\n!\\ninterface Management1\\n!\\nno ip routing\\n!\\nend\\n\"\n },\n ],\n \"warnings\": [\n \"! portfast should only be enabled on ports connected to a single host. Connecting hubs, concentrators, switches, bridges, etc. to this interface when portfast is enabled can cause temporary bridging loops. Use with CAUTION. 
at line 2\"\n ],\n \"id\": \"Arista-3-4826123409839743\",\n \"warningCount\": 1,\n \"jsonrpc\": \"2.0\",\n }\n # The result should not change\n assert self.api.sanitize_warnings(input) == input", "def __init__(self, warnings):\n self.tables = list()\n self.text = list()\n self.context = ''\n self.ancestor = ''\n self.content = ''\n self.warnings = warnings", "def set_log_level_warning(loggers=[\"vaex\"]):\n set_log_level(loggers, logging.WARNING)", "def warning(self, msg):\n self.__logger.warning(msg)", "def mark_failed(self):\n self.status = self.FAILED\n self.traceback = self._format_traceback()\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "async def warnings(self, ctx, user: discord.Member):\r\n server = ctx.message.guild\r\n try:\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] == 1:\r\n action = \"Mute\"\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] == 2:\r\n action = \"Kick\"\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] >= 3:\r\n action = \"Ban\"\r\n if not self.data[str(server.id)][\"user\"][str(user.id)][\"reasons\"]:\r\n reasons = \"None\"\r\n else:\r\n reasons = \", \".join([x for x in self.data[str(server.id)][\"user\"][str(user.id)][\"reasons\"]])\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] == 1:\r\n s = discord.Embed(description=\"{} is on 1 warning\".format(user), colour=user.colour)\r\n s.set_author(name=str(user), icon_url=user.avatar_url)\r\n s.add_field(name=\"Next Action\", value=action, inline=False)\r\n s.add_field(name=\"Reasons\", value=reasons, inline=False)\r\n await ctx.send(embed=s)\r\n else:\r\n try:\r\n s = discord.Embed(description=\"{} is on {} warnings\".format(user, self.data[str(server.id)][\"user\"][\r\n str(user.id)][\"warnings\"]), colour=user.colour)\r\n s.set_author(name=str(user), icon_url=user.avatar_url)\r\n s.add_field(name=\"Next Action\", value=action, inline=False)\r\n s.add_field(name=\"Reasons\", value=reasons, inline=False)\r\n await ctx.send(embed=s)\r\n except:\r\n await ctx.send(\"That user has no warnings :no_entry:\")\r\n except:\r\n await ctx.send(\"That user has no warnings :no_entry:\")", "def warning(self, message: str) -> None:\n\n self.__add_log(self.WARNING, message)", "def svn_fs_set_warning_func(*args):\r\n return _fs.svn_fs_set_warning_func(*args)", "def warning(self, msg: str):\n self._logger.warning(msg)", "def warning(self, msg):\r\n self.logger.warning(msg)", "def options(self, parser, env):\n super(WarningFilter, self).options(parser, env)\n parser.add_option(\"--warningfilters\",\n default=None,\n help=\"Treat warnings that occur WITHIN tests as errors.\")", "def warn(self, message):\n return self.log(\"WARNING\", message)", "def on_warning(self, warning):\n log.warning(\"Received stall warning: %s\", warning)", "def warn():\n pass", "def warning(self, *args, **kwargs):\n self.msg(logging.WARNING, *args, **kwargs)", "def __update_warning(self, message: Dict[str, int]) -> None:\n # print('Warning message:', message)\n\n warning_data = bytearray(base64.b64decode(message[\"b\"]))\n warning_type = warning_data[6]\n\n # If warning shows current module works fine, return immediately\n if not warning_type:\n return\n\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n\n module_id = message[\"s\"]\n module_type = self.__get_type_from_uuid(module_uuid_res)\n\n # No need to update Network module's STM firmware\n if module_type == 
'Network':\n return\n\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n # Note that more than one warning type 2 message can be received\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n # TODO: Handle warning_type of 7 and 10\n # print(\"Unsupported warning type:\", warning_type)\n pass", "def _strict_warning(self):\n if self.options.get('strict', True):\n return ('Strict mode enabled (the default), so this could be due to an '\n 'integer key, such as an HTTP status code.')\n return ('Strict mode disabled. Prance cannot help you narrow this further '\n 'down, sorry.')", "def test_warning_message(self):\n utcmock = MagicMock()\n utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))\n with patch(\"datetime.datetime\", utcmock):\n msg = \"Your e-mail is now being delivered by USPS\"\n out = saltsupport.LogCollector()\n out.warning(msg)\n assert saltsupport.LogCollector.WARNING in out.messages\n assert (\n type(out.messages[saltsupport.LogCollector.WARNING])\n == saltsupport.LogCollector.MessagesList\n )\n assert out.messages[saltsupport.LogCollector.WARNING] == [\n \"00:00:00.000 - {}\".format(msg)\n ]", "def PatchWarnings():\n # Since we are dealing with binary classification we calculate\n # precesion / recall / F1 wrt only the positive class.\n FLAGS.batch_results_averaging_method = \"binary\"\n # NOTE(github.com/ChrisCummins/ProGraML/issues/13): F1 score computation\n # warns that it is undefined when there are missing instances from a class,\n # which is fine for our usage.\n warnings.filterwarnings(\"ignore\", category=UndefinedMetricWarning)", "def StepWarning(self):\n return recipe_api.StepWarning", "def get_warnings(self, path: str,\n is_ancillary: bool = False,\n is_system: bool = False,\n is_removed: bool = False) -> List[str]:", "def warn(cls, message):\n print('[WARN] {0}'.format(message))", "async def warning(self, check, *, note=None):\n return await self.mark(check, \"warning\", note=note)", "def has_warning(self) -> bool:\n return self._has_warning" ]
[ "0.6353443", "0.6246493", "0.61963874", "0.6170872", "0.61554074", "0.60583675", "0.6048729", "0.6048729", "0.5980356", "0.5967424", "0.5896825", "0.58848745", "0.5873911", "0.5829192", "0.5811515", "0.56476164", "0.56447476", "0.56372", "0.562612", "0.5593209", "0.5593209", "0.55876017", "0.548944", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.538804", "0.5371701", "0.53433275", "0.5337017", "0.53073174", "0.52993447", "0.52949196", "0.5268338", "0.52441686", "0.5222651", "0.521446", "0.51944077", "0.5157785", "0.51477826", "0.5146273", "0.5139701", "0.5134754", "0.5076917", "0.50616795", "0.50295025", "0.5023562", "0.5015013", "0.50099367", "0.49952376", "0.49943134", "0.49920994", "0.49812302", "0.49768263", "0.49544528", "0.4950637", "0.4935645", "0.4935215", "0.49322492", "0.49316058", "0.49273464", "0.492034", "0.49156895", "0.49155268", "0.48971376", "0.48777148", "0.4875602", "0.48707038", "0.48582956", "0.48468474", "0.48345077", "0.4831489", "0.48195583", "0.4817458", "0.4815712", "0.48105195", "0.48085725", "0.4797273", "0.47817647", "0.47779977", "0.47700533", "0.4763135", "0.47434348", "0.4737573", "0.4719743", "0.4693239", "0.46811447", "0.46710607", "0.46636572", "0.46543616", "0.4650424", "0.46471837", "0.46465272", "0.46371424", "0.46294865" ]
0.74362904
0
Returns the model properties as a dict
def to_dict(self):
    result = {}

    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    if issubclass(MigrateListingResponse, dict):
        for key, value in self.items():
            result[key] = value

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
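A minimal self-contained sketch of the serialization pattern in the document above; the Pet and Tag classes here are hypothetical stand-ins for a generated model such as MigrateListingResponse, and plain dict iteration replaces six.iteritems:

class Tag(object):
    def __init__(self, label):
        self.label = label

    def to_dict(self):
        return {"label": self.label}


class Pet(object):
    # Declared attributes, standing in for swagger_types in the generated model.
    swagger_types = {"name": "str", "tags": "list[Tag]"}

    def __init__(self, name, tags):
        self.name = name
        self.tags = tags

    def to_dict(self):
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Nested models inside lists are serialized element-wise.
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result


print(Pet("rex", [Tag("brown")]).to_dict())
# {'name': 'rex', 'tags': [{'label': 'brown'}]}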
Returns the string representation of the model
def to_str(self):
    return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n\n return self.raw_field", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
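The to_str document above is a thin wrapper over pprint.pformat applied to the serialized dict; an equivalent standalone call on a made-up sample:

import pprint

sample = {"id": 42, "name": "listing", "tags": ["a", "b"]}
print(pprint.pformat(sample))
# {'id': 42, 'name': 'listing', 'tags': ['a', 'b']}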
For `print` and `pprint`
def __repr__(self):
    return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n pass", "def _print_custom(self):\n pass", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def print(self):\n # Your implementation here", "def p(value):\n pp.pprint(value)", "def static_print(*args, __p=print, **kwargs):\n __p(*args, **kwargs)", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def pprint(self):\n print(self.pprint_str())", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def print_(self, s: str) -> None:", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def test_print(chikin):\n chikin.print()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def out(*args):\r\n print(*args)", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def DumpPprint(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import pprint\n \n text = pprint.pformat(data)\n \n return text", "def repl_print_statements():\n pass", "def test_03_pass_print(self):\n print('Hello World!')", "def p(self):\n self.printstdout = True", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def print(self):\r\n self.print_avec_separateur()", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def print(self):\n print(self.pretty_str())", "def 
test_print4(self):\n writer = StringIO()\n collatz_print(writer, 1, 1, 1)\n self.assertEqual(writer.getvalue(), \"1 1 1\\n\")", "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def use_pypprint_for_implicit_print(self) -> None:\n if self.implicit_print is not None:\n self.implicit_print.func.id = \"pypprint\" # type: ignore\n # Make sure we import it later\n self.undefined.add(\"pypprint\")", "def test_print(self):\n writer = StringIO()\n collatz_print(writer, 1, 10, 20)\n self.assertEqual(writer.getvalue(), \"1 10 20\\n\")", "def pprint(self):\n return pformat(repr(self))", "def printer(message):\n if VERBOSITY:\n pprint(message)", "def rec_print(p):\n if len(p) == 0:\n return\n t = p.pop(0)\n print t\n rec_print(p)", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def printc(*a, **kw):\n print(*a, **kw)", "def pr(x):\n Card.print_pretty_cards(x)", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def safe_print(*objs, errors=\"replace\"):\n\tprint(*(to_stdout(str(o), errors) for o in objs))", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def _print(self, *args):\n return _ida_hexrays.qstring_printer_t__print(self, *args)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n 
return s\n\n print pprintStr(self)", "def hook_print():\n sys.stdout = PrintHook()", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def print_list(self):\r\n pass", "def debugprint(obj, depth=-1, print_type=False,\r\n file=None, ids='CHAR', stop_on_name=False):\r\n if file == 'str':\r\n _file = StringIO()\r\n elif file is None:\r\n _file = sys.stdout\r\n else:\r\n _file = file\r\n done = dict()\r\n results_to_print = []\r\n order = []\r\n if isinstance(obj, gof.Variable):\r\n results_to_print.append(obj)\r\n elif isinstance(obj, gof.Apply):\r\n results_to_print.extend(obj.outputs)\r\n elif isinstance(obj, Function):\r\n results_to_print.extend(obj.maker.fgraph.outputs)\r\n order = obj.maker.fgraph.toposort()\r\n elif isinstance(obj, (list, tuple)):\r\n results_to_print.extend(obj)\r\n elif isinstance(obj, gof.FunctionGraph):\r\n results_to_print.extend(obj.outputs)\r\n order = obj.toposort()\r\n elif isinstance(obj, (int, long, float, numpy.ndarray)):\r\n print obj\r\n else:\r\n raise TypeError(\"debugprint cannot print an object of this type\", obj)\r\n for r in results_to_print:\r\n debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,\r\n file=_file, order=order, ids=ids,\r\n stop_on_name=stop_on_name)\r\n if file is _file:\r\n return file\r\n elif file == 'str':\r\n return _file.getvalue()\r\n else:\r\n _file.flush()", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def test_print1(self):\n writer = StringIO()\n collatz_print(writer, 100, 200, 125)\n self.assertEqual(writer.getvalue(), \"100 200 125\\n\")", "def printOutput(self):\n pass", "def _print(self, *args):\n return _ida_hexrays.cnumber_t__print(self, *args)", "def setPrint():\n (e,d,sr,sw) = codecs.lookup('utf-8')\n unicode_to_utf8 = sw(sys.stdout)\n sys.stdout = unicode_to_utf8", "def pr(string, verbose):\n if(verbose):\n print(string)", "def print(*args, sep=\" \"):\n pass", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def print(self):\n\n print(self)", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def test_print2(self):\n writer = StringIO()\n collatz_print(writer, 201, 210, 89)\n self.assertEqual(writer.getvalue(), \"201 210 89\\n\")", "def print_pointers(self):\n\n ### FILL IN ###", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Producer:')\n print(pre+' produces:', self._produces)\n print(pre+' consumes:', self._consumes)\n print(pre+' transfer:', self._transfer)\n print(pre+' capacity:', self._capacity)", "def _print(cls, quad):\n\t\tprint(\"\\nLIGHT OUTPUT:\\n<<<<{}>>>>\".format(ast.literal_eval(str(cls.get_address_value(quad.result)))))\n\t\tprint(\"END\")\n\n\t\tvar = cls.get_address_value(quad.result)\n\t\tif isinstance(var, collections.Iterable):\n\t\t\tprint(\"DEEP COPY\")\n\t\t\tcls.print_queue.enqueue(copy.deepcopy(var))\n\t\telse:\n\t\t\tcls.print_queue.enqueue(var)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def PrettyPrint(self):\r\n 
print(self.data)\r\n return", "def print(self):\n self.print_avec_separateur(\" \")", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n param_names = [p for p in params.keys() if p is not \"cost\"]\n param_names.sort()\n\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, name in enumerate(param_names):\n value = params[name]\n if isinstance(value, float):\n this_repr = '%s=%s' % (name, str(value))\n else:\n this_repr = '%s=%s' % (name, printer(value))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n # options = np.get_printoptions()\n # np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def init_printing(pretty_print=True, order=None, use_unicode=None):\n if pretty_print:\n stringify_func = lambda arg: pretty(arg, order=order, use_unicode=use_unicode)\n else:\n stringify_func = sstrrepr\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n def result_display(self, arg):\n \"\"\"IPython's pretty-printer display hook.\n\n This function was adapted from:\n\n ipython/IPython/hooks.py:155\n\n \"\"\"\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)\n\n ip.set_hook('result_display', result_display)\n return\n except ImportError:\n pass\n\n import __builtin__, sys\n\n def displayhook(arg):\n \"\"\"Python's pretty-printer display hook.\n\n This function was adapted from:\n\n http://www.python.org/dev/peps/pep-0217/\n\n \"\"\"\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg\n\n sys.displayhook = displayhook", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _print(self, *args):\n return _ida_hexrays.cinsn_t__print(self, *args)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def sequential_print_statements():\n pass", "def print_post():\n print('| | |'),", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, 
unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def debugprint(r, prefix='', depth=-1, done=None, print_type=False,\r\n file=sys.stdout, print_destroy_map=False,\r\n print_view_map=False, order=None, ids='CHAR',\r\n stop_on_name=False, prefix_child=None):\r\n if depth == 0:\r\n return\r\n\r\n if order is None:\r\n order = []\r\n\r\n if done is None:\r\n done = dict()\r\n\r\n if print_type:\r\n type_str = ' <%s>' % r.type\r\n else:\r\n type_str = ''\r\n\r\n if prefix_child is None:\r\n prefix_child = prefix\r\n\r\n def get_id_str(obj):\r\n if obj in done:\r\n id_str = done[obj]\r\n elif ids == \"id\":\r\n id_str = \"[@%s]\" % str(id(r))\r\n elif ids == \"int\":\r\n id_str = \"[@%s]\" % str(len(done))\r\n elif ids == \"CHAR\":\r\n id_str = \"[@%s]\" % char_from_number(len(done))\r\n elif ids == \"\":\r\n id_str = \"\"\r\n done[obj] = id_str\r\n return id_str\r\n\r\n if hasattr(r.owner, 'op'):\r\n # this variable is the output of computation,\r\n # so just print out the apply\r\n a = r.owner\r\n\r\n r_name = getattr(r, 'name', '')\r\n # normally if the name isn't set, it'll be None, so\r\n # r_name is None here\r\n if r_name is None:\r\n r_name = ''\r\n\r\n if print_destroy_map:\r\n destroy_map_str = str(getattr(r.owner.op, 'destroy_map', ''))\r\n else:\r\n destroy_map_str = ''\r\n\r\n if print_view_map:\r\n view_map_str = str(getattr(r.owner.op, 'view_map', ''))\r\n else:\r\n view_map_str = ''\r\n if destroy_map_str and destroy_map_str != '{}':\r\n destroy_map_str = 'd=' + destroy_map_str\r\n if view_map_str and view_map_str != '{}':\r\n view_map_str = 'v=' + view_map_str\r\n\r\n o = ''\r\n if order:\r\n o = 
str(order.index(r.owner))\r\n already_printed = a in done # get_id_str put it in the dict\r\n id_str = get_id_str(a)\r\n\r\n if len(a.outputs) == 1:\r\n print >> file, '%s%s %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n id_str,\r\n type_str, r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n else:\r\n print >> file, '%s%s.%i %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n a.outputs.index(r),\r\n id_str, type_str,\r\n r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n if not already_printed:\r\n if (not stop_on_name or\r\n not (hasattr(r, 'name') and r.name is not None)):\r\n new_prefix = prefix_child + ' |'\r\n new_prefix_child = prefix_child + ' |'\r\n for idx, i in enumerate(a.inputs):\r\n if idx == len(a.inputs) - 1:\r\n new_prefix_child = prefix_child + ' '\r\n\r\n debugprint(i, new_prefix, depth=depth - 1, done=done,\r\n print_type=print_type, file=file, order=order,\r\n ids=ids, stop_on_name=stop_on_name,\r\n prefix_child=new_prefix_child)\r\n else:\r\n #this is an input variable\r\n id_str = get_id_str(r)\r\n print >> file, '%s%s %s%s' % (prefix, r, id_str, type_str)\r\n\r\n return file", "def bpprint(self, out=None):\n if out is None:\n out = sys.stdout\n print(self.bpformat(), file=out)", "def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n options = numpy.get_printoptions()\n numpy.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if isinstance(v, float):\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if this_line_length + len(this_repr) >= 75 or '\\n' in this_repr:\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n numpy.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)" ]
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", "0.6561717", "0.65549695", "0.6494838", "0.6473391", "0.64491546", "0.6411177", "0.6340302", "0.6339321", "0.6335031", "0.6332035", "0.6315847", "0.631272", "0.6297732", "0.62969106", "0.6283717", "0.6279154", "0.6271603", "0.62673396", "0.6265511", "0.62629336", "0.6258366", "0.6258278", "0.62501305", "0.6248315", "0.62459755", "0.6244254", "0.6242083", "0.62393075", "0.62156516", "0.6208198", "0.62068796", "0.62062824", "0.62062824", "0.6194123", "0.6189738", "0.6183852", "0.6183035", "0.61697906", "0.61614454", "0.6160741", "0.61544997", "0.61528033", "0.6150831", "0.6147288", "0.61380607", "0.613793", "0.61300766", "0.61278135", "0.6125416", "0.6114217", "0.61126333", "0.6100682", "0.60998785", "0.6096818", "0.6081694", "0.6076982", "0.6072701", "0.6060028", "0.60581726", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6043662", "0.6037599", "0.60336643", "0.6030174", "0.60290223", "0.60242903", "0.6016989", "0.6004274", "0.60005474", "0.60005474", "0.60003483", "0.599558", "0.59923434", "0.5979316", "0.59777945" ]
0.0
-1
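A minimal sketch of the __repr__ pattern in the document above, using a hypothetical Model class: delegating __repr__ to to_str makes both print and the interactive prompt show the formatted attribute dict.

import pprint


class Model(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_str(self):
        return pprint.pformat(self.__dict__)

    def __repr__(self):
        # print() falls back to __repr__ when __str__ is not defined,
        # so both print and pprint-style inspection show the formatted dict.
        return self.to_str()


print(Model(id=1, name="demo"))
# {'id': 1, 'name': 'demo'}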
Returns true if both objects are equal
def __eq__(self, other):
    if not isinstance(other, MigrateListingResponse):
        return False

    return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068274", "0.7796298", "0.7794721", "0.7784825", "0.77790844", "0.7769397", "0.77534705", "0.7746211", "0.7741107", "0.77282816", "0.7725766", "0.7719537", "0.770273", "0.7685999", "0.7677552", "0.76739407", "0.7664857", "0.76557016", "0.7655046", "0.76282835", "0.7625795", "0.76242626", "0.76237214", "0.76237214", "0.76237214", "0.7617347", "0.7600536", "0.7599156", "0.7595863", "0.75945824", "0.7594092", "0.75899327" ]
0.0
-1
Returns True if both objects are not equal.
def __ne__(self, other):
    return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
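A minimal runnable sketch of the pattern this record's query/document pair describes — `__ne__` defined as the negation of `__eq__` (the `Point` class is hypothetical, invented for illustration; only the method body above comes from the dataset):

    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __eq__(self, other):
            # Value equality: same class and same coordinates.
            return isinstance(other, Point) and (self.x, self.y) == (other.x, other.y)

        def __ne__(self, other):
            # Delegate to __eq__ so the two operators never disagree.
            return not self == other

    assert Point(1, 2) == Point(1, 2)
    assert Point(1, 2) != Point(3, 4)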
Return a string representation of the data source.
def __repr__(self):
    cls_name = self.__class__.__name__
    conn_name = str(self._connection)
    tbl_name = self._table
    return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_source_info(self) -> 'outputs.DatasourceResponse':\n return pulumi.get(self, \"data_source_info\")", "def __repr__(self):\n return f'{self.data.to_string(max_rows = None)}'", "def __str__(self):\n \n result = [\"rows: \" + str(self.rows),\n \"columns: \"+str(self.columns),\n \"data: \"+str(self.data)]\n return \"\\n\".join(result)", "def __repr__(self) -> str:\n output = f\"<Dataset(id={self.id}, name={self.dataset_name}\"\n output += f\"organisation name={self.organization_name},n\"\n output += f\"reference period={self.dataset_date}, update frequency={self.update_frequency}, \"\n output += f\"review_date={str(self.review_date)}, last_modified={str(self.last_modified)},\"\n output += f\"updated_by_script={str(self.updated_by_script)}, metadata_modified={str(self.metadata_modified)})>\"\n return output", "def data_source_info(self) -> pulumi.Input['DatasourceArgs']:\n return pulumi.get(self, \"data_source_info\")", "def dataAsString(self):\n\n # Force generation of .array\n d = self.asArray()\n slist = []\n for l in self.array:\n s = \"%s %s\" % (self.name, self.rowAsString(l))\n slist.append(s)\n return '\\n'.join(slist)", "def __str__(self):\n return str(self.get_data())", "def __str__(self): # pragma: no cover\n ret_str = \"<{0} ({1}): {2} {3}, {4} objects>\"\n\n proj = self.project if self.project else \"<no project>\"\n ver = \"v\" + self.version if self.version else \"<no version>\"\n\n return ret_str.format(\n type(self).__name__, self.source_type.value, proj, ver, self.count\n )", "def __str__(self):\n\t\treturn str(self.__dStore)", "def __repr__(self) -> str:\n return_string = str()\n\n return_string += f\"Representation of dataset with {len(self.internal_types)} elements:\\n\"\n return_string += f\"List of categories:\\t{self.internal_types}\\n\"\n return_string += f\"First and last 5 features:\\n\"\n for i in range(5):\n return_string += f\"\\t{self.internal_data[i]}\\n\"\n return_string += f\"\\t...\\n\"\n for i in range(4, -1, -1):\n return_string += f\"\\t{self.internal_data[i]}\\n\"\n return_string += \"For more information, use debugger.\"\n\n return return_string", "def __str__(self):\n return str(self.__data)", "def __str__(self):\n return ' '.join([self.source, self.name, str(self.outputs)])", "def __str__(self):\n return str(self._data)", "def __str__(self):\n return '\\n\\n'.join(str(item) for item in self._data)", "def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = \"<xbout.BoutDataset>\\n\" + \\\n \"Contains:\\n{}\\n\".format(str(self.data)) + \\\n \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n if self.options:\n text += \"Options:\\n{}\".format(styled(self.options))\n return text", "def __str__(self):\n return '{trait_name} ({phv}): dataset {pht}'.format(trait_name=self.i_trait_name,\n phv=self.full_accession,\n pht=self.source_dataset.full_accession)", "def __str__(self):\n lst = [str(i) for i in self.data]\n if self.column:\n return '[' + ', '.join(lst) + ']\\''\n else:\n return '[' + ', '.join(lst) + ']'", "def __str__(self) -> str:\n return str(self.data)", "def __str__(self):\n return self.data.__str__()", "def __str__(self):\n return self.data.__str__()", "def source_code(self):\n return str(self.source)", "def debug_string(self):\n\n raise NotImplementedError", "def dump(self):\n outputs = [\"Code object : %s\" % self.name]\n outputs.append(\" Type : %s\" % self.object_type)\n for source_line in self.source:\n # Each line is a (line_number, code) pair\n outputs.append('%d: %s' % source_line)\n return 
\"\".join(outputs)", "def asString(self):\n\n res = []\n for v in list(self.vars.values()):\n res.append(v.asString())\n res.append('')\n for e in list(self.enums.values()):\n res.append(e.asString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.defAsString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.dataAsString())\n\n return '\\n'.join(res)", "def as_string(self):\n return self.__repr__()", "def source(self) -> str:\n return self._source", "def __repr__(self) :\n\n return str(self.data)", "def get_datasource(self):\n return None", "def __repr__(self):\n return str(self.data)", "def __str__(self):\n rows = ['[' + ', '.join([str(i) for i in row]) + ']' for row in self.data]\n return '\\n'.join(rows)", "def __str__(self):\n s = \"Projection info:\\n\"\n s += \" #instances: \" + str(self.data_ninstances) + \"\\n\"\n s += \" data dimension: \" + str(self.data_dim) + \"\\n\"\n s += \" projection dimension: \" + str(self.projection_dim) + \"\\n\"\n s += \" data: \" + str(self.data[0]) + \"\\n\"\n s += \" \" + str(self.data[1]) + \"...\\n\"\n s += \" projection: \" + str(self.projection[0]) + \"\\n\"\n s += \" \" + str(self.projection[1]) + \"...\"\n return s", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return '\\n%(source)s > %(type)s (0x%(type_d).2x)\\n%(data)s' % \\\n {'type': DGTL.pkt_type_str[self.type], 'type_d': self.type,\n 'data': str(self.decoded) if self.decoded else 'Unknown raw data.',\n 'source': self.source}", "def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = (\n \"<xbout.BoutDataset>\\n\"\n + \"Contains:\\n{}\\n\".format(str(self.data))\n + \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n )\n if self.options:\n text += \"Options:\\n{}\".format(self.options)\n return text", "def __str__(self):\n DataND_str = \"\"\n # Get the properties inherited from Data\n DataND_str += super(DataND, self).__str__() + linesep\n if len(self.axes) == 0:\n DataND_str += \"axes = []\"\n for ii in range(len(self.axes)):\n DataND_str += (\n \"axes[\"\n + str(ii)\n + \"] = \"\n + str(self.axes[ii].as_dict())\n + \"\\n\"\n + linesep\n + linesep\n )\n DataND_str += \"normalizations = \" + str(self.normalizations) + linesep\n DataND_str += \"FTparameters = \" + str(self.FTparameters) + linesep\n DataND_str += \"values = \" + linesep + str(self.values)\n return DataND_str", "def __str__(self):\n return json.dumps(self._data, indent=4)", "def data_source(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_source\"), kwargs)", "def data_source(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_source\"), kwargs)", "def data_source(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"data_source\"), kwargs)", "def toString(self) -> str:\n raise NotImplementedError", "def __str__(self):\n return ', '.join(str(item) for item in self._data)", "def __str__(self):\n return \"'The object created is the dataframe: '{}', the target column: '{}', and the text input column:'{}'\".format(self.name, self.target, self.posts)", "def __str__(self):\n return self.sql()", "def to_string(self):\r\n return self.__str__()", "def __str__(self) -> str:\n\n sources_joined = \" \".join(self.sources)\n sources = f\"[SOURCES]\\n{sources_joined}\"\n\n sp_joined 
= \"\\n\".join(map(str, self.source_parameters))\n source_parameters = f\"[SOURCE-PARAMETERS]\\n{sp_joined}\"\n\n hints_joined = \"\\n\".join(map(str, self.hints))\n general = f\"[GENERAL]\\n{hints_joined}\"\n\n return \"\\n\".join([\n \"# Augustus extrinsic hints config file.\",\n (\"# Automatically generated by [augustus_optimiser]\"\n \"(https://github.com/darcyabjones/augustus_optimiser)\"),\n \"\",\n sources,\n \"\",\n source_parameters,\n \"\",\n general,\n ])", "def source(self) -> Dict:\n return self._db_data.metadata[\"_source\"]", "def __str__(self):\n return str([self.fields[col] for col in self.columns])", "def __str__(self):\n data_string = \"\"\n for list_el in self.data_list:\n for inner_list_el in list_el:\n data_string += str(inner_list_el)\n data_string += \"\\t\"\n data_string += \"\\n\"\n return data_string", "def __str__(self) -> str:\n string = ''\n for row in self._grid_display:\n string += row.__str__() + '\\n'\n return string", "def __str__(self) -> str:\n if self.data is not None:\n list_of_params = []\n for key, data_dict in self.data.to_dict(orient=\"index\").items():\n data_dict[\"index\"] = key\n list_of_params.append(data_dict)\n formated_list_of_params = self.format_params(list_of_params)\n return f\"\\n{tabulate(formated_list_of_params, headers='keys', tablefmt='fancy_grid')}\"\n else:\n return \"Empty DataFrame\"", "def asString(self):\n q = self.copy()\n q.addFunction(Query.Function.AsString)\n return q", "def source(self):\n return some.dap.source(\"<string>\")", "def __repr__(self):\n return '{}({})'.format(self.__class__.__name__, self._data)", "def __str__(self):\r\n tmp = \"\"\r\n for (name, value) in self.__table__.items():\r\n tmp += str(name) + \"\\n\" + str(value) + \"\\n\"\r\n return(tmp)", "def __repr__(self) -> str:\r\n\r\n return 'RawData(\\'{}\\', {}, {}, {})'.format(self.label, repr(self.value),\r\n self.timestamp, self.version)", "def to_string(self):\n return str(vars(self))", "def to_string(self):\n return str(vars(self))", "def to_string(self):\n return str(vars(self))", "def to_string(self):\n return str(vars(self))", "def to_string(self):\n return str(vars(self))", "def __str__ (self):\n return \", \".join(str(row) for row in self.rows()).join(\"()\")", "def generate(self):\n output = \"\"\n if self.source is not None:\n output += f\"{self.source}>\"\n else:\n raise GenerateError(\"Missing source address\")\n\n if self.destination is not None:\n output += f\"{self.destination},\"\n else:\n raise GenerateError(\"Missing destination address\")\n\n if self.path is not None:\n output += f\"{self.path}:\"\n else:\n raise GenerateError(\"Missing path\")\n\n info = self.info\n\n if self.data_type_id is not None:\n output += f\"{self.data_type_id}\"\n else:\n raise GenerateError(\"Missing data type ID\")\n\n if info is not None:\n output += f\"{info}\"\n else:\n raise GenerateError(\"Missing info\")\n\n return output", "def source(self):\n return \"\\n\".join(self.lines)", "def __str__(self):\n\n desc = self.description\n if desc is not None:\n return str(desc)\n\n desc = self.debugDescription\n if desc is not None:\n return str(desc)\n\n return repr(self)", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n return (\n f'{self.__class__.__name__}'\n f'\\n> defined by: {self._str_meta_()}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self)} objects'\n f'\\n{APtable.__str__(self)}'\n )", "def source(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"source\")", "def to_str(self):\n return 
pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())" ]
[ "0.68854165", "0.6721115", "0.6685485", "0.6684266", "0.6650188", "0.6645341", "0.66202325", "0.65916854", "0.65053624", "0.65022147", "0.64303386", "0.64116", "0.63948935", "0.6373004", "0.63408643", "0.63251483", "0.6323788", "0.63087815", "0.62860817", "0.62860817", "0.6268616", "0.6266412", "0.6254074", "0.62513965", "0.6218467", "0.6213435", "0.6207252", "0.62066483", "0.620239", "0.6201842", "0.619673", "0.61868685", "0.61868685", "0.61868685", "0.61868685", "0.61868685", "0.61805034", "0.6169034", "0.61469513", "0.6138477", "0.6136791", "0.6136791", "0.6136791", "0.61198056", "0.6110054", "0.61091197", "0.6092851", "0.60918844", "0.6084199", "0.60817534", "0.6079626", "0.60757697", "0.6064288", "0.6063823", "0.6058682", "0.6057326", "0.6043706", "0.6031507", "0.603106", "0.6018094", "0.6018094", "0.6018094", "0.6018094", "0.6018094", "0.6012027", "0.6010381", "0.6006936", "0.6005852", "0.5997586", "0.59940034", "0.598608", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466", "0.5979466" ]
0.6042121
58
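For context, a self-contained sketch of the kind of SQLite-backed source the `__repr__` above belongs to (the `SqliteSource` class name and constructor are assumptions; the dataset records only the method):

    import sqlite3

    class SqliteSource:
        def __init__(self, connection, table):
            self._connection = connection
            self._table = table

        def __repr__(self):
            cls_name = self.__class__.__name__
            conn_name = str(self._connection)
            tbl_name = self._table
            return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)

    conn = sqlite3.connect(':memory:')
    # Prints something like: SqliteSource(<sqlite3.Connection object at 0x...>, table='mytable')
    print(repr(SqliteSource(conn, 'mytable')))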
Return list of column names.
def columns(self):
    cursor = self._connection.cursor()
    cursor.execute('PRAGMA table_info(' + self._table + ')')
    return [x[1] for x in cursor.fetchall()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getColumnNames(self):\n return self.colnames", "def getColumnsNames(self):\r\n ColsName = []\r\n for i in range(len(self.columns)):\r\n ColsName.append(self.columns[i].getColName())\r\n return ColsName", "def column_names(self):\n return self.data.columns.values", "def getColumnNames(self):\n return self.columnNames", "def columns_names(self):\r\n return self._columns_names", "def get_columns(self):\n columns = []\n for column in self.columns:\n columns.append(column.data.name)\n return columns", "def columns(self) -> List[str]:\n\n return [column.name for column in self.plaincolumns]", "def get_column_names(cls):\n return cls._meta.get_column_names()", "def column_names(self):\n return self._hndl.column_names()", "def column_names(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"column_names\")", "def columns(self):\n return self._column_names", "def get_sql_columns(self, request):\n cur = self.execute(request)\n col_name_list = [tuple[0] for tuple in cur.description]\n cur.close()\n return col_name_list", "def names(self):\n return self._names_to_cols.keys()", "def columns(self) -> List[str]:\n return self._columns.tolist()", "def get_column_names(self, table):\n try:\n logging.info(f'Getting column names of table `{table}`')\n return list(self.execute(f'SELECT * FROM `{table}`'))\n except:\n logging.exception('Something went wrong getting column names. Check trace.')\n return", "def get_colnames(self):\n\n cd = self.conn.execute('select * from atom')\n print('Possible column names are:')\n names = list(map(lambda x: x[0], cd.description))\n print('\\trowID')\n for n in names:\n print('\\t'+n)", "def list_columns(self):\n columns = []\n for icol in range(0, self.ncolumns()):\n columns.append(self.table_column(icol).title())\n return columns", "def columns(self):\n return self._names_to_cols.values()", "def columns(self):\n result = self.execute(self.commands.table_columns(self.name))\n return [x[0] for x in result]", "def column_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"column_names\")", "def get_columns(self):\n if self.dbtype == 'pg':\n q = \"select attname from pg_class, pg_attribute where relname = %s and attrelid = pg_class.oid and attnum > 0 and attisdropped = false;\"\n else:\n q = \"select columns.name from columns, tables where tables.name = %s and tables.id = columns.table_id;\"\n ret = []\n for (attr,) in self.query(q, self.tablename):\n ret.append(str(attr))\n return ret", "def columns(self, table_name):\n table = self._create_table(table_name)\n return [c.name for c in table.c]", "def names(self):\n \n return self.column_names.copy()", "def get_column_names(self):\n names = []\n names.append(self.question_column + \"_agree_lot\")\n names.append(self.question_column + \"_agree_little\")\n names.append(self.question_column + \"_neither\")\n names.append(self.question_column + \"_dis_little\")\n names.append(self.question_column + \"_dis_lot\")\n return names", "def column_names(\n self,\n table: exp.Table | str,\n only_visible: bool = False,\n dialect: DialectType = None,\n normalize: t.Optional[bool] = None,\n ) -> t.List[str]:", "def column_headers(self) -> Sequence[str]:\n return self._column_headers", "def get_columns(self) -> List[str]:\n return self.get_dyf().toDF().columns", "def getColumnNames(self, tablename):\n\n # Check if tablename exists in database\n if tablename in self.getTableNames():\n # The specific command depends on whether we are using mysql or\n # sqlite\n if 
self.connector == 'mysql':\n sqlcmd = \"SHOW COLUMNS FROM \" + tablename\n self._c.execute(sqlcmd)\n columnnames = [el[0] for el in self._c.fetchall()]\n else:\n sqlcmd = \"PRAGMA table_info(\" + tablename + \")\"\n self._c.execute(sqlcmd)\n columnnames = [el[1] for el in self._c.fetchall()]\n\n return columnnames\n\n else:\n print('Error retrieving column names: Table does not exist on ' +\n 'database')\n return []", "def columns(self):\n return self._columns.keys()", "def get_colnames(self, model):\n return [\n field.column \n for field in model._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]", "def header(self):\n\n return [c.name for c in self.columns]", "def _columns(cursor, table):\n cursor.execute('''\n SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = %s\n ''', (table, ))\n return [column['column_name'] for column in cursor.fetchall()]", "def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp", "def columns(self):\n return self.__column_list", "def names(self):\n if self.dtype.fields:\n return list(self.dtype.names)\n elif getattr(self, \"_coldefs\", None) is not None:\n return self._coldefs.names\n else:\n return None", "def get_columns_display(self):\n columns = []\n for column in self.columns:\n if None != column.display.name:\n columns.append(column.display.name)\n else:\n columns.append(column.data.name)\n return columns", "def get_all_columns(self):\n df = self.get_prep_data()\n col = [c for c in df.columns if c not in ['target', 'idd', 'ft_data_dt']]\n return col", "def limmag_colnames(self):\n return self.__limmag_colnames", "def get_columns(self):\n return self.columns", "def _str_colnames(self):\n return ', '.join(self.galcat.colnames)", "def columns(self):\n if self._default_index:\n return list(self._df.columns)\n return list(self._df.index.names) + list(self._df.columns)", "def _str_colnames(self):\n return ', '.join(self.colnames)", "def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"", "def get_columns(self):\r\n return self.__columns", "def member_names(self) -> Iterator[str]:\n return yield_column_names(self.schema)", "def list_data(self, as_strings=False):\n if self.df is None:\n return [] \n if as_strings:\n return [str(col) for col in self.df.columns]\n else:\n return list(self.df.columns.values)", "def headers(self):\n return [column.header if column else '' for column in self.columns]", "def cols(self) -> List[str]:\n if self._cols:\n cols = self._cols\n else:\n if os.path.isfile(self.path):\n cols = io.list_columns_in_parquet(self.path)\n else:\n cols = db.list_cols(fqtable=self.fqtable)\n self._cols = cols\n return cols", "def columns(self):\n return self._coldefs", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def column_names(self):\n if self._is_vertex_frame():\n return self.__graph__.__proxy__.get_vertex_fields()\n elif self._is_edge_frame():\n return self.__graph__.__proxy__.get_edge_fields()", "def dataset_headers(dataset):\n return list(dataset.columns.values)", "def column_names(self) -> D2TXTColumnNameView:\n return D2TXTColumnNameView(self._column_names)", "def output_columns(self) -> 
List[str]:", "def _get_column_names(self, query):\n\n try:\n logger.info(\"Retrieving column names\")\n sql = \"SELECT * FROM ({}) WHERE 1 = 0\".format(query)\n self.execute(sql)\n results = [desc for desc in self.cursor.description]\n if len(results) > 0:\n return [result[0].strip() for result in results]\n else:\n return None\n except Exception as e:\n logger.error(\"Error retrieving column names\")\n raise", "def show_columns(self):\n\n df = self.__df_timings\n return df.columns", "def columns(self):\n return list(self._scala.observationColumns())", "def columns(self):\n return self.__columns", "def columns(self):\n return self._columns", "def columns(self):\n return self._columns", "def _model_columns(db_model):\n return [c.name for c in db_model.__table__.columns]", "def all_columns(self):\r\n try:\r\n csv_file = open(self.file_path,'rbU')\r\n csv_rows = csv.DictReader(csv_file)\r\n _all_columns = csv_rows.fieldnames\r\n csv_file.close()\r\n return _all_columns\r\n except:\r\n return []", "def column_names(self) -> Tuple[List[str], List[str]]:\n return (\n np.array(self._dataset_columns)[self._input_column_indices].tolist(),\n np.array(self._dataset_columns)[self._output_column_indices].tolist(),\n )", "def get_table_columns(self):\n raise NotImplementedError(\"Please implement this method\")", "def names():\n\n # Use Pandas to perform the sql query\n stmt = db.session.query(metadata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df[\"ID\"]))", "def graphcols(self):\n columns = []\n table = self.__parent_table\n for col in self.__column_list:\n columns.append(table.table_column(col).title())\n return columns", "def mag_colnames(self):\n return self.__mag_colnames", "def get_colnames(cur, table):\n cur.execute(\"\"\"DESCRIBE {}\"\"\".format(table))\n cols = cur.fetchall()\n return [col[0] for col in cols]", "def _columns(cls):\n columns = []\n for name, member in inspect.getmembers(cls):\n if (not name.startswith('_') and\n isinstance(member, InstrumentedAttribute)):\n columns.append(name)\n return columns", "def columns(self):\n return self._columns\n # return Index(self._data_columns)", "def _get_extra_column_names(self):\n if isinstance(self.extra_columns, int):\n my_columns = \"%s unnamed columns\" % self.extra_columns\n elif isinstance(self.extra_columns, list):\n if all([isinstance(X, tuple) for X in self.extra_columns]):\n my_columns = \",\".join([X[0] for X in self.extra_columns])\n elif all([isinstance(X, str) for X in self.extra_columns]):\n my_columns = \",\".join(self.extra_columns)\n\n return my_columns", "def header(self) -> list:\n cols = self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header", "def freedom_columns():\n\n # Use Pandas to perform the sql query\n stmt = db.session.query(Freedom_short).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df.columns)[2:])", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0], c.description))\n return names", "def columns():\n project = request.args.get('project')\n\n with sqlite3.connect('projects/%s.db'%project) as connection:\n cursor = connection.execute('select * from movements')\n names = list(map(lambda x: x[0], cursor.description))\n return ','.join(names) + '\\n'", "def Columns(self):\n return 
self.columns", "def columns(self):\n columns = self.query.get_columns()\n # Adjust any column names which don't match field names\n for query_name, model_name in self.translations.items():\n # Ignore translations for nonexistent column names\n try:\n index = columns.index(query_name)\n except ValueError:\n pass\n else:\n columns[index] = model_name\n return columns", "def key_columns(self):\n return [str(column) for id, column in self._columns.iteritems() if column.is_key]", "def cols(self):\n\n return []", "def columns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"columns\")", "def dropped_column_name_list(self):\n column_list = []\n new_tbl_columns = [col.name for col in self._new_table.column_list]\n for col in self._old_table.column_list:\n if col.name not in new_tbl_columns:\n column_list.append(col.name)\n return column_list", "def get_table_columns(conn, table_name):\n database_name = get_credentials()[-1]\n table_schema = table_name.split('.')[0]\n table_name = '.'.join(table_name.split('.')[1:])\n table_columns = pd.read_sql(\"select column_name from information_schema.columns \"\n f\"where table_catalog = '{database_name}' \"\n f\"and table_schema = '{table_schema}' \"\n f\"and table_name = '{table_name}';\", conn)\n return table_columns['column_name'].tolist()", "def get_annot_colnames() -> List[str]:\n\n target_annot_cols = list()\n\n for col in JOINT_COLS:\n target_annot_cols.append('{}_{}'.format(col, 'cf'))\n\n target_annot_cols += ANNOT_COLS\n\n return target_annot_cols", "def getColnames(self, dataset=\"X\"):\n if dataset in (\"X\",\"x\",):\n return [c[\"colname\"] for c in self._columns if c[\"dataset\"]==\"X\"]\n elif dataset in (\"y\",\"Y\",):\n return [c[\"colname\"] for c in self._columns if c[\"dataset\"]==\"y\"]\n else:\n raise Exception(\"Dataset unknown: {}\".format(dataset))", "def get_all_columns_name(input_glob):\n reader = tf.python_io.TableReader(input_glob,\n selected_cols=\"\",\n excluded_cols=\"\",\n slice_id=0,\n slice_count=1,\n num_threads=0,\n capacity=0)\n schemas = reader.get_schema()\n return set([col_name for col_name, _, _ in schemas])", "def __get_column_list(self, table):\n cursor = self.__database.cursor()\n sql_query = \"PRAGMA table_info(\" + table + \")\"\n column_list = []\n metadata = cursor.execute(sql_query)\n for item in metadata:\n if item[1] != 'cycle':\n column_list.append(item[1])\n return sorted(column_list)", "def get_vendors_grid_column_names_by_order(self):\n self.column_name_list = self.get_grid_column_names_by_order(self.vendors_div_id)\n return self.column_name_list", "def _get_table_sql_columns(columns=[]):\n\n\tif len(columns) == 0:\n\t\tsql_columns = '*'\n\n\telse: \n\t\tsql_columns = \",\".join(columns)\n\n\treturn sql_columns", "def columns(self):\n return self.frame.columns", "def columns(self):\n return self._meta.columns + self.new_columns", "def table_columns(auth, table_name):\n return [row[0] for row in DBMySQL.csfr(auth, \"describe \" +table_name)]", "def get_headers(df):\n return df.columns.values", "def get_columns(hdu, columns):\n if columns is not None:\n columns = columns.split(',')\n columns = [c.lower() for c in columns]\n else:\n columns = hdu.get_colnames()\n\n return columns", "def get_columns(self, table):\n if table not in self.columns:\n self.columns[table] = [\n row[0] for row in self.db.iter('describe ' + table)]\n return self.columns[table]", "def _generate_column_names(self):\n names = []\n # Assuming same number of compounds for every reaction!\n\n names = 
['compound_{}'.format(i) for i in range(self.total_compounds)]\n names += ['compound_{}_amount'.format(i)\n for i in range(self.total_compounds)]\n for grid_param in self.params_grid_data.keys():\n names.append(grid_param)\n\n return names", "def getTableKeys(self, tableName):\n sql = \"SHOW COLUMNS FROM %s\" % tableName\n resultSet = []\n try:\n results = self.selectOpt(sql)\n for r in results:\n resultSet.append(r['Field'])\n except:\n print(\"[ERROR] Table '%s' does not exist.\" % tableName)\n return resultSet", "def columns(self):\r\n _columns = self.base_columns + self.veg_columns\r\n return _columns", "def print_column_names(self):\n counter = 1\n try:\n for col_names in self.cursor.description:\n # print(self.cursor.description[col_names][0])\n print(\"\"\"Attribut{}: {:<5}, Typ: {:<5}, DisplaySize: {} InternalSize: {:<5}, Precision: {},\n \"Scale: {}, Null_Ok: {}\"\"\"\n .format(counter,\n col_names[0],\n col_names[1],\n col_names[2],\n col_names[3],\n col_names[4],\n col_names[5],\n col_names[6]))\n counter += 1\n except p.Error as exception:\n print(exception.pgerror)\n except Exception as general_exception:\n print(general_exception)", "def get_cols(self) :\n\n return list(self.cols)[1:]" ]
[ "0.8711412", "0.8577636", "0.8565291", "0.85644644", "0.8475408", "0.83783364", "0.83479637", "0.8311044", "0.8298709", "0.8245695", "0.82290727", "0.81678593", "0.8163773", "0.8153073", "0.8139276", "0.8136085", "0.8114441", "0.80130196", "0.7956286", "0.7935837", "0.79200006", "0.7918403", "0.79019845", "0.7831752", "0.7829562", "0.7828419", "0.78227437", "0.7788773", "0.77561027", "0.77389824", "0.7673932", "0.76626813", "0.76308185", "0.7602217", "0.7597343", "0.7575155", "0.753775", "0.75325453", "0.7521267", "0.7518614", "0.7518259", "0.7509962", "0.74888515", "0.7475872", "0.7471653", "0.743059", "0.74290884", "0.74229205", "0.74083716", "0.7388344", "0.73665595", "0.73581094", "0.73279816", "0.7271036", "0.7269046", "0.72544056", "0.72541875", "0.72480863", "0.7240795", "0.7240795", "0.72300136", "0.7229686", "0.7219741", "0.71943426", "0.7137921", "0.7134281", "0.71269864", "0.7076054", "0.704646", "0.70424354", "0.70238394", "0.70020074", "0.699128", "0.69755185", "0.6959693", "0.693707", "0.6933496", "0.689485", "0.68826497", "0.6875797", "0.6874523", "0.6873603", "0.6855592", "0.6849576", "0.68449193", "0.68405724", "0.6828476", "0.6827315", "0.68251806", "0.68185544", "0.680018", "0.6786618", "0.6782879", "0.678272", "0.67788255", "0.677408", "0.676952", "0.67581606", "0.67516583" ]
0.7413343
49
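A usage sketch for the `columns` document above, stripped of the class scaffolding: `PRAGMA table_info` yields one row per column, and index 1 of each row holds the column name (the table and its schema are invented for the example):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE mytable (a TEXT, b INTEGER)')

    cursor = conn.cursor()
    cursor.execute('PRAGMA table_info(mytable)')
    names = [row[1] for row in cursor.fetchall()]  # row[1] is the column name
    assert names == ['a', 'b']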
Return iterable of dictionary rows (like csv.DictReader).
def __iter__(self):
    cursor = self._connection.cursor()
    cursor.execute('SELECT * FROM ' + self._table)
    column_names = self.columns()
    dict_row = lambda x: dict(zip(column_names, x))
    return (dict_row(row) for row in cursor.fetchall())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ler(self) -> Iterable[Dict[str, str]]:\n with open(self.arquivo) as f:\n for linha in DictReader(f, dialect=self.DialetoCsv):\n yield linha", "def rows(self):\r\n _rows = []\r\n try:\r\n csv_file = open(self.file_path,'rbU')\r\n csv_rows = csv.DictReader(csv_file)\r\n for row in csv_rows:\r\n _rows.append(row)\r\n csv_file.close()\r\n return _rows\r\n except:\r\n return _rows", "def iter_dict(self):\n\n itr = iter(self)\n\n headers = next(itr)\n\n for row in itr:\n yield dict(zip(headers, row))", "def row_generator(fname, fieldnames):\n with open(fname, 'r') as f:\n reader = csv.DictReader(f, fieldnames, delimiter='\\t')\n for i, row in enumerate(reader):\n logger.debug('On row %i', i)\n yield row", "def DictData(self):\n reader = csv.DictReader( open( self.file, \"rU\" ), dialect = \"excel\" )\n return reader", "def get_csv_data(file_name: str) -> Iterator[list]:\n with open(file_name) as f:\n # creating a reader instance that can iterate over rows of file.\n reader = DictReader(f)\n\n # iterating over rows:\n for row in reader:\n yield dict(row) # returning the dicts for each row in the dataset.", "def read_csv_rows(path: str) -> list[dict[str, str]]:\n file_handle = open(\"survey\", encoding=\"utf8\")\n csv_reader = DictReader(file_handle)\n rows: list[dict[str, str]] = []\n for row in csv_reader:\n rows.append(row)\n file_handle.close()\n return rows", "def _get_raw_extract_iter(self) -> Iterable[Dict[str, Any]]:\n rows = self.alchemy_helper.execute_query()\n for row in rows:\n yield row", "def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row", "def iterate_csv(path, num_skip = 0, **kwargs):\n import csv\n with open(path, \"rU\") as fp:\n for row_as_dict in csv.DictReader(fp, **kwargs):\n if num_skip > 0:\n num_skip -= 1\n continue\n yield row_as_dict", "def extract(input):\n reader = csv.DictReader(input)\n return reader", "def _get_raw_extract_iter(self):\n # type: () -> Iterator[Dict[str, Any]]\n row = self._alchemy_extractor.extract()\n while row:\n yield row\n row = self._alchemy_extractor.extract()", "def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:\n row = self._alchemy_extractor.extract()\n while row:\n yield row\n row = self._alchemy_extractor.extract()", "def read_csv_rows(filename: str) -> list[dict[str, str]]:\n result: list[dict[str, str]] = []\n \n \"\"\"Open a handle to the data file.\"\"\"\n file_handle = open(filename, \"r\", encoding=\"utf8\")\n\n \"\"\"Prepare to read the data in the file as CSV instead of just strings.\"\"\"\n csv_reader = DictReader(file_handle)\n\n \"\"\"Read each row of the CSV line by line.\"\"\"\n for row in csv_reader:\n result.append(row)\n\n \"\"\"Close file.\"\"\"\n file_handle.close()\n\n return result", "def __iter__(self):\n cols = tuple([col[0] for col in self.description])\n for r in self._rows:\n yield OrderedDict(zip(cols, r))", "def __iter__(self):\n for key, value in self.read():\n yield key, value", "def __iter__(self):\n return iter(self.__rows)", "def itervalues(self):\n return DictValuesIterator(self)", "def __iter__(self):\n for line in self._fp:\n row = line.decode(\"utf-8\").split(\"\\t\")\n yield row[1], row[2], row[3], json.loads(row[4])['weight']", "def __iter__(self):\n return iter(self.rows)", "def __iter__(self):\n with open(self.path, 'r') as data:\n reader = csv.DictReader(data)\n for idx, row in enumerate(reader):\n row['rowid'] = idx + 1\n yield row", "def __iter__(self):\n for row in self.rows:\n yield row", "def 
csv_dict_reader(file_obj):\n reader = csv.DictReader(file_obj, delimiter=',')\n for line in reader:\n print(line[\"first_name\"]),\n print(line[\"last_name\"])", "def csv_dict_reader(file_path):\r\n with open(file_path, 'r') as file_obj:\r\n\r\n reader = csv.DictReader(file_obj, delimiter=',')\r\n for line in reader:\r\n #print(line[\"variable_name \"]),\r\n print(line[\"dataset\"])", "def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map {}'.format(row.get('id', None)))\n yield row", "def unicode_csv_reader(utf8_data, **kwargs):\r\n\r\n csv_reader = csv.DictReader(utf8_data, **kwargs)\r\n for row in csv_reader:\r\n yield {unicode(key, 'utf-8'): unicode(value, 'utf-8') for key, value in row.iteritems()}", "def parse(filehandle):\n for row in csv.DictReader(filehandle):\n yield cccalc.types.Fill(row)", "def csv(self):\n lines = self._parsecsv(self.raw)\n\n # set keys from header line (first line)\n keys = next(lines)\n\n for line in lines:\n yield dict(zip(keys, line))", "def rowgen(searchcursor_rows):\n rows = searchcursor_rows\n row = rows.next() \n while row:\n yield row\n row = rows.next()", "def iteritems(self):\n return DictItemsIterator(self)", "def select_rows_dict_cursor(self, query):\r\n self.connect()\r\n with self.conn.cursor(cursor_factory=DictCursor) as cur:\r\n cur.execute(query)\r\n records = cur.fetchall()\r\n cur.close()\r\n return records", "def rows(self):\n with self.input().open('r') as fobj:\n for line in fobj:\n yield line.strip('\\n').split('\\t')", "def read_csv(path: str) -> list[dict[str, str]]:\n with open(path, 'r') as f:\n return list(csv.DictReader(f))", "def CSVReader(self, input_file):\n f = open(input_file, 'r')\n reader = csv.reader(f)\n headers = reader.next()\n reader = csv.DictReader(f, headers)\n return reader", "def source_rows(gbf_database_path, dataset_name):\r\n conn = sqlite3.connect(gbf_database_path)\r\n cursor = conn.cursor()\r\n sql = \"select * from {}\".format(dataset_name)\r\n cursor.execute(sql)\r\n column_names = [column[0] for column in cursor.description]\r\n for row in cursor:\r\n yield dict(zip(column_names, row))", "def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)", "def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)", "def svevent_reader(in_file):\n with open(in_file) as in_handle:\n while 1:\n line = in_handle.next()\n if line.startswith(\">\"):\n break\n header = line[1:].rstrip().split(\"\\t\")\n reader = csv.reader(in_handle, dialect=\"excel-tab\")\n for parts in reader:\n out = {}\n for h, p in zip(header, parts):\n out[h] = p\n yield out", "def read_file(filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n return list(reader)", "def iterrows(self):\n return (self.Row(*row_vals) for row_vals in izip(*self.columns))", "def csv_dict_reader(file_obj):\n #import re\n #file = open(file_obj)\n\n # reader = csv.DictReader(file_obj)\n # for line in reader:\n # print(line[\"Name\"])", "def __iter__(self):\n\n return iter([key for key in self._data.keys()])", "def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row,parsers)", "def iter_dicts(self, with_index=True) -> Generator[Dict[Column, Any], None, None]:\n for index in self.index:\n row = {\"index\": index} if with_index else {}\n for column in self._columns:\n row[column] = self.get_cell(index, column)\n yield row", "def read_spreadsheet():\n 
txtlines = open(DATA_FILEPATH).readlines()\n rows = list(csv.DictReader(txtlines))\n return rows", "def next(self):\n try:\n # get entries:\n entries = self.fhandle.readline().rstrip().split()\n # if not a polya-samples row, skip it:\n if (entries[0] != 'polya-samples'):\n return next(self)\n # format row and return:\n else:\n row = { self.names[k]: entries[k] for k in range(len(self.names)) }\n return row\n except:\n raise StopIteration()", "def __iter__(self):\n columns = self.columns()\n if self._default_index:\n for row in self._df.itertuples(index=False):\n yield dict(zip(columns, row))\n else:\n mktup = lambda x: x if isinstance(x, tuple) else tuple([x])\n flatten = lambda x: mktup(x[0]) + mktup(x[1:])\n for row in self._df.itertuples(index=True):\n yield dict(zip(columns, flatten(row)))", "def __iter__(self) -> iter:\n return iter(self._dict)", "def csv_iterator(f_csv, clean=True, _PARALLEL=False):\n\n with open(f_csv, encoding=\"utf-8\") as FIN:\n CSV = csv.DictReader(FIN)\n\n for row in CSV:\n yield row", "def __iter__(self):\n for item in self._reader:\n yield item", "def get_iter(self, reader: DataReader):\n\n if reader is None:\n return None\n\n xs, ys = get_dataset(reader)\n\n return self.prepare_dataset(xs, ys)", "def read_csv(path):\r\n output = []\r\n for row in csv.DictReader(open(path)):\r\n output.append(row)\r\n return output", "def __iter__(self):\n # Return an iterator for the keys in the underlying dictionary.\n return iter(self.data)", "def read_data(options):\n reader = csv.reader(open(options.datafile, \"U\"))\n raw_rows = list(reader)\n fieldnames = raw_rows[0]\n rows = []\n for raw_row in raw_rows[1:]:\n row = {}\n try:\n for i in range(len(fieldnames)):\n row[fieldnames[i]] = raw_row[i]\n rows.append(row)\n except IndexError:\n print \"SHORT ROW:\", raw_row\n print \"Read %d rows\" % len(rows)\n return rows", "def read_records_from_input(self, input_stream: BinaryIO) -> Iterator[dict]:\n\n self.reader = parse_xml(input_stream).getroot()\n\n for record in self.reader:\n yield {attribute.tag: attribute.text for attribute in record}", "def iter_rp(self):\n\n from .rowproxy import RowProxy\n\n itr = iter(self)\n\n headers = next(itr)\n\n row_proxy = RowProxy(headers)\n\n for row in itr:\n yield row_proxy.set_row(row)", "def records(self):\r\n if not self.fields:\r\n raise Exception(\"Can not provide records: fields for pipe are not initialized.\")\r\n fields = self.fields.names()\r\n for row in self.rows():\r\n yield dict(zip(fields, row))", "def read_csv(path):\n output = []\n for row in csv.DictReader(open(path)):\n output.append(row)\n return output", "def getNOAARows(file):\n\n with open(file) as f:\n rows = [row for row in csv.DictReader(f)]\n return rows", "def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k", "def __iter__(self):\n if not self.resolved:\n self._resolve_reader()\n\n if isinstance(self.resolved, WriterType):\n raise TypeError('Writer is not iterable')\n\n def gen():\n for row in self.resolved:\n if self.header and self.rowklass:\n row = self.rowklass(*row)\n yield row\n\n return gen()", "def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]", "def read_csv(filename):\n with open(filename, 'rb') as f:\n reader = unicodecsv.DictReader(f)\n return list(reader)", "def read_report(file_obj):\n return [row for row in csv.DictReader(file_obj, delimiter=';')]", "def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in 
csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data", "def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())", "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def datagetter(cls):\n with open('myfile', 'rt') as f:\n rows = [r for r in csv.reader(f)]\n dothing = lambda _: [i for i, v in enumerate(_)]\n rows = [dothing(_) for _ in rows]\n raise NotImplementedError('You need to implement this yourlself!')\n return rows", "def rows(self):\n if self.header is None:\n raise ValueError('header cannot be None.')\n header = list(enumerate(self.header))\n return ({'count': v, **{f: k[i] for i, f in header}} for k, v in self.items())", "def __iter__(self) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in self._table.items())", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())", "def __iter__(self):\r\n for column, value in self.items():\r\n # this uses __getitem__, using the name (rather than the accessor)\r\n # is correct – it's what __getitem__ expects.\r\n yield value", "def iter_row(self):\n yield from self.url.generator.iter_rp", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def read(self, start_key=\"\", end_key=None, limit=None):\n count = 0\n for key, value in self.read_typed_(start_key, end_key):\n if value[0] == recordio_entry_types.STRING:\n yield key, value[1:]\n elif value[0] == recordio_entry_types.MARSHAL:\n yield key, marshal.loads(value[1:])\n elif value[0] == recordio_entry_types.CPICKLE:\n yield key, cPickle.loads(value[1:])\n else:\n raise ValueError()\n count += 1\n if limit != None and count >= limit:\n break", "def Iterator():\n return _table.Iterator()", "def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()", "def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()", "def getItemIter(self):\n for key, raw in self.db.getAllItemIter(db=self.sdb, split=False):\n keys = tuple(key.decode(\"utf-8\").split('.'))\n yield (keys, coring.Serder(raw=bytes(raw)))", "def csv_dict_reader(file_obj, data = [], cost = []):\n reader = csv.DictReader(file_obj, delimiter=',')\n for line in reader:\n data.append(line[\"Дата\"]),\n cost.append(line[\"Расход\"])", "def csv2dicts(csvfile, names=None):\n data = []\n for row_index, row in enumerate(csvfile):\n if row_index == 0:\n if names:\n keys = names\n else:\n keys = row\n print(keys)\n continue\n data.append({key: value for key, value in zip(keys, row)})\n return data", "def objects(self):\n _, c = self.get_column(0)\n size = len(c)\n headers = self.headers()\n for i in range(size):\n obj = {}\n for h in headers:\n _, col = self.get_column(h)\n val = col[i]\n obj[h] = val\n yield obj", "def unicode_csv_reader(data, **kwargs):\r\n\tdata_file = csv.reader(data, **kwargs)\r\n\tfor row in data_file:\r\n\t\tyield [str(cell) for cell in row]", "def iteroriginal(self):\n for key in self:\n vals = _dict_getitem(self, key)\n for val in vals[1:]:\n yield vals[0], val", "def __iter__(self):\n yield from chain.from_iterable(self.data.values())", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not 
None:\r\n yield key, value", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def iteritems(self):\n\t\tself.filep.seek(self.start + 2048)\n\n\t\t# iterate until we hit the enddata marker\n\t\twhile self.filep.tell() < self.enddata - 1:\n\t\t\t# fetch the lengths of the key and value\n\t\t\t(klen, vlen) = unpack('<LL', self.filep.read(8))\n\n\t\t\t# yield the key and value as a tuple\n\t\t\tyield (self.filep.read(klen), self.filep.read(vlen))", "def input_row():\n return {\n 'foo': 1,\n 'bar': 2,\n 'spam': 3,\n 'eggs': 4\n }", "def values(self):\n\t\treturn iter(self.data)", "def get_rows(project_id, dataset_id, table_name, max_results=None):\n client = bigquery.Client(project=project_id)\n dataset = client.dataset(dataset_id)\n table = dataset.table(table_name)\n table.reload()\n fields = [x.name for x in table.schema]\n result = table.fetch_data(max_results=max_results)\n token = result.next_page_token\n while True:\n for row in result:\n yield _row_to_dict(row, fields)\n if token is None:\n break\n result = table.fetch_data(page_token=token, max_results=max_results)\n token = result.next_page_token\n raise StopIteration", "def load_csv(path: Path) -> Any:\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n items = list(reader)\n return items", "def _yield_rows(self, ws_name):\n for idx, row in self._get_rows(ws_name):\n if idx >= WS_ROW_OFFSETS[ws_name] and \\\n len(row[0].value):\n yield idx, row", "def getItemIter(self):\n for key, val in self.db.getAllItemIter(db=self.sdb, split=False):\n keys = tuple(key.decode(\"utf-8\").split('.'))\n yield (keys, self.klas(qb64b=bytes(val)))", "def parse_csv_input_file(input_file):\n with open(input_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n dict = {i: x for i, x in item.items()}\n yield(dict)", "def __iter__(self):\n return self._data_dict.__iter__()", "def itervalues(self):\n for key in self:\n yield self[key]" ]
[ "0.7213585", "0.7172376", "0.7104608", "0.6873515", "0.6866606", "0.66950583", "0.66675246", "0.66160226", "0.6596041", "0.65936637", "0.6593291", "0.65640014", "0.6563573", "0.654688", "0.6439478", "0.6418613", "0.64027905", "0.6361362", "0.6346992", "0.6337884", "0.63189554", "0.631142", "0.62311065", "0.6210248", "0.6184283", "0.61772054", "0.61769783", "0.61687773", "0.6141384", "0.61269593", "0.6122971", "0.6109356", "0.61077666", "0.60995215", "0.60784596", "0.6069019", "0.6069019", "0.6036994", "0.6036384", "0.6026659", "0.6026207", "0.60063446", "0.6003704", "0.5979554", "0.5975413", "0.596862", "0.5968594", "0.5953868", "0.59410816", "0.593401", "0.592114", "0.5913267", "0.5908533", "0.5904586", "0.5902973", "0.58806247", "0.58777237", "0.58677816", "0.5835538", "0.5833605", "0.58305806", "0.5827405", "0.5823586", "0.58157754", "0.58036256", "0.5794662", "0.57919633", "0.579103", "0.5789955", "0.57873255", "0.5776614", "0.57657963", "0.57578504", "0.5757201", "0.5750546", "0.5750546", "0.5749828", "0.5741079", "0.5739603", "0.5735601", "0.57287806", "0.57275414", "0.57216585", "0.5715342", "0.5714664", "0.56762534", "0.566983", "0.5668099", "0.5668099", "0.5657369", "0.56544447", "0.56418276", "0.5639012", "0.5625656", "0.5617552", "0.56124777", "0.5604918", "0.5603474", "0.56014174" ]
0.66041005
9
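Likewise, an illustrative sketch of the dict-row iteration in the `__iter__` document above (table name and contents are made up; the real method obtains `column_names` from `self.columns()`):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE mytable (a TEXT, b INTEGER)')
    conn.execute("INSERT INTO mytable VALUES ('x', 1)")

    cursor = conn.cursor()
    cursor.execute('SELECT * FROM mytable')
    column_names = ['a', 'b']
    rows = (dict(zip(column_names, row)) for row in cursor.fetchall())
    assert next(rows) == {'a': 'x', 'b': 1}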
Return iterable of tuples containing distinct column values.
def distinct(self, columns, **kwds_filter):
    if not _is_nsiterable(columns):
        columns = (columns,)
    self._assert_columns_exist(columns)
    select_clause = [self._normalize_column(x) for x in columns]
    select_clause = ', '.join(select_clause)
    select_clause = 'DISTINCT ' + select_clause
    cursor = self._execute_query(select_clause, **kwds_filter)
    return CompareSet(cursor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_cols(self):\n return list(set([coord[1] for coord in self.landscape]))", "def unique_column_values(rows, column_name):\r\n\r\n values = [] #Create an empty list\r\n for row in rows: #Iterate through each row\r\n values.append(row[column_name]) \r\n values = set(values)\r\n return values", "def unique_column_values(rows, column_name):\n # declare a set that guarantees no duplicates in the answer\n value_set = set()\n # for all rows, add the value of indicated column to the set\n for row in rows:\n \tvalue_set.add(row[column_name])\n return value_set", "def iterall(self):\r\n return (column for name, column in self.iteritems())", "def unique_vals(rows, col):\n return set([row[col] for row in rows])", "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def unique_vals(client, proj, dataset, table, col_name):\n if not client.check_table(dataset, table):\n return []\n res = run_bq_query(client, \"SELECT %s FROM [%s:%s.%s] GROUP BY %s ORDER BY %s\" % (col_name, proj, dataset, table, col_name, col_name), 120)\n return [rec[col_name] for rec in res]", "def collect_columns():\n return ((x, y) for x in range(72) for y in range(x + 9, 81, 9))", "def get_unique_elements(column_name:str) -> list:\n c=data_to_cluster[column_name].values.tolist()\n cuisines_list=[]\n for i in range(len(c)):\n item=ast.literal_eval(c[i])\n for j in range(len(item)):\n cuisines_list.append(item[j][0])\n c_s=set(cuisines_list)\n cuisines=list(c_s)\n return cuisines", "def get_values(self):\n return set(self._table.keys())", "def unique_rows(self):\n return list(set([coord[0] for coord in self.landscape]))", "def list_unique(df):\n\n # print unique values of each column\n for col in df.columns:\n print(f\"{col}:\")\n print(f\"{list(df[col].unique())}\\n\")", "def distinct(x):\n return list(set(x))", "def unique_values(df):\n cols = list(df.columns)\n\n for col in cols:\n uniques = (df[col]).unique()\n print(f\"{len(uniques)} unique items in {col}: {df[col].loc[0]},{df[col].loc[1]}, {df[col].loc[2]}...\")", "def __create_unique_lists(dataframe: pd.DataFrame) -> (list, list):\n return dataframe['movieId'].unique(), dataframe['userId'].unique()", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def __iter__(self):\r\n for column_id in self._columns.keys():\r\n yield column_id", "def unique (a_data,a_column) :\n return list(__np.unique(a_data[a_column]))", "def get_unique(self):\n unique_values = len(self.df[self.col_name].unique())\n return unique_values", "def unique(self):\n num_rows = len(self._rows)\n if num_rows == 0:\n raise NoResults()\n elif num_rows > 1:\n raise MultipleResults(num_rows)\n cols = [col[0] for col in self.description]\n return OrderedDict(zip(cols, self._rows[0]))", "def getValuesForColumn(self, columnname):\n return list(self.abundance_df[columnname].unique())", "def distinct(self, columns, **kwds_filter):\n if not nonstringiter(columns):\n columns = (columns,)\n self._assert_columns_exist(columns)\n select_clause = [self._normalize_column(x) for x in columns]\n select_clause = ', '.join(select_clause)\n select_clause = 'DISTINCT ' + select_clause\n\n cursor = self._execute_query(select_clause, **kwds_filter)\n return CompareSet(cursor)", "def get_tuples(self):\n pattern = list()\n for gi in self.gradual_items:\n temp = tuple([gi.attribute_col, gi.symbol])\n pattern.append(temp)\n return pattern", "def possible_values(self) -> Set[int]:\n return {x for x in 
SudokuTile.UNIVERSE_OF_TILE_VALUES if\n (x not in self._row) and\n (x not in self._column) and\n (x not in self._box)}", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)", "def distinct(iterable):\n\n def distincting(iterable_):\n set_of_distinct_values = set()\n for i in iterable_:\n if i not in set_of_distinct_values:\n set_of_distinct_values.add(i)\n yield i\n\n return distincting(iterable)", "def triples(self):\n _, c = self.get_column(0)\n headers = self.headers()\n\n vert = 0\n for y in c:\n for x in headers[1:]:\n _, col = self.get_column(x)\n yield x, y, col[vert]\n vert += 1", "def duplicate_columns(df: pd.DataFrame) -> Iterator[Tuple[str, str]]:\n columns = set(df.columns)\n for column1, column2 in combinations(df.columns, 2):\n if column1 not in columns or column2 not in columns:\n continue\n series1 = df[column1]\n series2 = df[column2]\n # convert dtypes to strings since numpy raises \"TypeError: data type not understood\"\n # when comparing to pandas's dtypes extensions\n if str(series1.dtype) == str(series2.dtype) and all(series1 == series2):\n yield column2, column1\n columns.remove(column2)\n return df", "def unique_columns(inval, axis=0):\n # this is a nice trick taking advantage of structed arrays where each row or column\n # is the value, so returl unique works\n # np.ascontiguousarray() is to be really sure it will work\n if axis == 0:\n val = np.ascontiguousarray(np.transpose(inval))\n else:\n val = np.ascontiguousarray(inval)\n b = val.view(np.dtype((np.void, val.dtype.itemsize * val.shape[1])))\n unique_a = np.unique(b).view(val.dtype).reshape(-1, val.shape[1])\n return unique_a", "def iteritems(self):\r\n for name in self.table.sequence:\r\n if name not in self.table.exclude:\r\n yield (name, self.columns[name])", "def flat(self):\n if len(self.description) != 1:\n msg = \"Results set with %d cols cannot be treated as flat\"\n raise TypeError(msg % len(self.description))\n return [r[0] for r in self._rows]", "def _findUniqueMappingValues(mapping):\n uniqueMappingValues = set()\n for entries in viewvalues(mapping):\n if len(entries) == 1:\n uniqueMappingValues.update(entries)\n return uniqueMappingValues", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def columns(self) -> java.util.Collection:\n ...", "def columns_values(self):\r\n return self._columns_values", "def get_unique_values(df, colname):\n\treturn list(dict(df[colname].value_counts(ascending=False, dropna=False)).keys())", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def distinct(self, columns, **kwds_filter):\n fn = lambda source: source.distinct(columns, **kwds_filter)\n results = (fn(source) for source in self._sources)\n results = itertools.chain(*results)\n return CompareSet(results)", "def unique_drugs(self):\n if self.results is not None:\n return tuple(self.results['drug'].unique())", "def items(self):\r\n for column in self.table.columns:\r\n yield (column, self[column.name])", "def __hashvalue__(self):\n return (tuple((column, self[column])\n for column in filter(lambda x: x != \"__psvcolumnstracker__\", sorted(self.keys()))))", "def _get_set_pairs(uprow, types):\n pairs = []\n for key, val in uprow.items():\n pairs.append(\"{0}={1}\".format(key, _for_pgsql(val, types[key])))\n return 
\", \".join(pairs)", "def _get_columns(source):\n return _get_tuple(source)", "def columns(self):\n return set(self.native_schema)", "def unique_rows(data):\n udict = dict()\n for row in range(len(data)):\n row_data = tuple(data[row,:])\n if not row_data in udict:\n udict[row_data] =row\n uInd=udict.values()\n uRows=np.c_[udict.keys()]\n return uInd, uRows", "def columns(self) -> List[List]:\n return list(map(list, zip(*self.rows)))", "def get_values(self, col) :\n\n if col not in self.cols :\n raise Exception('Column %s not in data' % col)\n\n select_sql = 'SELECT \"%s\" FROM \"%s\" ORDER BY __ROWID ASC' % (col, self.name)\n cur = self.con.cursor()\n cur.execute(select_sql)\n vs = cur.fetchall()\n return [v[0] for v in vs]", "def values(self):\n return [row.values for row in self]", "def values(self, cols=None) :\n\n if not cols or cols == self.cols :\n return self.data\n\n def extractor(col) :\n if col in self.cols :\n return self.data[self.cols.index(col)]\n else :\n return None\n \n return [extractor(col) for col in cols]", "def stations():\r\n for row in rows():\r\n yield tuple(row)", "def levshape(self) -> Tuple[int, ...]:\n result = self._internal.spark_frame.agg(\n *(F.countDistinct(c) for c in self._internal.index_spark_columns)\n ).collect()[0]\n return tuple(result)", "def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def unique_interactors(self):\n if self.results is not None:\n return tuple(self.results['interactor_name'].unique())", "def cols(self):\n for col in range(self.min_col, self.max_col+1):\n yield tuple('%s%d' % (get_column_letter(col), row)\n for row in range(self.min_row, self.max_row+1))", "def GetColumnIterator(self):\n return self.columns.__iter__()", "def get_unique_tags(df):\n tags = []\n\n for index, row in df.iterrows():\n tags = list(set(tags + ast.literal_eval(row.tags)))\n\n pdb.set_trace()", "def unique(self):\n return self.element_wise(lambda seqs: list(set(seqs)))", "def values(self):\n\t\treturn tuple(self.dist.values())", "def distinct(self, key):\n return self.database.command({'distinct': self.name,\n 'key': key})['values']", "def columns(self):\n return list(self._scala.observationColumns())", "def iterrows(self):\n return (self.Row(*row_vals) for row_vals in izip(*self.columns))", "def extractColumnKeys(array):\n columnKeys = set()\n for i in list(array.values()):\n columnKeys |= set(i.keys())\n return sorted(list(columnKeys), key=cmp_to_key(compareFn))", "def get_unique_columns(table):\n for constraint in table.constraints:\n if isinstance(constraint, sqla.UniqueConstraint):\n return constraint.columns\n # We should never get this far.\n # All tables in my db should have unique constraints\n assert False", "def getUniqueValues(self, colName):\n if not colName in six.iterkeys(self.nameToCol):\n raise (\n PE.PyAValError(\n \"No column named '\" + str(colName) + \"'.\",\n solution=\"Choose one of: \" + str(self.nameToCol.keys()),\n )\n )\n\n return np.unique(self.dat[::, self.nameToCol[colName]])", "def list_unique_values(series: pd.Series) -> str:\n return \", \".join(set(str(v) for v in pd.Series.unique(series)))", "def filtered_xyz(self) -> tuple[int, int, int]:", "def uninformative_columns(df: pd.DataFrame) -> 
Iterator[Tuple[str, Any]]:\n # TODO: support DataFrames where df.columns is a MultiIndex\n for column in df.columns:\n series = df[column]\n series_iter = iter(df[column])\n try:\n exemplar = next(series_iter)\n except StopIteration:\n # no rows => nothing to check :|\n continue\n # nan is a special case, since np.nan != np.nan\n if series.dtype == np.float and np.isnan(exemplar):\n if all(np.isnan(item) for item in series_iter):\n yield column, exemplar\n elif all(item == exemplar for item in series_iter):\n yield column, exemplar", "def components(self) -> Iterable[Mapping[T, Set[T]]]:", "def cartesian(*colls):\n if len(colls) == 0:\n return ((),)\n s = set([])\n for e in colls[0]:\n for cp in cartesian(*colls[1:]):\n s.add((e,) + cp)\n return s", "def cells(self):\n return ((row, col) for row in self.rows for col in self.cols)", "def create_value_set(self, col):\n\n value_set = set()\n\n for df in self:\n value_set.update(df[col])\n return value_set", "def values(self):\n return [entry.value for entry in self.table if entry.value is not None]", "def get_value_tuple(self):\r\n\r\n retval = tuple()\r\n for val in self.VALUES:\r\n retval += (getattr(self, val),)\r\n return retval", "def get_user_list(dataset):\n res = dataset\\\n .map(lambda x: x[0])\\\n .collect()\n return list(set(res))", "def _unique(iterable):\n return list(dict.fromkeys(iterable))", "def get_annotation_values(nested_annotation_column1, nested_annotation_column2):\n flat_list1 = [item for sublist in nested_annotation_column1 for item in sublist]\n flat_list2 = [item for sublist in nested_annotation_column2 for item in sublist]\n uniques = set(flat_list1 + flat_list2)\n return(list(uniques))", "def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp", "def get_all_columns_name(input_glob):\n reader = tf.python_io.TableReader(input_glob,\n selected_cols=\"\",\n excluded_cols=\"\",\n slice_id=0,\n slice_count=1,\n num_threads=0,\n capacity=0)\n schemas = reader.get_schema()\n return set([col_name for col_name, _, _ in schemas])", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n values: list[str] = []\n for row in table:\n values.append(row[column])\n return values", "def unique(self):\n return self.d_series.map_partitions(\n lambda s: s.list.unique(), meta=self.d_series._meta\n )", "def column_names(self) -> Tuple[List[str], List[str]]:\n return (\n np.array(self._dataset_columns)[self._input_column_indices].tolist(),\n np.array(self._dataset_columns)[self._output_column_indices].tolist(),\n )", "def deduplicate(values, column_names=False, separator='_'):\n final_values = []\n\n for i, value in enumerate(values):\n if column_names:\n if not value:\n new_value = letter_name(i)\n warn_unnamed_column(i, new_value)\n elif isinstance(value, str):\n new_value = value\n else:\n raise ValueError('Column names must be strings or None.')\n else:\n new_value = value\n\n final_value = new_value\n duplicates = 0\n\n while final_value in final_values:\n final_value = new_value + separator + str(duplicates + 2)\n duplicates += 1\n\n if column_names and duplicates > 0:\n warn_duplicate_column(new_value, final_value)\n\n final_values.append(final_value)\n\n return tuple(final_values)", "def __iter__(self) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in self._table.items())", "def apply(self):\n next_one = 
super().apply()\n next_both = set()\n\n for tup in next_one:\n if (tup[1], tup[0]) in next_one:\n next_both.add(tup)\n\n return list(next_both)", "def flat(self):\n flat = []\n for kind, id in self.__pairs:\n flat.append(kind)\n flat.append(id)\n return tuple(flat)", "def get_unique_hstore_keys(\n session: 'Session',\n column: 'Column[dict[str, Any]]'\n) -> set[str]:\n\n base = session.query(column.keys()).with_entities( # type:ignore\n sqlalchemy.func.skeys(column).label('keys'))\n\n query = sqlalchemy.select(\n [sqlalchemy.func.array_agg(sqlalchemy.column('keys'))],\n distinct=True\n ).select_from(base.subquery())\n\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()", "def values(self):\n return [kvp.value for kvp in self.keyvaluepair_set.all()]", "def columns(self):\n return self.__column_list", "def column_values(table: list[dict[str, str]], column_name: str) -> list[str]:\n column_values: list[str] = []\n for row in table:\n item: str = row[column_name]\n column_values.append(item)\n return column_values", "def rows(self):\n if self._cached_rows is None:\n self._cached_rows = tuple(self.iterrows())\n return self._cached_rows", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def cells(self):\n return chain.from_iterable(self.cols)", "def get_unit_pairs(self) -> AbstractSet[Tuple[Variable, Variable]]:\n unit_pairs = set()\n for variable in self._variables:\n unit_pairs.add((variable, variable))\n productions = [x\n for x in self._productions\n if len(x.body) == 1 and isinstance(x.body[0], Variable)]\n productions_d = get_productions_d(productions)\n to_process = list(unit_pairs)\n while to_process:\n var_a, var_b = to_process.pop()\n for production in productions_d.get(var_b, []):\n temp = (var_a, production.body[0])\n if temp not in unit_pairs:\n unit_pairs.add(temp)\n to_process.append(temp)\n return unit_pairs", "def _findUniqueMappingKeys(mapping):\n\n uniqueMappingKeys = set()\n for key, entries in viewitems(mapping):\n if len(entries) == 1:\n uniqueMappingKeys.add(key)\n return uniqueMappingKeys", "def get_unique_values(local_data, attr):\n\tvalues = []\n\tfor element in local_data:\n\t\tif element[attr] not in values:\n\t\t\tvalues.extend([element[attr]])\n\treturn values", "def items(self) -> tuple[tuple[Any, Any], ...]: # type: ignore\n return tuple(zip(self.keys(), self.values()))", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def data(self) -> List[List[Any]]:\n\n column_wise = [column.values for column in self.plaincolumns]\n row_wise = [list(row) for row in zip(*column_wise)]\n\n return row_wise" ]
[ "0.6917783", "0.67479295", "0.66289616", "0.6525466", "0.64980805", "0.645801", "0.6347434", "0.62989324", "0.6135534", "0.60231894", "0.6020389", "0.59943646", "0.59631413", "0.59360003", "0.5925494", "0.5885717", "0.5880182", "0.5871802", "0.584933", "0.5826537", "0.5789985", "0.5784337", "0.5781932", "0.5765357", "0.57436556", "0.5743464", "0.5736202", "0.57331914", "0.5715885", "0.5706685", "0.5677465", "0.5656193", "0.5640747", "0.5637581", "0.56338245", "0.56163913", "0.56124043", "0.5605796", "0.5605775", "0.5605347", "0.5595213", "0.5593701", "0.55879325", "0.5571624", "0.5571085", "0.55540675", "0.55529296", "0.55527943", "0.5516016", "0.5492433", "0.547368", "0.5467097", "0.54663706", "0.54642797", "0.54636073", "0.5458387", "0.54399455", "0.5432757", "0.5428883", "0.54133654", "0.54054683", "0.5394881", "0.5386806", "0.5382701", "0.53753036", "0.5365778", "0.53493184", "0.53424156", "0.5338048", "0.5334372", "0.5328516", "0.53259414", "0.5308617", "0.52925766", "0.5288799", "0.5262374", "0.5256223", "0.52417624", "0.5241709", "0.52408946", "0.52408034", "0.5237733", "0.52249336", "0.52235466", "0.52224326", "0.52121156", "0.5210792", "0.5205867", "0.5203657", "0.5203364", "0.5196347", "0.5194824", "0.5194824", "0.51936096", "0.5184788", "0.5180136", "0.51702714", "0.5167268", "0.5162654", "0.51606053" ]
0.5745467
24
Aggregates values using SQL function select, e.g., 'COUNT()', 'SUM(col1)', etc.
def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):
        # TODO: _sql_aggregate has grown messy after a handful of
        # iterations; look to refactor it in the future to improve
        # maintainability.
        if not _is_nsiterable(sql_function):
            sql_function = (sql_function,)

        if keys is None:
            sql_function = ', '.join(sql_function)
            cursor = self._execute_query(sql_function, **kwds_filter)
            result = cursor.fetchone()
            if len(result) == 1:
                return result[0]
            return result  # <- EXIT!

        if not _is_nsiterable(keys):
            keys = (keys,)
        group_clause = [self._normalize_column(x) for x in keys]
        group_clause = ', '.join(group_clause)

        select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))
        trailing_clause = 'GROUP BY ' + group_clause

        cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)
        pos = len(sql_function)
        if pos > 1:
            # Gets values by slicing (i.e., row[-pos:]).
            iterable = ((row[:-pos], row[-pos:]) for row in cursor)
        else:
            # Gets value by index (i.e., row[-pos]).
            iterable = ((row[:-pos], row[-pos]) for row in cursor)
        return CompareDict(iterable, keys)
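A minimal usage sketch of the _sql_aggregate document above (illustrative only: the 'source' object and the 'region'/'amount' column names are hypothetical assumptions, and the sample outputs are made-up; only the signature, the keys behavior, and the CompareDict return type follow the code):

# Hypothetical: assumes 'source' is an instance of the class defining
# _sql_aggregate, backed by a table with columns 'region' and 'amount'.

# keys=None: a single SQL function returns a scalar value.
total = source._sql_aggregate('SUM(amount)')                  # e.g. 1050

# With keys, rows are grouped and a CompareDict maps key -> value.
counts = source._sql_aggregate('COUNT(*)', keys='region')     # e.g. {'north': 12, 'south': 30}

# Several functions at once yield a tuple of values per group.
stats = source._sql_aggregate(('MIN(amount)', 'MAX(amount)'), keys='region')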
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aggregate_query(self):\n raise NotImplementedError", "def get_select(self):\n if self.is_count_qry is True:\n col = (Col(\"__count\", None), (\"COUNT(*)\", []), None)\n klass_info = {\"model\": self.query.model, \"select_fields\": [\"__count\"]}\n annotations = dict()\n return (col,), klass_info, annotations\n else:\n return super().get_select()", "def aggregate(self, func, *columns):\n if not columns:\n columns = ['*']\n\n self.aggregate_ = {\n 'function': func,\n 'columns': columns\n }\n\n previous_columns = self.columns\n\n results = self.get(*columns)\n\n self.aggregate_ = None\n\n self.columns = previous_columns\n\n if len(results) > 0:\n return dict((k.lower(), v) for k, v in results[0].items())['aggregate']", "def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):\n # TODO: _sql_aggregate has grown messy after a handful of\n # iterations look to refactor it in the future to improve\n # maintainability.\n if not nonstringiter(sql_function):\n sql_function = (sql_function,)\n\n if keys == None:\n sql_function = ', '.join(sql_function)\n cursor = self._execute_query(sql_function, **kwds_filter)\n result = cursor.fetchone()\n if len(result) == 1:\n return result[0]\n return result # <- EXIT!\n\n if not nonstringiter(keys):\n keys = (keys,)\n group_clause = [self._normalize_column(x) for x in keys]\n group_clause = ', '.join(group_clause)\n\n select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))\n trailing_clause = 'GROUP BY ' + group_clause\n\n cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)\n pos = len(sql_function)\n iterable = ((row[:-pos], getvals(row)) for row in cursor)\n if pos > 1:\n # Gets values by slicing (i.e., row[-pos:]).\n iterable = ((row[:-pos], row[-pos:]) for row in cursor)\n else:\n # Gets value by index (i.e., row[-pos]).\n iterable = ((row[:-pos], row[-pos]) for row in cursor)\n return CompareDict(iterable, keys)", "def aggregate(self, **named_exprs):\n agg_base = self.columns[0] # FIXME hack\n\n named_exprs = {k: to_expr(v) for k, v in named_exprs.items()}\n strs = []\n base, _ = self._process_joins(*named_exprs.values())\n for k, v in named_exprs.items():\n analyze(v, self._global_indices, {self._row_axis}, set(self.columns))\n replace_aggregables(v._ast, agg_base)\n strs.append(v._ast.to_hql())\n\n result_list = base._jkt.query(jarray(Env.jvm().java.lang.String, strs))\n ptypes = [Type._from_java(x._2()) for x in result_list]\n\n annotations = [ptypes[i]._convert_to_py(result_list[i]._1()) for i in range(len(ptypes))]\n d = {k: v for k, v in zip(named_exprs.keys(), annotations)}\n return Struct(**d)", "def agg_cursor(self,table_name,agg_functions_list,group:list=None,sort_by=None):\n collection=self.db[table_name]\n \n if group is None:\n group=['all']\n group=list(map(str,group))\n grouper={el:'$'+el for el in group}\n agg_pipe={\"_id\": grouper}\n \n for el in agg_functions_list:\n func,apply_column,agg_column_name=el\n if func=='count':\n agg_pipe[agg_column_name]={\"$sum\": 1}\n elif func=='sum':\n agg_pipe[agg_column_name]= {\"$sum\": \"${}\".format(apply_column)}\n else:\n pass\n\n pipeline=[{\"$group\": agg_pipe}]\n #print(pipeline)\n if sort_by:\n pipeline=\tpipeline+[{\"$sort\": SON([el for el in sort_by])}]\n\n return collection.aggregate(pipeline)", "def aggregate(self, arg):\n return self.agg(arg)", "def grouped_sql(table, columns):\n\n sql = \\\n \" ( \" + \\\n \"SELECT \" + \",\".join(columns) + \", COUNT(*) \" + \\\n \"AS COUNT \" + \\\n \" FROM \" + \",\".join(table) + \\\n \" GROUP 
BY \" + \",\".join(columns) + \\\n \" ) \"\n return sql", "def __add_select_and_aggregate(self, select, groupby, where, window, tree):\r\n tuple_descriptor = TupleDescriptor()\r\n fields_to_verify = []\r\n all_fields = chain(select, where)\r\n if groupby != ['']:\r\n groupby = groupby[1:][0]\r\n all_fields = chain(all_fields, groupby)\r\n self.__remove_all(groupby, QueryTokens.EMPTY_STRING) \r\n for field in all_fields:\r\n (field_descriptors, verify) = self.__parse_field(field, self.twitter_td, True, False)\r\n fields_to_verify.extend(verify)\r\n tuple_descriptor.add_descriptor_list(field_descriptors)\r\n for field in fields_to_verify:\r\n self.__verify_and_fix_field(field, tuple_descriptor)\r\n \r\n # at this point, tuple_descriptor should contain a tuple descriptor\r\n # with fields/aliases that are correct (we would have gotten an\r\n # exception otherwise. built select_descriptor/group_descriptor\r\n # from it\r\n select_descriptor = TupleDescriptor()\r\n group_descriptor = TupleDescriptor()\r\n aggregates = []\r\n for field in select:\r\n (field_descriptors, verify) = self.__parse_field(field, tuple_descriptor, True, True)\r\n select_descriptor.add_descriptor_list(field_descriptors)\r\n if field_descriptors[0].field_type == FieldType.AGGREGATE:\r\n aggregates.append(field_descriptors[0])\r\n # add WHERE clause fields as invisible attributes\r\n for field in where:\r\n (field_descriptors, verify) = self.__parse_field(field, tuple_descriptor, True, False)\r\n select_descriptor.add_descriptor_list(field_descriptors)\r\n if len(aggregates) > 0:\r\n if window == None:\r\n raise QueryException(\"Aggregate expression provided with no WINDOW parameter\")\r\n for field in groupby:\r\n (field_descriptors, verify) = self.__parse_field(field, tuple_descriptor, True, True)\r\n group_descriptor.add_descriptor_list(field_descriptors)\r\n for alias in select_descriptor.aliases:\r\n select_field = select_descriptor.get_descriptor(alias)\r\n group_field = group_descriptor.get_descriptor(alias)\r\n if group_field == None and \\\r\n select_field.field_type != FieldType.AGGREGATE and \\\r\n select_field.visible:\r\n raise QueryException(\"'%s' appears in the SELECT but is is neither an aggregate nor a GROUP BY field\" % (alias))\r\n tree = operators.GroupBy(tree, group_descriptor, aggregates, window)\r\n tree.assign_descriptor(select_descriptor)\r\n return tree", "def analysis():\r\n data_frame = load_from_mysql('core', 'BDFMHQAA_D')\r\n data_frame.registerTempTable('business')\r\n gd = data_frame.select('AA03CSNO', 'AA08PRON')\r\n\r\n def merge_count(a, b):\r\n r = {}\r\n for p, c in a.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n for p, c in b.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n return r\r\n result = gd.map(lambda row: (row.AA03CSNO, {row.AA08PRON: 1})).reduceByKey(merge_count)\r\n pron_count = gd.map(lambda row: (row.AA08PRON, 1)).reduceByKey(lambda a, b: a + b)\r\n\r\n # result = gd.map(lambda row: (row.AA03CSNO, row.AA08PRON))\r\n print(result.take(10))\r\n print('----------------pron count-----------------')\r\n print(pron_count.collect())\r\n\r\n print(gd)", "def aggregate(self, *args, **kwargs):\n if self.query.distinct_fields:\n raise NotImplementedError(\"aggregate() + distinct(fields) not implemented.\")\n self._validate_values_are_expressions(\n (*args, *kwargs.values()), method_name=\"aggregate\"\n )\n for arg in args:\n # The default_alias property raises TypeError if default_alias\n # can't be set automatically or AttributeError if it isn't 
an\n # attribute.\n try:\n arg.default_alias\n except (AttributeError, TypeError):\n raise TypeError(\"Complex aggregates require an alias\")\n kwargs[arg.default_alias] = arg\n\n return self.query.chain().get_aggregation(self.db, kwargs)", "def load_fact_traffic_violations_count_agg(cur,code):\n cur.execute(code)", "def _aggregate(self, method_name, *args, **kwargs):\n qc_result = self._call_qc_method(method_name, *args, **kwargs)\n return self._dataframe.__constructor__(query_compiler=qc_result)", "def aggregator():\n return Aggregator(\n agg_col=\"col_a\", values_col=\"col_b\", aggregates=[\"min\", \"max\", \"avg\", \"sum\"]\n )", "def execQ2():\n # Put columns together\n frame = pan.DataFrame(data, columns=['Product', 'Amount'] )\n amount = frame.groupby(['Product']).count()\n return amount", "def get_counts(df,col_name):\n return df.groupBy(col_name).count().show()", "def aggregate(self, **named_exprs):\n agg_base = self._parent.columns[0] # FIXME hack\n\n named_exprs = {k: to_expr(v) for k, v in named_exprs.items()}\n\n strs = []\n base, cleanup = self._parent._process_joins(*(tuple(v for _, v in self._groups) + tuple(named_exprs.values())))\n for k, v in named_exprs.items():\n analyze(v, self._parent._global_indices, {self._parent._row_axis}, set(self._parent.columns))\n replace_aggregables(v._ast, agg_base)\n strs.append('`{}` = {}'.format(k, v._ast.to_hql()))\n\n group_strs = ',\\n'.join('`{}` = {}'.format(k, v._ast.to_hql()) for k, v in self._groups)\n return cleanup(\n Table(self._hc, base._jkt.aggregate(group_strs, \",\\n\".join(strs), joption(self._npartitions))))", "def get_grouped_prod(all_customers_data, trans_column, prod_l_cat):\n return all_customers_data.select(trans_column, prod_l_cat)\\\n.groupBy(prod_l_cat).agg(F.countDistinct(trans_column).alias('hhds'))", "def aggregate_results(self):\n\n raise NotImplementedError", "def summarize(self, query=None, select=None,\n group_str=None, limit=0, order_by=None):\n # interpret none as all\n if not group_str:\n group_str = self.ALL\n\n # split group in case of multigroups\n groups = self.split_groups(group_str)\n\n # if select append groups to select\n if select:\n select = json.loads(select)\n if not isinstance(select, dict):\n raise ArgumentError('select argument must be a JSON dictionary'\n ', found: %s.' 
% select)\n select.update(dict(zip(groups, [1] * len(groups))))\n select = json.dumps(select)\n\n self.reload()\n dframe = self.dframe(query=query, select=select,\n limit=limit, order_by=order_by)\n\n return summarize(self, dframe, groups, group_str, query or select)", "def aggregateAll(exprs, typography):\n aggr = aggregateBySubject(aggregateByPredicate(exprs))\n return wrapStatement(typography, aggr)", "def agg(self, values, agg_func):\n assert len(values) > 0, \"Empty list of values\"\n f = agg_func.strip().lower()\n assert f in self.__AGGREGATIONS, \"Aggregation function \" + agg_func + \" is not valid\"\n\n ret = 0 # just to avoid \"Local variable might be referenced before assignment\" warning\n if f == self.__MAX:\n ret = max(values)\n elif f == self.__SUM:\n ret = sum(values)\n elif f == self.__AVG:\n ret = mean(values)\n return ret", "def aggregate_variable(estimate, code):\n estimates = [\n variable.estimates.get(division__code=code).estimate\n for variable in estimate.variable.label.variables.all()\n ]\n method = estimate.variable.label.aggregation\n if method == 's':\n aggregate = sum(estimates)\n elif method == 'a':\n aggregate = statistics.mean(estimates)\n elif method == 'm':\n aggregate = statistics.median(estimates)\n else:\n aggregate = None\n return aggregate", "def _sql_gen_intermediate_pi_aggregate(params, table_name=\"df_e\"):\n\n gamma_cols_expr = \", \".join(params._gamma_cols)\n\n sql = f\"\"\"\n select {gamma_cols_expr}, sum(match_probability) as expected_num_matches, sum(1- match_probability) as expected_num_non_matches, count(*) as num_rows\n from {table_name}\n group by {gamma_cols_expr}\n \"\"\"\n return sql", "def test_function(self):\n\n s = select([users,\n (users.c.user_id * 2).label('concat'),\n func.count(addresses.c.address_id).label('count')],\n users.c.user_id == addresses.c.user_id,\n group_by=[c for c in users.c]).alias('myselect')\n\n mapper(User, s)\n sess = create_session()\n l = sess.query(User).all()\n for u in l:\n print \"User\", u.user_id, u.user_name, u.concat, u.count\n assert l[0].concat == l[0].user_id * 2 == 14\n assert l[1].concat == l[1].user_id * 2 == 16", "def agg_statistics(df, uid, value, agg_func, suffix=''):\n suffix = '_' + suffix if suffix else suffix\n tmp = df[uid + value].groupby(uid).agg(agg_func)\n tmp.columns = ['_'.join(col) for col in tmp.columns]\n tmp.columns = [col + suffix for col in tmp.columns]\n return tmp.reset_index(drop=False)", "def agg(self):\n result = float(self.column.__getattribute__(self.formula_name)())\n return self._value_to_dframe(result)", "def get_column_grouped_aggregations(column: str, agg_column: str) -> List[str]:\n config = current_app.config\n\n table_name = config['TABLE_NAME']\n db = get_db()\n\n # SQL AVG ignores NULL values\n aggregations = db.execute(f\"\"\"SELECT `{column}`, AVG({agg_column}) , COUNT(*)\n FROM {table_name}\n WHERE `{column}` IS NOT NULL\n GROUP BY `{column}`\"\"\"\n ).fetchall()\n\n return aggregations", "def agg(self, arg):\n # DataFrame{'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]})\n # a.groupby('a').agg('sum') -- applied on rest\n # a.groupby('a').agg(['sum', 'min']) -- both applied on rest\n # a.groupby('a').agg({'b': ['min', 'mean']}) -- applied on\n # TODO\n # a.groupby('a').aggregate( a= me['a'].mean(), b_min =me['b'].min(), b_mean=me['c'].mean()))\n # f1 = lambda x: x.quantile(0.5); f1.__name__ = \"q0.5\"\n # f2 = lambda x: x.quantile(0.75); f2.__name__ = \"q0.75\"\n # a.groupby('a').agg([f1, f2])\n\n res = {}\n for f, c in zip(self._key_fields, 
self._unzip_group_keys()):\n res[f.name] = c\n for agg_name, field, op in self._normalize_agg_arg(arg):\n res[agg_name] = self._apply1(field, op)\n return self._parent._fromdata(res, None)", "def fetch_aggregation(self):\n return None", "def _aggregate_func(self, aggregate):\n funcs = {\"sum\": add, \"min\": min, \"max\": max}\n func_name = aggregate.lower() if aggregate else 'sum'\n try:\n return funcs[func_name]\n except KeyError:\n raise TypeError(\"Unsupported aggregate: {}\".format(aggregate))", "def aggregate(self, func, field_path=None):\n return self._collection.aggregate(func, field_path)", "def data_count():\n\n parameters = request.form.to_dict()\n if not \"totalCol\" in parameters:\n parameters[\"totalCol\"] = \"datos.Total\"\n if not \"subTotalCol\" in parameters:\n parameters[\"subTotalCol\"] = \"datos.SubTotal\"\n try:\n cfdis = pay_service.find_agg(\n [\n {\n \"$match\": {\n parameters[\"fieldMatch\"]: parameters[\"user\"],\n \"datos.Fecha\": {\n \"$gte\": parameters[\"dateBegin\"],\n \"$lte\": parameters[\"dateEnd\"],\n },\n \"datos.Cancelado\": None,\n }\n },\n {\n \"$project\": {\n parameters[\"fieldMatch\"]: 1,\n \"count\": {\"$sum\": 1},\n \"total\": {\"$sum\": \"$\" + parameters[\"totalCol\"]},\n \"subTotal\": {\"$sum\": \"$\" + parameters[\"subTotalCol\"]},\n }\n },\n {\n \"$group\": {\n \"_id\": \"$\" + parameters[\"fieldMatch\"],\n \"count\": {\"$sum\": \"$count\"},\n \"total\": {\"$sum\": \"$total\"},\n \"subTotal\": {\"$sum\": \"$subTotal\"},\n }\n },\n ]\n )\n if cfdis is None or len(cfdis) == 0:\n resp = make_response(\n dumps({\"status\": True, \"data\": []}),\n 200,\n )\n resp = make_response(dumps({\"status\": True, \"data\": cfdis}), 200)\n except Exception as e:\n app.logger.error(e)\n resp = make_response(\n dumps(\n {\"status\": False, \"message\": \"No se encontro ningun recibo de nomina\"}\n ),\n 404,\n )\n\n resp.headers[\"Content-Type\"] = \"application/json\"\n return resp", "def aggregate(self, aggregate, column):\n self._aggregates += ((aggregate, column),)", "def aggregate(self, *args):\n self._aggregate.extend(args)\n cypher = self.cypher()\n value = self.corpus.execute_cypher(cypher, **self.cypher_params())\n if self._group_by or any(not x.collapsing for x in self._aggregate):\n return list(value)\n elif len(self._aggregate) > 1:\n return list(value)[0]\n else:\n return list(list(value)[0].values())[0]", "def aggregate(self, agpath):\n return data.Aggregate(self, agpath)", "def aggregate_statistics(self, stat_col, stat_agg):\n self.module.aggregate_statistics(stat_col, stat_agg)", "def get_totals_query(self, property_statistics):\n query = f\"\"\"\nSELECT (COUNT(*) as ?count) WHERE {{\n ?entity {property_statistics.selector_sparql}\n FILTER(EXISTS {{{self.get_filter_for_info()}\n }})\n}}\n\"\"\"\n return query", "def aggregate(self, applyfunc):\n if hasattr(applyfunc,'__iter__'):\n retVal = self._aggregate_multiple(applyfunc)\n else:\n try:\n result = self._aggregate_simple(applyfunc)\n except Exception:\n result = self._aggregate_named(applyfunc)\n\n retVal = Series(result)\n\n return retVal", "def summary():\n\n summary_result = session.query(Summary.Count, Summary.Total).all()\n session.close()\n\n # Return a List of Column Names (Sample Names)\n return jsonify(summary_result)", "def count(self, query):", "def count_summary(df, group_columns: list, count_columns: list=None, fn: str='sum'):\n \n if not count_columns:\n count_columns = df.columns.tolist()\n elif isinstance(count_columns, str):\n count_columns = [count_columns]\n \n summary_df 
= pd.DataFrame()\n for _ in range(len(group_columns)):\n if fn == 'sum':\n current_df = df.groupby(by=group_columns)[count_columns].agg(fn, numeric_only=True)\n else:\n current_df = df.groupby(by=group_columns)[count_columns].agg(fn)\n current_df.reset_index(inplace=True)\n if summary_df.empty:\n summary_df = current_df.copy()\n else:\n summary_df = pd.concat([summary_df, current_df])\n # increase group size\n group_columns.pop()\n return summary_df", "def count_summary(df, group_columns: list, count_columns: list=None, fn: str='sum'):\n \n if not count_columns:\n count_columns = df.columns.tolist()\n elif isinstance(count_columns, str):\n count_columns = [count_columns]\n \n summary_df = pd.DataFrame()\n for _ in range(len(group_columns)):\n if fn == 'sum':\n current_df = df.groupby(by=group_columns)[count_columns].agg(fn, numeric_only=True)\n else:\n current_df = df.groupby(by=group_columns)[count_columns].agg(fn)\n current_df.reset_index(inplace=True)\n if summary_df.empty:\n summary_df = current_df.copy()\n else:\n summary_df = pd.concat([summary_df, current_df])\n # increase group size\n group_columns.pop()\n return summary_df", "def get_extra_select(self, *args, **kawrgs):\n if self.is_count_qry is True:\n return tuple()\n else:\n return super().get_extra_select(*args, **kawrgs)", "def _aggregate(self, method, column, keys=None, **kwds_filter):\n fn = lambda src: getattr(src, method)(column, keys, **kwds_filter)\n results = (fn(source) for source in self._sources) # Perform aggregation.\n\n if not keys:\n return sum(results) # <- EXIT!\n\n total = defaultdict(lambda: 0)\n for result in results:\n for key, val in result.items():\n total[key] += val\n return CompareDict(total, keys)", "def _agg(self, df, period):\n\n df = df.resample(period)['author'].agg(['count'])\n return df", "def aggregate_value(request):\n try:\n model = request.GET.get('model', 'item')\n start_date = request.GET.get('startDate', None)\n end_date = request.GET.get('endDate', None)\n\n items = __getQuerysetGivenInterval(model, start_date, end_date)\n\n agg_values = list(items.values('documented_at')\n .annotate(\n total_value=Sum(\n F('value')*F('quantity'),\n output_field=FloatField())))\n result = {'result': __castDecimalToFloat(agg_values)}\n\n return JsonResponse(result, status=200)\n except BaseException as e:\n print(e.args)\n return HttpResponseBadRequest()", "def auto_agg(sco_type, prop, col_type):\n\n # Don't aggregate certain columns; ignore them\n last = get_last(prop)\n if last in ['x_root', 'x_contained_by_ref', 'type', 'id']:\n return None\n\n if prop == 'number_observed':\n return 'SUM(\"number_observed\") AS \"number_observed\"'\n elif prop in ['first_observed', 'start']:\n return f'MIN(\"{prop}\") AS \"{prop}\"'\n elif prop in ['last_observed', 'end']:\n return f'MAX(\"{prop}\") AS \"{prop}\"'\n\n if ((sco_type == 'network-traffic' and prop.endswith('_port'))\n or (sco_type == 'process' and prop.endswith('pid'))):\n agg = f'COUNT(DISTINCT \"{prop}\")'\n alias = f'\"unique_{prop}\"'\n elif col_type.lower() in ['integer', 'bigint']:\n agg = f'AVG(\"{prop}\")'\n alias = f'\"mean_{prop}\"'\n else:\n agg = f'COUNT(DISTINCT \"{prop}\")'\n alias = f'\"unique_{prop}\"'\n\n if len(alias) > 63:\n # PostgreSQL has a limit of 63 chars per identifier\n return None\n\n return f'{agg} AS {alias}'", "def _aggregate(group_df):\n out = {}\n for col in data_columns:\n # The name of the error column (if it exists)\n error_col = f\"{col}_moe\"\n\n # remove any NaN rows\n subset = group_df.dropna(subset=[col], 
how=\"any\")\n\n # aggregat if we had any rows left\n if len(subset):\n\n # column values, margin of error (if it exists)\n args = np.column_stack(\n [subset[col], subset.get(error_col, np.zeros(len(subset)))]\n )\n\n # do the aggregation\n aggval, moe = cda.approximate_sum(*args)\n else:\n aggval = moe = np.nan\n\n # store\n out[col] = aggval\n if error_col in subset.columns:\n out[f\"{col}_moe\"] = moe\n\n out[\"geometry\"] = group_df.geometry.unary_union\n return pd.Series(out)", "def _aggregate(self, method_name, *args, as_index=None, **kwargs):\n res = self._groupby_obj._wrap_aggregation(\n qc_method=type(self._query_compiler).groupby_rolling,\n numeric_only=False,\n agg_args=args,\n agg_kwargs=kwargs,\n agg_func=method_name,\n rolling_kwargs=self.rolling_kwargs,\n )\n\n if as_index is None:\n as_index = self._as_index\n\n if not as_index:\n res = res.reset_index(\n level=[i for i in range(len(self._groupby_obj._internal_by))],\n drop=False,\n )\n\n return res", "def aggregate_field_count(self, field, projection={\"parameter\": 1},\n match={\"parameter.observed_name\": \"Ki\"},\n unwind=None, group={\"$group\": {\"count\": {\"$sum\": 1}}}):\n result = []\n if projection is not None:\n result.append({\"$project\": projection})\n if match is not None:\n result.append({\"$match\": match})\n if unwind is not None:\n result.append(unwind)\n if group is not None:\n group[\"$group\"][\"_id\"] = \"$.{}\".format(field)\n result.append(group)\n return result", "def group_values(df,col1,agg1):\r\n grouping=df.groupby(col1).agg(agg1)\r\n return grouping", "def _aggregate(self, method, column, keys=None, **kwds_filter):\n unwrap_src = self.__wrapped__\n unwrap_col = self._unwrap_columns(column)\n unwrap_keys = self._unwrap_columns(keys)\n try:\n unwrap_flt = self._unwrap_filter(kwds_filter)\n except _FilterValueError:\n if keys:\n result = CompareDict({}, keys)\n else:\n result = 0\n return result # <- EXIT!\n\n # If all *columns* are missing, build result of missing values.\n if not unwrap_col:\n distinct = self.distinct(keys, **kwds_filter)\n result = ((key, 0) for key in distinct)\n return CompareDict(result, keys) # <- EXIT!\n\n # Get method ('sum' or 'count') and perform aggregation.\n aggregate = getattr(unwrap_src, method)\n result = aggregate(unwrap_col, unwrap_keys, **unwrap_flt)\n\n rewrap_col = self._rewrap_columns(unwrap_col)\n rewrap_keys = self._rewrap_columns(unwrap_keys)\n return self._rebuild_comparedict(result, rewrap_col, column,\n rewrap_keys, keys, missing_col=0)", "def getAggregations(self,table,field,forQuery=False):\n if forQuery:\n res = db.session.query(table,db.func.stdev(field).label(\"stdev\"),db.func.max(field).label(\"max\"),db.func.min(field).label(\"min\"),db.func.sum(field).label(\"sum\"),db.func.avg(field).label(\"avg\")).filter(field!='').outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeVideoMeta.id)).filter_by(youtube_query_id=self.id)\n else:\n res = db.session.query(table,db.func.stdev(field).label(\"stdev\"),db.func.max(field).label(\"max\"),db.func.min(field).label(\"min\"),db.func.sum(field).label(\"sum\"),db.func.avg(field).label(\"avg\")).filter(field!='')\n row = res.one()\n return row", "def _aggregate(group_df, sampling_percentage=5 * 2.5):\n out = {}\n dist = []\n total_count = 0\n for i, col in enumerate(columns):\n\n n = group_df[col].sum()\n total_count += n\n dist.append(dict(min=bins[i][0], max=bins[i][1], n=n))\n\n # only aggregate if we have data!\n if total_count:\n aggval, moe = cda.approximate_median(\n dist, 
sampling_percentage=sampling_percentage\n )\n else:\n aggval = np.nan\n moe = np.nan\n\n result = {}\n result[\"median\"] = aggval\n result[\"median_moe\"] = moe\n result[\"geometry\"] = group_df.geometry.unary_union\n\n return pd.Series(result)", "def get_total_trans(all_customers_data, trans_column):\n return all_customers_data.select(trans_column).distinct().count()", "def sum(self, column):\n return self.aggregate('sum', *[column])", "def _aggregate(modelclass, window_date, supply_point, base_supply_points, fields,\n additonal_query_params=None):\n additonal_query_params = additonal_query_params or {}\n additonal_query_params[\"date\"] = window_date\n return _aggregate_raw(modelclass, supply_point, base_supply_points, fields, \n additonal_query_params)", "def random_aggregation_query(\n self, collection_name: str, groupby: int = 1, metrics: int = 1\n ):\n schema = self.collection_schema(collection_name)\n full_aggregation_query = {\"groupby\": [], \"metrics\": []}\n for s in schema:\n if schema[s] == \"text\":\n full_aggregation_query[\"groupby\"].append(\n {\"name\": s, \"field\": s, \"agg\": \"texts\"}\n )\n elif schema[s] == \"numeric\":\n full_aggregation_query[\"metrics\"].append(\n {\"name\": s, \"field\": s, \"agg\": \"avg\"}\n )\n return {\n \"groupby\": random.sample(full_aggregation_query[\"groupby\"], groupby),\n \"metrics\": random.sample(full_aggregation_query[\"metrics\"], metrics),\n }", "def aggregate(self, xs: List[Tensor]):\n if self.aggr == \"concat\":\n return torch.cat(xs, dim=-1)\n\n x = torch.stack(xs, dim=-1)\n if self.aggr == \"add\":\n return x.sum(dim=-1)\n elif self.aggr == \"mean\":\n return x.mean(dim=-1)\n elif self.aggr == \"max\":\n return x.max(dim=-1)[0]\n elif self.aggr == \"mul\":\n return x.prod(dim=-1)[0]", "def _get_attribute_functions(self, attributes):\n subqueries = []\n columns = []\n for attr in attributes:\n function = attributes[attr]\n if function == 'sum':\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n cast(self.db_value.value, Float).label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(\n self.db_value,\n self.db_value.id == self.db_tag.fk_value). \\\n join(self.db_key, self.db_key.id == self.db_tag.fk_key). \\\n filter(self.db_key.key == attr). \\\n subquery()\n subqueries.append(sq)\n columns.append(func.sum(sq.c.v))\n elif function == 'count' or function == 'count distinct':\n if attr == 'Activity' or attr == 'Stakeholder':\n columns.append(func.count())\n else:\n sq = DBSession.query(\n self.db_item.id.label('item_id'),\n self.db_value.value.label('v')\n ). \\\n join(self.db_taggroup). \\\n join(\n self.db_tag,\n self.db_taggroup.id == self.db_tag.fk_tag_group). \\\n join(self.db_value). \\\n join(self.db_key). \\\n filter(self.db_key.key == attr). 
\\\n subquery()\n subqueries.append(sq)\n if (function == 'count distinct'):\n columns.append(func.count(distinct(sq.c.v)))\n else:\n columns.append(func.count(sq.c.v))\n return subqueries, columns", "def sql(self):\n text = \"\"\"\n STRUCT('{display_name}' AS usage,\n STRUCT(\n COUNTIF(days_since_{col_name} < 1) AS dau,\n COUNTIF(days_since_{col_name} < 7) AS wau,\n COUNTIF(days_since_{col_name} < 28) AS mau,\n SUM(udf_bitcount_lowest_7(days_{col_name}_bits)) AS active_days_in_week\n ) AS metrics_daily,\n STRUCT(\n COUNTIF(days_since_created_profile = 6 AND udf_active_n_weeks_ago(days_{col_name}_bits, 0)) AS active_in_week_0,\n SUM(IF(days_since_created_profile = 6, udf_bitcount_lowest_7(days_{col_name}_bits), 0)) AS active_days_in_week_0\n ) AS metrics_1_week_post_new_profile,\n STRUCT(\n COUNTIF(days_since_created_profile = 13 AND udf_active_n_weeks_ago(days_{col_name}_bits, 0)) AS active_in_week_1,\n COUNTIF(days_since_created_profile = 13 AND udf_active_n_weeks_ago(days_{col_name}_bits, 1) AND udf_active_n_weeks_ago(days_{col_name}_bits, 0)) AS active_in_weeks_0_and_1\n ) AS metrics_2_week_post_new_profile)\"\"\".format( # noqa\n **asdict(self)\n )\n if self.dau_only:\n lines = []\n for line in text.split(\"\\n\"):\n before_as, separator, after_as = line.partition(\" AS \")\n if separator and (\n after_as not in [\"usage,\", \"dau,\"]\n and not after_as.startswith(\"metrics_\")\n ):\n line = \" NULL AS \" + after_as\n lines.append(line)\n text = \"\\n\".join(lines)\n return text", "def get_results_from_aggregation_sources(self, context):", "def get_total_data():\n return pd.merge(compute_aggregate_load_data(), compute_aggregate_weather_data(),on=\"Date\")", "def sql_select(sql):\n cur = c.cursor()\n cur.execute(sql)\n results = cur.fetchall()\n return results", "def aggregate(self, *args, **kwargs):\n return AggregateQuerySet(self, args, kwargs)", "def sql(query):\n cursor = db.execute_sql(query)\n list_of_tuples = cursor.fetchall()\n lis = [i[0] for i in list_of_tuples]\n dictionary = {element: lis.count(element) for element in lis}\n return dictionary", "def aggregate(self, func_or_funcs, *args, **kwargs):\n if not isinstance(func_or_funcs, dict) or \\\n not all(isinstance(key, str) and isinstance(value, str)\n for key, value in func_or_funcs.items()):\n raise ValueError(\"aggs must be a dict mapping from column name (string) to aggregate \"\n \"functions (string).\")\n\n sdf = self._kdf._sdf\n groupkeys = self._groupkeys\n groupkey_cols = [s._scol.alias('__index_level_{}__'.format(i))\n for i, s in enumerate(groupkeys)]\n reordered = []\n for key, value in func_or_funcs.items():\n if value == \"nunique\":\n reordered.append(F.expr('count(DISTINCT `{0}`) as `{0}`'.format(key)))\n else:\n reordered.append(F.expr('{1}(`{0}`) as `{0}`'.format(key, value)))\n sdf = sdf.groupby(*groupkey_cols).agg(*reordered)\n internal = _InternalFrame(sdf=sdf,\n data_columns=[key for key, _ in func_or_funcs.items()],\n index_map=[('__index_level_{}__'.format(i), s.name)\n for i, s in enumerate(groupkeys)])\n return DataFrame(internal)", "def get_total_stateless(db):\n pass", "def aggregateFunction():\r\n global aggFunc\r\n aggFunc = []\r\n for objFunc in P_prime:\r\n aggFunc.append(objFunc[0]*FileSettings.settingsdict['weights'][0] +\r\n objFunc[1]*FileSettings.settingsdict['weights'][1] +\r\n objFunc[2]*FileSettings.settingsdict['weights'][2] +\r\n objFunc[3]*FileSettings.settingsdict['weights'][3])\r\n return aggFunc", "def _make_aggregation_query(assets: List[str], search_by: str, partitioned_cols: 
Dict[str, Set[str]], date_col: str,\n start: pd.Timestamp = None, end: pd.Timestamp = None) -> Tuple[\n List[Dict[str, any]], List[str]]:\n\n # this always needs to be made no matter if there is no static wanted\n static_projection = {field: 1 for field in (partitioned_cols['static'] if partitioned_cols['static'] else [])}\n static_projection['_id'] = 0\n static_projection[search_by] = 1\n\n if partitioned_cols['timeseries']:\n # making the timeseries projection dict\n timeseries_projection = {field: '$timeseries.' + field for field in partitioned_cols['timeseries']}\n timeseries_projection['date'] = f'$timeseries.{date_col}'\n\n aggregation_query = [\n {'$match': {search_by: {'$in': assets}}},\n {'$unwind': '$timeseries'},\n {'$match': {f'timeseries.{date_col}': {'$gte': start, '$lt': end}}},\n {'$project': {**static_projection, **timeseries_projection}}\n ]\n primary_key = ['date', search_by]\n\n else:\n aggregation_query = [{'$match': {search_by: {'$in': assets}}},\n {'$project': static_projection}]\n primary_key = [search_by]\n\n return aggregation_query, primary_key", "def cal(sql):\r\n try:\r\n db = psycopg2.connect(dbname=\"news\")\r\n c = db.cursor()\r\n c.execute(sql)\r\n car = c.fetchall()\r\n db.close()\r\n return car\r\n except Exception:\r\n print(\"Unable to connect to the database\")", "def count(self, sql, params=None, error_message=None, connection=None):\n fresh_connection = False\n try:\n if not connection:\n connection = self.database.connection()\n fresh_connection = True\n # self.context.logger.debug(connection.cursor.mogrify(sql, params))\n result = single_value_sql(connection, sql, params)\n except psycopg2.Error, e:\n if error_message is None:\n error_message = \"Failed to execute count against PostgreSQL\"\n error_message = \"%s - %s\" % (error_message, str(e))\n logger.error(error_message, exc_info=True)\n raise DatabaseError(error_message)\n finally:\n if connection and fresh_connection:\n connection.close()\n\n return result", "def statistics_query(owner_name):\n\n \"\"\"\n WITH p_count AS (\n SELECT\n type_name,\n owner_name,\n COUNT(type_name) product_count\n FROM products\n GROUP BY (owner_name, type_name)\n ),\n \"\"\"\n p_count = db.session.query(\n Products.type_name,\n Products.owner_name,\n func.count(Products.type_name).label('product_count'),\n func.count(case([\n (Products.product_condition == True, Products.type_name)\n ],\n else_=None\n )).label('valid')\n )\\\n .select_from(Products)\\\n .filter(Products.owner_name == owner_name)\\\n .group_by(Products.owner_name, Products.type_name)\\\n .subquery(name='p_count')\n \"\"\"\n so AS (\n SELECT\n type_name,\n supplier_name,\n SUM(quantity) ordered\n FROM orders\n JOIN specific_orders USING(order_id)\n GROUP BY (type_name, supplier_name)\n )\n\n \"\"\"\n specific_orders = db.session.query(\n SpecificOrders.type_name,\n Orders.supplier_name,\n func.sum(SpecificOrders.quantity).label('ordered')\n )\\\n .select_from(Orders)\\\n .join(SpecificOrders, Orders.order_id == SpecificOrders.order_id)\\\n .filter(Orders.supplier_name == owner_name)\\\n .group_by(SpecificOrders.type_name, Orders.supplier_name)\\\n .subquery(name='specific_orders')\n \"\"\"\n SELECT\n owner_name,\n p.type_name,\n so.ordered,\n p.product_count\n FROM p_count p\n LEFT JOIN so ON p.type_name = so.type_name AND p.owner_name = so.supplier_name\n ORDER BY 1\n \"\"\"\n stats_query = db.session.query(\n p_count.c.owner_name,\n p_count.c.type_name.label('Type'),\n p_count.c.product_count.label('Amount'),\n p_count.c.valid.label('Amount of 
functional products'),\n (func.coalesce(specific_orders.c.ordered, 0)).label('Ordered amount'),\n CriticalLevels.critical_amount.label('Critical level')\n )\\\n .select_entity_from(p_count)\\\n .outerjoin(CriticalLevels, and_(CriticalLevels.business == p_count.c.owner_name,\n CriticalLevels.type_name == p_count.c.type_name))\\\n .outerjoin(specific_orders, and_(p_count.c.type_name == specific_orders.c.type_name,\n p_count.c.owner_name == specific_orders.c.supplier_name))\\\n .order_by(p_count.c.owner_name)\n return stats_query", "def _aggregate(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return\n\n if 'key' not in self._state:\n self._state['key'] = self._replace_fields(self._args.key)\n\n if 'begin' not in self._state:\n self._state['begin'] = eval(self._replace_fields(self._args.begin))\n\n if 'reduce' not in self._state:\n self._state['reduce'] = self._replace_fields(self._args.reduce)\n\n if 'result_map' not in self._state:\n self._state['result_map'] = {}\n\n r = list(map(self._convert, row))\n\n key = eval(self._state['key'])\n\n if key not in self._state['result_map']:\n self._state['result_map'][key] = len(self._result)\n entry = deepcopy(self._state['begin'])\n self._result.append([key, entry])\n\n index = self._state['result_map'][key]\n\n ns = {\n '__result__': self._result[index][1],\n 'r': r\n }\n exec(self._state['reduce'], ns)\n self._result[index][1] = ns['__result__']", "def find_db_sum(self,table,**query_dict):\n sql = \"SELECT COUNT(*) AS sum FROM \" + table + ' WHERE '\n for index in query_dict:\n if not isinstance(query_dict[index],dict): sql += \" %s = '%s' and\" % (index,query_dict[index]) \n else: sql += \" %s %s '%s' and\" % (index,query_dict[index]['rule'],query_dict[index]['value'])\n sql = sql[0:-3]\n # self.out(sql)\n try: sum = self.db.get(sql)['sum']\n except Exception,e: self.treat_except(e)\n return int(sum) if int(sum) else False", "def test_sum_and_average(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n metadata = str('{\"val\": 1}').replace(\"'\", '\"')\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),\n ('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),\n ('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),\n ('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),\n ('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')\n \"\"\"\n ).format(table=tracker.table, metadata=metadata)\n\n pd.io.sql.execute(query, tracker.db)\n\n x_sum_daily_all = tracker.sum(\"metadata__val\")\n x_sum_daily = tracker.sum(\"metadata__val\", event=\"logged_in\")\n\n x_avg_daily_all = tracker.average(\"metadata__val\", event=\"logged_in\")\n x_avg_daily = tracker.average(\"metadata__val\", event=\"logged_in\")\n\n assert len(x_sum_daily) == 7\n\n assert np.all(x_sum_daily_all[\"sum\"].values == [3, 2, 1, 1, 1, 2, 3])\n assert np.all(x_sum_daily[\"sum\"].values 
== [3, 2, 1, 1, 1, 2, 2])\n\n assert np.all(x_avg_daily_all[\"avg\"].values == [1, 1, 1, 1, 1, 1, 1])\n assert np.all(x_avg_daily[\"avg\"] == x_avg_daily_all[\"avg\"])", "def aggregateCounts(self, timestamps, op = OP_TOTAL,timeResolution = None ):\n timeResolution = timeResolution or self.timeResolutions[0]\n\n if op not in (BitmapCounter.OP_INTERESECT, BitmapCounter.OP_TOTAL, BitmapCounter.OP_AVG):\n raise ValueError(\"Invalid aggregation op %s\" % op)\n\n if op == BitmapCounter.OP_INTERESECT:\n bitop = 'AND'\n else:\n bitop = 'OR'\n\n dest = 'aggregate:%s:%s' % (self.metric,hash(timestamps))\n pipe = self._getPipeline()\n pipe.execute_command('BITOP',bitop, dest, *(self.__getKey(timestamp, timeResolution) for timestamp in timestamps))\n pipe.execute_command('BITCOUNT', dest)\n rx = pipe.execute()\n ret = rx[1]\n if op == BitmapCounter.OP_AVG:\n return float(ret)/len(timestamps)\n else:\n return ret", "def _aggregate_metrics(metrics, aggfunc, base):\n return base.Struct(**_UNCOMPRESSED_METRICS)(\n left_side_bearing=aggfunc(_m.left_side_bearing for _m in metrics),\n right_side_bearing=aggfunc(_m.right_side_bearing for _m in metrics),\n character_width=aggfunc(_m.character_width for _m in metrics),\n character_ascent=aggfunc(_m.character_ascent for _m in metrics),\n character_descent=aggfunc(_m.character_descent for _m in metrics),\n character_attributes=0,\n )", "def query_two(self, table_name):\n\n query = (\n \"SELECT MAX(count) as Maximum,\"\n \"MIN(count) as Minimum,\"\n \"AVG(count) as Average \"\n \"FROM (SELECT COUNT(*) as count FROM %s GROUP BY user_id) as c\"\n )\n\n self.cursor.execute(query % (table_name))\n rows = self.cursor.fetchall()\n print(\"Data from table %s, tabulated:\" % table_name)\n print(tabulate(rows, headers=self.cursor.column_names))\n return rows", "def _register_arithmetic_agg(\n name: str,\n np_name: str,\n doc: str = \"\"\n) -> Callable:\n @register_func(None, context=Context.EVAL)\n def _arithmetric(x: Iterable, na_rm: bool = False) -> Iterable:\n \"\"\"Arithmetric function\"\"\"\n # na_rm not working for numpy functions\n # with x is a Series object\n if isinstance(x, Series):\n return getattr(x, np_name)(skipna=na_rm)\n\n fun_name = f\"nan{np_name}\" if na_rm else np_name\n return getattr(numpy, fun_name)(x)\n\n _arithmetric.__name__ = name\n _arithmetric.__doc__ = doc\n return _arithmetric", "def tran_count(df, *args):\n # Compute the count\n df_res = DataFrame(\n df.groupby([*args]).size()\n ).reset_index()\n # Change column name\n col = list(df_res.columns)\n col[-1] = \"n\"\n df_res.columns = col\n\n return df_res", "def aggregate_client(df, group_vars, df_names):\n \n # Aggregate the numeric columns\n df_agg = agg_numeric(df, parent_var= group_vars[0], df_name = df_names[0])\n \n # If there are categorical variables\n if any(df.dtypes == 'category'):\n \n # Count the categorical columns\n df_counts = agg_categorical(df, parent_var= group_vars[0], df_name = df_names[0])\n\n # Merge the numeric and categorical\n df_by_loan = df_counts.merge(df_agg, on = group_vars[0], how = 'outer')\n\n gc.enable()\n del df_agg, df_counts\n gc.collect()\n\n # Merge to get the client id in dataframe\n df_by_loan = df_by_loan.merge(df[[group_vars[0], group_vars[1]]], on = group_vars[0], how = 'left')\n\n # Remove the loan id\n df_by_loan = df_by_loan.drop(columns = [group_vars[0]])\n\n # Aggregate numeric stats by column\n df_by_client = agg_numeric(df_by_loan, parent_var = group_vars[1], df_name = df_names[1])\n\n \n # No categorical variables\n else:\n # Merge to get 
the client id in dataframe\n df_by_loan = df_agg.merge(df[[group_vars[0], group_vars[1]]], on = group_vars[0], how = 'left')\n \n gc.enable()\n del df_agg\n gc.collect()\n \n # Remove the loan id\n df_by_loan = df_by_loan.drop(columns = [group_vars[0]])\n \n # Aggregate numeric stats by column\n df_by_client = agg_numeric(df_by_loan, parent_var = group_vars[1], df_name = df_names[1])\n \n # Memory management\n gc.enable()\n del df, df_by_loan\n gc.collect()\n\n return df_by_client", "def agg(self, args):\n result = DataFrame()\n add_col_values = True\n\n ctx = ffi.new('gdf_context*')\n ctx.flag_sorted = 0\n ctx.flag_method = self._method\n ctx.flag_distinct = 0\n\n sort_result = True\n\n if not isinstance(args, str) and isinstance(\n args, collections.abc.Sequence):\n if (len(args) == 1 and len(self._val_columns) == 1):\n sort_result = False\n for agg_type in args:\n\n val_columns_out = [agg_type + '_' +\n val for val in self._val_columns]\n\n result = self._apply_agg(\n agg_type, result, add_col_values, ctx, self._val_columns,\n val_columns_out, sort_result=sort_result)\n\n add_col_values = False # we only want to add them once\n\n elif isinstance(args, collections.abc.Mapping):\n if (len(args.keys()) == 1):\n if(len(list(args.values())[0]) == 1):\n sort_result = False\n for val, agg_type in args.items():\n\n if not isinstance(agg_type, str) and \\\n isinstance(agg_type, collections.abc.Sequence):\n for sub_agg_type in agg_type:\n val_columns_out = [sub_agg_type + '_' + val]\n result = self._apply_agg(sub_agg_type, result,\n add_col_values, ctx, [val],\n val_columns_out,\n sort_result=sort_result)\n elif isinstance(agg_type, str):\n val_columns_out = [agg_type + '_' + val]\n result = self._apply_agg(agg_type, result,\n add_col_values, ctx, [val],\n val_columns_out,\n sort_result=sort_result)\n\n add_col_values = False # we only want to add them once\n\n else:\n result = self.agg([args])\n\n return result", "def aggregate_count_data(df, groupby, id_vars=[]):\n # Make sure we have the column we are grouping by\n if groupby not in df.columns:\n raise ValueError(\n f\"the specified column to group by '{by}' is not in the input data\"\n )\n\n # data columns\n data_columns = [\n col\n for col in df.columns\n if not col.startswith(\"geo\") and not col.endswith(\"moe\")\n ]\n\n def _aggregate(group_df):\n \"\"\"\n The function that aggregates each group\n \"\"\"\n out = {}\n for col in data_columns:\n # The name of the error column (if it exists)\n error_col = f\"{col}_moe\"\n\n # remove any NaN rows\n subset = group_df.dropna(subset=[col], how=\"any\")\n\n # aggregat if we had any rows left\n if len(subset):\n\n # column values, margin of error (if it exists)\n args = np.column_stack(\n [subset[col], subset.get(error_col, np.zeros(len(subset)))]\n )\n\n # do the aggregation\n aggval, moe = cda.approximate_sum(*args)\n else:\n aggval = moe = np.nan\n\n # store\n out[col] = aggval\n if error_col in subset.columns:\n out[f\"{col}_moe\"] = moe\n\n out[\"geometry\"] = group_df.geometry.unary_union\n return pd.Series(out)\n\n # this is the aggregated data, with index of \"by\", e.g., group label\n agg_df = df.groupby(groupby).apply(_aggregate)\n\n # Return a GeoDataFrame\n out = gpd.GeoDataFrame(agg_df, geometry=\"geometry\", crs=df.crs).reset_index()\n\n # Add in any id variables from\n if len(id_vars):\n if groupby not in id_vars:\n id_vars.append(groupby)\n out = out.merge(df[id_vars], on=groupby).drop_duplicates(subset=[groupby])\n\n return out", "def doQuery( connection ): # function 
definition\r\n cursor = connection.cursor()\r\n \r\n query = \"\"\"select Title, sum( UnitPrice ) as cost\r\n from Album natural join Track\r\n group by AlbumId\r\n order by cost desc\"\"\"\r\n \r\n cursor.execute( query )\r\n \r\n print( \"Album titles and cost of tracks\" )\r\n print( \"Title\\t\\t\\t\\tCost\" )\r\n for (Title, cost) in cursor:\r\n print( Title, \"\\t\\t\\t$\", cost )\r\n \r\n cursor.close()", "def aggregate_quantity(request):\n try:\n model = request.GET['model']\n start_date = request.GET.get('startDate', None)\n end_date = request.GET.get('endDate', None)\n\n items = __getQuerysetGivenInterval(model, start_date, end_date)\n\n # Count all models as 1 except for items which has quantity field\n count_method = Count('pk') if model != 'item' else Sum('quantity')\n\n agg_qty = list(items.values('documented_at')\n .annotate(total_quantity=count_method))\n result = {'result': agg_qty}\n\n return JsonResponse(result, status=200)\n except BaseException as e:\n print(e.args)\n return HttpResponseBadRequest()", "def _aggregation_target(self):\n ...", "def total_things(self, table_name, spam=None, all_time=None):\r\n t = tdb_sql.types_name[table_name]['thing_table']\r\n s = sa.select([sa.func.count(t.c.thing_id)])\r\n if spam:\r\n s.append_whereclause(t.c.spam==spam)\r\n s.append_whereclause(t.c.deleted=='f')\r\n s = self.append_date_clause(t, s, all_time=all_time)\r\n\r\n return s.execute().fetchone()[0]", "def sum(self):\n return self._summarize(lambda c: c.sum)", "def aggregate(korpus):\n return pd.DataFrame(korpus.fillna(0).mean(axis=1))", "def aggregate(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.aggregate(m.collection, *args, **kwargs)", "def fast_count(query):\n count_query = (query\n .statement.with_only_columns([func.count()]).order_by(None))\n count = query.session.execute(count_query).scalar()\n return count", "def count(query):\n cursor = db.execute_sql(query)\n result = cursor.fetchone()[0]\n return result", "def test_execute_sum_query(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n aggregates = handler._mapper.report_type_map.get(\"aggregates\")\n current_totals = self.get_totals_by_time_scope(aggregates)\n query_output = handler.execute_query()\n self.assertIsNotNone(query_output.get(\"data\"))\n self.assertIsNotNone(query_output.get(\"total\"))\n total = query_output.get(\"total\")\n\n self.assertEqual(total.get(\"usage\", {}).get(\"value\"), current_totals.get(\"usage\"))\n self.assertEqual(total.get(\"request\", {}).get(\"value\"), current_totals.get(\"request\"))\n self.assertEqual(total.get(\"cost\", {}).get(\"value\"), current_totals.get(\"cost\"))\n self.assertEqual(total.get(\"limit\", {}).get(\"value\"), current_totals.get(\"limit\"))", "def aggregate(predictions, aggfunc):\n return [aggfunc(sublist) for sublist in np.transpose(predictions)]", "def test_enforce_aggregation(self):\n\n good_examples = \"\"\"\n [score] -> sum(datatypes.score)\n [ScORE] -> sum(datatypes.score)\n [ScORE] + [ScORE] -> sum(datatypes.score + datatypes.score)\n max([ScORE] + [ScORE]) -> max(datatypes.score + datatypes.score)\n max(score) - min(score) -> max(datatypes.score) - min(datatypes.score)\n max(scores.score) -> max(scores.score)\n max([score] - [scores.score]) -> max(datatypes.score - scores.score)\n \"\"\"\n\n for field, expected_sql in self.examples(good_examples):\n expr, _ = self.builder.parse(field, enforce_aggregation=True, debug=True)\n 
self.assertEqual(expr_to_str(expr), expected_sql)", "def count(self, column=\"*\"):\n self.aggregate(\"COUNT\", \"{column}\".format(column=column))\n return self", "def select_category(\n dgid,\n group_by,\n where,\n column_name,\n column_value,\n where_description,\n computed_columns,\n where_expr,\n):\n conn = get_database_connection(dgid)\n cur = conn.cursor()\n\n unify_computed_columns(computed_columns)\n metadata = get_metadata(conn)\n columns = list(metadata.keys())\n select_expr_as = [get_field_name(column, metadata) for column in columns]\n databases = [\"datagrid\"]\n\n if computed_columns or where_expr:\n where_sql = update_state(\n computed_columns,\n metadata,\n databases,\n columns,\n select_expr_as,\n where_expr,\n )\n if where_sql:\n where = where_sql\n\n where = where if where else \"1\"\n\n column_type = metadata[column_name][\"type\"]\n field_name = get_field_name(column_name, metadata)\n field_expr = get_field_expr(column_name, metadata)\n group_by_field_name = get_field_name(group_by, metadata)\n group_by_field_expr = get_field_expr(group_by, metadata)\n\n column_value = get_column_value(column_value, group_by, metadata)\n\n try:\n rows = get_group_by_rows(\n cur,\n group_by_field_name,\n group_by_field_expr,\n field_name,\n field_expr,\n column_value,\n where,\n databases,\n select_expr_as,\n )\n except sqlite3.OperationalError as exc:\n LOGGER.error(\"SQL: %s\", exc)\n raise Exception(str(exc))\n\n # These are categories (ints or strings):\n results_json = {\"type\": \"verbatim\", \"value\": \"\", \"columnType\": column_type}\n if rows:\n row = rows[0]\n if row:\n raw_value = row[0]\n if raw_value:\n values = [v.replace(\"&comma;\", \",\") for v in raw_value.split(\",\")]\n else:\n values = []\n\n counts = Counter(values)\n length = len(values)\n unique_values = list(counts.keys())\n ulength = len(unique_values)\n\n if length == 0:\n results_json = {\n \"type\": \"verbatim\",\n \"value\": plural(length, \"value\"),\n \"columnType\": column_type,\n }\n elif length == 1:\n results_json = {\n \"type\": \"verbatim\",\n \"value\": values[0],\n \"columnType\": column_type,\n }\n elif ulength == 1:\n results_json = {\n \"type\": \"verbatim\",\n \"value\": \"%s (%s of them)\" % (values[0], length),\n \"columnType\": column_type,\n }\n elif ulength > MAX_CATEGORIES:\n if length == ulength:\n results_json = {\n \"type\": \"verbatim\",\n \"value\": plural(length, \"unique value\"),\n \"columnType\": column_type,\n }\n else:\n results_json = {\n \"type\": \"verbatim\",\n \"value\": (\n plural(length, \"value\")\n + \", \"\n + (\"%s %s\" % (ulength, \"unique\"))\n ),\n \"columnType\": column_type,\n }\n else:\n counts = {\n key: value\n for (key, value) in sorted(counts.items(), key=lambda item: item[1])\n }\n # values: {\"Animal\": 37, \"Plant\": 12}\n results_json = {\n \"type\": \"category\",\n \"values\": counts,\n \"column\": column_name,\n \"columnType\": column_type,\n \"groupBy\": group_by,\n \"groupByValue\": column_value,\n \"whereDescription\": where_description,\n \"computedColumns\": computed_columns,\n }\n\n return results_json", "def aggregate(self, applyfunc):\n axis_name = 'columns' if self.axis else 'index'\n getter = lambda df, group: df.reindex(**{axis_name : group})\n result_d = self._aggregate_generic(getter, applyfunc,\n axis=self.axis)\n\n result = DataMatrix(result_d)\n\n if self.axis == 0:\n result = result.T\n\n return result" ]
[ "0.6447261", "0.6286424", "0.6268693", "0.61533105", "0.61249465", "0.6095222", "0.60426706", "0.60377187", "0.59553", "0.5910795", "0.5815274", "0.58027256", "0.57948333", "0.57921916", "0.56742746", "0.5662127", "0.56422573", "0.56114566", "0.55796754", "0.5578842", "0.5568994", "0.55639416", "0.55624706", "0.55514526", "0.55472744", "0.5546232", "0.5530956", "0.5514738", "0.5491253", "0.547356", "0.5469837", "0.5439834", "0.54330295", "0.5409441", "0.54021317", "0.5368328", "0.5356416", "0.5349236", "0.53486127", "0.53094625", "0.53090376", "0.5281014", "0.5281014", "0.52578247", "0.5246481", "0.52372146", "0.51902777", "0.51676655", "0.5146863", "0.51464206", "0.5138957", "0.51149523", "0.5091749", "0.5089983", "0.50765467", "0.5075019", "0.5063964", "0.5060962", "0.50567615", "0.50529265", "0.504848", "0.5047199", "0.50422156", "0.50232416", "0.5017028", "0.5008096", "0.5005386", "0.4992518", "0.49924797", "0.49908504", "0.49855503", "0.49849665", "0.49824497", "0.4975992", "0.4972063", "0.49712583", "0.496228", "0.4961965", "0.49504876", "0.49482265", "0.49429694", "0.49305418", "0.49301344", "0.49280488", "0.49183095", "0.49165577", "0.49066594", "0.4900954", "0.49006015", "0.48969936", "0.48946595", "0.48936135", "0.4888435", "0.4881556", "0.48776954", "0.48770547", "0.48760772", "0.48677203", "0.48644072", "0.48628458" ]
0.6102247
5
Execute query and return cursor object.
def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter):
    stmnt, params = '', ()  # Pre-bind so the except-block can always reference them.
    try:
        stmnt, params = self._build_query(self._table, select_clause, **kwds_filter)
        if trailing_clause:
            stmnt += '\n' + trailing_clause
        cursor = self._connection.cursor()
        cursor.execute('PRAGMA synchronous=OFF')  # Trade crash-safety for speed.
        #print(stmnt, params)
        cursor.execute(stmnt, params)
    except Exception as e:
        # Re-raise the same exception class, annotated with the statement and parameters.
        exc_cls = e.__class__
        msg = '%s\n query: %s\n params: %r' % (e, stmnt, params)
        raise exc_cls(msg)
    return cursor
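The pattern in this document (build a parameterized statement, execute it, hand the live cursor back to the caller rather than fetched rows) can be exercised standalone. The sketch below is illustrative only, not part of the original class: it uses an in-memory SQLite database with an invented `scores` table and data.

```python
import sqlite3

# Invented table and rows, for illustration only.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE scores (label TEXT, value INTEGER)')
conn.executemany('INSERT INTO scores VALUES (?, ?)',
                 [('a', 1), ('b', 2), ('a', 3)])

stmnt = 'SELECT label, SUM(value) FROM scores WHERE label=? GROUP BY label'
params = ('a',)
cursor = conn.cursor()
cursor.execute(stmnt, params)   # parameterized execution, as in the method above
print(cursor.fetchall())        # -> [('a', 4)]
```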
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cursorExecute(self, query):\n return self.cursor.execute(query)", "def _execute(self, *args):\n cursor = self.db.cursor()\n cursor.execute(*args)\n return cursor", "def execute(cls, sql):\n cursor = cls.get_conn().cursor()\n cursor.execute(sql)\n return cursor", "def execute(self, query):\n try:\n cursor = self._get_cursor()\n cursor.execute(query)\n return cursor\n except pymssql.Error as e:\n raise PluginError('Error during query execution !\\n'\n 'Query: %s' % query, e.message)", "def run_query(query):\n conn = connection.get_db_connection()\n cursor = conn.cursor()\n cursor.execute(query)\n return cursor", "def __execute_query(self, query: str, params: Union[None, tuple] = None) -> sqlite3.Cursor:\n if params is None:\n params = ()\n\n with self.conn:\n return self.conn.execute(query, params)", "def execute(self, query):\n with self.conn.cursor() as cur:\n # Execute the query\n try:\n cur.execute(query)\n except Exception as exc:\n print(\"Unable to execute query. Error was {0}\".format(str(exc)))\n exit()\n rows = cur.fetchall()\n return rows", "def execute(self, qry):\n def internal():\n print 'qry = ', qry\n self._cur = self.get_cursor()\n print 'self._cur = ', self._cur\n self._cur.execute(qry)\n # self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor).execute(qry)\n rows = self._cur.fetchall()\n return rows\n\n return self._retry(internal)", "def oracle_cursor(query):\n conf_file_dir = os.path.dirname(os.path.realpath(__file__))\n conf_file = conf_file_dir + os.sep + '.setup.conf'\n (username, password, host, port, sid) = credential_setup(conf_file)\n dsn_tns = cx_Oracle.makedsn(host, port, sid)\n connection = cx_Oracle.connect(username, password, dsn_tns)\n cursor = connection.cursor()\n cursor.execute(query)\n\n return cursor", "def execute_query(self, query):\n with self.db_engine.connect() as conn:\n try:\n result = conn.execute(query)\n\n except Exception as e:\n logger.error(e)\n\n result = None\n \n return result", "def execute_query(query):\n c.execute(query)\n return c.fetchall()", "def cursor():\n dbh = handle()\n return dbh.cursor()", "def _run_query(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n return cursor.fetchall()", "def _cursor(self):\n cursor = self.conn.cursor()\n\n return cursor", "def query(self, query, *args, **kwargs):\n # TODO: do not return an sqlite3 Errors\n\n if not self.begin():\n return None\n\n self.execute(query, *args, **kwargs)\n\n # create clever result\n ret = list()\n for row in self.cursor:\n if len(row) == 1: # (2)\n ret.append(row[0])\n else: # (3)\n ret.append(row)\n if len(ret) == 1: # (1)\n ret = ret[0]\n\n self.end()\n\n return ret", "def execute(query: str):\r\n try:\r\n global connection\r\n global cursor\r\n\r\n cursor.execute(query)\r\n records = cursor.fetchall()\r\n connection.commit()\r\n return records\r\n\r\n except sqlite3.Error as error:\r\n logger.error(f\"Error while connecting to sqlite: {error}\")", "def __execute__(self, sql, commit=True):\n cursor = None\n PDEBUG('Executing SQL: %s'%sql)\n try:\n cursor = self.conn.execute(sql)\n except sqlite3.IntegrityError as e:\n print('DUP: %s' % e)\n except Exception as e:\n print('FATAL: %s -- %s' % (e, sql))\n else:\n if commit:\n self.conn.commit()\n\n return cursor", "def _get_cursor(self):\n conn = self._connect()\n conn.autocommit = True\n cursor = conn.cursor()\n return cursor", "def db_execute_query(db_connection, query, query_args):\n cursor = db_connection.cursor()\n #datalab_logger_connections.info(\"reading database[Query. 
May Take Time]...\")\n cursor.execute(query, query_args)\n #datalab_logger_connections.info(\"finish to query database\")\n return cursor", "def execute(self, query):\n cur = self.conn.cursor()\n try:\n cur.execute(query)\n self.conn.commit()\n return cur\n except:\n self.conn.rollback()\n print(\"Error: error executing query `\" + query + \"`\")\n return False", "def execute(\n self,\n query: Query,\n params: Optional[Params] = None,\n *,\n prepare: Optional[bool] = None,\n binary: bool = False,\n ) -> Cursor[Row]:\n cur = self.cursor()\n if binary:\n cur.format = Format.BINARY\n\n try:\n return cur.execute(query, params, prepare=prepare)\n except e.Error as ex:\n raise ex.with_traceback(None)", "def _query_mysql(self):\n mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)\n conn = mysql.get_conn()\n cursor = conn.cursor()\n cursor.execute(self.sql)\n return cursor", "def execute(query):\n print query\n cursor.execute(query)", "def execute(cursor, query):\n while True:\n try:\n cursor.execute(query)\n break\n except Exception as e:\n print(\"Database query: {} {}\".format(cursor, query))\n print(\"Database retry reason: {}\".format(e))\n return cursor", "def executeQuery(query):\n c = db.cursor()\n c.execute(query)\n rows = c.fetchall()\n db.close()\n return rows", "def q(self, sql, return_curs=False):\n curs = self._db.cursor()\n curs.execute(sql)\n\n if return_curs:\n return curs\n else:\n curs.close()", "def cursor(self):\n return self._conn.cursor()", "def query(self, query, *parameters, **kwparameters):\n cursor = self._cursor()\n try:\n cursor.execute(query, kwparameters or parameters)\n result = cursor.fetchall()\n return result\n finally:\n cursor.close()", "def create_cursor(self):\r\n cursor = self.connection.cursor()\r\n return cursor", "def query(self, query):\n\n config = {\n 'user' : self.db_user,\n 'password' : self.db_pwd,\n 'database' : self.db_name,\n 'host' : self.db_host,\n 'unix_socket' : self.db_socket,\n 'port' : self.db_port,\n 'charset' : 'utf8'\n }\n\n try: \n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor(dictionary=True) # To return rows as dictionaries.\n\n cursor.execute(query[0], query[1])\n \n # Putting the result rows in an array.\n result = []\n for row in cursor:\n result.append(row)\n\n except mysql.connector.Error as err:\n return None\n else:\n cursor.close()\n cnx.close()\n return result", "def query(self, query, dict_cursor=False):\n try:\n if dict_cursor:\n self.dict_cursor.execute(query)\n results = self.dict_cursor.fetchall()\n else:\n self.cursor.execute(query)\n results = self.cursor.fetchall()\n return results\n except MySQLdb.Error as e:\n self.connection.rollback()\n try:\n print(\"MySQL Error {}: {}\".format(e.args[0], e.args[1]))\n except IndexError:\n print(\"MySQL Error: {}\".format(str(e)))", "def cursor(self):\n if self.__connection is None:\n self.connect()\n return self.__connection.cursor()", "async def query(self, stmt, *args):\n\n with (await self.application.db.cursor()) as cur:\n await cur.execute(stmt, args)\n return [self.row_to_obj(row, cur)\n for row in await cur.fetchall()]", "def query(self, query, cs_type=None, *parameters, **kwparameters):\n cursor = self._cursor(cs_type)\n try:\n self._execute(cursor, query, parameters, kwparameters)\n if cs_type in [\"SSCursor\", \"SSDictCursor\"]:\n while 1:\n try:\n row = cursor.fetchone()\n except Exception, e:\n cursor.close()\n raise e\n if row:\n yield row\n else:\n break\n else:\n yield [Row(row) if isinstance(row, dict) else row for row in cursor]\n except 
Exception, e:\n cursor.close()", "def get_cursor(self):\n return self.connection.cursor()", "def execute(self, sql, parameters=None):\n if parameters is None:\n parameters = {}\n self._cursor = iter(self._hndl.execute(sql, parameters))\n return self", "def execute_query(conn, query):\r\n cur = conn.cursor()\r\n cur.execute(query)\r\n conn.commit()\r\n return cur.fetchall()", "def execute(\n self, query: str, table: str = None, values: IterOpt = None\n ) -> sqlite3.Cursor:", "def execute_query(self, query_, return_results_=False):\n\n try:\n if return_results_:\n logging.info(\"Fetching results...\")\n self.query_results = self.cursor.execute(query_)\n else:\n self.cursor.execute(query_)\n logging.info(\"Query ran successfully.\")\n\n except Exception as e:\n if self.airflow:\n # For Airflow, forces task to fail and set it up for re-try\n raise AirflowException(\"Error running query. {}\"\n .format(str(e)))\n else:\n logging.exception(\"Error running query.\")\n raise e", "def cursor(self):\n return self.conn.cursor()", "def get_cursor():\n cur = conn.cursor(cursor_factory=DictCursor)\n return cur", "def exec_get_cur(self, request, params={}, extra_params = None):\n con = self.create_connection()\n cur = con.cursor()\n cur_exec(cur, request, params)\n return cur", "def get_cursor():\n return _thread_local.connection.cursor()", "def get_cursor(self):\n self.cur = self.dbcon.cursor()\n return self.cur", "def cursor(self):\n with self.connection() as conn:\n cursor = conn.cursor(prepared=True)\n try:\n yield cursor\n finally:\n cursor.close()", "def get_cursor(self):\n try:\n self.cursor = self.connection.cursor()\n logging.getLogger(__name__).info(\"Cursor was created.\")\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with cursor creating. 
%s\" %er)\n finally:\n return self.cursor", "def query_to_cur(dbh, query, verbose=verbose):\n if verbose : \n print query\n cur = dbh.cursor()\n cur.execute(query)\n\n return cur", "def execute_query(self, query):\n\n cnx = self.util.get_connection()\n cursor = cnx.cursor()\n try:\n cursor.execute(query)\n handler.logHelper.log_it_query(query)\n except ProgrammingError:\n handler.logHelper.log_it_query(query, status='Fail')\n finally:\n cnx.commit()\n cursor.close()\n self.util.close_connection(cnx)", "def execute(self, *args, **kwargs):\n return self.engine.execute(*args, **kwargs)", "def execute(self, *args, **kwargs):\n return self.engine.execute(*args, **kwargs)", "def run_query(self, sql_query='', *parameters):\n if not self.cursor:\n raise BaseException(\"Database not selected\")\n\n return self.cursor.execute(sql_query, parameters)", "def _execute_query(self, query, values):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(query,values)\n if not cursor.description:\n return []\n rs = RecordSet(initialData=cursor.fetchall(), recordType=next(zip(*cursor.description)))\n return rs", "def cursor(self):\n cursor = Cursor(self, self.__aceQLHttpApi)\n return cursor", "def execute(self, sql, params=None):\n if params and not isinstance(params, Mapping):\n raise TypeError(\"Expected dict or other mapping object\")\n\n cursor = self.cursor()\n sql, params = utils.change_param_style(self.driver.paramstyle, sql, params)\n cursor.execute(sql, params)\n return cursor", "def execute(\n self, query: str, table: str = None, values: IterOpt = None\n ) -> sqlite3.Cursor:\n values = values if values else []\n query = query.format(table=table)\n\n try:\n return self.cursor.execute(query, values)\n except sqlite3.Error as error:\n print(f\"Error: execute {query}\")\n raise error", "def query(self, sql):\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n return result", "def execute_query(query):\n try:\n # enter your code here to get a database connection and cursor,\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # execute the query\n c.execute(query)\n # store the results\n results = c.fetchall()\n # close the database connection\n db.close()\n # return the results\n return results\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)", "def cursor(self):\r\n if self._closed:\r\n raise Error('The connection to the database has been closed.')\r\n return Cursor(self)", "def execute_query(query):\n\n db = psycopg2.connect(database=\"news\")\n cursor = db.cursor()\n cursor.execute(query)\n query_result = cursor.fetchall()\n db.close()\n return query_result", "def execute(self, sql):\n with self.connection.cursor() as dbc:\n if sql[-1] != ';':\n sql += ';'\n dbc.execute(sql)\n self.last_row = dbc.lastrowid\n try:\n return dbc.fetchall()\n except:\n return", "def execute(self, query, classname=None, query_args=None):\r\n data = self.db.execute(query, query_args)\r\n return data", "def query_to_cur(dbh, qry, args):\n if args.debug:\n print(datetime.datetime.strftime(datetime.datetime.now(), \"%D %H:%m:%S\"), qry, file=sys.stderr)\n t0 = time.time()\n cur = dbh.cursor()\n cur.execute(qry)\n print(\"query took\", time.time() - t0, \"seconds\")\n return cur", "def dbExecute(con, statement, args=[], skipTrace=False):\n cursor = con.cursor()\n stmt = cursor.mogrify(statement, args);\n if not skipTrace:\n trace(\"executing:\" + str(stmt))\n cursor.execute(stmt)\n global quiet\n if not skipTrace:\n trace(\"statusmessage=\" + 
cursor.statusmessage + \", rowcount=\" + str(cursor.rowcount))\n return cursor", "def execute_statement(self, statement):\n context = self.__context\n session = context.session()\n with session as connection:\n query_result = connection.execute(statement)\n\n return query_result", "def cursor(self):\n with self.conn as c:\n yield c.cursor()", "def cursor(self):\n with self.conn as c:\n yield c.cursor()", "def query(cursor, query):\n out = []\n c = cursor.execute(query)\n out.append(c.fetchall())\n # will return list of tuples for each query\n return out", "def execute_query_and_close(self, query, values):\n retval = self.cursor.execute(query, values)\n self.__close_db()\n return retval", "def execute(self, sql):\n\n res = self.cur.execute(sql)\n self.cxn.commit()\n\n return res", "def query(self, query: str, *args, **kwargs):\n cursor = self._cursor()\n try:\n self._execute(cursor, query, args, kwargs)\n column_names = [d[0] for d in cursor.description]\n return [Row(zip(column_names, row)) for row in cursor]\n finally:\n cursor.close()", "def execute(self, exetuple):\n\t\tcur = self.connect.cursor(MySQLdb.cursors.DictCursor)\n\t\ttry:\n\t\t\tcur.execute(*exetuple)\n\t\texcept Exception, e:\n\t\t\traise e\n\t\treturn cur", "def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise", "def query(self, query: str, values: Tuple = None) -> None:\n cursor = self.connection.cursor()\n cursor.execute(query, values)\n result = cursor.fetchall()\n cursor.close()\n return result", "def fetchone_query_and_close(self, query, values):\n self.cursor.execute(query, values)\n retval = self.cursor.fetchone()\n self.__close_db()\n return retval", "def execute(self, closure, *args, **kwargs):\n if self.withwith:\n with self.conn:\n with self.conn.cursor() as cur:\n return closure(cur, *args, **kwargs)\n else:\n cur = self.conn.cursor()\n ret = closure(cur, *args, **kwargs)\n self.conn.commit()\n cur.close()\n return ret", "def _exec (self, sql, **kwargs):\n\n kwargs['id'] = self.id\n cursor = self.connection.cursor ()\n cursor.execute (sql.replace ('table_name', self.table_name), kwargs)\n return cursor", "def execute_sql(conn, query):\n\ttry:\n\t\t\tc = conn.cursor()\n\t\t\tc.execute(query)\n\texcept Error as e:\n\t\t\tprint(f\"SQL error :{e}\")\n\t\t\tprint(f\"Attempted to run: {query}\")\n\t\t\t\n\treturn c", "def get_cursor(self):\n return self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)", "def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter):\n try:\n stmnt, params = self._build_query(self._table, select_clause, **kwds_filter)\n if trailing_clause:\n stmnt += '\\n' + trailing_clause\n cursor = self._connection.cursor()\n #print(stmnt, params)\n cursor.execute(stmnt, params)\n except Exception as e:\n exc_cls = e.__class__\n msg = '%s\\n query: %s\\n params: %r' % (e, stmnt, params)\n raise exc_cls(msg)\n return 
cursor", "def execute(self, statement):\n return self._engine.connect().execute(statement)", "def _query_and_fetchall(self, query):\n with self._connect() as conn:\n cur = conn.cursor()\n cur.execute(query)\n results = cur.fetchall()\n\n return results", "def executeQuery(connection,cursor,query,query_type):\n\tif query_type =='select':\n\t\tcursor.execute(query)\n\t\tresult = cursor.fetchall()\n\t\treturn result\n\t\t\n\telif query_type in ['insert','update','delete']:\n\t\tcursor.execute(query)\n\t\tconnection.commit()\n\t\t#generate a log msg in the future", "def cursor(self) -> NamedTupleCursor:\n return self.connection.cursor", "def cursor(self):\n return self._adapter.cursor()", "async def operation(self, query: str, args: Optional[Iterable]=None) -> Any:\n stats.inc('queries', 'SQL')\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(query, args)\n ret = cur.lastrowid\n await conn.commit()\n return ret", "def db_execute(self, database_name, statement, params):\n with self.db_create_cursor(database_name) as cursor:\n if self.debug:\n self.logger.debug(\"Running statement: \" + statement)\n return cursor.execute(statement, params)", "def query(self, sql, params=None):\n\n # check if connection is alive, else reconnect\n try:\n self.cur.execute(sql, params)\n except pymysql.OperationalError:\n # todo fix timeout and reconnect\n self.connect()\n self.cur.execute(sql, params)\n except IOError:\n print('%s\\nQuery Failed!' % sql)\n raise\n\n return self.cur", "def _execute(self, stmt) -> sa.engine.ResultProxy:\n return self._engine.execute(stmt)", "def do(self, executor):\n sql, kw = self._assemble()\n return executor.execute(\n sql, kw\n )", "def run_query(conn, query):\n\tcur = conn.cursor()\n\tcur.execute(query)\n\trows = cur.fetchall()\n\treturn rows", "def execute_query(query):\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(query)\n results = c.fetchall()\n db.close()\n return results", "def execute(self, query, *parameters, **kwparameters):\n return self._execute_lastrowid(query, *parameters, **kwparameters)", "def execute_query(self, query):\n self.recordset = self.con.execute(query)\n return self", "def do(query, values=None, dofetch=True, quietish=False):\n con = cursor()\n if not quietish:\n log.info('Executing: %s...' 
% query)\n with log.Timer() as tt:\n if not values:\n res = con.execute(query)\n else:\n res = con.executemany(query, values)\n DB_HANDLE.commit()\n if not quietish:\n log.info('...Done in %.4s seconds with result %s' % (tt.elapsed / 1000.0, res))\n if dofetch and not values:\n return con.fetchall()\n return None", "def execute_query(self,query):\n try:\n self.cursor.execute(query)\n except Exception as error:\n raise error", "def query(self, q, *args):\n\n if self.dbtype == 'pg':\n print q\n return self.db.execute(q, *args).fetchall()\n else:\n cur = self.db.cursor()\n try:\n print q\n print args\n if args:\n cur.execute(q, args)\n else:\n cur.execute(q)\n ret = cur.fetchall()\n return ret\n except:\n self.db.rollback()\n raise\n finally:\n cur.close()", "def query(self, query):\n cursor = self.database.cursor()\n cursor.execute(query)\n # If it's a query that's expected to return a value (EG: SELECT)\n if query.strip().lower().startswith('select'): return cursor.fetchall()", "def execute(self, sql):\n return self.db.execute(sql)", "def query(self, sql):\n try:\n res_cursor = self.connection.execute(text(sql))\n except Exception as e: \n raise e(\"SQL execution error!\")\n \n rows = (Row(res_cursor.keys(), record) for record in res_cursor)\n results = RowsCollection(rows)\n return results", "def cud_operations(self, query=None, val=None):\n with self.conn:\n with self.conn.cursor() as curr:\n curr.execute(query, val)\n row = curr.fetchone()\n # self.conn.close()\n return row" ]
[ "0.81860805", "0.78586006", "0.785281", "0.77011305", "0.7543426", "0.75012374", "0.74967825", "0.7448869", "0.73151606", "0.7281152", "0.7221862", "0.7184796", "0.71583337", "0.7113725", "0.71003574", "0.7088783", "0.70538986", "0.70525205", "0.7023728", "0.70222384", "0.69898397", "0.6967812", "0.69575006", "0.6956461", "0.69498944", "0.6940671", "0.6937417", "0.6929622", "0.69188476", "0.68445945", "0.6828594", "0.6828102", "0.6822415", "0.6822107", "0.68076926", "0.6788892", "0.67888415", "0.6777645", "0.67754894", "0.6763494", "0.6743659", "0.6741225", "0.6727431", "0.6721111", "0.6709465", "0.6706356", "0.6704298", "0.6690311", "0.66858774", "0.66858774", "0.6639485", "0.6638472", "0.662837", "0.66107136", "0.6606794", "0.66022366", "0.65943515", "0.6592811", "0.65879345", "0.65846103", "0.6584564", "0.6573811", "0.6572637", "0.65687495", "0.65570873", "0.65570873", "0.6549741", "0.65397745", "0.6539188", "0.65349597", "0.65212566", "0.6513662", "0.65119386", "0.65103805", "0.65087014", "0.6504454", "0.6502193", "0.65001696", "0.64870316", "0.6485796", "0.6483674", "0.6468696", "0.64494264", "0.6446061", "0.6444789", "0.6435781", "0.6429051", "0.6425895", "0.64155453", "0.6414454", "0.64137024", "0.6401575", "0.64010864", "0.6399786", "0.639591", "0.63914526", "0.6383779", "0.63778764", "0.6375887", "0.6375496" ]
0.6438476
85
Return 'WHERE' clause that implements kwds_filter constraints.
def _build_where_clause(**kwds_filter):
    clause = []
    params = []
    items = kwds_filter.items()
    items = sorted(items, key=lambda x: x[0])  # Ordered by key, for deterministic SQL.
    for key, val in items:
        if _is_nsiterable(val):
            # Non-string iterable -> "col IN (?, ?, ...)" with one placeholder per value.
            clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))
            for x in val:
                params.append(x)
        else:
            # Scalar -> simple equality with a single placeholder.
            clause.append(key + '=?')
            params.append(val)
    clause = ' AND '.join(clause) if clause else ''
    return clause, params
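To make the behavior concrete, here is a runnable sketch that reproduces the function above. The helper `_is_nsiterable` is not shown in the source, so a simple "non-string iterable" stand-in is assumed:

```python
def _is_nsiterable(x):
    # Assumed stand-in for the helper not shown in the source:
    # treat anything iterable except strings as a multi-value filter.
    return hasattr(x, '__iter__') and not isinstance(x, str)

def _build_where_clause(**kwds_filter):
    clause = []
    params = []
    for key, val in sorted(kwds_filter.items(), key=lambda x: x[0]):
        if _is_nsiterable(val):
            clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))
            params.extend(val)
        else:
            clause.append(key + '=?')
            params.append(val)
    return (' AND '.join(clause) if clause else ''), params

print(_build_where_clause(state='OH', city=['Columbus', 'Toledo']))
# -> ('city IN (?, ?) AND state=?', ['Columbus', 'Toledo', 'OH'])
```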
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_where_clause(**kwds_filter):\n clause = []\n params = []\n items = kwds_filter.items()\n items = sorted(items, key=lambda x: x[0]) # Ordered by key.\n for key, val in items:\n if nonstringiter(val):\n clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))\n for x in val:\n params.append(x)\n else:\n clause.append(key + '=?')\n params.append(val)\n\n clause = ' AND '.join(clause) if clause else ''\n return clause, params", "def get_where_clause(self, params: Dict) -> str:\n return ''", "def get_where_clause(self, feature, params=()):\n where_clause = []\n for pk in self.pk_cols:\n params += (feature[pk],)\n where_clause.append(pk + \" = (?)\")\n\n where_clause = \" WHERE \" + \" AND \".join(where_clause)\n return where_clause, params", "def where(self, *wheres, **kw):\n if wheres: # arbitrary expressions\n self._whereskw.update(kw)\n for where in wheres:\n self._wheres.append(where)\n else:\n # plain x=<val> expressions\n self._kw.update(kw)\n return self", "def _getSQLWhere(self, inputTable, queryMeta):\n\t\tsqlPars = {}\n\t\tinputPars = dict((p.name, p.value) for p in inputTable.iterParams())\n\t\treturn base.joinOperatorExpr(\"AND\",\n\t\t\t[cd.asSQL(inputPars, sqlPars, queryMeta)\n\t\t\t\tfor cd in self.condDescs]), sqlPars", "def where(self, cond):\n return self.filter(lambda x: _(x).contains(cond))", "def where(condition):\r\n return ('', []) if condition.clause == '' else (f'WHERE {condition.clause}', list(condition.params))", "def where(condition):\n return partial(filter, condition)", "def _extract_where(self, query) :\n\t\tquery = copy.copy(query)\n\t\t\n\t\t# discard the insert information\n\t\tif self.n.sparql.insert in query :\n\t\t\tdel query[self.n.sparql.insert]\n\t\t\n\t\t# discard the delete information\n\t\tif self.n.sparql.delete in query :\n\t\t\tdel query[self.n.sparql.delete]\n\t\t\n\t\t# build the where clause with outlined variables\n\t\treturn self.python_to_SPARQL_long(query)", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self, condition):\n raise NotImplementedError(\"This should have been implemented.\")", "def sqlwhere(dictionary, grouping=' AND '):\n return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)", "def condition_filter(self, filter_id):\r\n return filters.Filter(self, filter_id)", "def build_where_statement(attr_converter):\n statements = []\n values = []\n # iterate over the querystring params\n for attr, val in request.args.iteritems():\n try:\n statements.append(attr_converter[attr]['converter'](\n attr_converter[attr]['column']))\n values.append(val) # add after the possible keyerror\n except KeyError:\n # TODO: handle invalid params somehow or continue to ignore\n pass\n if statements:\n return 'WHERE '+' AND '.join(statements), values\n return '', []", "def where(cls, *lst, **dct):\n cls.runtime.set_where(lst, dct)\n return cls", "def _build_where(where):\n # Build limit\n if 'rn' in where:\n limit_sql = 'limit 1 offset {}'.format(where['rn'])\n else:\n limit_sql = ''\n # Build where\n where_sql = ''\n where_values = []\n for key, value in where.items():\n if isinstance(value, list):\n if len(value) == 1:\n where_sql += ' and {} = {}'.format(key, value[0])\n else:\n where_sql += ' and {} in {}'.format(key, tuple(value))\n elif key == '*': # Literal where clause\n 
where_sql += ' and {}'.format(value)\n elif key == 'rn': # Ignore\n pass\n else:\n if value is None:\n where_sql += ' and {} is Null'.format(key)\n else:\n where_sql += ' and {} = ?'.format(key)\n where_values.append(value)\n if len(where_sql) > 0:\n where_sql = 'where ' + where_sql[5:]\n # Done\n return where_sql, where_values, limit_sql", "def where(self, predicate=lambda row: True):\n where_table = Table(self.columns)\n where_table.rows = list(filter(predicate, self.rows))\n return where_table", "def _sql_where(cur, tables, andalso, orelse, prefix=None, aggregate=False):\n disjunctions = []\n andsql = _cond_where_sql(cur, andalso, tables, prefix=prefix,\n aggregate=aggregate)\n andsql = ' AND '.join(andsql)\n\n if len(andsql) > 0:\n andsql = '(%s)' % andsql\n disjunctions.append(andsql)\n disjunctions += _cond_where_sql(cur, orelse, tables, prefix=prefix,\n aggregate=aggregate)\n\n if len(disjunctions) == 0:\n return ''\n return '(%s)' % (' OR '.join(disjunctions))", "def where(self, column, *args):\n\n operator, value = self._extract_operator_value(*args)\n\n if value is None:\n value = \"\"\n elif value is True:\n value = \"1\"\n elif value is False:\n value = \"0\"\n\n if inspect.isfunction(column):\n builder = column(self.new())\n self._wheres += (\n (QueryExpression(None, operator, SubGroupExpression(builder))),\n )\n elif isinstance(value, QueryBuilder):\n self._wheres += (\n (QueryExpression(column, operator, SubSelectExpression(value))),\n )\n else:\n self._wheres += ((QueryExpression(column, operator, value, \"value\")),)\n return self", "def _blacklisted_pairings_filter_query(self):\n if self._restrict_exceptions_list:\n blacklisted_filter_sql = sql.SQL('is_blacklisted IS TRUE')\n else:\n blacklisted_filter_sql = sql.SQL('TRUE')\n return blacklisted_filter_sql", "def amh_attr_filter_query(self):\n \n attr_filter_query = \"\"\"\n WITH {final_cte_name} as (\n -- Pull list of devices that were active (has any row; don't need TVT >0) in the past 4 weeks\n SELECT DISTINCT device_id\n FROM tubidw.all_metric_hourly\n WHERE DATE_TRUNC('week',hs) >= dateadd('week',-4,DATE_TRUNC('week',GETDATE()))\n AND DATE_TRUNC('week',hs) < DATE_TRUNC('week',GETDATE())\n {attr_filter} -- attribute filters dynamically populate here\n -- TODO: currently can't get a metric/attribute combo filter, like \"devices that watched at least 50% of a specific content_id\"\n )\n \"\"\"\n return attr_filter_query", "def where_raw(self, query: str, bindings=()):\n self._wheres += ((QueryExpression(query, \"=\", None, \"value\", raw=True)),)\n return self", "def AddWhereTerms(self, where_cond_pairs, **kwargs):\n where_cond_pairs = where_cond_pairs or []\n\n for cond, args in where_cond_pairs:\n assert _IsValidWhereCond(cond), cond\n assert cond.count('%s') == len(args), cond\n self.where_conds.append(cond)\n self.where_args.extend(args)\n\n for col, val in sorted(kwargs.items()):\n assert _IsValidColumnName(col), col\n eq = True\n if col.endswith('_not'):\n col = col[:-4]\n eq = False\n\n if isinstance(val, set):\n val = list(val) # MySQL inteface cannot handle sets.\n\n if val is None or val == []:\n op = 'IS' if eq else 'IS NOT'\n self.where_conds.append(col + ' ' + op + ' NULL')\n elif isinstance(val, list):\n op = 'IN' if eq else 'NOT IN'\n # Sadly, MySQLdb cannot escape lists, so we flatten to multiple \"%s\"s\n self.where_conds.append(\n col + ' ' + op + ' (' + PlaceHolders(val) + ')')\n self.where_args.extend(val)\n else:\n op = '=' if eq else '!='\n self.where_conds.append(col + ' ' + op + ' %s')\n 
self.where_args.append(val)", "def _validate_select_where(self):\r\n #check that there's either a = or IN relationship with a primary key or indexed field\r\n equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)]\r\n token_comparison = any([w for w in self._where if isinstance(w.value, Token)])\r\n if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison:\r\n raise QueryException('Where clauses require either a \"=\" or \"IN\" comparison with either a primary key or indexed field')\r\n\r\n if not self._allow_filtering:\r\n #if the query is not on an indexed field\r\n if not any([w.index for w in equal_ops]):\r\n if not any([w.partition_key for w in equal_ops]) and not token_comparison:\r\n raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset')", "def sql_filter(my_table='', colName='', var='', **kw):\n\tif (my_table=='') or (colName=='') or (var==''):\n\t\treturn dict(sql='',clauseTables=[])\n\telse:\n\t\tsql = my_table+\".\"+colName+\" LIKE '%\"+var+\"%'\"\n\t\treturn dict(sql=sql,clauseTables=[])", "def where(self, cond, other, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.where)(\n self, cond=cond, other=other, **kwargs\n )", "def where(self, table, what='*', order=None, group=None, limit=None, \n offset=None, _test=False, **kwargs):\n where = self._where_dict(kwargs) \n return self.select(table, what=what, order=order, \n group=group, limit=limit, offset=offset, _test=_test, \n where=where)", "def condition_filters(self):\r\n return filters.Filters(self)", "def make_where(cls, key, value, operation=\"=\"):\n\n if operation not in cls.__VALID_WHERE_OPERATION_LIST:\n raise SqlSyntaxError(\"operation not supported: \" + str(operation))\n\n if value is not None:\n return \"%s %s %s\" % (\n cls.to_attr_str(key), operation, cls.to_value_str(value))\n\n if operation == \"=\":\n return \"%s IS NULL\" % (cls.to_attr_str(key))\n elif operation == \"!=\":\n return \"%s IS NOT NULL\" % (cls.to_attr_str(key))\n\n raise SqlSyntaxError(\n \"Invalid operation (%s) with None right-hand side\" % (operation))", "def resolve_where(self, parsed_terms: event_filter.ParsedTerms) -> List[WhereType]:\n where_conditions: List[WhereType] = []\n for term in parsed_terms:\n if isinstance(term, event_search.SearchFilter):\n condition = self.format_search_filter(term)\n if condition:\n where_conditions.append(condition)\n\n return where_conditions", "def conditions_as_sql(self, prewhere=False):\n q_object = self._prewhere_q if prewhere else self._where_q\n return q_object.to_sql(self._model_cls)", "def where(self, *conditions):\n\n if len(conditions) == 0:\n return self\n\n values = []\n for i, condition in enumerate(conditions):\n value = eval_expression(condition, {\"me\": self})\n values.append(value)\n\n reduced_values = functools.reduce(lambda x, y: x & y, values)\n return self[reduced_values]", "def filter(self, *args, **kwargs):\r\n #add arguments to the where clause filters\r\n clone = copy.deepcopy(self)\r\n for operator in args:\r\n if not isinstance(operator, WhereClause):\r\n raise QueryException('{} is not a valid query operator'.format(operator))\r\n clone._where.append(operator)\r\n\r\n for arg, val in kwargs.items():\r\n col_name, col_op = self._parse_filter_arg(arg)\r\n quote_field = True\r\n #resolve column and operator\r\n try:\r\n column = self.model._get_column(col_name)\r\n except KeyError:\r\n if 
col_name == 'pk__token':\r\n if not isinstance(val, Token):\r\n raise QueryException(\"Virtual column 'pk__token' may only be compared to Token() values\")\r\n column = columns._PartitionKeysToken(self.model)\r\n quote_field = False\r\n else:\r\n raise QueryException(\"Can't resolve column name: '{}'\".format(col_name))\r\n\r\n if isinstance(val, Token):\r\n if col_name != 'pk__token':\r\n raise QueryException(\"Token() values may only be compared to the 'pk__token' virtual column\")\r\n partition_columns = column.partition_columns\r\n if len(partition_columns) != len(val.value):\r\n raise QueryException(\r\n 'Token() received {} arguments but model has {} partition keys'.format(\r\n len(val.value), len(partition_columns)))\r\n val.set_columns(partition_columns)\r\n\r\n #get query operator, or use equals if not supplied\r\n operator_class = BaseWhereOperator.get_operator(col_op or 'EQ')\r\n operator = operator_class()\r\n\r\n if isinstance(operator, InOperator):\r\n if not isinstance(val, (list, tuple)):\r\n raise QueryException('IN queries must use a list/tuple value')\r\n query_val = [column.to_database(v) for v in val]\r\n elif isinstance(val, BaseQueryFunction):\r\n query_val = val\r\n else:\r\n query_val = column.to_database(val)\r\n\r\n clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field))\r\n\r\n return clone", "def filter(self, *args, **kwargs):\n return FilteredQuery(self, F(*args, **kwargs))", "def filter(self, **kwargs):\n kwargs['query'] += ' FROM {0}'\n return kwargs", "def _sql_where(self, cursor, table, prefix=None, aggregate=False):\n assert False, \"subclass responsibility\"", "def where(condition, x, y):\n raise NotImplementedError", "def get_filter_rows(self, keys=None, attrs=None, where=None):\n qobj = self.parsed.get_filter_qobj(keys=keys)\n if attrs:\n qobj.select = Select(attrs)\n if where:\n qobj.where.append(where)\n\n params = list(self.params)\n if keys:\n params.append(tuple(list(keys)))\n\n return query(self.db, str(qobj), [params])", "def find_where(self, cond):\n return self.find_item(lambda x: _(x).contains(cond))", "def narrow(self, clause):\n query = self.statement.split()\n i=0\n\n hasWhereOrHaving=False\n hasFrom=False\n\n while i<len(query):\n token = query[i]\n lower_token = token.lower()\n if lower_token == \"from\":\n hasFrom=True\n\n elif lower_token in [\"where\", \"having\"] and hasFrom:\n hasWhereOrHaving=True\n query[i] = \" \".join([token, clause, \"and\"])\n i+=1\n\n if hasFrom and not hasWhereOrHaving:\n query.append(\"where \"+clause)\n\n return self.set_child_and_return(' '.join(query))", "def where(self, **where):\n\n where = _handle_where(where)\n return self._post(\"\", Table, **where)", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def filter(self, **args ):\n query = TXLOG.select('*')\n for key, value in args.items():\n if '__' in key:\n key, op = key.split('__')\n else:\n op = 'eq'\n\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n if not isinstance(value, basestring ):\n value = str(value)\n\n query = query.where({key:value}, self.operators[op])\n\n items = query.list()\n return items", "def test_where_clause_rendering(self):\r\n wc = WhereClause('a', EqualsOperator(), 'c')\r\n wc.set_context_id(5)\r\n self.assertEqual('\"a\" = :5', unicode(wc))\r\n self.assertEqual('\"a\" = :5', str(wc))", "def sql_filter_bool(my_table='', colName='', var='', **kw):\n\tif (my_table=='') or (colName=='') or (var==''):\n\t\treturn 
dict(sql='',clauseTables=[])\n\telse:\n\t\tsql = my_table+\".\"+colName+\" == \"+var\n\t\treturn dict(sql=sql,clauseTables=[])", "def sql_filter_foreignkey(my_table='', my_col='', join_table='', colName='', var='', **kw):\n\tif (my_table=='') or (join_table=='') or (colName=='') or (var=='') or (my_col==''):\n\t\treturn dict(sql='',clauseTables=[])\n\telse:\n\t\tclauseTables=[join_table]\n\t\tsql = my_table+\".\"+my_col+\" = \"+join_table+\".id AND \"+join_table+\".\"+colName+\" LIKE '%\"+var+\"%'\"\n\t\treturn dict(sql=sql, clauseTables=clauseTables)", "def test_filter_method_where_clause_generation(self):\r\n query1 = self.table.objects(test_id=5)\r\n self.assertEqual(len(query1._where), 1)\r\n where = query1._where[0]\r\n self.assertEqual(where.field, 'test_id')\r\n self.assertEqual(where.value, 5)\r\n\r\n query2 = query1.filter(expected_result__gte=1)\r\n self.assertEqual(len(query2._where), 2)\r\n\r\n where = query2._where[0]\r\n self.assertEqual(where.field, 'test_id')\r\n self.assertIsInstance(where.operator, EqualsOperator)\r\n self.assertEqual(where.value, 5)\r\n\r\n where = query2._where[1]\r\n self.assertEqual(where.field, 'expected_result')\r\n self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)\r\n self.assertEqual(where.value, 1)", "def filter_criteria(self) -> Optional[pulumi.Input['EventSourceMappingFilterCriteriaArgs']]:\n return pulumi.get(self, \"filter_criteria\")", "def filter(self, *q, **kwargs):\n return self._filter_or_exclude(*q, **kwargs)", "def where(**kwargs):\n return QueryBuilder(Card).where(**kwargs)", "def create_search_query(words, *filters):\n if not filters:\n filters = filters + ('title',)\n\n query = ''\n\n for filter in filters:\n query += filter + ' LIKE \"%'\n\n for letter in words:\n if letter == '%':\n letter = '%\" AND ' + filter + ' LIKE \"%'\n elif letter == ' ':\n letter = '%\" OR ' + filter + ' LIKE \"%'\n\n query += letter\n\n if filter == filters[-1]:\n query += '%\"'\n else:\n query += '%\" OR '\n\n return query", "def apply_dataset_query_conditions(dataset, query, event_types, discover=False):\n if not discover and dataset == QueryDatasets.TRANSACTIONS:\n return query\n if event_types:\n event_type_conditions = \" OR \".join(\n [f\"event.type:{event_type.name.lower()}\" for event_type in event_types]\n )\n elif dataset in DATASET_CONDITIONS:\n event_type_conditions = DATASET_CONDITIONS[dataset]\n else:\n return query\n\n return f\"({event_type_conditions}) AND ({query})\"", "def build_where_clause(args: dict) -> str:\n args_dict = {\n 'source_ip': 'source_ip.value',\n 'dest_ip': 'dest_ip.value',\n 'rule_matched': 'rule_matched',\n 'from_zone': 'from_zone',\n 'to_zone': 'to_zone',\n 'source_port': 'source_port',\n 'dest_port': 'dest_port',\n 'action': 'action.value',\n 'file_sha_256': 'file_sha_256',\n 'file_name': 'file_name',\n 'app': 'app',\n 'app_category': 'app_category',\n 'dest_device_port': 'dest_device_port',\n 'dest_edl': 'dest_edl',\n 'dest_dynamic_address_group': 'dest_dynamic_address_group',\n 'dest_location': 'dest_location',\n 'dest_user': 'dest_user',\n 'file_type': 'file_type',\n 'is_server_to_client': 'is_server_to_client',\n 'is_url_denied': 'is_url_denied',\n 'log_type': 'log_type',\n 'nat_dest': 'nat_dest',\n 'nat_dest_port': 'nat_dest_port',\n 'nat_source': 'nat_source',\n 'nat_source_port': 'nat_source_port',\n 'rule_matched_uuid': 'rule_matched_uuid',\n 'severity': 'severity',\n 'source_device_host': 'source_device_host',\n 'source_edl': 'source_edl',\n 'source_dynamic_address_group': 
'source_dynamic_address_group',\n 'source_location': 'source_location',\n 'source_user': 'source_user',\n 'sub_type': 'sub_type.value',\n 'time_generated': 'time_generated',\n 'url_category': 'url_category',\n 'url_domain': 'url_domain'\n }\n if args.get('ip') and (args.get('source_ip') or args.get('dest_ip')):\n raise DemistoException('Error: \"ip\" argument cannot appear with either \"source_ip\" nor \"dest_ip\"')\n\n if args.get('port') and (args.get('source_port') or args.get('dest_port')):\n raise DemistoException('Error: \"port\" argument cannot appear with either \"source_port\" nor \"dest_port\"')\n\n non_string_keys = {'dest_port', 'source_port'}\n if 'query' in args:\n # if query arg is supplied than we just need to parse it and only it\n return args['query'].strip()\n\n where_clause = ''\n if args.get('ip'):\n ips = argToList(args.pop('ip'))\n # Creating a query for ip argument using source ip and dest ip\n where_clause += '(' + ' OR '.join(f'source_ip.value = \"{ip}\" OR dest_ip.value = \"{ip}\"' for ip in ips) + ')'\n if any(args.get(key) for key in args_dict) or args.get('port') or args.get('url'):\n where_clause += ' AND '\n\n if args.get('port'):\n ports = argToList(args.pop('port'))\n # Creating a query for port argument using source port and dest port\n where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')'\n if any(args.get(key) for key in args_dict):\n where_clause += ' AND '\n\n if args.get('url'):\n urls = argToList(args.pop('url'))\n # Creating a query for url argument using uri and referer\n where_clause += '(' + ' OR '.join(f'uri LIKE \"%{url}%\" OR referer LIKE \"%{url}%\"' for url in urls) + ')'\n if any(args.get(key) for key in args_dict):\n where_clause += ' AND '\n\n # We want to add only keys that are part of the query\n string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys}\n or_statements = []\n for key, values in string_query_fields.items():\n string_values_list: list = argToList(values)\n field = args_dict[key]\n or_statements.append(' OR '.join([f'{field} = \"{value}\"' for value in string_values_list]))\n # ports are digested as ints and cannot be sent as strings\n non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys}\n for key, values in non_string_query_fields.items():\n non_string_values_list: list = argToList(values)\n field = args_dict[key]\n or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list]))\n where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement])\n return where_clause", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def _get_where_clause_for_user_id(_user_settings):\n if _user_settings['login']:\n _where_clause = f'login = \"{_user_settings[\"login\"]}\"'\n elif _user_settings['email']:\n _where_clause = f'email = \"{_user_settings[\"email\"]}\"'\n elif _user_settings['first_name'] or _user_settings['last_name']:\n if _user_settings['first_name'] and _user_settings['last_name']:\n _where_clause = f'first_name = \"{_user_settings[\"first_name\"]}\" and ' + \\\n f'last_name = \"{_user_settings[\"last_name\"]}\"'\n elif 
_user_settings['last_name']:\n _where_clause = f'last_name = \"{_user_settings[\"last_name\"]}\"'\n else:\n _where_clause = f'first_name = \"{_user_settings[\"first_name\"]}\"'\n else:\n _exc_msg = \"Missing requisite information to accurately look up the User ID of the user.\"\n raise errors.exceptions.MissingRequiredDataError(_exc_msg)\n return _where_clause", "def test_kwargs(self):\n self.Test.default_scope(where='foo')\n self.assertEqual(self.Test.scoped().params['where'], ['foo'])", "def show_where(self, aggregate=False):\n # Return criteria for all tables.\n tables = ['game', 'drive', 'play', 'play_player', 'player']\n with Tx(self._db) as cur:\n return self._sql_where(cur, tables, aggregate=aggregate)\n return ''", "def _cond_where_sql(cursor, conds, tables, prefix=None, aggregate=False):\n isa = isinstance\n pieces = []\n for c in conds:\n if isa(c, Query) or (isa(c, Comparison) and c._table in tables):\n sql = c._sql_where(cursor, tables, prefix=prefix,\n aggregate=aggregate)\n if len(sql) > 0:\n pieces.append(sql)\n return pieces", "def get_basic_query_cond(column: str, val: str, query_params: dict):\n if val is not None:\n query_params[column] = val\n return 'AHJ.' + column + '=%(' + column + ')s AND '\n return ''", "def get_query(self):\n q = db.Query(self.KIND,keys_only=self.KEYS_ONLY)\n for prop, value in self.FILTERS:\n q.filter(\"%s =\" % prop, value)\n if self.ancestor:\n q.ancestor(self.ancestor)\n q.order(self.ORDER_BY)\n return q", "def of(cls, clause=\"\", params=[]):\r\n return Q.C(clause, params)", "def format_search_filter(self, term: event_search.SearchFilter) -> Optional[WhereType]:\n name = term.key.name\n\n converted_filter = self.convert_search_filter_to_condition(\n event_search.SearchFilter(\n # We want to use group_id elsewhere so shouldn't be removed from the dataset\n # but if a user has a tag with the same name we want to make sure that works\n event_search.SearchKey(\"tags[group_id]\" if name == \"group_id\" else name),\n term.operator,\n term.value,\n )\n )\n return converted_filter if converted_filter else None", "def get_random_where_clause(columns: List[str]) -> str:\n\n random_columns = choose_random_columns(columns)\n conditions = [get_random_condition(column) for column in random_columns]\n conditions = [\n f\"{condition[0]} {condition[1]} {condition[2]}\" for condition in conditions]\n return \"WHERE \" + f\" {random.choice(LOGICAL_OPERATORS)} \".join(conditions)", "def search_filter(query_params, query):\n if query_params.get('type') is not None:\n query = query.filter(search.c.kind == query_params.get('type'))\n return query", "def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")", "def _getWhereClause(self, tagIDs):\n objectIDs = self._criteria.get('objectIDs')\n where = [TagValue.tagID == Tag.id]\n if objectIDs:\n where.append(TagValue.objectID.is_in(objectIDs))\n if tagIDs:\n where.append(Tag.id.is_in(tagIDs))\n createdBeforeTime = self._criteria.get('createdBeforeTime')\n if createdBeforeTime:\n where.append(TagValue.creationTime < createdBeforeTime)\n return where", "def _get_where_clause_for_username(_user_settings):\n if _user_settings['id']:\n _where_clause = f'id = \"{_user_settings[\"id\"]}\"'\n elif _user_settings['email']:\n _where_clause = f'email = \"{_user_settings[\"email\"]}\"'\n elif _user_settings['first_name'] or _user_settings['last_name']:\n if _user_settings['first_name'] and _user_settings['last_name']:\n _where_clause = f'first_name = \"{_user_settings[\"first_name\"]}\" and ' 
+ \\\n f'last_name = \"{_user_settings[\"last_name\"]}\"'\n elif _user_settings['last_name']:\n _where_clause = f'last_name = \"{_user_settings[\"last_name\"]}\"'\n else:\n _where_clause = f'first_name = \"{_user_settings[\"first_name\"]}\"'\n else:\n _exc_msg = \"Missing requisite information to accurately look up the username of the user.\"\n raise errors.exceptions.MissingRequiredDataError(_exc_msg)\n return _where_clause", "def convert_where(g, op, block):\n\n condition = g.get_node(op.input(\"Condition\")[0])\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Y\")[0])\n out = _op.where(condition, x, y)\n g.add_node(op.output(\"Out\")[0], out)", "def exact_filter(query, model, filters, legal_keys):\n\n filter_dict = {}\n\n # Walk through all the keys\n for key in legal_keys:\n # Skip ones we're not filtering on\n if key not in filters:\n continue\n\n # OK, filtering on this key; what value do we search for?\n value = filters.pop(key)\n\n if isinstance(value, (list, tuple, set, frozenset)):\n # Looking for values in a list; apply to query directly\n column_attr = getattr(model, key)\n query = query.filter(column_attr.in_(value))\n else:\n # OK, simple exact match; save for later\n filter_dict[key] = value\n\n # Apply simple exact matches\n if filter_dict:\n query = query.filter_by(**filter_dict)\n\n return query", "def _get_where_clause_for_email(_user_settings):\n if _user_settings['id']:\n _where_clause = f'id = \"{_user_settings[\"id\"]}\"'\n elif _user_settings['login']:\n _where_clause = f'login = \"{_user_settings[\"login\"]}\"'\n elif _user_settings['first_name'] or _user_settings['last_name']:\n if _user_settings['first_name'] and _user_settings['last_name']:\n _where_clause = f'first_name = \"{_user_settings[\"first_name\"]}\" and ' + \\\n f'last_name = \"{_user_settings[\"last_name\"]}\"'\n elif _user_settings['last_name']:\n _where_clause = f'last_name = \"{_user_settings[\"last_name\"]}\"'\n else:\n _where_clause = f'first_name = \"{_user_settings[\"first_name\"]}\"'\n else:\n _exc_msg = \"Missing requisite information to accurately look up the email address of the user.\"\n raise errors.exceptions.MissingRequiredDataError(_exc_msg)\n return _where_clause", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def _sql_where(self, cur, tables, prefix=None, aggregate=False):\n if aggregate:\n return _sql_where(cur, tables, self._agg_andalso, self._agg_orelse,\n prefix=prefix, aggregate=aggregate)\n else:\n return _sql_where(cur, tables, self._andalso, self._orelse,\n prefix=prefix, aggregate=aggregate)", "def filter_query(self, query):\n\n if self.state:\n query = query.filter(self.model_class.state == self.state)\n if self.term:\n term = '%{}%'.format(self.term)\n query = query.filter(\n or_(\n *[column.ilike(term) for column in self.term_columns]\n )\n )\n if self.user_ids:\n query = query.filter(self.model_class.user_id.in_(self.user_ids))\n if self.group_ids:\n query = query.filter(self.model_class.group_id.in_(self.group_ids))\n if self.issues:\n query = query.filter(self.model_class._issues.has_any(self.issues))\n if self.categories:\n query = query.filter(\n self.model_class._categories.has_any(self.categories)\n )\n if self.organizations:\n query = query.filter(\n self.model_class._organizations.has_any(self.organizations)\n )\n\n return query", "def test_query_expression_where_clause_generation(self):\r\n query1 = self.table.objects(self.table.column('test_id') == 5)\r\n self.assertEqual(len(query1._where), 1)\r\n where = 
query1._where[0]\r\n self.assertEqual(where.field, 'test_id')\r\n self.assertEqual(where.value, 5)\r\n\r\n query2 = query1.filter(self.table.column('expected_result') >= 1)\r\n self.assertEqual(len(query2._where), 2)\r\n\r\n where = query2._where[0]\r\n self.assertEqual(where.field, 'test_id')\r\n self.assertIsInstance(where.operator, EqualsOperator)\r\n self.assertEqual(where.value, 5)\r\n\r\n where = query2._where[1]\r\n self.assertEqual(where.field, 'expected_result')\r\n self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)\r\n self.assertEqual(where.value, 1)", "def test_applies_scopes_to_query_methods(self):\n self.Test.default_scope(where='foo')\n rel = self.Test.where('bar')\n self.assertEqual(rel.params['where'], ['foo', 'bar'])", "def _valid_filter_query(self):\n if self._output_invalid_imeis:\n valid_filter_sql = sql.SQL('TRUE')\n else:\n valid_filter_sql = sql.SQL('is_valid IS TRUE')\n return valid_filter_sql", "def test_where_clause_type_checking(self):\r\n stmt = BaseCQLStatement('table', [])\r\n with self.assertRaises(StatementException):\r\n stmt.add_where_clause('x=5')", "def add_where_clause(self, clause):\r\n if not isinstance(clause, WhereClause):\r\n raise StatementException(\"only instances of WhereClause can be added to statements\")\r\n clause.set_context_id(self.context_counter)\r\n self.context_counter += clause.get_context_size()\r\n self.where_clauses.append(clause)", "def test_add_dummy_where_with_where_present_and_not_added(self):\n updated_sql = add_dummy_where(self.SQL_WITH_WHERE)\n self.assertEqual(updated_sql, self.SQL_WITH_WHERE)", "def where(self, column, operator=Null(), value=None, boolean='and'):\n # If the column is an array, we will assume it is an array of key-value pairs\n # and can add them each as a where clause. 
We will maintain the boolean we\n # received when the method was called and pass it into the nested where.\n if isinstance(column, dict):\n nested = self.new_query()\n for key, value in column.items():\n nested.where(key, '=', value)\n\n return self.where_nested(nested, boolean)\n\n if isinstance(column, QueryBuilder):\n return self.where_nested(column, boolean)\n\n if value is None:\n if not isinstance(operator, Null):\n value = operator\n operator = '='\n else:\n raise ArgumentError('Value must be provided')\n\n if operator not in self._operators:\n value = operator\n operator = '='\n\n if isinstance(value, QueryBuilder):\n return self._where_sub(column, operator, value, boolean)\n\n if value is None:\n return self.where_null(column, boolean, operator != '=')\n\n type = 'basic'\n\n self.wheres.append({\n 'type': type,\n 'column': column,\n 'operator': operator,\n 'value': value,\n 'boolean': boolean\n })\n\n if not isinstance(value, QueryExpression):\n self.add_binding(value, 'where')\n\n return self", "def exact_filter(query, model, filters, legal_keys,\n created_at_key='created_at'):\n\n filter_dict = {}\n created_at_attr = getattr(model, created_at_key, None)\n # Walk through all the keys\n for key in legal_keys:\n # Skip ones we're not filtering on\n if key not in filters:\n continue\n\n # OK, filtering on this key; what value do we search for?\n value = filters.pop(key)\n\n if key == 'created_since' and created_at_attr:\n # This is a reserved query parameter to indicate resources created\n # after a particular datetime\n value = timeutils.normalize_time(value)\n query = query.filter(created_at_attr.op('>=')(value))\n elif key == 'created_before' and created_at_attr:\n # This is a reserved query parameter to indicate resources created\n # before a particular datetime\n value = timeutils.normalize_time(value)\n query = query.filter(created_at_attr.op('<=')(value))\n elif isinstance(value, (list, tuple, set, frozenset)):\n # Looking for values in a list; apply to query directly\n column_attr = getattr(model, key)\n query = query.filter(column_attr.in_(value))\n else:\n # OK, simple exact match; save for later\n filter_dict[key] = value\n\n # Apply simple exact matches\n if filter_dict:\n query = query.filter_by(**filter_dict)\n\n return query", "def test_condition_vars(self):\n\n # If condition variables didn't work, a ``NameError`` would be raised.\n self.assertRaises(NotImplementedError, self.table.where,\n 'c_string > bound', {'bound': 0})\n\n def where_with_locals():\n bound = 'foo' # this wouldn't cause an error\n # silence pyflakes warnings\n self.assertIsInstance(bound, str)\n self.table.where('c_string > bound', {'bound': 0})\n self.assertRaises(NotImplementedError, where_with_locals)\n\n def where_with_globals():\n global _gvar\n _gvar = 'foo' # this wouldn't cause an error\n # silence pyflakes warnings\n self.assertIsInstance(_gvar, str)\n try:\n self.table.where('c_string > _gvar', {'_gvar': 0})\n finally:\n del _gvar # to keep global namespace clean\n self.assertRaises(NotImplementedError, where_with_globals)", "def __convert_to_sql_where(conditions : List[Tuple[Any, RelationalOperator, Any]]) -> str:\n\n formatted_identifiers = []\n\n for identifier in conditions:\n col_name, relation, value = identifier\n\n if relation == RelationalOperator.Between and len(value) != 2:\n raise ValueError(\"Between relational operator requires the value parameter to be a list of length 2\")\n \n if relation != RelationalOperator.Between:\n value = 
SecurityDatabaseWrapper._validate_value(value)\n \n col_name = SecurityDatabaseWrapper._validate_column_name(col_name)\n\n formatted_identifiers.append((col_name, relation, value))\n\n where_clause_section = ' AND '.join([f'{col_name} {relation.value} {value}' for col_name, relation, value in formatted_identifiers])\n return f\"({where_clause_section})\"", "def relax(self, clause):\n query = self.statement.split()\n i=0\n\n hasWhereOrHaving=False\n hasFrom=False\n\n while i<len(query):\n token = query[i]\n lower_token = token.lower()\n if lower_token == \"from\":\n hasFrom=True\n\n elif lower_token in [\"where\", \"having\"] and hasFrom:\n hasWhereOrHaving=True\n query[i] = \" \".join([token, clause, \"or\"])\n i+=1\n\n if hasFrom and not hasWhereOrHaving:\n query.append(\"where \"+clause)\n\n return self.set_child_and_return(' '.join(query))", "def where(self, dct=None, lst=None, **kwargs):\n return [row for row in self.iwhere(dct, lst, **kwargs)]", "def filter(self, *args, **kwargs):\n # *args are `Q` objects\n for q in args:\n self.query.add_q(q)\n if kwargs:\n self.query.add_q(ql.Q(**kwargs))\n return self", "def where(\n self,\n *predicates: Predicate,\n append: bool = True\n ) -> 'Query':\n if not append:\n self.predicate = None\n\n if len(predicates) > 1:\n predicate = reduce(lambda x, y: x & y, predicates)\n else:\n predicate = predicates[0]\n\n if self.predicate is None:\n self.predicate = predicate\n else:\n self.predicate &= predicate\n\n return self", "def filter():\n return get_filter_data(db, MyTable)", "def _where(model, *criteria, **filters):\n conditions = []\n conditions.extend(criteria)\n\n # build criteria from filter\n if filters:\n\n filter_keys = filters.keys()\n\n # select valid filters only\n columns = {c.name: c for c in _get_mapper(model).columns\n if c.name in filter_keys}\n relations = {c.key: c for c in _get_mapper(model).iterate_properties\n if isinstance(c, RelationshipProperty) and c.key in filter_keys}\n\n for attr, rel in relations.items():\n value = filters[attr]\n if not isinstance(value, list):\n value = [value]\n # validate type of object\n for v in value:\n assert not v or isinstance(v, rel.mapper.class_), \"Type mismatch\"\n\n if len(value) == 1:\n conditions.append(getattr(model, attr) == value[0])\n else:\n # Not implemented yet as of SQLAlchemy 0.7.9\n conditions.append(getattr(model, attr).in_(value))\n\n for attr, prop in columns.items():\n value = filters[attr]\n\n if isinstance(value, tuple):\n # ensure only two values in tuple\n if len(value) != 2:\n raise ValueError(\n \"Expected tuple of size 2 generate BETWEEN expression for column '%s.%s'\" % (\n model.__name__, attr))\n lower, upper = min(value), max(value)\n value = (lower, upper)\n elif not isinstance(value, list):\n value = [value]\n elif not value:\n raise ValueError(\n \"Expected non-empty list to generate IN expression for column '%s.%s'\" % (\n model.__name__, attr))\n\n if len(value) == 1:\n # generate = statement\n value = getattr(model, attr) == value[0]\n elif isinstance(value, tuple):\n # generate BETWEEN statement\n lower = min(value)\n upper = max(value)\n value = getattr(model, attr).between(lower, upper)\n else:\n # generate IN statement\n value = getattr(model, attr).in_(value)\n\n conditions.append(value)\n\n return conditions", "def filter_by(self, **kwargs):\n from_entity = self._filter_by_zero()\n\n clauses = [\n _entity_namespace_key(from_entity, key) == value\n for key, value in kwargs.items()\n ]\n return self.filter(*clauses)", "def create_query_body(self, 
**kwargs):\n\n query = \"\"\n\n for key, value in kwargs.items():\n if value is True and key != 'nsfw':\n query += '&has=%s' % key[:-1]\n\n if key == 'nsfw':\n query += '&include_nsfw=%s' % str(value).lower()\n\n return query", "def build_query_clauses(\n where: str = \"\", order: str = \"\", limit: int = 0, offset: int = 0\n ) -> str:\n return SqliteQueryBuilder.build_query_clauses(where, order, limit, offset)", "def filter(*args, name: Union[AnyStr, bool]=\"\", type: Union[AnyStr, bool]=\"\", q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def filter_by(cls, **kwargs):\n return cls.query.filter_by(**kwargs)", "def get_context(self):\r\n ctx = {}\r\n for clause in self.where_clauses or []:\r\n clause.update_context(ctx)\r\n return ctx", "def dmd_metric_filter_query(self): \n \n metric_filter_query = \"\"\"\n , elig_device_metrics as (\n -- For eligible devices, pull their whole history for the metric we want to filter\n SELECT \n d.device_id,\n d.device_first_seen_ts,\n d.device_first_view_ts,\n d.platform,\n d.platform_type,\n d.ds,\n -- For filtering devices\n sum({cumul_filter_metric}) as daily_filter_metric\n FROM tubidw.device_metric_daily as d\n JOIN pre_approved_devices as p\n ON d.device_id = p.device_id\n GROUP BY 1,2,3,4,5,6\n )\n\n , elig_device_cumul_filter as (\n SELECT *, sum(daily_filter_metric) OVER (PARTITION BY device_id, platform_type, platform ORDER BY ds rows between unbounded preceding and current row) as cumul_filter_metric\n FROM elig_device_metrics\n )\n\n , elig_devices as (\n SELECT device_id\n FROM elig_device_cumul_filter\n GROUP BY 1\n HAVING 1=1\n -- cumulative metric filters dynamically populate below \n {metric_filter_having}\n -- example: \n -- AND max(cumul_filter_metric) >= 3600.0 -- at least 60 mins of cumulative TVT\n -- AND max(cumul_filter_metric) <= 3600.0 -- less than 60 mins of cumulative TVT\n )\n \"\"\"\n return metric_filter_query", "def _validate_select_where(self):" ]
[ "0.70142585", "0.68189335", "0.66024506", "0.6276264", "0.6107378", "0.6103338", "0.6057521", "0.5986232", "0.59493124", "0.5914506", "0.5914506", "0.5914506", "0.58369356", "0.5758533", "0.569798", "0.56843215", "0.56451374", "0.5600097", "0.55459034", "0.5538299", "0.5530821", "0.55130196", "0.5501634", "0.54928416", "0.5474489", "0.5445694", "0.5443927", "0.54369766", "0.54351234", "0.5434608", "0.542677", "0.5425775", "0.5415547", "0.54106677", "0.54075664", "0.5384471", "0.53832495", "0.53541076", "0.53403664", "0.5308619", "0.5306649", "0.52922297", "0.5277759", "0.52763164", "0.52489763", "0.52385294", "0.5232363", "0.52299404", "0.5227714", "0.51914567", "0.5181333", "0.51772577", "0.51762444", "0.5171617", "0.516252", "0.5157335", "0.5157335", "0.5155207", "0.5150878", "0.51477605", "0.51318353", "0.5113345", "0.5111003", "0.5107936", "0.5105701", "0.51026356", "0.5098322", "0.5043108", "0.50262606", "0.50141627", "0.50043327", "0.5002377", "0.4995611", "0.49919373", "0.4984704", "0.4967016", "0.4963951", "0.49631318", "0.4960761", "0.49428627", "0.4935652", "0.49337336", "0.4929035", "0.49248514", "0.49155685", "0.49063632", "0.4905733", "0.49034116", "0.48943835", "0.48916185", "0.48816636", "0.48811314", "0.483884", "0.4819455", "0.48139048", "0.48138842", "0.4812359", "0.48086178", "0.47890756", "0.4789032" ]
0.7027632
0
Creating an index for specified columns can speed up testing in some cases.
def create_index(self, *columns): self._assert_columns_exist(columns) # Build index name. whitelist = lambda col: ''.join(x for x in col if x.isalnum()) idx_name = '_'.join(whitelist(col) for col in columns) idx_name = 'idx_{0}_{1}'.format(self._table, idx_name) # Build column names. col_names = [self._normalize_column(x) for x in columns] col_names = ', '.join(col_names) # Prepare statement. statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})' statement = statement.format(idx_name, self._table, col_names) # Create index. cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') cursor.execute(statement)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_index():", "def build_index():\n pass", "def create_index(self, *columns):\n # Calling super() with older convention to support Python 2.7 & 2.6.\n super(SqliteSource, self).create_index(*columns)", "def create_index(self, *columns):\n # Calling super() with older convention to support Python 2.7 & 2.6.\n super(SqliteSource, self).create_index(*columns)", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def create_index(self, table_name, index, timeout):\n _abstract()", "def create_index(self, table_name, index, timeout):\n _abstract()", "def migrate_9(session, **kwargs):\n session.execute(\n \"CREATE INDEX ix_{tb}_size ON {tb} ( size )\"\n .format(tb=IndexRecord.__tablename__))\n\n session.execute(\n \"CREATE INDEX index_record_hash_type_value_idx ON {tb} ( hash_value, hash_type )\"\n .format(tb=IndexRecordHash.__tablename__))", "def create_index(self, *columns):\n self._assert_columns_exist(columns)\n\n # Build index name.\n whitelist = lambda col: ''.join(x for x in col if x.isalnum())\n idx_name = '_'.join(whitelist(col) for col in columns)\n idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)\n\n # Build column names.\n col_names = [self._normalize_column(x) for x in columns]\n col_names = ', '.join(col_names)\n\n # Prepare statement.\n statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'\n statement = statement.format(idx_name, self._table, col_names)\n\n # Create index.\n cursor = self._connection.cursor()\n cursor.execute(statement)", "def create_indices():\n conn = connect()\n c = conn.cursor()\n\n # To prevent rematch btw players\n c.execute(\n \"\"\"\n CREATE UNIQUE INDEX matches_uniq_idx ON matches\n (greatest(winner, loser), least(winner, loser));\n \"\"\")\n conn.commit()\n conn.close()", "def build_index(self):\r\n date_time('Building indexes in citations table')\r\n self.cursor.execute('DROP INDEX IF EXISTS IDX_citations ;')\r\n self.cursor.execute('CREATE INDEX IDX_citations ON citations (citation);')\r\n self.conn.commit()\r\n gc.collect()", "def create_index(cls, engine):\n\n reg_imei = db.Index('reg_imei_index', cls.imei, postgresql_concurrently=True)\n reg_imei.create(bind=engine)\n\n reg_normalized_imei = db.Index('reg_normalized_imei_index', cls.normalized_imei, postgresql_concurrently=True)\n reg_normalized_imei.create(bind=engine)", "def test_creating_index_type(self):", "def index_time(sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,\n search_type, operation, debug):\n if debug:\n print(\"Benchmarking:\\n\\tSORT = %s\\n\\tN_THREADS = %s\\n\\tSPARSE_FORMAT =\"\n \" %s\\n\\tROWS = %s\\n\\tCOLS = %s\\n\\tNNZ = %s\\n\\tN_INDEXERS =\"\n \" %s\\n\\t\" \"SEARCH_TYPE = %s\\n\\tOPERATION = %s\"\n % (sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,\n search_type, operation))\n\n # Generate matrix.\n with Timer() as t:\n M = sp.sparse.rand(rows, cols, density=nnz/(rows*cols))\n\n if debug:\n print(\"\\tTime to generate sparse matrix: %s\" % t.elapsed)\n\n # Generate indexer.\n with Timer() as t:\n indexer = {}\n idx = np.random.choice(M.nnz, n_indexers, replace=True)\n indexer['row'] = M.row[idx]\n indexer['col'] = M.col[idx]\n indexer['data'] = np.random.rand(idx.size).astype(np.float64)\n\n if debug:\n print(\"\\tTime to generate indexer: %s\" % t.elapsed)\n\n # Convert sparse matrix.\n with Timer() as t:\n if 
sparse_format == 'CSR':\n M = sp.sparse.csr_matrix(M)\n elif sparse_format == 'CSC':\n M = sp.sparse.csc_matrix(M)\n else:\n raise Exception(\"sparse_format must be either CSR or CSC.\")\n\n if debug:\n print(\"\\tTime to convert sparse matrix: %s\" % t.elapsed)\n\n # Sort.\n with Timer() as t:\n if sort:\n if sparse_format == 'CSR':\n # Sort indices according to row first\n sort_idx = np.lexsort((indexer['col'], indexer['row']))\n elif sparse_format == 'CSC':\n # Sort indices according to col first\n sort_idx = np.lexsort((indexer['row'], indexer['col']))\n else:\n sort_idx = np.arange(indexer['row'].size)\n\n unsort_idx = np.argsort(sort_idx)\n\n if debug:\n print(\"\\tTime to sort indexer: %s\" % t.elapsed)\n sort_time = t.elapsed\n\n # Time the csindexer.\n with Timer() as t:\n if search_type == 'scipy':\n ## Run the Scipy function.\n with Timer() as t:\n if operation == 'get':\n data_py = np.squeeze(np.array(M[indexer['row'][sort_idx],\n indexer['col'][sort_idx]]))\n data_py = data_py[unsort_idx]\n elif operation == 'add':\n M_sp = M.copy()\n\n idx_coo = sp.sparse.coo_matrix(\n (indexer['data'][sort_idx],\n (indexer['row'][sort_idx], indexer['col'][sort_idx])),\n shape=(rows, cols))\n\n M_sp += idx_coo\n else:\n raise Exception(\"Operation must be either get or add.\")\n\n else:\n ## Run the Cython function.\n if operation == 'get':\n ### Don't need to copy M as it doesn't get modified but do have\n ### to copy indexer['data'] as it does.\n data_cs = indexer['data'].copy()\n M_cs = M\n\n csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),\n np.array(indexer['col'][sort_idx]), data_cs,\n operation, search_type, n_threads, debug)\n\n ### Unsort to get final result.\n data_cs = data_cs[unsort_idx]\n\n elif operation == 'add':\n ### Copy M, don't copy indexer['data'].\n data_cs = indexer['data']\n M_cs = M.copy()\n csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),\n np.array(indexer['col'][sort_idx]),\n np.array(data_cs[sort_idx]), operation,\n search_type,\n n_threads, debug)\n else:\n raise Exception(\"Operation must be either get or add.\")\n\n\n if debug:\n print(\"\\tTime for indexing: %s\" % t.elapsed)\n computation_time = t.elapsed\n\n return computation_time, sort_time", "def migrate_8(session, **kwargs):\n session.execute(\n \"CREATE INDEX ix_{tb}_baseid ON {tb} ( baseid )\"\n .format(tb=IndexRecord.__tablename__))", "def test_integer_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, 12345)\n\t)", "def create_index(args, client):\n policy = {}\n client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)", "def column_index_exists(schema_name, table_name, column_name):\n sql = \"\"\"\n with table_w_DateLstMod (\n SchemaName,\n TableName,\n ColName,\n column_id,\n DATA_TYPE,\n object_id\n ) as (\n SELECT distinct\n s.name,\n t.name,\n c.name,\n c.column_id,\n case\n when p.name = 'numeric' then 'numeric(' + cast(c.precision as varchar(5)) + ',' + cast(c.scale as varchar(5)) + ')'\n else (\n case\n when p.name = 'varchar'\n then 'varchar(' + cast(c.max_length as varchar(5)) + ')' else p.name end) end as 'data_type', t.object_id\n FROM sys.schemas s with(nolock)\n JOIN sys.tables t with(nolock) ON (s.schema_id = t.schema_id)\n JOIN sys.columns c with(nolock) ON (c.object_id = t.object_id)\n JOIN sys.types P ON C.system_type_id = P.system_type_id\n WHERE 
s.name = ?\n and t.name = ?\n and c.name = ?\n and t.type = 'U'\n ),\n table_indexes (\n SchemaName,\n TableName,\n ColName,\n DATA_TYPE,\n index_name,\n index_type,\n object_id ) as (\n select distinct\n td.SchemaName,\n td.TableName,\n td.ColName,\n td.DATA_TYPE,\n i.name,\n i.type_desc,\n td.object_id\n FROM table_w_DateLstMod td\n JOIN [sys].[index_columns] ic with(nolock)\n on (ic.object_id = td.object_id and ic.column_id = td.column_id and ic.index_column_id = 1)\n JOIN sys.indexes i with(nolock)\n ON (i.object_id = td.object_id and ic.index_id = i.index_id)\n )\n select\n SchemaName, TableName, ColName, DATA_TYPE, index_name, index_type, object_id\n from table_indexes;\n \"\"\"\n\n row = fetch_row(sql, [schema_name, table_name, column_name])\n return row is not None", "def build_index(self):\n self.rebuild_index()", "def __init__(self, create_index=True, online=True):\n self.online = online\n index_exists = self.index_exists()\n if create_index and not index_exists:\n self.create_index()", "def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def _apply_index_op(db, op):\n if 'createIndexes' not in op['o']:\n return\n o = op['o']\n coll_name = o['createIndexes']\n key = list(o['key'].items())\n name = o['name']\n return db[coll_name].create_index(key, name=name)", "def test_create_index_is_called(self):\r\n assert len(self.index_calls) == 0\r\n\r\n connection._index_all_fields = False\r\n \r\n class TestIndexCreationCallTestVertex(Vertex):\r\n col1 = properties.Text(index=True)\r\n col2 = properties.Text(index=True, db_field='____column')\r\n col3 = properties.Text(db_field='____column3')\r\n\r\n assert len(self.index_calls) == 2\r\n assert 'vid' not in self.index_calls\r\n assert 'col1' in self.index_calls\r\n assert '____column' in self.index_calls\r\n assert '____column3' not in self.index_calls\r\n\r\n connection._index_all_fields = True\r\n self.index_calls = []\r\n\r\n class TestIndexCreationCallTestVertex2(Vertex):\r\n col1 = properties.Text()\r\n col2 = properties.Text(db_field='____column')\r\n\r\n assert len(self.index_calls) == 3\r\n assert 'vid' in self.index_calls\r\n assert 'col1' in self.index_calls\r\n assert '____column' in self.index_calls", "def init_index(clear=False):\n return _run_indexer_func(\"init_index\", clear)", "def create(excludeDecls=False):\r\n return Index(conf.lib.clang_createIndex(excludeDecls, 0))", "def test_secondary_index(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n 
assert_invalid(session, \"CREATE INDEX ON t_by_v (v2)\",\n \"Secondary indexes are not supported on materialized views\")", "def init_index(self):\n raise NotImplementedError", "def build_index(self, dict_pg_info, list_insert):\n flag_exit = True\n if flag_exit is False:\n self.create_new_index(dict_pg_info)\n self.insert_index(dict_pg_info, list_insert)", "def test_index_reflection(self):\n import warnings\n def capture_warnings(*args, **kw):\n capture_warnings._orig_showwarning(*args, **kw)\n capture_warnings.warnings.append(args)\n capture_warnings._orig_showwarning = warnings.warn\n capture_warnings.warnings = []\n\n m1 = MetaData(testing.db)\n t1 = Table('party', m1,\n Column('id', String(10), nullable=False),\n Column('name', String(20), index=True), \n Column('aname', String(20))\n )\n m1.create_all()\n \n testing.db.execute(\"\"\"\n create index idx1 on party ((id || name))\n \"\"\") \n testing.db.execute(\"\"\"\n create unique index idx2 on party (id) where name = 'test'\n \"\"\")\n \n testing.db.execute(\"\"\"\n create index idx3 on party using btree\n (lower(name::text), lower(aname::text))\n \"\"\")\n \n try:\n m2 = MetaData(testing.db)\n\n warnings.warn = capture_warnings\n t2 = Table('party', m2, autoload=True)\n \n wrn = capture_warnings.warnings\n assert str(wrn[0][0]) == (\n \"Skipped unsupported reflection of expression-based index idx1\")\n assert str(wrn[1][0]) == (\n \"Predicate of partial index idx2 ignored during reflection\")\n assert len(t2.indexes) == 2\n # Make sure indexes are in the order we expect them in\n tmp = [(idx.name, idx) for idx in t2.indexes]\n tmp.sort()\n \n r1, r2 = [idx[1] for idx in tmp]\n\n assert r1.name == 'idx2'\n assert r1.unique == True\n assert r2.unique == False\n assert [t2.c.id] == r1.columns\n assert [t2.c.name] == r2.columns\n finally:\n warnings.warn = capture_warnings._orig_showwarning\n m1.drop_all()", "def find_coverage_index(self):\n idx_on_new_table = [self._new_table.primary_key] + self._new_table.indexes\n old_pk_len = len(self._pk_for_filter)\n for idx in idx_on_new_table:\n # list[:idx] where idx > len(list) yields full list\n idx_prefix = idx.column_list[:old_pk_len]\n idx_name_list = [col.name for col in idx_prefix]\n if self._pk_for_filter == idx_name_list:\n if idx.is_unique:\n return idx.name\n return None", "def create_index(log_df, column):\n temp_list = log_df[[column]].values.tolist()\n subsec_set = {(x[0]) for x in temp_list}\n subsec_set = sorted(list(subsec_set))\n alias = dict()\n for i, _ in enumerate(subsec_set):\n alias[subsec_set[i]] = i + 1\n return alias", "def simple_index():\n examples = [\n benchmark.Example(\n inputs=[\n [12, 34, 56, 78],\n -2,\n ],\n output=56,\n ),\n ]\n constants = []\n description = 'Index into a tensor'\n target_program = 'in1[in2]'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_index')", "def test_column_index(self):\n c = Column('foo', range(3))\n self.assertEqual(c[0], 0)\n self.assertEqual(c[1], 1)\n self.assertEqual(c[2], 2)", "def create_index(self, indexname, table, columns, unique=False):\n if not isinstance(columns, list) and not isinstance(columns, tuple):\n columns = [columns]\n\n if \".\" in table:\n prefix = table.split(\".\")[0] + \".\"\n table = table.split(\".\")[1]\n else:\n prefix = \"\"\n # table = table\n\n self.LOG(\"index create \", indexname, table, columns, unique)\n if unique:\n sql = \"CREATE UNIQUE INDEX %s%s 
ON %s (%s);\" % (\n prefix, indexname, table, \",\".join(columns))\n else:\n sql = \"CREATE INDEX %s%s ON %s (%s);\" % (\n prefix, indexname, table, \",\".join(columns))\n self.execute(sql)", "def createSpatialIndex(self, schema, table, column):\r\n index_name = '{}_{}_idx'.format(table, column)\r\n return self.runSql('CREATE INDEX {} ON {} USING gist ({})'.format(index_name, self.encodeTableName(schema, table), self.encodeColumnName(column)))", "def test_create_index_swift(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n index = TroveSwiftIndexBuilder(\"short.dat\", out=indexfile)\n\n # read the index file that was created\n with open(indexfile, 'r+b') as fd:\n indextext = fd.read()\n indexlines = indextext.split('\\n')\n\n # 11 lines includes on blank line at the end\n self.assertEquals(11, len(indexlines))\n del indexlines[10]\n\n # check the first character of each line\n docs = [line[0] for line in indexlines]\n self.assertEquals(['1', '2', '3', '4', '5', '6', '7', '8', '9', '1'], docs)\n\n # check some lines from the index\n ref = \"1, 0, 31, short.dat\"\n self.assertEqual(ref, indexlines[0])\n ref = \"10, 279, 32, short.dat\"\n self.assertEqual(ref, indexlines[9])", "def set_index(self, idx, rel, attrs):\n\n query = 'CREATE INDEX {} ON {} ({})'.format(idx, rel, ','.join(attrs))\n\n with self.tpch_cxn.cursor() as curs:\n try:\n curs.execute(query)\n except pg.ProgrammingError as e:\n print(e)", "async def create_index(self, fields, cursor=None):\n if not cursor:\n cursor = self._cursor\n param = []\n for (k, v) in fields.items():\n if v == 1:\n x = (k, pymongo.ASCENDING)\n else:\n x = (k, pymongo.DESCENDING)\n param.append(x)\n result = await cursor.create_index(param, background=True)\n return result, None", "def build_index():\n print \"building index..\"\n\n index_dir = INDEX_DIR_CODE\n if TEST_COLLECTION:\n index_dir = INDEX_DIR_TEST\n CR_DOCS_DB.drop()\n CR_DOCS_DB.ensure_index(\"code_id\", unique=True)\n if os.path.exists(index_dir):\n shutil.rmtree(index_dir)\n os.mkdir(index_dir)\n schema = get_schema()\n storage = FileStorage(index_dir)\n ix = storage.create_index(schema)\n w = ix.writer()\n print \"finding posts..\"\n posts_with_code = POSTS_DB.find({\"answers.Body\": {\"$regex\": \"/.*<code>.*/\"}}, timeout=False)\n print \"adding files..\"\n q = add_from_file(w) if TEST_COLLECTION else 0\n for i, question in enumerate(posts_with_code):\n if TEST_COLLECTION:\n q += add_one_code(w, question, q)\n if q > 999:\n break\n else:\n q += add_doc(w, question)\n if i % 1000 == 0 and not i == 0:\n print \"commit number:\", str(i/1000), \"with\", q, \"codes\"\n w.commit()\n w = ix.writer()\n\n w.commit()\n posts_with_code.close()\n print \"the index was built!\"\n return ix", "def test(indices_to_visit = None):\n ##0 Chicago\n ##1 New York City\n ##2 Los Angeles\n ##3 Minneapolis\n ##4 Denver\n ##5 Dallas\n ##6 Seattle\n ##7 Boston\n ##8 San Francisco\n ##9 St. 
Louis\n ##10 Houston\n ##11 Phoenix\n ##12 Salt Lake City\n ##13 Miami\n ##14 Atlanta\n ##15 Kansas City\n home_index = 15 # Kansas city\n # 15x15 matrix with main diagonal consisting of 0s and to which data is mirrored along\n # (values are derived from external resource and multiplied by 1000 for higher accuracy)\n matrix = np.array([[0.0, 1148413.3550047704, 2813453.6297408855, 572861.4368351421, 1483440.7452179305, 1296355.2188721865, 2801269.1215845253, 1370943.3069385102, 2996683.256068982, 422589.4697157836, 1515737.0196676727, 2343639.7107855356, 2031500.319603397, 1913900.3015914203, 946854.1020487415, 665894.0336505901],\n [1148413.3550047704, 0.0, 3949451.153672887, 1642119.4792808082, 2628946.6435325537, 2212019.1209020815, 3882177.952930788, 306997.0343229422, 4144977.810718553, 1408454.3261387087, 2286054.8575902223, 3455343.3108375454, 3179102.5335818897, 1754834.3710577146, 1202616.154562711, 1766599.1336905772],\n [2813453.6297408855, 3949451.153672887, 0.0, 2455296.3791196346, 1339227.410707824, 1998182.1420783552, 1545364.434045008, 4184394.186016967, 559978.4273194656, 2560790.9591738936, 2212581.51715849, 575975.8749662543, 933602.6426595236, 3767490.41517038, 3120118.850020503, 2186473.1552241463],\n [572861.4368351421, 1642119.4792808082, 2455296.3791196346, 0.0, 1127312.7583590776, 1390159.7734006236, 2249169.1308160927, 1811513.5290266906, 2554165.8167895717, 750916.7305340832, 1701189.1538312144, 2062079.2399570548, 1590460.9488364782, 2434801.332310659, 1462408.5353501518, 662752.1291133759],\n [1483440.7452179305, 2628946.6435325537, 1339227.410707824, 1127312.7583590776, 0.0, 1067257.7993323756, 1646308.7967673023, 2852307.4164419994, 1530510.2790658756, 1283707.511393525, 1414308.8805983758, 943721.1931707633, 598728.757362067, 2779561.192116527, 1952618.0544916363, 899656.1020173575],\n [1296355.2188721865, 2212019.1209020815, 1998182.1420783552, 1390159.7734006236, 1067257.7993323756, 0.0, 2709804.112590561, 2500314.4507069485, 2390841.4329337194, 882457.80942383, 361482.7025425731, 1427995.4150203674, 1610768.421819668, 1788903.6065106322, 1161480.3557326929, 730446.8613086065],\n [2801269.1215845253, 3882177.952930788, 1545364.434045008, 2249169.1308160927, 1646308.7967673023, 2709804.112590561, 0.0, 4018059.834330202, 1093104.7332788548, 2778905.575804111, 3046648.362755992, 1794989.6453295103, 1129464.5539648102, 4404737.747850686, 3516794.375197078, 2427457.036285458],\n [1370943.3069385102, 306997.0343229422, 4184394.186016967, 1811513.5290266906, 2852307.4164419994, 2500314.4507069485, 4018059.834330202, 0.0, 4350710.853063807, 1673216.4080939887, 2586942.3262796295, 3706392.097841614, 3382851.415271485, 2022974.6418062754, 1509585.60107986, 2015770.1390589625],\n [2996683.256068982, 4144977.810718553, 559978.4273194656, 2554165.8167895717, 1530510.2790658756, 2390841.4329337194, 1093104.7332788548, 4350710.853063807, 0.0, 2812916.3098878833, 2650547.941880299, 1053620.7288649315, 967859.8344376946, 4179636.203479384, 3448359.745690545, 2428862.4239271535],\n [422589.4697157836, 1408454.3261387087, 2560790.9591738936, 750916.7305340832, 1283707.511393525, 882457.80942383, 2778905.575804111, 1673216.4080939887, 2812916.3098878833, 0.0, 1093601.4408876144, 2050115.5214378452, 1872971.1741522516, 1708236.6189296674, 752855.8488125347, 384122.2000072272],\n [1515737.0196676727, 2286054.8575902223, 2212581.51715849, 1701189.1538312144, 1414308.8805983758, 361482.7025425731, 3046648.362755992, 2586942.3262796295, 2650547.941880299, 1093601.4408876144, 0.0, 
1636770.4499809493, 1932616.2801687205, 1559260.024532222, 1130480.278513877, 1039856.4844335921],\n [2343639.7107855356, 3455343.3108375454, 575975.8749662543, 2062079.2399570548, 943721.1931707633, 1427995.4150203674, 1794989.6453295103, 3706392.097841614, 1053620.7288649315, 2050115.5214378452, 1636770.4499809493, 0.0, 812548.5062332726, 3191662.5092484164, 2564665.4531581327, 1690942.142157212],\n [2031500.319603397, 3179102.5335818897, 933602.6426595236, 1590460.9488364782, 598728.757362067, 1610768.421819668, 1129464.5539648102, 3382851.415271485, 967859.8344376946, 1872971.1741522516, 1932616.2801687205, 812548.5062332726, 0.0, 3364908.7076308434, 2551338.215149899, 1490589.7393085626],\n [1913900.3015914203, 1754834.3710577146, 3767490.41517038, 2434801.332310659, 2779561.192116527, 1788903.6065106322, 4404737.747850686, 2022974.6418062754, 4179636.203479384, 1708236.6189296674, 1559260.024532222, 3191662.5092484164, 3364908.7076308434, 0.0, 973244.7750437199, 2000112.4162614697],\n [946854.1020487415, 1202616.154562711, 3120118.850020503, 1462408.5353501518, 1952618.0544916363, 1161480.3557326929, 3516794.375197078, 1509585.60107986, 3448359.745690545, 752855.8488125347, 1130480.278513877, 2564665.4531581327, 2551338.215149899, 973244.7750437199, 0.0, 1089830.6426635552],\n [665894.0336505901, 1766599.1336905772, 2186473.1552241463, 662752.1291133759, 899656.1020173575, 730446.8613086065, 2427457.036285458, 2015770.1390589625, 2428862.4239271535, 384122.2000072272, 1039856.4844335921, 1690942.142157212, 1490589.7393085626, 2000112.4162614697, 1089830.6426635552, 0.0]])\n\n solver = FacilityOrderSolver(matrix, home_index)\n \n return solver.solve(indices_to_visit)", "async def build_secret_index(self):\n pass", "def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )", "def create_index(schema, index_name):\n if not os.path.exists(index_name):\n os.mkdir(index_name)\n ix = index.create_in(index_name, schema)\n print(f\"index {index_name} created successfully\")\n return ix", "def buildIndex(counters):\n route = Route((counter.probe for counter in counters))\n index = ProbeIndexFactory.cache.get(route, None)\n if not index:\n probeMap = ProbeMap()\n for i, counter in enumerate(counters):\n ProbeIndexFactory._addCounterToMap(probeMap, counter, i)\n index = ProbeIndexFactory.Index(route, probeMap)\n ProbeIndexFactory.cache.update({route : index})\n return index", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_CreateIndex(self, arg0)", "def test_create_index(self, collection):\n collection.create_index(\"hello\")\n assert collection._indexes == {\"_id_\": ((\"_id\",), {(1,)})}\n\n collection.create_index(\"hello\", unique=True)\n assert collection._indexes == {\n \"_id_\": 
((\"_id\",), {(1,)}),\n \"hello_1\": ((\"hello\",), {(\"there\",)}),\n }", "def index_schema_builder(table):\n conn = table.parent.parent.connection\n\n idx = OrderedDict()\n indexes = conn.execute(\"SHOW INDEXES FROM `%s`.`%s`\" % (table.parent.name, table.name))\n\n if not indexes:\n return idx\n\n for index in indexes:\n n = index['Key_name']\n if n not in idx:\n indexitem = IndexSchema(name=n, parent=table)\n indexitem.non_unique = (bool(index['Non_unique'])) # == not unique\n indexitem.table_name = index['Table']\n\n key_type = index['Index_type'].upper()\n\n if index['Key_name'].upper() == \"PRIMARY\":\n indexitem.kind = \"PRIMARY\"\n elif not indexitem.non_unique:\n indexitem.kind = \"UNIQUE\"\n elif key_type in ('FULLTEXT', 'SPATIAL'):\n indexitem.kind = key_type\n else:\n indexitem.kind = \"INDEX\"\n\n if key_type in ('BTREE', 'HASH', 'RTREE'):\n indexitem.type = key_type\n\n indexitem.collation = index['Collation']\n indexitem.comment = index['Comment']\n\n idx[n] = indexitem\n\n if index['Column_name'] not in idx[n].fields:\n idx[n].fields.insert(index['Seq_in_index'], (index['Column_name'], index['Sub_part'] or 0))\n\n return idx", "def create_bam_file_index(infile, outfile):\n statement = 'samtools index %(infile)s %(outfile)s'\n P.run(statement,\n job_queue = P.PARAMS['queue'],\n job_memory = P.PARAMS['memory'])", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_CreateIndex(self, arg0)", "def _SetupIndexes(self, _open=open):\n pass", "def _init_index(self):\n\n if self._check_idx:\n self._index = bamnostic.bai.Bai(self._index_path)\n self.__nocoordinate = self._index.n_no_coor\n self.__mapped = sum(self._index.unmapped[mapped].n_mapped for mapped in self._index.unmapped) + self.nocoordinate\n self.__unmapped = sum(self._index.unmapped[unmapped].n_unmapped for unmapped in self._index.unmapped) + self.nocoordinate", "def create_indexes_with_stats(self) -> float:\n query_nodes_per_cluster = self.cluster_spec.servers_by_cluster_and_role('n1ql')\n index_nodes_per_cluster = self.cluster_spec.servers_by_cluster_and_role('index')\n\n t0 = time.time()\n for cluster_query_nodes in query_nodes_per_cluster:\n self.create_indexes(query_node=cluster_query_nodes[0])\n\n # Wait for index build to complete on first cluster, and record time\n logger.info('Waiting for index build on primary cluster')\n self.wait_for_indexing(index_nodes=index_nodes_per_cluster[0])\n index_build_time = time.time() - t0\n logger.info(\"Index build completed in {} sec\".format(index_build_time))\n\n # Wait for index build to complete on remaining clusters\n logger.info('Waiting for index build to complete on remaining clusters')\n remaining_index_nodes = [node for nodes in index_nodes_per_cluster[1:] for node in nodes]\n self.wait_for_indexing(index_nodes=remaining_index_nodes)\n\n return index_build_time", "def define_index_field(DomainName=None, IndexField=None):\n pass", "def _create_indexes(self):\r\n # WARNING: The collection will be locked during the index\r\n # creation. If the collection has a large number of\r\n # documents in it, the operation can take a long time.\r\n\r\n # TODO: The creation of indexes can be moved to a Django\r\n # management command or equivalent. 
There is also an option to\r\n # run the indexing on the background, without locking.\r\n self.collection.ensure_index([('time', pymongo.DESCENDING)])\r\n self.collection.ensure_index('event_type')", "def test_large_block_index():\n\n # TODO: It would be nice to find a way to make this test faster. The\n # real bottleneck here is the enormous YAML section.\n\n buff = io.BytesIO()\n\n narrays = int(io.DEFAULT_BUFFER_SIZE / 4)\n\n arrays = []\n for i in range(narrays):\n arrays.append(np.array([i], np.uint16))\n\n tree = {\"arrays\": arrays}\n\n ff = asdf.AsdfFile(tree)\n # Since we're testing with small arrays, force all arrays to be stored\n # in internal blocks rather than letting some of them be automatically put\n # inline.\n ff.write_to(buff, all_array_storage=\"internal\")\n\n buff.seek(0)\n with asdf.open(buff) as ff2:\n assert isinstance(ff2._blocks._internal_blocks[0], block.Block)\n assert len(ff2._blocks._internal_blocks) == narrays", "def create_indices():\n destroy_indices()\n\n ActionDocument._index.create(ignore=[400, 404])\n ClassificationDocument._index.create(ignore=[400, 404])\n FunctionDocument._index.create(ignore=[400, 404])\n PhaseDocument._index.create(ignore=[400, 404])\n RecordDocument._index.create(ignore=[400, 404])\n\n yield\n\n destroy_indices()", "def create_index(self):\n self.send_robust(self.es_index, data=self.es_meta)\n self.set_index_normal_settings()", "def test_integer_map_key_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_map_key_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_map_key_index, 12345)\n\t)", "def test_geo_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_geo_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_geo_index, (0.0, 0.0))\n\t)", "def init_index(self, index_name):\n return Index(self, index_name)", "def test_sparse_index(self):\n freq = 'MS'\n index = pd.date_range(start=\"2000-1-1\", freq=\"B\", periods=1000)\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n test = tl._process(0, 900)\n new_ind = index[test]\n assert np.all(new_ind.day < 5)\n\n freq = 'M'\n index = pd.date_range(start=\"2000-1-1\", freq=\"B\", periods=1000)\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n test = tl._process(0, 900)\n new_ind = index[test]\n assert np.all(new_ind.day > 25)", "def create_index():\n try:\n client = MongoClient(MONGO_URI,event_listeners=[CommandLogger()])\n db = client.get_database('UNSD')\n \n coll_unfcc = db.get_collection('unfcc')\n coll_ebal = db.get_collection('ebal')\n result_unfcc = coll_unfcc.create_index([('REF_AREA',ASCENDING),('TIME_PERIOD',DESCENDING)])\n result_ebal = coll_ebal.create_index([('REF_AREA',ASCENDING),('TIME_PERIOD',DESCENDING)])\n except pymongo.errors.ConnectionFailure as e:\n logger.error('PyMongo error ConnectionFailure seen: ' + str(e))\n traceback.print_exc(file = sys.stdout)", "def migrate_5(session, **kwargs):\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordUrl.__tablename__))\n\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordHash.__tablename__))\n\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordMetadata.__tablename__))\n\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordUrlMetadata.__tablename__))", "def build_index(dataset, n_neighbors):\n# Initialize FLANN\n pyflann.set_distance_type(distance_type='euclidean')\n flann = 
pyflann.FLANN()\n params = flann.build_index(dataset,algorithm='kdtree',trees=4)\n #print params\n nearest_neighbors, dists = flann.nn_index(dataset, n_neighbors, checks=params['checks'])\n return nearest_neighbors, dists", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def index_object(idxs=None):", "def buildIndex(filename, currentTime, baseDir):\n pathToFolder = baseDir + 'Collections/IndriIndices/'\n if not os.path.exists(pathToFolder):\n os.makedirs(pathToFolder)\n INDRI_BUILD_INDEX = '/mnt/bi-strg3/v/zivvasilisky/ziv/env/indri/indri/bin/IndriBuildIndex'\n CORPUS_PATH = filename\n CORPUS_CLASS = 'trectext'\n MEMORY = '1G'\n INDEX = pathToFolder + currentTime\n STEMMER = 'krovetz'\n run_bash_command(INDRI_BUILD_INDEX + ' -corpus.path='+CORPUS_PATH + ' -corpus.class='+CORPUS_CLASS + ' -index='+INDEX + ' -memory='+MEMORY + ' -stemmer.name=' + STEMMER)\n return INDEX", "def read_idx(self, searchString=None):\n assert self.idx is not None, f\"No index file for {self.grib}.\"\n \n # Open the idx file\n r = requests.get(self.idx)\n assert r.ok, f\"Index file does not exist: {self.idx}\" \n\n read_idx = r.text.split('\\n')[:-1] # last line is empty\n df = pd.DataFrame([i.split(':') for i in read_idx], \n columns=['grib_message', 'start_byte', \n 'reference_time', 'variable', \n 'level', 'forecast_time', 'none'])\n\n # Format the DataFrame\n df['grib_message'] = df['grib_message'].astype(int)\n df['reference_time'] = pd.to_datetime(df.reference_time, format='d=%Y%m%d%H')\n df['valid_time'] = df['reference_time'] + pd.to_timedelta(f\"{self.fxx}H\")\n df['start_byte'] = df['start_byte'].astype(int)\n df['end_byte'] = df['start_byte'].shift(-1, fill_value='')\n df['range'] = df.start_byte.astype(str) + '-' + df.end_byte.astype(str)\n df = df.drop(columns='none')\n df = df.set_index('grib_message')\n df = df.reindex(columns=['start_byte', 'end_byte', 'range', \n 'reference_time', 'valid_time', \n 
'variable', 'level', 'forecast_time'])\n df.attrs = dict(\n source=self.idx_source, \n description='Index (.idx) file for the GRIB2 file.', \n model=self.model, \n field=self.field, \n lead_time=self.fxx, \n datetime=self.date\n )\n\n # Filter DataFrame by searchString\n if searchString not in [None, ':']:\n columns_to_search = df[['variable', 'level', 'forecast_time']].apply(lambda x: ':'.join(x), axis=1)\n logic = columns_to_search.str.contains(searchString)\n if logic.sum() == 0:\n print(f\"No GRIB messages found. There might be something wrong with {searchString=}\")\n print(_searchString_help(searchString))\n df = df.loc[logic]\n return df", "def _build_memmap_index_files(newline_int, build_index_fn, fn, index_mapping_dir: str):\n idx_fn = _index_fn(fn, index_mapping_dir)\n\n # create data map\n if _index_file_exists(idx_fn):\n return False\n else:\n logging.info(f\"Building indexing for fn = {fn}\")\n # find all newline positions\n midx = build_index_fn(fn, newline_int)\n # validate midx\n midx = np.asarray(midx)\n if not np.issubdtype(midx.dtype, np.integer):\n raise TypeError(f\"midx must be an integer array, but got type = {midx.dtype}\")\n\n # create e metadata file\n data = dict(newline_int=newline_int, version=__idx_version__)\n\n # save index as numpy array to enable memmap reading\n logging.info(f\"Saving idx file = {idx_fn}.npy\")\n np.save(idx_fn + \".npy\", midx, allow_pickle=True)\n logging.info(f\"Saving metadata file = {idx_fn}.info\")\n pickle.dump(data, open(idx_fn + \".info\", \"wb\"))\n\n return True", "def test_integer_list_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_list_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_list_index, 12345)\n\t)", "def create_indexes(create_func):\n\tfor set_name, index_path, index_name in zip(SET_NAMES, INDEX_PATHS, INDEX_NAMES):\n\t\tcreate_func(set_name, index_path, index_name)", "def test_index_keys(engine):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n other = Column(DateTime, range_key=True)\n another = Column(UUID)\n last = Column(String)\n\n by_last = GlobalSecondaryIndex(hash_key=\"another\", range_key=\"last\")\n by_another = LocalSecondaryIndex(range_key=\"last\")\n\n assert Model.by_last.hash_key is Model.another\n assert Model.by_last.range_key is Model.last\n\n assert Model.by_another.hash_key is Model.id\n assert Model.by_another.range_key is Model.last", "def intrinsic_index_calc(df: pd.DataFrame):\n\n cur_index = 0\n df['Int_index'] = None\n df['Int_index'].iloc[0] = cur_index\n for i in range(len(df)):\n if df['Int_event'][i] in [-1, 1, -2, 2]:\n cur_index = cur_index + 1\n df['Int_index'].iloc[i] = cur_index\n\n return df", "def _make_sample_filter_indices(\n X: np.ndarray,\n test: bool = False,\n building_types: list = None,\n htc_upper_bound: float = None,\n htc_lower_bound: float = None\n):\n all_indices = list(range(X.shape[0]))\n\n print(\">>>\", building_types)\n\n if building_types and len(building_types) > 0:\n building_types_list = _building_types_from_csv(test)\n all_indices += [i for i, bt in enumerate(building_types_list) if bt in building_types]\n\n if htc_lower_bound:\n mean_htcs = _mean_htc_from_csv(test)\n htc_lower_filtered_indices = np.where(np.array(mean_htcs) >= htc_lower_bound)[0].tolist()\n\n if len(all_indices) > 0:\n all_indices = list(set(all_indices) & set(htc_lower_filtered_indices))\n else:\n all_indices = htc_lower_filtered_indices\n\n if htc_upper_bound:\n mean_htcs = _mean_htc_from_csv(test)\n 
htc_upper_filtered_indices = np.where(np.array(mean_htcs) <= htc_upper_bound)[0].tolist()\n\n if len(all_indices) > 0:\n all_indices = list(set(all_indices) & set(htc_upper_filtered_indices))\n else:\n all_indices = htc_upper_filtered_indices\n\n return all_indices", "def generate_inv_index(people):\n pass", "def create_new_index(self, dict_pg_info):\n # ! Setting if fun can use default setting\n ruler = Rules()\n str_conn = ruler.pg_info_rules(dict_pg_info)\n conn = psycopg2.connect(str_conn)\n\n with conn:\n with conn.cursor() as cur:\n str_create_table = \"CREATE TABLE \" + dict_pg_info['table'] + \" (path varchar PRIMARY KEY);\"\n # ! Check if table already exit\n cur.execute(str_create_table)\n cur.close()\n\n conn.close()", "def index(self, column):\n self.table.add_index(column, f\"{self.table.name}_{column}_index\", \"index\")\n return self", "def _reset_index():\r\n WIX = create_in(INDEX_NAME, BmarkSchema) # noqa\r", "def index_selecting():\n df = pd.read_csv('data/ad_feature.csv',header=0) #type:pd.DataFrame\n print df[:2] , df[2: ] #前两行\n\n df.iloc[:2 , :]\n df.iloc[:2, [2,3] ] # 第 2 列和 第3列\n\n # df.loc[row_indexer,column_indexer]\n df.loc[3, ['cate_id','price']]\n\n df[['cate_id', 'price']]\n\n #boolean index\n df[ df['price'] > 1000]\n df[ (df['price'] > 1000) & (df['price'] < 2000)]\n\n\n\n df[df['cate_id'].isin([6261])]\n\n #select by callable\n\n\n # .loc, .iloc, and also [] indexing can accept a callable as indexer\n\n\n df.loc[lambda d: d.price > 2000, :]", "def create_index_buffer(init):\n return ffi.new('unsigned short[]', init)", "def test_string_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_string_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, \"foobar\")\n\t)", "def test_integer_map_value_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_map_value_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_map_value_index, 12345)\n\t)", "def test_local_index_no_range_key(engine):\n with pytest.raises(ValueError):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n another = Column(UUID)\n by_another = LocalSecondaryIndex(range_key=\"another\")", "def ensure_index(cls, *a, **ka):\n cls._dbag.ensure_index(*a, **ka)", "def T1(request):\n T = _get_test_table()\n if request.param:\n T.add_index(\"a\")\n return T", "def ensure_table_indexes(collection) -> None:\n # Unique ticker event ID\n collection.create_index('data.u', unique=True)\n # The symbol, e.g. 
BTCUSDT\n collection.create_index('data.s')\n # The time for sorting.\n collection.create_index('time')", "def get_sql_create_index(tablename, schema, if_not_exists=False):\n if not schema.get(\"columns\"):\n raise ValueError(\"no columns defined\")\n if len(schema[\"columns\"]) != len(set(schema[\"columns\"])):\n raise ValueError(\"same column given more than once\")\n sql = [\"CREATE\"]\n if schema.get(\"unique\"):\n sql.append(\"UNIQUE\")\n sql.append(\"INDEX\")\n if if_not_exists:\n sql.append(\"IF NOT EXISTS\")\n sql.append('\"%s\" ON \"%s\"' % (schema[\"name\"], tablename))\n sql.append(\"(%s)\" % \",\".join([f'\"{c}\"' for c in schema[\"columns\"]]))\n return \" \".join(sql)", "def create_index(self, db_name):\n\t\tindex_func_path = self._get_index_func_filepath(db_name)\n\t\t\n\t\tif os.path.isfile(index_func_path):\n\t\t\t# create index request payload from predefined file\t\n\t\t\twith open(index_func_path, 'r') as content_file:\n\t\t\t\tpayload = content_file.read()\n\t\t\n\t\t\tprint (\"Create index using function in: {}\".format(index_func_path))\n\t\t\turl = \"https://{}/{}/_design/view\".format(\n\t\t\t\tself.cloudanthost, db_name)\n\t\t\tresponse = self.r.put(url, data=payload)\n\t\t\tassert response.status_code == 201", "def test_jw_sparse_index(self):\n expected = [1, 2]\n calculated_indices = jw_number_indices(1, 2)\n self.assertEqual(expected, calculated_indices)\n\n expected = [3]\n calculated_indices = jw_number_indices(2, 2)\n self.assertEqual(expected, calculated_indices)", "def describe_index(self, table_name, timeout):\n\n _abstract()", "def describe_index(self, table_name, timeout):\n\n _abstract()", "def reindex_table(conn, table_name, verbose=0):\n t1 = time.time()\n # check if index exists, delete if needed\n try:\n q = \"DROP INDEX \" + INDEX_NAMES + table_name + '_1'\n conn.execute(q)\n except sqlite3.OperationalError, e:\n #print 'sqlite error:', e\n pass\n try:\n q = \"DROP INDEX \" + INDEX_NAMES + table_name + '_2'\n conn.execute(q)\n except sqlite3.OperationalError, e:\n #print 'sqlite error:', e\n pass\n # create index on jump codes\n q = \"CREATE INDEX \" + INDEX_NAMES + table_name + '_1'\n q += \" ON \" + table_name + \" (jumpcode ASC, weight ASC)\"\n conn.execute(q)\n conn.commit()\n if verbose > 0:\n strtime = str(datetime.timedelta(seconds=time.time() - t1))\n print 'REINDEXED THE TABLE %s (jumpcode,weight) IN %s' % (table_name, strtime)\n t2 = time.time()\n # create index on tids\n q = \"CREATE INDEX \" + INDEX_NAMES + table_name + '_2'\n q += \" ON \" + table_name + \" (tidid ASC)\"\n conn.execute(q)\n conn.commit()\n # verbose\n if verbose > 0:\n strtime = str(datetime.timedelta(seconds=time.time() - t2))\n print 'REINDEXED THE TABLE %s (tidid) IN %s' % (table_name, strtime)", "def getNeighboursCriteriaIndex(seg,mergedSegments,updatedSpeed,inversedIndex,weights,minValidData):\n neighbours = getNeighbours(seg,mergedSegments,inversedIndex)\n if len(neighbours) == 0 : return pd.Series(index=[[],[]])\n\n df = pd.Series(index=[np.array([seg]*len(neighbours)),neighbours])\n return pd.Series(df.index.map( lambda x: computePairCriteria(*x,mergedSegments,updatedSpeed,inversedIndex,weights,minValidData)).values,df.index)", "def instantiate_indexor(prefix, width):\n stdlib = py_ast.Stdlib()\n name = py_ast.CompVar(NAME_SCHEME[\"index name\"].format(prefix=prefix))\n add_name = py_ast.CompVar(f\"{prefix}_add\")\n cells = [\n py_ast.Cell(name, stdlib.register(width)),\n py_ast.Cell(add_name, stdlib.op(\"add\", width, signed=False)),\n ]\n\n init_name = 
py_ast.CompVar(NAME_SCHEME[\"index init\"].format(prefix=prefix))\n init_group = py_ast.Group(\n init_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 2 ** width - 1), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(init_name, \"done\")\n ),\n ],\n )\n\n upd_name = py_ast.CompVar(NAME_SCHEME[\"index update\"].format(prefix=prefix))\n upd_group = py_ast.Group(\n upd_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 1), py_ast.CompPort(add_name, \"left\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"out\"), py_ast.CompPort(add_name, \"right\")\n ),\n py_ast.Connect(\n py_ast.CompPort(add_name, \"out\"), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(upd_name, \"done\")\n ),\n ],\n )\n\n return (cells, [init_group, upd_group])", "def create_new_index(self, index_name, value, is_cluster, check=False):\n print(f\"Creating {index_name} index started \\n\")\n add_index = \"/html//i[@id='addIndex']\"\n self.locator_finder_by_xpath(add_index).click()\n time.sleep(2)\n\n print(f\"selecting {index_name} from the list\\n\")\n self.locator_finder_by_select(self.select_index_type_id, value)\n\n if index_name == \"Persistent\":\n self.select_persistent_fields_id = self.locator_finder_by_hover_item_id(self.select_persistent_fields_id)\n time.sleep(1)\n self.select_persistent_fields_id.send_keys(\"pfields\").perform()\n self.select_persistent_name_id = self.locator_finder_by_hover_item_id(self.select_persistent_name_id)\n self.select_persistent_fields_id.send_keys(\"Persistent\").perform()\n time.sleep(1)\n\n if not is_cluster:\n self.select_persistent_unique_id = self.locator_finder_by_hover_item_id(\n self.select_persistent_unique_id\n )\n\n self.select_persistent_sparse_id = self.locator_finder_by_hover_item_id(self.select_persistent_sparse_id)\n self.select_persistent_duplicate_id = self.locator_finder_by_hover_item_id(\n self.select_persistent_duplicate_id\n )\n self.select_persistent_background_id = self.locator_finder_by_hover_item_id(self.select_persistent_background_id)\n time.sleep(1)\n\n elif index_name == \"Geo\":\n self.select_geo_fields_id = self.locator_finder_by_hover_item_id(self.select_geo_fields_id)\n self.select_geo_fields_id.send_keys(\"gfields\").perform()\n time.sleep(1)\n self.select_geo_name_id = self.locator_finder_by_hover_item_id(self.select_geo_name_id)\n self.select_geo_name_id.send_keys(\"Geo\").perform()\n time.sleep(1)\n self.select_geo_json_id = self.locator_finder_by_hover_item_id(self.select_geo_json_id)\n self.select_geo_background_id = self.locator_finder_by_hover_item_id(self.select_geo_background_id)\n time.sleep(1)\n self.wait_for_ajax()\n\n elif index_name == \"Fulltext\":\n self.select_fulltext_field_id = self.locator_finder_by_hover_item_id(self.select_fulltext_field_id)\n self.select_fulltext_field_id.send_keys(\"ffields\").perform()\n time.sleep(1)\n self.select_fulltext_name_id = self.locator_finder_by_hover_item_id(self.select_fulltext_name_id)\n self.select_fulltext_name_id.send_keys(\"Fulltext\").perform()\n time.sleep(1)\n self.select_fulltext_length_id = self.locator_finder_by_hover_item_id(self.select_fulltext_length_id)\n self.select_fulltext_length_id.send_keys(100)\n self.select_fulltext_background_id = 
self.locator_finder_by_hover_item_id(\n self.select_fulltext_background_id\n )\n time.sleep(1)\n self.wait_for_ajax()\n\n elif index_name == \"TTL\":\n self.select_ttl_field_id = self.locator_finder_by_hover_item_id(self.select_ttl_field_id)\n self.select_ttl_field_id.send_keys(\"tfields\").perform()\n time.sleep(1)\n self.select_ttl_name_id = self.locator_finder_by_hover_item_id(self.select_ttl_name_id)\n self.select_ttl_name_id.send_keys(\"TTL\").perform()\n time.sleep(1)\n self.select_ttl_expiry_id = self.locator_finder_by_hover_item_id(self.select_ttl_expiry_id)\n self.select_ttl_expiry_id.send_keys(1000)\n self.select_ttl_background_id = self.locator_finder_by_hover_item_id(self.select_ttl_background_id)\n time.sleep(1)\n self.wait_for_ajax()\n\n # experimental feature\n elif index_name == 'ZKD':\n if check:\n self.navbar_goto(\"collections\")\n print(\"Selecting computed values collections. \\n\")\n col = '//*[@id=\"collection_ComputedValueCol\"]/div/h5'\n self.locator_finder_by_xpath(col).click()\n self.select_index_menu()\n\n print(f\"Creating {index_name} index started \\n\")\n self.locator_finder_by_xpath(add_index).click()\n time.sleep(2)\n\n print(f\"selecting {index_name} from the list\\n\")\n self.locator_finder_by_select(self.select_index_type_id, 5)\n\n time.sleep(1)\n\n select_zkd_field_sitem = self.locator_finder_by_id('newZkdFields')\n select_zkd_field_sitem.click()\n select_zkd_field_sitem.clear()\n select_zkd_field_sitem.send_keys('x,y')\n time.sleep(1)\n else:\n select_zkd_field_sitem = self.locator_finder_by_id('newZkdFields')\n select_zkd_field_sitem.click()\n select_zkd_field_sitem.clear()\n select_zkd_field_sitem.send_keys('zkdfileds')\n time.sleep(1)\n\n select_zkd_name_sitem = self.locator_finder_by_id('newZkdName')\n select_zkd_name_sitem.click()\n select_zkd_name_sitem.clear()\n select_zkd_name_sitem.send_keys('ZKD')\n time.sleep(1)\n\n select_create_index_btn_id = \"createIndex\"\n self.locator_finder_by_id(select_create_index_btn_id).click()\n time.sleep(10)\n self.webdriver.refresh()\n\n if check:\n self.navbar_goto(\"collections\")\n self.select_collection(\"TestDoc\")\n self.select_index_menu()\n\n print(f\"Creating {index_name} index completed \\n\")", "def index_stmt(self, idx):\n return Statement(\"index\", self, idx)", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def create_index(es_object, index_name):\n created = False\n # index settings\n # the reason why we need mappings is avoid corrupting your data\n settings = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n },\n \"mappings\": {\n # custom type called foods\n \"foods\": {\n \"properties\": {\n # Specify that the food_name field contains text values.\n \"food_name\": {\n \"type\": \"text\",\n \"fields\": {\n \"raw\":{ \n \"type\": \"keyword\"\n } # The food_name.raw field can be used for sorting and aggregations\n }\n },\n # Specify that the categories 
field contains text values.\n \"categories\": {\n \"type\": \"text\",\n \"fields\": {\n \"raw\":{ \n \"type\": \"keyword\"\n } # The categories.raw field can be used for sorting and aggregations\n }\n },\n # Specify that the calories field contains integer values.\n \"calories\": {\n \"type\": \"integer\"\n },\n \"protein\": {\n \"type\": \"integer\"\n },\n \"carbs\": {\n \"type\": \"integer\"\n },\n \"fat\": {\n \"type\": \"integer\"\n }\n \n },\n }\n }\n }\n try:\n if not es_object.indices.exists(index_name):\n # Ignore 400 means to ignore \"Index Already Exist\" error.\n es_object.indices.create(index=index_name, ignore=400, body=settings)\n print('Created Index')\n created = True\n except Exception as ex:\n print(str(ex))\n finally:\n return created" ]
[ "0.6506796", "0.61705136", "0.60441345", "0.60441345", "0.59682924", "0.5770552", "0.5770552", "0.5722764", "0.56989115", "0.5663955", "0.5610542", "0.5545763", "0.5528273", "0.54436374", "0.54176277", "0.54133874", "0.53744704", "0.53413314", "0.52968043", "0.5292923", "0.5291808", "0.52882886", "0.5246274", "0.5245503", "0.52263254", "0.51995707", "0.5181641", "0.5167621", "0.51216155", "0.5118952", "0.51186365", "0.509242", "0.5055916", "0.50508934", "0.5049662", "0.5037377", "0.50367093", "0.5027651", "0.50269157", "0.5019844", "0.49989602", "0.4989097", "0.49866006", "0.49815685", "0.49772522", "0.49558267", "0.49550498", "0.49428266", "0.4939789", "0.49395245", "0.49376887", "0.49330893", "0.4929531", "0.49177673", "0.49151838", "0.4898587", "0.48944065", "0.48928562", "0.48893288", "0.4884679", "0.4882908", "0.48785535", "0.4878444", "0.4876904", "0.48747322", "0.48606128", "0.4855074", "0.48531452", "0.4851588", "0.48460978", "0.4833749", "0.48266143", "0.4826031", "0.48110723", "0.47990194", "0.47900566", "0.47773588", "0.47524163", "0.47447717", "0.473681", "0.47366503", "0.4736397", "0.47355053", "0.47347224", "0.4723183", "0.47137", "0.4696025", "0.46776506", "0.46762773", "0.46664804", "0.46657646", "0.46657646", "0.46615586", "0.46610197", "0.46545345", "0.46526697", "0.46498615", "0.46488085", "0.46487373", "0.46429902" ]
0.5729641
7
Normalize value for use as SQLite column name.
def _normalize_column(column):
    if not isinstance(column, str):
        msg = "expected column of type 'str', got {0!r} instead"
        raise TypeError(msg.format(column.__class__.__name__))
    column = column.strip()
    column = column.replace('"', '""')  # Escape quotes.
    if column == '':
        column = '_empty_'
    return '"' + column + '"'
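A quick usage sketch of the normalizer above (hypothetical REPL calls; the expected outputs follow directly from the code as written):

_normalize_column('age')            # -> '"age"'
_normalize_column(' first name ')   # -> '"first name"'   (whitespace stripped)
_normalize_column('say "hi"')       # -> '"say ""hi"""'   (embedded quotes doubled)
_normalize_column('')               # -> '"_empty_"'      (empty name gets a placeholder)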
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self, value):\n return str(value)", "def _normalize_expanded_field(value):\n\n value = value.strip()\n value = re.sub(r'\\s{2,}', ' ', value)\n value = re.sub(r'/{2,}', '/', value)\n value = re.sub(r'\\\\{2,}', '\\\\\\\\', value)\n value = re.sub(r'-{2,}', '-', value)\n value = re.sub(r'\\*{2,}', '*', value)\n value = re.sub(r'\\.{2,}', '.', value)\n value = value.upper()\n\n return value", "def __normalize_name(self):\n self.normalized_name = normalizeSimplified(self.name)", "def normalize(val):\n \n if val.find('-') != -1:\n val = val.replace('-','_')\n\n return val", "def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n return name", "def normalize(value):\n value = value.lower()\n for normalized, compare in _NORMALIZE.iteritems():\n if value in compare:\n return normalized\n return value.upper()", "def normalize_name(self, value):\n import unicodedata\n import re\n\n self.log('Converting string %s' % value)\n \n # Double try in name conversion\n try:\n value = unicodedata.normalize('NFKD', u'%s' % value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n except:\n self.log('Conversion error: \\n%s' % traceback.format_exc())\n\n value = unicode(value, 'ascii', errors='ignore')\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n\n\n self.log('Conversion finished to %s' % value)\n\n return value", "def normalize(item):\n item = item.lower().strip().rstrip('_')\n return item", "def _column_original_name(name):\n if ':' in name:\n return name.split(':')[-1]\n else:\n return name", "def prepare_value(self, value):\n if value is None:\n return value\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if value:\n return \"%s.%s.%s.%s\" % (value[0:3], value[3:7], value[7:11], value[11:])\n return value", "def _normalize(self, key, value):\n\n # None value should not be converted by normalizer\n if value is None:\n return None\n\n normalize_func = getattr(self, 'normalize_{0}'.format(key),\n lambda x: x)\n\n return normalize_func(value)", "def safe_column_name(string):\n string = unidecode(string.replace(' ', '_').lower())\n return re.sub(r'[^0-9a-z_]','', string)", "def sanitize_name(self, value):\n if self.sanitize_names:\n new_value = re.sub('[^a-zA-Z0-9_]', '_', value[:127])\n else:\n new_value = value\n return new_value", "def _validate_column_name(col_name : str) -> str:\n\n if col_name[0].isdigit():\n return f'\"{col_name}\"'\n return col_name", "def normalize_name(field_name):\n fixes = (\n (r\"/\", \"_per_\"),\n (r\"%\", \"_pct_\"),\n (r\"\\W\", \"_\"),\n (r\"^_+\", \"\"), # remove '_' if field_name begins with '_'\n (r\"_+$\", \"\"),\n (r\"__+\", \"_\"),\n )\n result = field_name.strip().lower() or None\n # result = field_name.strip().upper() or None\n if result:\n if result.endswith(\"?\"):\n if not re.match(r\"is[_\\W]\", result):\n result = \"is_\" + result\n for pattern, replacement in fixes:\n result = re.sub(pattern, replacement, result)\n return result", "def _valid_column(column_name):\n return str(column_name)", "def normalize_username(value):\n return value.lower()", "def _normalize_package_name(self, name):\n return Prepared.normalize(name)", "def normalize_column(data: DataFrame, column: str):\n m = mean(data[column])\n s = sd(data[column])\n return data[column].map(lambda x: (x - m) / 
s)", "def _sanitize_field_name(self, field_name):\n field_name = field_name.replace(self._field_prefix, '')\n return field_name.replace('.', '_')", "def _normalize(self, metric_name, submit_method, prefix):\n metric_prefix = \"mongodb.\" if not prefix else \"mongodb.{0}.\".format(prefix)\n metric_suffix = \"ps\" if submit_method == RATE else \"\"\n\n # Replace case-sensitive metric name characters\n for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():\n metric_name = re.compile(pattern).sub(repl, metric_name)\n\n # Normalize, and wrap\n return u\"{metric_prefix}{normalized_metric_name}{metric_suffix}\".format(\n normalized_metric_name=self.normalize(metric_name.lower()),\n metric_prefix=metric_prefix, metric_suffix=metric_suffix\n )", "def clean_numeric_column(name : float) -> float:\n if name > -1 and name < 1:\n name = 0\n return name", "def apply_column_value(raw_column_name, column_value, model, mapping, is_extra_data, cleaner):\n # If the item is the extra_data column, then make sure to save it to the\n # extra_data field of the database\n if raw_column_name in mapping:\n table_name, mapped_column_name, display_name, is_extra_data = mapping.get(raw_column_name)\n\n # special postal case:\n if mapped_column_name in ['postal_code', 'owner_postal_code']:\n if '-' in str(column_value):\n postal = str(column_value).split('-')[0].zfill(5)\n ext = str(column_value).split('-')[1].zfill(4)\n column_value = postal + '-' + ext\n column_value = str(column_value).zfill(5)\n\n cleaned_value = None\n if cleaner:\n # Get the list of Quantity fields from the Column object in SEED. This is non-ideal, since the\n # rest of the mapping code does not use SEED models. Perhaps make this an argument.\n if (model.__class__.__name__, mapped_column_name) in apps.get_model('seed',\n 'Column').QUANTITY_UNIT_COLUMNS:\n # clean against the database type first\n cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data)\n\n # This is a temporary fix for when the raw_column_name and the mapped_column_name\n # are the same. It causes the units to be cast twice since the cleaner look up finds\n # the same column twice. 
The cleaner needs to be cleaned up quite a bit to handle\n # this error correctly.\n if mapped_column_name != raw_column_name:\n # now clean against the raw name with pint (Quantity Units) because that's the column\n # that holds the units needed to interpret the value correctly\n cleaned_value = cleaner.clean_value(cleaned_value, raw_column_name,\n is_extra_data)\n else:\n cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data)\n else:\n cleaned_value = default_cleaner(column_value)\n\n if is_extra_data:\n if hasattr(model, 'extra_data'):\n # only save it if the model and the mapping are the same\n if model.__class__.__name__ == table_name:\n if isinstance(cleaned_value, (datetime, date)):\n # TODO: create an encoder for datetime once we are in Django 1.11\n model.extra_data[mapped_column_name] = cleaned_value.isoformat()\n else:\n model.extra_data[mapped_column_name] = cleaned_value\n else:\n # Simply set the field to the cleaned value if it is the correct model\n if model.__class__.__name__ == table_name:\n setattr(model, mapped_column_name, cleaned_value)\n\n return model", "def normalize(path):\n return os.path.normcase(os.path.realpath(path))", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def short_column(name : str) -> str:\n return name.split(\"-\")[1]", "def _normalize_query(self, query):\n return re.sub('\\s+', ' ', query).strip().lower()", "def _normalize(self, entry):\n sql = generalize_sql(entry.get('query'))\n return '{}-{}-{}'.format(self.REPORT_LABEL, sql, entry.get('query_class'))", "def act_on_column_name(self, *, arg, value):\n assert arg is None\n assert isinstance(value, str)", "def normalize_col_name(col_name, used_column_names, is_relation):\n field_params = {}\n field_notes = []\n\n new_name = col_name.lower()\n if new_name != col_name:\n field_notes.append('Field name made lowercase.')\n\n if is_relation:\n if new_name.endswith('_id'):\n new_name = new_name[:-3]\n else:\n field_params['db_column'] = col_name\n\n new_name, num_repl = re.subn(r'\\W', '_', new_name)\n if num_repl > 0:\n field_notes.append('Field renamed to remove unsuitable characters.')\n\n if new_name.find(LOOKUP_SEP) >= 0:\n while new_name.find(LOOKUP_SEP) >= 0:\n new_name = new_name.replace(LOOKUP_SEP, '_')\n if col_name.lower().find(LOOKUP_SEP) >= 0:\n # Only add the comment if the double underscore was in the original\n # name\n field_notes.append(\n \"Field renamed because it contained more than one '_' in a row.\"\n )\n\n if new_name.startswith('_'):\n new_name = 'field%s' % new_name\n field_notes.append(\"Field renamed because it started with '_'.\")\n\n if new_name.endswith('_'):\n new_name = '%sfield' % new_name\n field_notes.append(\"Field renamed because it ended with '_'.\")\n\n if keyword.iskeyword(new_name):\n new_name += '_field'\n field_notes.append(\n 'Field renamed because it was a Python reserved word.')\n\n if new_name[0].isdigit():\n new_name = 'number_%s' % new_name\n field_notes.append(\n \"Field renamed because it wasn't a valid Python identifier.\")\n\n if new_name in used_column_names:\n num = 0\n while '%s_%d' % (new_name, num) in used_column_names:\n num += 1\n new_name = '%s_%d' % (new_name, num)\n field_notes.append('Field renamed because of name conflict.')\n\n if col_name != new_name and field_notes:\n field_params['db_column'] = 
col_name\n\n return new_name, field_params, field_notes", "def normalize_key(metric_key):\n metric_key = SPACES.sub(\"_\", metric_key)\n metric_key = SLASHES.sub(\"-\", metric_key)\n metric_key = NON_ALNUM.sub(\"\", metric_key)\n return metric_key", "def normalize_var_name(var_name):\n var_case = detect_case(var_name)\n if var_case == SNAKE_CASE:\n return normalize_snake(var_name)\n elif var_case == CAMEL_CASE:\n return normalize_camel(var_name)\n elif var_case == KEBAB_CASE:\n return normalize_kebab(var_name)\n elif var_case == CONST_CASE:\n return normalize_const(var_name)\n else:\n raise ValueError('unknown case {}'.format(var_case))", "def _to_db_identifier(name):\n return name.replace('-', '_')", "def normalize_issn(val):\n val = val.replace(\" \", \"\").replace(\"-\", \"\").strip().upper()\n return \"{0}-{1}\".format(val[:4], val[4:])", "def encodeColumnName(self, column):\r\n return '\"{}\"'.format(column)", "def clean_table_name(table_name):\n path_underscore = table_name.translate(table_name.maketrans(\"-. \", \"___\"))\n return \"_\".join(filter(None, path_underscore.split(\"_\")))", "def cleanfieldlower(value):\n if not value:\n return None\n value = str(value)\n value = value.strip()\n value = value.lower()\n return value", "def normalize(self, s):\n s = normalizing_regexp.sub('_', s)\n if s[0:1] in string.digits:\n s = '_' + s\n return s", "def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)", "def header_converter(self, s):\n\n header = clean_string(s.lower().replace(\"_\", \" \"))\n if hasattr(self, \"locale\"):\n return self.column_headers[self.locale].get(header, header)\n else:\n return header", "def normalize(s):\n s = replace_whitespace(s)\n s = remove_dashes(s)\n s = s.lower()\n return s", "def namingConvention(columnName):\n words = columnName.lower().split(\"_\")\n\n def cap(word):\n if word.lower() == \"id\":\n return word.upper()\n else:\n return word.capitalize()\n\n return words[0] + \"\".join(map(cap, words[1:]))", "def normalise_field_value(value):\n if isinstance(value, datetime):\n return make_timezone_naive(value)\n elif isinstance(value, Decimal):\n return decimal_to_string(value)\n return value", "def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name", "def super_flat(cls, s):\n if s is None:\n return ''\n\n return cls.sql_safe(cls.slugify(s).upper().replace('-', ''))", "def _normalize_argument(self, value):\n return storepass.utils.normalize_empty_to_none(value)", "def _normalize(fmt):\n if re.search(r\"\\d\\s+\", fmt):\n raise StructError(\"bad char in struct format\")\n return fmt.replace(\" \", \"\")", "def normalize_columns_separately(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\trange=column_max-column_min\n\tnomalized=(column_matrix-column_min)/range\n\treturn nomalized", "def get_prep_value(self, value):\n return str(value)", "def normalize_value(self, value, ref_value):\n _, norm, _ = _normalizers[self.name]\n return norm(value, ref_value)", "def normalise(str_val, casing='lower'):\n norm_val = None\n try:\n clean_val = clean(str_val)\n if casing == 'lower':\n norm_val = clean_val.lower()\n elif casing == 'upper':\n norm_val = clean_val.upper()\n 
else:\n norm_val = clean_val\n except Exception as e:\n pass\n return norm_val", "def normalizeHeaderName(name):\n # type: (AnyStr) -> AnyStr\n return name.lower()", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def normalize_label(label):\n label = normalize('NFKD', label)\n label = re.sub('/[^a-z0-9-_:.]/g', '-', label)\n label = label.lower()\n return label", "def normalize_hal(val):\n val = val.replace(\" \", \"\").lower().replace(\"hal:\", \"\")\n return val", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table", "def _fake_column_value(self, column_name, raw_value):\n if raw_value not in self.random_column_mappings[column_name]:\n self.random_column_mappings[column_name][raw_value] = uuid.uuid4().hex\n return self.random_column_mappings[column_name][raw_value]", "def normalize_project_name(project_name):\n if project_name in NAME_CACHE:\n return NAME_CACHE[project_name]\n value = project_name.lower().replace(\"-\", \"_\").replace(\".\", \"_\").replace(\" \", \"_\")\n NAME_CACHE[project_name] = value\n return value", "def _output_imei_column(self):\n if self._generate_check_digit:\n imei_col_name = sql.Identifier('imei_norm_with_check_digit')\n else:\n imei_col_name = sql.Identifier('imei_norm')\n return imei_col_name", "def _slugify_columns(column_names):\n\n encoded_names = []\n\n for column_name in column_names:\n slug = RE_ENCODED_COLUMN.sub('_', column_name).lower()\n slug = make_unique(slug, encoded_names + Parser.reserved_words)\n encoded_names.append(slug)\n\n return encoded_names", "def column(self, value):\n\n # Escape |\n return value.replace(\"|\", \"&#124;\") if value else value", "def normalise_key(self, key):\n key = key.replace('-', '_')\n if key.startswith(\"noy_\"):\n key = key[4:]\n return key", "def normalize_data(data):\n return ' '.join(data.upper().split())", "def normalize(self):\n normalized = self.all_details.get('normalized', '')\n if normalized:\n return normalized\n\n if self.is_digit():\n self.all_details['normalized'] = 'Numeric'\n elif self.is_uuid():\n self.all_details['normalized'] = 'UUID'\n elif self.is_gibberish():\n self.all_details['normalized'] = 'Gibberish'\n else:\n for nr in self.normalized_regex_list:\n regex = nr['regex']\n groups = r'{}'.format(nr['groups'])\n ua = regex.sub(groups, self.user_agent)\n if ua != self.user_agent:\n self.all_details['normalized'] = ua\n break\n else:\n self.all_details['normalized'] = ''\n\n return self.all_details['normalized']", "def normalized_species(csv_filename):\n with open(csv_filename) as csvfilename:\n reader = DictReader(csvfilename)\n first_row = next(reader)\n return first_row.get(\"species\").lower().replace(\" \", \"_\")", "def clean_keeping_underscores(cls, value):\r\n return INVALID_CHARS.sub('_', value)", "def slugify(value):\n return '_' + value.replace(' ', '-').lower()", "def pathnormalize(p):\n return os.path.normcase(os.path.normpath(p))", "def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n", "def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()", "def sanitize(value):\n from re import sub\n from unicodedata import normalize\n value = normalize('NFKD', value).encode('ascii', 'ignore')\n value = sub('[^\\w\\s\\.-]', '', value.decode('utf-8')).strip().lower()\n return sub('[-_\\s]+', '_', value)", "def 
normalizeSerial(serial):\n\treturn serial.upper().replace(\"-\", \"\").strip()", "def name_cleaning(df):\n # Custom cleaning\n df.columns = [re.sub(\"[\\\\. \\\\(\\\\)\\\\/]+\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\"-\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\"'\", \"\", elem) for elem in df.columns]\n df.columns = [re.sub(\",\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\":\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\"<\", \"MIN\", elem) for elem in df.columns]\n df.columns = [re.sub(\">\", \"MAG\", elem) for elem in df.columns]\n df.columns = [re.sub(\"&\", \"E\", elem) for elem in df.columns]\n df.columns = [re.sub(\"°\", \"\", elem) for elem in df.columns]\n df.columns = [re.sub(\"%\", \"PERC\", elem) for elem in df.columns]\n df.columns = [re.sub(\"\\\\+\", \"_\", elem) for elem in df.columns]\n # String upper\n df.columns = [elem.upper() for elem in df.columns]\n # Trim\n df.columns = [elem.strip() for elem in df.columns]\n # Cut recurring underscore\n df.columns = [re.sub(\"_+\", \"_\", elem) for elem in df.columns]\n return(df)", "def _normalize_name(self, name):\n try:\n return safe_join(self.location, name)\n except ValueError:\n raise SuspiciousOperation(\n \"Attempted access to '%s' denied.\" % name,\n )", "def normalize_columns(df, colnames):\r\n for col in colnames:\r\n s = df[col]\r\n df[col] = s.sub(s.min()).div((s.max() - s.min()))\r\n print(f'''Normalized Columns: {colnames}''')\r\n\r\n return df", "def _normalize_show_name(name):\n\tname = name.casefold()\n\tname = re.sub(\"[^a-z0-9]\", \" \", name)\n\tname = re.sub(\"_\", \" \", name)\n\tname = re.sub(\"season \\d( part \\d)?\", \" \", name)\n\tname = re.sub(\"\\s+\", \" \", name)\n\treturn name", "def __prepare_value(self, val, none_symbol=\"-\"):\n\t\t# If type is a tuple, then it is a \n\t\t# (func_unit_label, replica_id) pair. 
\n\t\t# Concatenate then using \"_\" symbol.\n\t\tif type(val) == type(()):\n\t\t\tval = \"_\".join(map(str, val))\n\n\t\t# Cast value to string/character type,\n\t\t# if it is not a None value\n\t\tif val is not None:\n\t\t\tval = str(val)\n\t\telse:\n\t\t\tval = none_symbol\n\n\t\t# Value processing finished\n\t\treturn val", "def lower_replace(value):\n return value.lower().replace(\" \",\"_\")", "def normalize_name(name):\n return PUNCT_RE.sub('-', name.lower()).strip('-')", "def clean_user_main_attribute(self, main_attribute):\n return force_username_case(main_attribute)", "def normalisesym(self, label):\n return label", "def normalize(self, attr_name): # DONE\n self.data[attr_name] = (self.data[attr_name] - self.data[attr_name].mean()) / self.data[attr_name].std()", "def normalize_const(var_name):\n return var_name.lower().split('_')", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def col_name(col):\n\n if isinstance(col, str):\n return col\n return col.__name__", "def sanitize_metric_name(name: str) -> str:\n name = name.replace(\":\", \"-\")\n return name", "def tidy_cols(my_csv):\n return [re.sub(\" \", \"_\", col.lower()) for col in my_csv.columns]", "def normalized(path, normalization='NFC'):\n if sys.platform != 'darwin':\n return path if type(path) == str else str(path, 'utf-8')\n if not isinstance(path, str):\n path = str(path, 'utf-8')\n return unicodedata.normalize(normalization, path)", "def internal_to_friendly(self, value):\n return value / self.conversion", "def normalize_string(value):\n if value is None:\n return \"\"\n head, _, _ = value.partition(\" (\")\n return head.strip()", "def symbolize_sensorname(name):\n return name.lower().replace(\" \", \"_\")", "def name_only(value: str) -> str:\n m = re.match(r'\\d+\\.\\d+ (.+)', value)\n if m:\n return m.group(1)\n else:\n return value", "def format_field(self, value, format_spec):\n value = super(FilenameFormatter, self).format_field(value, format_spec)\n if self.lowercase:\n value = value.lower()\n if not self.nonwordchars:\n value = re.sub('[^\\w\\s]+', '', value)\n value = re.sub('\\s+', self.word_delimiter, value)\n return value", "def canon(raw_attr_name: str) -> str:\n if raw_attr_name: # do not dereference None, and \"\" is already canonical\n return raw_attr_name.replace(\" \", \"\").lower()\n return raw_attr_name", "def normalize_label(label: str) -> str:\n label = re.sub(r\"['\\\"`]+\", \"\", label) # remove apostrophes\n label = re.sub(r\"[-/\\\\ \\t_]+\", \" \", label) # normalize separators\n lower_count = sum(map(str.islower, label))\n upper_count = sum(map(str.isupper, label))\n if \" \" not in label and lower_count > 0 and upper_count > 0:\n # camel case to \"normal case\"\n label = re.sub(r\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", label)\n label = re.sub(r\"(^[Tt]he |^[Aa] )\", \"\", label) # drop determiner\n return label.lower()", "def normalize_directory_name(directory_name: str) -> str:\n return directory_name.lower()" ]
[ "0.67484534", "0.6414139", "0.64000803", "0.6394291", "0.62145936", "0.62083966", "0.62030125", "0.6104114", "0.6069231", "0.604914", "0.59973955", "0.5969188", "0.59618396", "0.59541255", "0.5941385", "0.5940318", "0.58399045", "0.5810996", "0.5807061", "0.5782344", "0.57602286", "0.57570565", "0.5699301", "0.56776536", "0.5676518", "0.5676518", "0.5676518", "0.56664056", "0.5626696", "0.5621128", "0.5604709", "0.5587388", "0.55689013", "0.5550019", "0.55322057", "0.5522908", "0.55205685", "0.5510894", "0.5503285", "0.548497", "0.5478947", "0.5455254", "0.5453922", "0.544556", "0.5427666", "0.54242486", "0.54086816", "0.5408526", "0.54025733", "0.5387637", "0.5385096", "0.53813064", "0.5379968", "0.53732646", "0.5359539", "0.5343593", "0.53274584", "0.5322532", "0.5306487", "0.5297724", "0.5289786", "0.5288774", "0.5279888", "0.5276762", "0.52765286", "0.5271574", "0.5265743", "0.52342516", "0.52290577", "0.5219188", "0.5213126", "0.52124745", "0.52093667", "0.5208953", "0.5207891", "0.52064043", "0.5202133", "0.5200923", "0.51978", "0.5190771", "0.5179096", "0.51786405", "0.5177328", "0.5163485", "0.51574564", "0.5152593", "0.5152593", "0.5152506", "0.5149896", "0.5145254", "0.51440096", "0.5141533", "0.5131418", "0.5126379", "0.5122778", "0.51214117", "0.5114225", "0.51133937", "0.51121503" ]
0.6757992
1
Alternate constructor to load an existing collection of records into a temporary SQLite database. Loads data (an iterable of lists, tuples, or dicts) into a temporary table
def from_records(cls, data, columns=None):
    temptable = TemporarySqliteTable(data, columns)
    return cls(temptable.connection, temptable.name)
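To make the behavior concrete, here is a rough, self-contained sketch of the temp-table loading that from_records delegates to. TemporarySqliteTable is paraphrased with plain stdlib sqlite3, so the helper below is an assumption for illustration, not the library's actual implementation:

import sqlite3

def load_temp_table(data, columns):
    # Create an in-memory database and a temp table with the given columns.
    conn = sqlite3.connect(':memory:')
    cols = ', '.join('"%s"' % c for c in columns)
    conn.execute('CREATE TEMP TABLE records (%s)' % cols)
    # Bulk-insert the records with one placeholder per column.
    placeholders = ', '.join('?' * len(columns))
    conn.executemany('INSERT INTO records VALUES (%s)' % placeholders, data)
    return conn, 'records'

conn, name = load_temp_table([('ada', 1), ('grace', 2)], ['name', 'score'])
print(conn.execute('SELECT * FROM %s' % name).fetchall())
# -> [('ada', 1), ('grace', 2)]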
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_records(cls, data, columns=None):\n connection, table = _load_temp_sqlite_table(columns, data)\n return cls(connection, table)", "def load_data(cursor, table, *args, **kwds):\n try:\n records, = args\n columns = None\n except ValueError:\n columns, records = args\n\n default = kwds.pop('default', '')\n if kwds:\n msg = 'load_data() got unexpected keyword argument {0!r}'\n raise TypeError(msg.format(next(iter(kwds.keys()))))\n\n records = iter(records)\n first_record = next(records, None)\n if columns:\n if first_record:\n records = chain([first_record], records)\n else:\n if not first_record:\n return # <- EXIT! (No table created.)\n try: # Try mapping.\n columns = list(first_record.keys())\n records = chain([first_record], records)\n except AttributeError:\n try: # Try namedtuple.\n columns = first_record._fields\n records = chain([first_record], records)\n except AttributeError:\n columns = first_record # Use first row as column names.\n\n if not isinstance(columns, Iterable) or isinstance(columns, str):\n msg = 'expected iterable of strings, got {0!r}'\n raise TypeError(msg.format(columns))\n columns = list(columns) # Make sure columns is a sequence.\n\n if isinstance(first_record, Mapping):\n records = ([rec.get(c, '') for c in columns] for rec in records)\n\n with savepoint(cursor):\n if table_exists(cursor, table):\n alter_table(cursor, table, columns, default=default)\n else:\n create_table(cursor, table, columns, default=default)\n insert_records(cursor, table, columns, records)", "def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()", "def __init__(self,dbname='',**kwds):\n self._skip = kwds.get('skip',[])\n self._limit= kwds.get('limit',[])\n self._keys= kwds.get('keys',[])\n self._db = getDBConnection()[dbname]\n self._collection_name=kwds.get('collection','all')\n self._collection = []\n self._skip_rec=0\n self._props = {}\n if self._limit and self._skip:\n self._nrows = self._limit[0]\n if len(self._limit)>1:\n self._ncols = self._limit[1]\n else:\n self._ncols = 1\n if len(self._skip)==2:\n self._skip_rows=self._skip[0]\n self._skip_cols=self._skip[1]\n else:\n self._skip_rec = self._skip[0]\n self._table=dict()\n self._is_set=False\n self._set_collection()\n self._row_heads=[]\n self._col_heads=[]", "def __init__(self, entries):\n # objects representing database records\n self.entries = entries", "def load_data(db_handler):\n\n from random import seed\n from random import random\n \n seed(1)\n\n new_notes = []\n\n for i in range(1,10):\n\n new_notes.append({\n\n\n 'title': str(i) + str(random()),\n 'content': 'Lorem ipsum' + str(i),\n 'active': True,\n 'created_by':\"Cristhian\" + str(i),\n 'created_at': date.today(),\n 'edited_at':date.today(),\n \n })\n\n new_notes.append(\n {\n \"active\": False,\n \"content\": \"Jesenia\",\n \"edited_at\": \"2019-10-24\",\n \"title\": \"Jesenia La chica de al lado\",\n \"created_by\": \"Cristhian1\",\n \"created_at\": \"2019-10-24\"\n })\n\n new_notes.append(\n {\n \"active\": False,\n \"title\": \"La vida de los numeros\",\n \"content\": \"Lorem ipsum y los numeros de la muerte\",\n \"edited_at\": \"2019-10-25\",\n 
\"created_by\": \"Jesenia\",\n \"created_at\": \"2019-10-24\"\n })\n\n Note.insert_many(new_notes).execute()\n\n User(name=\"Cristhian\", email=\"test@gmail.com\",\n password=b'$2b$12$U/QjtHt/j0xRT4r8Hx3fOe93EssM6M0iiUaQJOrTd64RXbxvhw6Ii').save()", "def __init__(self, db_location = ':memory:'):\n self.connection = sqlite3.connect(db_location)\n self.cur = self.connection.cursor()\n self.create_table()", "def load(values):\n import sqlite3\n conn = sqlite3.connect('./example.db')\n df = pd.DataFrame(values)\n df.to_sql('observations', conn)", "def load_data(connection, insert_sql, data):\n cur = connection.cursor()\n for d in data:\n cur.execute(insert_sql, d)\n connection.commit()", "def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = item['pk']\n\n # Add a new row.\n session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()", "def load_fixtures(self, dbname, table, data):\n db = self.databases[dbname]['db']\n db.execute('BEGIN')\n for row in data:\n columns = row.keys()\n q = db.Insert(table, cols=columns)\n db.execute(q, row)\n db.execute('COMMIT')", "def setup_sample_data(no_of_records):\n rows_in_database = [{'id': counter, 'name': get_random_string(string.ascii_lowercase, 20), 'dt': '2017-05-03'}\n for counter in range(0, no_of_records)]\n return rows_in_database", "def connect_db_and_load_data(cls):\n db.connect()\n db.create_tables([Product], safe=True)\n load_data(transform_data('./inventory.csv'))", "def SQLNewFactory(cls, data, dbh=None, dbh_key = \"default\"):\n release = False\n if dbh is None:\n release = True\n dbh = dbstuff.getRW(dbh_key)\n try:\n columns = []\n values = []\n for k,v in data.items():\n columns.append('`' + k + '`')\n values.append(v)\n query = \"INSERT INTO \" + cls.SQLTable + \" (\" + \",\".join(columns) + \") VALUES (\" + \",\".join([\"%s\" for v in values]) + \")\"\n c = dbh.cursor()\n if (DEBUG):\n print query\n c.execute( query, tuple(values) )\n id = c.lastrowid\n c.close()\n dbh.commit()\n finally:\n if release:\n dbstuff.release(dbh,dbh_key)\n return cls(id, dbh)", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def __init__(self, db_connection, table):\r\n self.elements = []\r\n self.db = db_connection\r\n self.table = table", "def load_expenditures():\n\n Expenditure.query.delete()\n\n with open(expenditure_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n expenditure_data = row.split(\",\")\n print(expenditure_data)\n\n id = expenditure_data[0]\n category_id = expenditure_data[1]\n price = expenditure_data[2]\n date_of_expenditure = expenditure_data[3]\n expenditure_userid = expenditure_data[4]\n where_bought = expenditure_data[5]\n description = expenditure_data[6]\n\n expenditure = Expenditure(\n id = id,\n category_id = category_id,\n price = price,\n date_of_expenditure = get_datetime(date_of_expenditure),\n expenditure_userid = expenditure_userid,\n where_bought = where_bought,\n description = description\n )\n\n 
db.session.add(expenditure)\n\n db.session.commit()", "def __init__(self, home_dir, temp_tables, *args, **kwargs):\n self.home_dir = home_dir\n self.temp_tables = temp_tables\n self._results = None\n self.haveExpr = False\n self.module = _MODULE_NAME\n self.type = 'MOCKDB'\n self.configdict = {'user': 'non-user',\n 'passwd': 'non-passwd',\n 'meta_file': 'non-file',\n 'meta_section': 'non-section'}\n # register data converters\n sqlite3.register_adapter(datetime.datetime, adapt_timestamp)\n sqlite3.register_converter('TIMESTAMP', convert_timestamp)\n sqlite3.register_converter('DATE', convert_timestamp)\n\n\n needSetup = False\n\n # see if the database needs to be set up\n if not os.path.exists(os.path.join(self.home_dir, DB_FILE)):\n needSetup = True\n elif not os.path.isfile(os.path.join(self.home_dir, DB_FILE)):\n shutil.rmtree(os.path.join(self.home_dir, DB_FILE))\n needSetup = True\n # initialize the connection\n sqlite3.Connection.__init__(self, database=os.path.join(self.home_dir, DB_FILE),\n detect_types=sqlite3.PARSE_DECLTYPES,\n check_same_thread=False)\n cur = self.cursor()\n cur.execute(\"PRAGMA synchronous = OFF\")\n cur.close()\n self._autocommit = False\n self.setupTempTables()\n if needSetup:\n self.setup()", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. 
This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def __init__(self, table_id='', columns=(),\n file_name='', table_data=(), verbose=True):\n self.table_id = table_id\n self.columns = list(columns)\n self.file_name = file_name\n self.table_data = list(table_data)\n self.verbose = verbose", "def from_database(cls, expt_class=ImagingExperiment,\n name='unnamed', parallelize=False, **db_kwargs):\n trial_ids = fetch_trials(**db_kwargs)\n return cls.from_trial_ids(trial_ids, expt_class=expt_class,\n name=name, parallelize=parallelize)", "def test_create_from_dicts(self):\n cols = list(zip(*self.dtypes))[0]\n dicts = [dict([(cols[i], d[i]) for i in xrange(len(d))])\n for d in self.idata]\n\n tbl = Table.create(\n ':memory:', \"Bar\", dicts, verbose=True,\n primary_key='id', autoincrement=True)\n\n self.check_index(self.idata, tbl.select())\n for idx, col in enumerate(cols):\n if col == 'id':\n continue\n self.check_data(self.idata[:, [0, idx]], tbl[col])", "def __init__(self, filename='store.sqlite'):\n self.conn = sqlite3.connect(filename or ':memory:')\n self.cur = self.conn.cursor()\n self.create_tables()\n self.changed = False", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def populate_tables(connection: sqlite3.Connection) -> None:\n fake = Faker()\n Faker.seed(0)\n\n c = conn.cursor()\n\n number_of_courses = fake.pyint(min_value=5, max_value=20)\n\n for _ in range(number_of_courses):\n course_name = fake.word()\n\n insert_statement = f'insert into courses (name) values (\"{course_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n number_of_users = fake.pyint(min_value=1, max_value=23)\n\n Faker.seed()\n\n for _ in range(number_of_users):\n\n if fake.pybool():\n user_name = f'{fake.first_name_female()} {fake.last_name_female()}'\n else:\n user_name = f'{fake.first_name()} {fake.last_name()}'\n\n insert_statement = f'insert into users (name) values (\"{user_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n for _ in range(50000):\n Faker.seed()\n\n random_user_id = fake.pyint(1, number_of_users)\n random_course_id = fake.pyint(1, number_of_courses)\n Faker.seed()\n random_lesson_no = fake.pyint(3, 12)\n Faker.seed()\n random_exercise_no = fake.pyint(1, 50)\n random_data = fake.sentence()\n\n 
insert_statement = f\"\"\"insert into saves (user_id, course_id, lesson_no, exercise_no,data) \n values ({random_user_id}, {random_course_id}, {random_lesson_no}, \n {random_exercise_no}, '{random_data}');\"\"\"\n c.execute(insert_statement)\n\n connection.commit()", "def __init__(self, object_list, table_name, crowdcontext):\n self.cc = crowdcontext\n self.data = {'id': range(len(object_list)), 'object':object_list}\n self.start_id = len(object_list)\n self.cols = [\"id\", \"object\"]\n self.table_name = table_name\n self.presenter = None\n self.project_id = None\n self.project_short_name = None\n self.project_name = None\n\n if type(object_list) is not list:\n raise Exception(\"'object_list' should be a list\")\n if table_name not in self.cc.show_tables():\n try:\n exe_str = \"CREATE TABLE '%s' (id integer, col_name BLOB, value BLOB DEFAULT NULL, PRIMARY KEY(id, col_name))\" %(table_name)\n self.cc.cursor.execute(exe_str)\n except sqlite3.OperationalError:\n raise", "def load(input):\n for row in input:\n SESSION.execute(INSERT_USERS,\n [int(row['id']),\n row['fname'],\n row['lname'],\n row['email'],\n row['group']])", "def fill_the_db(testapp):\n session_factory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(session_factory, transaction.manager)\n for entry in ENTRIES:\n row = MyModel(\n title=entry['title'],\n body=entry['body'],\n creation_date=datetime.datetime.strptime(\n entry['creation_date'], '%b %d, %Y'\n )\n )\n\n dbsession.add(row)", "def __init__(self, filename):\n # this boilerplate is turning me off from sqlalchemy. Why can't I have\n # one object that I interact with, instead of four levels of crap?\n self.sql_engine = create_engine(\n 'sqlite:///'+filename,\n isolation_level='READ UNCOMMITTED'\n )\n self.sql_session_maker = sessionmaker(bind=self.sql_engine)\n self.sql_session = self.sql_session_maker()\n Base.metadata.create_all(bind=self.sql_engine)\n self.term_cache = {}", "def __init__(self, sqlite_filename):\n if os.path.isfile(sqlite_filename):\n create_tables = False\n else:\n create_tables = True\n\n self.conn = sqlite3.connect(sqlite_filename)\n self.conn.row_factory = sqlite3.Row\n\n cur = self.conn.cursor()\n cur.execute('PRAGMA foreign_keys = 1')\n cur.execute('PRAGMA synchronous = NORMAL')\n\n if create_tables:\n self.create_tables()", "def __init__(self, csv_file):\n self.database = []\n\n with open(csv_file, \"r+\", encoding=\"utf-8\", newline=\"\") as file:\n reader = csv.DictReader(file)\n counter = 1\n for row in reader:\n if row[\"Typ\"] == \"Book\":\n object = Book(id=row[\"ID\"], name=row[\"Nazwa\"], price=20, amount=row[\"Ilość\"],\n created_at=row[\"Data dodania\"], last_buy_at=row[\"Data ostatniego zakupu\"],\n author=row[\"Autor\"], number_of_pages=row[\"Ilość stron\"], pic=row['Link do miniaturki'])\n self.database.append(object)\n counter += 1\n elif row[\"Typ\"] == \"Ebook\":\n object = Ebook(id=row[\"ID\"], name=row[\"Nazwa\"], price=20, amount=row[\"Ilość\"],\n created_at=row[\"Data dodania\"], last_buy_at=row[\"Data ostatniego zakupu\"],\n author=row[\"Autor\"], number_of_pages=row[\"Ilość stron\"],\n pic=row['Link do miniaturki'],\n format=row[\"Format\"])\n self.database.append(object)\n counter += 1", "def __init__(self, header=None, rows=None, fromfile=None, delimiter=','):\n\t\tif fromfile:\n\t\t\trs = Csv(fromfile,delimiter=delimiter).getRows()\t\n\t\t\tself.header=rs[0]\n\t\t\tself.data = [] #a list of dictionaries\n\t\t\tfor r in rs[1:]:\n\t\t\t\tacc= dict()\n\t\t\t\tfor h in 
self.header:\n\t\t\t\t\tacc[h]=r[self.header.index(h)]\t\n\t\t\t\tself.data.append(acc) \n\t\telif header:\n\t\t\tself.header = header\t\n\t\t\tself.data = []\n\t\t\tif rows:\n\t\t\t\tif isinstance(rows[0],dict):\n\t\t\t\t\tself.data = rows\n\t\t\t\telif isinstance(rows[0],list):\t\n\t\t\t\t\tfor r in rows:\n\t\t\t\t\t\tacc= dict()\n\t\t\t\t\t\tfor h in self.header:\n\t\t\t\t\t\t\tacc[h]=r[self.header.index(h)]\n\t\t\t\t\t\tself.data.append(acc) \n\t\telse:\n\t\t\tself.header = []\t\n\t\t\tself.data = []", "def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)", "def __init__(self, *records: ScalarSequence):\n self._records = [r for r in records if r]", "def load_data(path=None, dbtable=None, headers=None):\n\n DF = dd.read_csv(\n urlpath=path,\n names=headers,\n dtype='unicode')\n\n dd.to_sql(\n DF,\n name=dbtable,\n uri=data_store,\n if_exists='append',\n index=False\n )", "def DBRead( DB, table=None, where=None, sql=None, params=None, cls=None, *args, **kwargs ):\n assert isinstance( DB, sqlite3.Connection ), \"expect sqlite3.Connection\"\n\n connection = DB\n\n #\n if sql is None:\n sql = f\"SELECT * FROM {table}\"\n\n if where is not None:\n sql = f\"{sql} WHERE {where}\"\n\n #\n if params is None:\n params = args\n\n # execute\n cursor = connection.cursor()\n query_result = cursor.execute( sql, params )\n # convert result to object | list | dict | ...\n if cls is None:\n result = query_result\n\n elif cls is tuple:\n result = query_result\n\n elif cls is list:\n result = query_result\n\n elif cls is dict:\n def convert_to_dict( fields, row ):\n return dict( zip( fields, row ) )\n\n fields = [ x[0] for x in query_result.description ]\n fn = functools.partial( convert_to_dict, fields )\n\n result = map( fn, query_result )\n\n else: # cls is class\n def convert_to_cls( cls, fields, row ):\n # str -> str\n # -> []\n # -> {}\n # int -> int\n # nul -> str\n # -> []\n # -> {}\n # -> int\n # depends of cls.attr type\n\n o = cls()\n\n for field, value in zip( fields, row ):\n a = getattr( o, field, None )\n\n if value is None:\n pass\n else:\n if isinstance( a, list ):\n setattr( o, field, json.loads( value ) )\n\n elif isinstance( a, dict ):\n setattr( o, field, json.loads( value ) )\n\n else:\n setattr( o, field, value )\n\n return o\n\n fields = [ x[0] for x in query_result.description ]\n fn = functools.partial( convert_to_cls, cls, fields )\n\n result = map( fn, query_result )\n\n return result", "def create_sqlite_table(self):\n self.print_datetime_output('Connect to data base %s' % self.db_name)\n con = sqlite3.connect(self.db_name)\n cur = con.cursor()\n\n # check if table exists\n cur.execute(\"select count(*) from sqlite_master where type='table' and name='%s'\" % self.db_table)\n if cur.fetchall()[0][0] == 1:\n self.print_datetime_output('Previous table %s was dropped' % self.db_table)\n cur.execute(\"DROP TABLE %s;\" % self.db_table)\n\n self.print_datetime_output('Create table %s and import data from csv file %s' % (self.db_table,\n self.time_series_file_name))\n cur.execute(\"CREATE TABLE %s (timestamp, close_USD);\" % self.db_table)\n\n with open(self.file_name, 'r') as fin:\n dr = csv.DictReader(fin)\n to_db = [(i['timestamp'], i['close (USD)']) for i in dr]\n\n cur.executemany(\"INSERT INTO %s (timestamp, close_USD) VALUES (?, ?);\" % self.db_table, to_db)\n con.commit()\n return con", "def load_data():\n\tscores = 
pd.read_csv('../data/user_assessment_scores.csv')\n\tviews = pd.read_csv('../data/user_course_views.csv')\n\ttags = pd.read_csv('../data/course_tags.csv')\n\tinterests = pd.read_csv('../data/user_interests.csv')\n\n\tdb_file = '../db/usersim.sqlite'\n\ttry:\n\t\tengine = sqlite3.connect(db_file, timeout=10)\n\t\tscores.to_sql('scores', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\tviews.to_sql('views', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\ttags.to_sql('tags', engine, if_exists='replace', index=False, index_label='course_id')\n\t\tinterests.to_sql('interests', engine, if_exists='replace', index=False, index_label='user_handle')\n\texcept:\n\t\tprint('Error occured while inserting into database')\n\tfinally:\n\t\tif engine:\n\t\t\tengine.close()\n\treturn scores, views, tags, interests", "def setup_database() -> sqlite3.Cursor:\n conn = sqlite3.connect(':memory:')\n cursor = conn.cursor()\n\n insert_books(cursor)\n insert_lookups(cursor)\n\n return cursor", "def __init__(self):\r\n date_time('Connecting to local database ...')\r\n\r\n self.conn = sqlite3.connect(DATABASE_PATH)\r\n self.cursor = self.conn.cursor()\r\n\r\n # Set up database\r\n self.cursor.execute('PRAGMA synchronous = OFF')\r\n self.cursor.execute('PRAGMA journal_mode = OFF')\r\n self.cursor.execute('PRAGMA locking_mode = EXCLUSIVE')\r\n self.cursor.execute('PRAGMA count_changes = FALSE')\r\n\r\n self.cursor.execute('CREATE TABLE IF NOT EXISTS citations (id INTEGER PRIMARY KEY, citation TEXT UNIQUE);')", "def populate_db():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Person class')\n logger.info('Note how I use constants and a list of tuples as a simple schema')\n logger.info('Normally you probably will have prompted for this from a user')\n\n PERSON_NAME = 0\n LIVES_IN_TOWN = 1\n NICKNAME = 2\n\n people = [\n ('Andrew', 'Sumner', 'Andy'),\n ('Peter', 'Seattle', None),\n ('Susan', 'Boston', 'Beannie'),\n ('Pam', 'Coventry', 'PJ'),\n ('Steven', 'Colchester', None),\n ]\n\n logger.info('Creating Person records: iterate through the list of tuples')\n logger.info('Prepare to explain any errors with exceptions')\n logger.info('and the transaction tells the database to fail on error')\n\n logger.info('Working with Job class')\n logger.info('Creating Job records. 
We use the foreign key')\n\n JOB_NAME = 0\n START_DATE = 1\n END_DATE = 2\n SALARY = 3\n PERSON_EMPLOYED = 4\n\n jobs = [\n ('Analyst', '2001-09-22', '2003-01-30',65500, 'Andrew'),\n ('Senior analyst', '2003-02-01', '2006-10-22', 70000, 'Andrew'),\n ('Senior business analyst', '2006-10-23', '2016-12-24', 80000, 'Andrew'),\n ('Admin supervisor', '2012-10-01', '2014-11,10', 45900, 'Peter'),\n ('Admin manager', '2014-11-14', '2018-01,05', 45900, 'Peter')\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n try:\n for person in people:\n with database.transaction():\n new_person = Person.create(\n person_name = person[PERSON_NAME],\n lives_in_town = person[LIVES_IN_TOWN],\n nickname = person[NICKNAME])\n new_person.save()\n logger.info('Database add successful')\n\n logger.info('Print the Person records we saved...')\n for saved_person in Person:\n logger.info(f'{saved_person.person_name} lives in {saved_person.lives_in_town} and likes to be known as {saved_person.nickname}')\n\n except Exception as e:\n logger.info(f'Error creating = {person[PERSON_NAME]}')\n logger.info(e)\n logger.info('See how the database protects our data')\n try:\n for job in jobs:\n with database.transaction():\n new_job = Job.create(\n job_name = job[JOB_NAME],\n start_date = job[START_DATE],\n end_date = job[END_DATE],\n salary = job[SALARY],\n person_employed = job[PERSON_EMPLOYED])\n new_job.save()\n\n logger.info('Reading and print all Job rows (note the value of person)...')\n for job in Job:\n logger.info(f'{job.job_name} : {job.start_date} to {job.end_date} for {job.person_employed}')\n\n except Exception as e:\n logger.info(f'Error creating = {job[JOB_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Trip.query.delete()\n Entry.query.delete()\n Category.query.delete()\n Share.query.delete()\n\n # Add sample data\n user1 = User(email='user1@gmail.com', password=bcrypt.hashpw('user1'.encode('utf8'), bcrypt.gensalt(9)), name='One')\n user2 = User(email='user2@gmail.com', password=bcrypt.hashpw('user2'.encode('utf8'), bcrypt.gensalt(9)), name='Two')\n trip1 = Trip(location='Spain', date='08/09/2017', name='Abroad Trip', user_id=1)\n entry1 = Entry(trip_id=1, name='Tibidabo', address='08035 Barcelona, Spain', notes='Fun day trip!',\n type_id=1)\n category1 = Category(name='Attraction')\n share1 = Share(viewer_id=2, trip_id=1)\n\n db.session.add_all([user1, user2, trip1, entry1, category1, share1])\n db.session.commit()", "def load(self):\n db = CrawlDBI.DBI(dbtype='crawler')\n if self.rowid is not None:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"rowid = ?\",\n data=(self.rowid,))\n else:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"path = ?\",\n data=(self.path,))\n if 0 == len(rows):\n self.in_db = False\n elif 1 == len(rows):\n self.in_db = True\n rz = list(rows[0])\n self.rowid = rz.pop(0)\n self.path = rz.pop(0)\n self.type = rz.pop(0)\n self.cos = rz.pop(0)\n self.cart = rz.pop(0)\n self.ttypes = rz.pop(0)\n self.checksum = rz.pop(0)\n self.last_check = rz.pop(0)\n try:\n self.fails = rz.pop(0)\n except IndexError:\n self.fails = 0\n try:\n self.reported = rz.pop(0)\n 
except IndexError:\n self.reported = 0\n self.dirty = False\n else:\n raise StandardError(\"There appears to be more than one copy \" +\n \"of %s in the database\" % self)\n\n db.close()", "def __init__(self, table):\n\n self.table = table\n\n ## Lots of shortcutting\n # \"Connection\"\n self.client = self.table.db.client\n\n # Table object\n self.worksheet = self.table.worksheet\n self.worksheet_id = self.table.worksheet_id\n\n # Addressing\n self.fields = self.table.fields\n\n ## And something to come later\n self.row = None\n self.data = {}", "def process_bulkload(self, db, dest, kvargs, lines):\n\n logging.info(\"process_bulkload db={} dest={} kvargs={}\".format(db.name,dest,kvargs))\n\n if len(lines)>1:\n raise RuntimeError(\"Continuation lines not allowed in BULK_LOAD statement\")\n\n # We create a function that creates the SQLite3 database\n # We should be able to restart by seeing how many records are in the database and skiping that many\n # records in the input file.\n\n filename = os.path.join(self.outdir,dest + \".db\")\n logging.info(\"SQLite3 database {} exists: {}\".format(filename,os.path.exists(filename)))\n if self.dry_run and not os.path.exists(filename):\n logging.info(\"DRY RUN: Database will be created; Returning\")\n return False\n\n conn = sqlite3.connect(filename)\n c = conn.cursor()\n try:\n c.execute(\"select value from metadata where key='count'\")\n res = c.fetchall()\n if len(res)==0:\n logging.error(\"Schema present in {} but no count. Database is incomplete. Stopping.\".format(filename))\n raise RuntimeError(\"invalid database file \"+filename)\n logging.info(\"Schema is present. {} records in file\".format(res[0][0]))\n if self.dry_run:\n logging.info(\"DRY RUN: Returning\")\n return False\n except sqlite3.OperationalError:\n # Create the database \n logging.info(\"Create data, metadata and data_idx\")\n if self.dry_run:\n logging.info(\"DRY RUN: Returning\")\n return False\n\n c.execute(\"CREATE TABLE data (key varchar,value varchar);\")\n c.execute(\"CREATE INDEX data_idx ON data (key);\")\n c.execute(\"CREATE TABLE metadata (key varchar,value varchar);\")\n\n # Create the key function, compile it into the environment,\n # and use the key function to load up the database\n\n source = kvargs['table_source']\n key = kvargs['key']\n func = \"def keyfunc({}):return {}\\n\".format(source,key)\n self.compile_and_exec(func)\n\n # Read the input table. Use keyfunc to extract the keys. 
Put it into the sqlite3 database\n count = 0\n sw = stopwatch()\n sw.start()\n for source_record in db.read_records_as_dicts(tablename=source):\n # Convert any bytes values in source_record to strings\n for key in source_record.keys():\n if type(source_record[key])==bytes:\n source_record[key] = source_record[key].decode('utf-8')\n key_str = self.context.keyfunc(source_record)\n value_str = json.dumps(source_record)\n c.execute(\"insert into data (key,value) values (?,?);\",(key_str,value_str))\n count += 1\n if count % BULK_LOAD_REPORTING_INTERVAL==0:\n conn.commit() \n gc.collect()\n rate = int(BULK_LOAD_REPORTING_INTERVAL / sw.elapsed())\n mem = PROCESS.memory_info().rss\n logging.info(\"count={:,} rate={} records/sec elapsed time= {} pythonMemoryUsed={:,}\".\n format(count,rate,int(sw.elapsed()), mem))\n if count==self.limit:\n break\n c.execute(\"insert into metadata (key,value) values (?,?);\",(\"count\",count))\n conn.commit()\n\n # Create a function that can access the values\n dest_cursor = dest+\"_cursor\"\n self.context.__dict__[dest_cursor] = conn.cursor()\n if self.limit:\n limit = \" LIMIT {} \".format(self.limit)\n else:\n limit = \"\"\n\n func = (\"def {}(key):\\n\".format(dest) +\n \" import json;\\n\" +\n \" cursor={}\\n\".format(dest_cursor) +\n \" cursor.execute('select value from data where key=?;',(key,))\\n\" + \n \" row = cursor.fetchone()\\n\" +\n \" return json.loads(row[0])\\n\" +\n \"def dumpdb():\\n\" +\n \" for (key,value) in {}.execute('select key,value from data {}'):\\n\".format(dest_cursor,limit) +\n \" print(key,'=',value)\\n\")\n self.compile_and_exec(func)\n if self.dumpdb:\n self.context.__dict__['dumpdb']()\n return True", "def __init__(self, *args):\n self.engine = db.create_engine('mysql+pymysql://root:''@127.0.0.1:3306/northwind', echo=True)\n self.connection = self.engine.connect()\n self.metadata = db.MetaData()\n self.tables = db.Table(*args, self.metadata, autoload=True, autoload_with=self.engine)", "def populate_from_samples():\n\n # Tags\n try:\n for row in get_csv_data('samples/tags.csv'):\n tag = Tag(name=row['Name'], desc=row['Description'])\n db_session.add(tag)\n finally:\n db_session.commit()\n\n # Organizations\n try:\n for row in get_csv_data('samples/organizations.csv'):\n org = Organization(desc=row['Name'])\n db_session.add(org)\n finally:\n db_session.commit()\n\n # Departments\n try: \n for row in get_csv_data('samples/departments.csv'):\n org = db_session.query(Organization).filter_by(desc=row['Organization']).one()\n dpt = Department(desc=row['Department'], org=org)\n\n db_session.add(dpt)\n finally:\n db_session.commit()\n\n # Application types\n try:\n for row in get_csv_data('samples/apptypes.csv'):\n apptype = AppType(desc=row['Name'])\n db_session.add(apptype)\n finally:\n db_session.commit()\n\n # Applications\n try:\n for row in get_csv_data('samples/applications.csv'):\n apptype = db_session.query(AppType).filter_by(desc=row['AppType']).one()\n dpt = db_session.query(Department).join(Organization).\\\n filter(Department.desc==row['Department']).\\\n filter(Organization.desc==row['Organization']).\\\n one()\n\n app = App(desc=row['Application'], \n app_type=apptype, \n department=dpt,\n version=row['Version'],\n environment=row['Environment'],\n platform=row['Platform']\n )\n\n db_session.add(app)\n finally:\n db_session.commit()\n\n # Connections and Headers\n try:\n for row in get_csv_data('samples/connections.csv'):\n conn = Connection(conn_type=row['Type'], url=row['URL'], port=row['Port'], answer=row['Answer'])\n 
header = Header(conn_id=conn.id, header=row['Header'], value=row['Value'], conn=conn)\n\n db_session.add(conn)\n db_session.add(header)\n finally:\n db_session.commit()", "def __init__(self):\n self.__db = sqlite3.connect(DB_PATH)\n self.__cur = self.__db.cursor()\n self.__create_tables()", "def from_database(cls, mouse_name, expt_class=ImagingExperiment,\n parallelize=False, **db_kwargs):\n trial_ids = fetch_trials(mouse_name=mouse_name, **db_kwargs)\n return cls.from_trial_ids(trial_ids, expt_class=expt_class,\n parallelize=parallelize)", "def fill_the_db(testapp):\n session_factory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(session_factory, transaction.manager)\n for entry in ENTRIES:\n row = Entry(title=entry[\"title\"],\n creation_date=entry[\"creation_date\"],\n body=entry[\"body\"])\n dbsession.add(row)", "def init_data_base_equipment(path):\n\n conn = sqlite3.connect(path)\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS Equipment\")\n c.execute('''CREATE TABLE Equipment\n (\n comInsee text, comLib text,equipmentFile text,\n equAnneeService text, equNom text , equNomBatiment text\n )'''\n )\n conn.commit()\n conn.close()", "def loading_data_to_sqlite(list_files):\n engine = connecting_database()\n if engine is None:\n return False\n\n print()\n print(\"-\".rjust(60, \"-\"))\n print(\"Loading data\".center(60))\n print(\"-\".rjust(60, \"-\"))\n\n for filename in list_files:\n name, ext = os.path.splitext(filename)\n if ext != '.csv':\n print(\">> WARNING: CSV file invalid!\")\n return False\n\n print(f\">> Populating the table: stg_{name}\")\n df = pd.read_csv(path + inputfile + filename, sep=',', header=0)\n df.to_sql(f\"stg_{name}\", con=engine, index=False, if_exists='replace')\n print(\"-\".rjust(60, \"-\"))\n\n return True", "def load_products():\n\n print \"Loading Products\"\n\n for i, row in enumerate(open(\"data/mock_product_data.csv\")):\n row = row.rstrip()\n title, price, inventory = row.split(\",\")\n\n product = Product(title=title,\n price=price,\n available_inventory=inventory)\n\n db.session.add(product)\n\n db.session.commit()", "def populate_jobs():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Job class')\n logger.info('Creating Job records: just like Person. 
We use the foreign key')\n\n JOB_NAME = 0\n START_DATE = 1\n END_DATE = 2\n SALARY = 3\n PERSON_EMPLOYED = 4\n DEPARTMENT = 5\n\n jobs = [\n ('Analyst', '2001-09-22', '2003-01-30',65500, 'Andrew', 'ASYS'),\n ('Senior analyst', '2003-02-01', '2006-10-22', 70000, 'Andrew', 'ASYS'),\n ('Senior business analyst', '2006-10-23', '2016-12-24', 80000, 'Andrew', 'BUSI'),\n ('Admin supervisor', '2012-10-01', '2014-11-10', 45900, 'Peter', 'ADMN'),\n ('Admin manager', '2014-11-14', '2018-01-05', 45900, 'Peter', 'ADMN'),\n ('Sr Project Manager', '2014-11-14', '2018-01-05', 100000, 'Ryan', 'ASYS'),\n ('Manager', '2014-11-14', '2018-01-05', 100000, 'Pamela', 'BUSI'),\n ('Director', '2014-11-14', '2018-01-05', 120000, 'Monica', 'MGMT'),\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for job in jobs:\n with database.transaction():\n new_job = Job.create(\n job_name = job[JOB_NAME],\n start_date = job[START_DATE],\n end_date = job[END_DATE],\n duration = dates_diff(job[END_DATE], job[START_DATE]),\n salary = job[SALARY],\n person_employed = job[PERSON_EMPLOYED],\n job_department = job[DEPARTMENT])\n new_job.save()\n\n logger.info('Reading and print all Job rows (note the value of person)...')\n for job in Job:\n logger.info(f'{job.job_name} : {job.start_date} to {job.end_date} for {job.person_employed} in {job.job_department}')\n\n except Exception as e:\n logger.info(f'Error creating = {job[JOB_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def SQLFactory(cls, keys={}, sort=[], multi=False, dbh=None, dbh_key=\"default\"):\n rv = None\n if multi:\n rv = []\n release = False\n if dbh is None:\n release = True\n dbh = dbstuff.getRO(dbh_key)\n try:\n whereclause = []\n wherevalues = []\n for k,v in keys.items():\n if k[0]=='<':\n whereclause.append('%s<%%s' % k[1:])\n elif k[0]=='>':\n whereclause.append('%s>%%s' % k[1:])\n else:\n whereclause.append('%s=%%s' % k)\n wherevalues.append(v)\n query = \"SELECT \" + cls.SQLId + \" FROM \" + cls.SQLTable \n if whereclause:\n query += \" WHERE \" + ' AND '.join(whereclause)\n if sort:\n if type(sort)!=type([]):\n sort = [sort]\n query += \" ORDER BY \" + ','.join([' `%s` %s' % (k,d) for k,d in sort])\n c = dbh.cursor()\n \n if (DEBUG):\n print query\n \n c.execute( query, tuple(wherevalues) )\n if c.rowcount>0:\n if multi:\n for id, in c:\n rv.append(cls(id,dbh))\n else:\n #print \"ROW COUNT\"\n #print c.rowcount\n (id,) = c.fetchone()\n rv = cls(id, dbh)\n c.close()\n finally:\n if release:\n dbstuff.release(dbh,dbh_key)\n return rv", "def main(csvfile, dbfile, verbose=False):\n CONN = sqlite3.connect(dbfile)\n cursor = CONN.cursor()\n create_schema(cursor)\n process_data(cursor, csvfile, verbose=verbose)\n CONN.commit()\n CONN.close()", "def load(*args):\r\n\r\n #args[0].to_csv(str(PATH.joinpath('./data/{}.csv'.format(args[1]))),index=False)\r\n\r\n try: # it will fail if duplicates\r\n args[0].to_sql('cmf', con=engine, if_exists='append', index=False)\r\n except:\r\n pass", "def full_load_db_from_file(batch_size=10000):\n\n q_set = QuestionSet(load=True)\n with open('.config/config.json', 'r') as f:\n config = json.load(f)\n config = config['pg']\n\n conn = psycopg2.connect(\n host=config['host'],\n database=config['db'],\n user=config['user'],\n password=config['password'],\n )\n\n i, values = 0, []\n for q in q_set.questions_ordered:\n values.append((\n q.id,\n q.question,\n q.options,\n q.answer,\n q.category_id,\n ))\n i += 1\n\n cur = conn.cursor()\n cur.execute('TRUNCATE 
TABLE questions')\n query = \"\"\"\n INSERT INTO questions (id, question, options, answer, category_id)\n VALUES {}\n \"\"\"\n\n j = 0\n log.info(\"Writing {} questions to DB...\".format(i))\n for chunk in chunks(values, batch_size):\n log.info('Batch {}...'.format(j + 1))\n j += 1\n\n args = ','.join(cur.mogrify(\"(%s, %s, %s, %s, %s)\", v).decode(\"utf-8\") for v in chunk)\n cur.execute(query.format(args))\n conn.commit()\n\n log.info(\"Data transfer complete.\")\n cur.close()", "def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table", "def insert_into_sql(chunk):\n bulk_list = []\n for row in chunk:\n bulk_list.append(StockData(\n date=str(row[0])[0:4] + '-' + str(row[0])[4:6] + '-' + str(row[0])[6:8],\n code=row[1],\n code_name=row[2],\n d1_diff_rate=row[3],\n close=row[4],\n open=row[5],\n high=row[6],\n low=row[7],\n volume=row[8],\n clo5=row[9],\n clo10=row[10],\n clo20=row[11],\n clo40=row[12],\n clo60=row[13],\n clo80=row[14],\n clo100=row[15],\n clo120=row[16],\n clo5_diff_rate=row[17],\n clo10_diff_rate=row[18],\n clo20_diff_rate=row[19],\n clo40_diff_rate=row[20],\n clo60_diff_rate=row[21],\n clo80_diff_rate=row[22],\n clo100_diff_rate=row[23],\n clo120_diff_rate=row[24],\n yes_clo_5=row[25],\n yes_clo_10=row[26],\n yes_clo_20=row[27],\n yes_clo_40=row[28],\n yes_clo_60=row[29],\n yes_clo_80=row[30],\n yes_clo_100=row[31],\n yes_clo_120=row[32],\n vol5=row[33],\n vol10=row[34],\n vol20=row[35],\n vol40=row[36],\n vol60=row[37],\n vol80=row[38],\n vol100=row[39],\n vol120=row[40],\n ))\n StockData.objects.bulk_create(bulk_list)\n return bulk_list", "def load_file(self, query, table_name, infile, batch_size=1000, throttle_size=10_000, throttle_time=3):\n logger.info(\"Importing from {}, batch size = {} throttle size={} throttle time={}\"\\\n .format(infile, batch_size, throttle_size, throttle_time))\n reader = csv.DictReader(open(infile, 'r'))\n insert_q = self.build_insert_query(query, self._clean_fields(reader.fieldnames), table_name)\n logger.debug(insert_q)\n # columnNames = reader.fieldnames\n\n row_count = 0\n batch = []\n batch_count = 1\n\n total_rows_modified = 0\n throttle_count = 0\n\n i = 0\n for row in reader:\n if row_count >= batch_size - 1:\n logger.info(\"Submitting INSERT batch {}\".format(batch_count))\n total_rows_modified += self._submit_batch(insert_q, batch)\n \n logger.debug(str(batch)[:self.max_debug_chars])\n\n batch = []\n row_count = 0\n batch_count += 1\n else:\n row_count += 1\n\n # data = OrderedDict((self._clean_field(key), value) for key, value in row.items())\n data = OrderedDict()\n for key, value in row.items():\n key = self._clean_field(key)\n if key in self.column_type_overrides:\n try:\n value = self.column_type_overrides[key](value)\n except Exception as e:\n logger.debug(e)\n logger.debug(f\"Could not set value for {key}, default to None\")\n value = None\n # value = self._clean_values(key, value)\n else:\n # If no value is defined, use null.\n if not value:\n value = None\n data[key] = value\n\n batch.append(data)\n\n # Put in a sleep timer to throttle how hard we hit the database\n if throttle_time and throttle_size and (throttle_count >= throttle_size - 1):\n logger.info(f\"Sleeping for {throttle_time} seconds... 
row: {i}\")\n time.sleep(int(throttle_time))\n throttle_count = 0\n elif throttle_time and throttle_size:\n throttle_count += 1\n i += 1\n\n # Submit remaining INSERT queries\n if batch:\n logger.debug(batch)\n logger.info(\"Submitting INSERT batch {}\".format(batch_count))\n total_rows_modified += self._submit_batch(insert_q, batch)\n \n return total_rows_modified", "def _create_sql(self):\n\n pdbfile = self.pdbfile\n sqlfile = self.sqlfile\n\n if self.verbose:\n print('-- Create SQLite3 database')\n\n #name of the table\n #table = 'ATOM'\n\n # column names and types\n self.col = {'serial' : 'INT',\n 'name' : 'TEXT',\n 'altLoc' : 'TEXT',\n 'resName' : 'TEXT',\n 'chainID' : 'TEXT',\n 'resSeq' : 'INT',\n 'iCode' : 'TEXT',\n 'x' : 'REAL',\n 'y' : 'REAL',\n 'z' : 'REAL',\n 'occ' : 'REAL',\n 'temp' : 'REAL'}\n\n # delimtier of the column format\n # taken from http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n self.delimiter = {\n 'serial' : [6,11],\n 'name' : [12,16],\n 'altLoc' : [16,17],\n 'resName' :[17,20],\n 'chainID' :[21,22],\n 'resSeq' :[22,26],\n 'iCode' :[26,26],\n 'x' :[30,38],\n 'y' :[38,46],\n 'z' :[46,54],\n 'occ' :[54,60],\n 'temp' :[60,66]}\n\n if self.no_extra:\n del self.col['occ']\n del self.col['temp']\n\n # size of the things\n ncol = len(self.col)\n ndel = len(self.delimiter)\n\n\n # open the data base\n # if we do not specify a db name\n # the db is only in RAM\n # there might be little advantage to use memory\n # https://stackoverflow.com/questions/764710/sqlite-performance-benchmark-why-is-memory-so-slow-only-1-5x-as-fast-as-d\n if self.sqlfile is None:\n self.conn = sqlite3.connect(':memory:')\n \n # or we create a new db file\n else:\n if os.path.isfile(sqlfile):\n sp.call('rm %s' %sqlfile,shell=True)\n self.conn = sqlite3.connect(sqlfile)\n self.c = self.conn.cursor()\n\n # intialize the header/placeholder\n header,qm = '',''\n for ic,(colname,coltype) in enumerate(self.col.items()):\n header += '{cn} {ct}'.format(cn=colname,ct=coltype)\n qm += '?'\n if ic < ncol-1:\n header += ', '\n qm += ','\n\n # create the table\n query = 'CREATE TABLE ATOM ({hd})'.format(hd=header)\n self.c.execute(query)\n\n\n # read the pdb file\n # this is dangerous if there are ATOM written in the comment part\n # which happends often\n #data = sp.check_output(\"awk '/ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a safer version consist at matching against the first field\n # won't work on windows\n #data = sp.check_output(\"awk '$1 ~ /^ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a pure python way\n # RMK we go through the data twice here. Once to read the ATOM line and once to parse the data ...\n # we could do better than that. 
But the most time consuming step seems to be the CREATE TABLE query\n # if we path a file we read it\n if isinstance(pdbfile,str):\n if os.path.isfile(pdbfile):\n with open(pdbfile,'r') as fi:\n data = [line.split('\\n')[0] for line in fi if line.startswith('ATOM')]\n else:\n raise FileNotFoundError('File %s was not found',pdbfile)\n\n # if we pass a list as for h5py read/write\n # we directly use that\n elif isinstance(pdbfile,np.ndarray):\n data = [l.decode('utf-8') for l in pdbfile.tolist()]\n\n # if we cant read it\n else:\n print(pdbfile)\n raise ValueError('PDB data not recognized')\n\n # if there is no ATOM in the file\n if len(data)==1 and data[0]=='':\n print(\"-- Error : No ATOM in the pdb file.\")\n self.is_valid = False\n return\n\n # haddock chain ID fix\n del_copy = self.delimiter.copy()\n if data[0][del_copy['chainID'][0]] == ' ':\n del_copy['chainID'] = [72,73]\n\n # get all the data\n data_atom = []\n for iatom,atom in enumerate(data):\n\n # sometimes we still have an empty line somewhere\n if len(atom) == 0:\n continue\n\n # browse all attribute of each atom\n at = ()\n for ik,(colname,coltype) in enumerate(self.col.items()):\n\n # get the piece of data\n data = atom[del_copy[colname][0]:del_copy[colname][1]].strip()\n\n # convert it if necessary\n if coltype == 'INT':\n data = int(data)\n elif coltype == 'REAL':\n data = float(data)\n\n # append keep the comma !!\n # we need proper tuple\n at +=(data,)\n\n # append\n data_atom.append(at)\n\n\n # push in the database\n self.c.executemany('INSERT INTO ATOM VALUES ({qm})'.format(qm=qm),data_atom)", "def test_readwrite(self):\n db = Database.TestDB(self.mktemp())\n yield db.execute(\"INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)\", (\"FOO\", \"BAR\",))\n items = (yield db.query(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, ((\"FOO\", \"BAR\"),))\n items = (yield db.queryList(\"SELECT * from TESTTYPE\"))\n self.assertEqual(items, (\"FOO\",))\n db.close()", "def __init__(self, dbfile, modmap=None, *, loop=None):\n super(Database, self).__init__()\n self._loop = loop or asyncio.get_event_loop()\n self._modmap = modmap\n # _object_types dict is keyed on type name, where value is a 3-\n # tuple (id, attrs, idx), where:\n # - id is a unique numeric database id for the type,\n # - attrs is a dict containing registered attributes for the type,\n # keyed on attribute name, where value is a 3-tuple of (type, flags,\n # ivtidx), where type is a python datatype, flags is a bitmask of\n # ATTR_*, ivtidx is the name of the associated inverted index (used\n # if flags has ATTR_INVERTED_INDEX, otherwise None)\n # - idx is a list of n-tuples, where each n-tuple defines two or more\n # (non-ATTR_SIMPLE) attributes on which to create a multi-column\n # sql index.\n self._object_types = {}\n\n # _inverted_indexes dict is keyed on index name, where value is\n # a dict keyed on:\n # - min: minimum length of terms\n # - max: maximum length of terms\n # - ignore: list of terms to ignore\n # - split: function or regular expression used to split string ATTR_INVERTED_INDEX\n # attributes.\n self._inverted_indexes = {}\n\n # True when there are uncommitted changes\n self._dirty = False\n # True when modifications are not allowed to the database, which\n # is the case when Python 3 is opening a database created by Python 2\n # and upgrade_to_py3() has not been called.\n self._readonly = False\n self._dbfile = os.path.realpath(dbfile)\n self._lock = threading.RLock()\n self._lazy_commit_timer = None\n self._lazy_commit_interval = None\n 
self._open_db()", "def data_table_creation(cursor, connection_to_db):\n\n cursor.execute(\"\"\"\n\n CREATE TABLE IF NOT EXISTS data(\n question TEXT NOT NULL,\n answer TEXT NULL,\n question_type TEXT NOT NULL,\n question_type_answers TEXT NULL,\n PRIMARY KEY(question)\n );\n\n \"\"\")\n\n connection_to_db.commit()", "def __init__(self, path, format, fields, skip_header=False,\n csv_reader_params={}, **kwargs):\n\n cache_path = os.path.join('tmp', (os.path.basename(path) + '.td'))\n try:\n with open(cache_path, 'rb') as f:\n examples = pickle.load(f)\n except:\n format = format.lower()\n make_example = {\n 'json': Example.fromJSON, 'dict': Example.fromdict,\n 'tsv': Example.fromCSV, 'csv': Example.fromCSV}[format]\n\n with io.open(os.path.expanduser(path), encoding=\"utf8\") as f:\n if format == 'csv':\n reader = unicode_csv_reader(f, **csv_reader_params)\n elif format == 'tsv':\n reader = unicode_csv_reader(f, delimiter='\\t', **csv_reader_params)\n else:\n reader = f\n\n if format in ['csv', 'tsv'] and isinstance(fields, dict):\n if skip_header:\n raise ValueError('When using a dict to specify fields with a {} file,'\n 'skip_header must be False and'\n 'the file must have a header.'.format(format))\n header = next(reader)\n field_to_index = {f: header.index(f) for f in fields.keys()}\n make_example = partial(make_example, field_to_index=field_to_index)\n\n if skip_header:\n next(reader)\n\n examples = [make_example(line, fields) for line in reader]\n with open(cache_path, 'wb') as f:\n pickle.dump(examples, f)\n\n if isinstance(fields, dict):\n fields, field_dict = [], fields\n for field in field_dict.values():\n if isinstance(field, list):\n fields.extend(field)\n else:\n fields.append(field)\n\n super(TabularDataset, self).__init__(examples, fields, **kwargs)", "def __init__(self):\n if Database.filename is None:\n Database.filename = Config().GetString('HISTORY_DB')\n if Database.filename is None:\n logging.error('Missing ASH_CFG_HISTORY_DB variable?')\n self.connection = sqlite3.connect(Database.filename)\n self.connection.row_factory = sqlite3.Row\n self.cursor = self.connection.cursor()", "def __init__(self, dbname, table, dbuser='louisf', connectdb = True):\n self.entries = list()\n self.parsedLines = 0\n self.keys = list()\n if connectdb == True:\n try:\n con = None\n con = psycopg2.connect(database=dbname, user=dbuser)\n except psycopg2.DatabaseError as e:\n print(\"I cannot connect to the database \" + dbname)\n print(e)\n\n print(\"Connected to database \" + dbname)\n cur = con.cursor()\n cur.execute(\"Select * FROM \" + table)\n colnames = [desc[0] for desc in cur.description]\n con.close()\n self.keys = colnames\n self.tester = dict()", "def __init__(self, connection=None, url=None,\r\n table=None, schema=None, truncate=False,\r\n create=False, replace=False,\r\n add_id_key=False, id_key_name=None,\r\n buffer_size=None, fields=None, concrete_type_map=None,\r\n **options):\r\n if not options:\r\n options = {}\r\n\r\n self.url = url\r\n self.connection = connection\r\n self.table_name = table\r\n self.schema = schema\r\n self.options = options\r\n self.replace = replace\r\n self.create = create\r\n self.truncate = truncate\r\n self.add_id_key = add_id_key\r\n\r\n self.table = None\r\n self.fields = fields\r\n\r\n self.concrete_type_map = concrete_type_map\r\n\r\n if id_key_name:\r\n self.id_key_name = id_key_name\r\n else:\r\n self.id_key_name = 'id'\r\n\r\n if buffer_size:\r\n self.buffer_size = buffer_size\r\n else:\r\n self.buffer_size = 1000", "def __init__(self, table, 
data: Iterator, columns: list,\n batch_size: int=BATCH_SIZE, header: bool=False):\n self.table = table\n self.data = data\n self.columns = columns\n self.batch_size = batch_size\n self.header = header\n\n if isinstance(self.data, list):\n self.data = iter(self.data)\n\n if not isinstance(self.data, Iterator):\n raise TypeError('Expected Iterator, got {}'.format(\n self.data.__class__))\n\n if not self.columns:\n raise ValueError('Columns cannot be empty')\n\n if isinstance(self.columns[0], tuple):\n self.columns = [Column(*c) for c in self.columns]", "def setupTempTables(self):\n cur = self.cursor()\n cur.execute(\"PRAGMA temp_store = MEMORY\")\n for table, columns in self.temp_tables.items():\n cur.execute(\"create temporary table if not exists %s (\" % table + (',').join('\"' + key + '\" ' + val for key, val in columns.items()) + ')')", "def init_mock_db(db_file):\n\n # Add reads. Fields that are not relevant for this purpose are set to None\n known = [(1, 1, 1, \"read_1\", \"dataset_1\", None, None, None, None, None, None,\n None, 0.2, None, None),\n (2, 1, 2, \"read_2\", \"dataset_2\", None, None, None, None, None, None,\n None, 0.7, None, None)]\n genomic = [(3, 1, 3, \"read_3\", \"dataset_1\", None, None, None, None, None, None,\n None, 0.2, None, None),\n (4, 1, 3, \"read_4\", \"dataset_2\", None, None, None, None, None, None,\n None, 0.2, None, None),\n (5, 1, 3, \"read_5\", \"dataset_3\", None, None, None, None, None, None,\n None, 0.2, None, None)]\n ISM = [(6, 1, 4, \"read_6\", \"dataset_4\", None, None, None, None, None, None,\n None, 0.7, None, None),\n (7, 1, 4, \"read_7\", \"dataset_4\", None, None, None, None, None, None,\n None, 0.8, None, None),\n (8, 1, 4, \"read_8\", \"dataset_5\", None, None, None, None, None, None,\n None, 0.9, None, None)]\n\n\n reads = known + genomic + ISM\n\n # Datasets\n datasets = [(1, \"dataset_1\", \"test\", \"test\"),\n (2, \"dataset_2\", \"test\", \"test\"),\n (3, \"dataset_3\", \"test\", \"test\"),\n (4, \"dataset_4\", \"test\", \"test\"),\n (5, \"dataset_5\", \"test\", \"test\")]\n\n # Annotations\n annotations = [(1, \"toy\", \"\", \"transcript_status\", \"KNOWN\"),\n (2, \"toy\", \"\", \"transcript_status\", \"KNOWN\"),\n (3, \"TALON\", \"\", \"transcript_status\", \"NOVEL\"),\n (3, \"TALON\", \"\", \"genomic_transcript\", \"TRUE\"),\n (4, \"TALON\", \"\", \"ISM_transcript\", \"TRUE\")]\n\n make_minimal_db_for_filtering(db_file, reads, datasets, annotations)", "def populate_job():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Job class')\n logger.info('Creating Job records: just like Person. 
We use the foreign key')\n\n JOB_NAME = 0\n START_DATE = 1\n END_DATE = 2\n SALARY = 3\n PERSON_EMPLOYED = 4\n DEPT = 5\n\n jobs = [\n ('Analyst', '2001-09-22', '2003-01-30',65500, 'Andrew', 'AN03'),\n ('Senior analyst', '2003-02-01', '2006-10-22', 70000, 'Andrew', 'AN03'),\n ('Senior business analyst', '2006-10-23', '2016-12-24', 80000, 'Andrew', 'AN03'),\n ('Admin supervisor', '2012-10-01', '2014-11-10', 45900, 'Peter', 'HR02'),\n ('Admin manager', '2014-11-14', '2018-01-05', 45900, 'Peter', 'AD01')\n ]\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for job in jobs:\n with database.transaction():\n new_job = Job.create(\n job_name= job[JOB_NAME],\n start_date=job[START_DATE],\n end_date=job[END_DATE],\n salary=job[SALARY],\n person_employed=job[PERSON_EMPLOYED],\n job_dept=job[DEPT],\n duration=date_diff(job[START_DATE], job[END_DATE])\n )\n new_job.save()\n\n logger.info('Reading and print all Job rows (note the value of person)...')\n for job in Job:\n logger.info(f'{job.job_dept} - {job.job_name} : '\n f'{job.start_date} to {job.end_date}, '\n f'({job.duration} days),'\n f'for {job.person_employed}')\n\n except Exception as e:\n logger.info(f'Error creating = {job[JOB_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def __init__(self, db_input=None):\n\n if db_input is None:\n db_input = []\n elif type(db_input) != list:\n db_input = [db_input]\n self.db = copy.deepcopy(db_input)", "def populateSQlite(tagDf): \n conn = sqlite3.connect(os.path.join(prefix, args.db))\n with conn:\n cur = conn.cursor()\n cmds = ['INSERT INTO value VALUES(%d, \\\"%s\\\", %d);' % (r[0], r[1], r[2]) for i, r in tagDf.iterrows()]\n cmds = \"\\n\".join(cmds)\n cur.executescript(cmds)\n conn.commit()", "def create_table_from_csv (sqlite_db_file):\n files = [f for f in os.listdir(os.curdir) if f.endswith(\".csv\")]\n name_df = [re.findall('(.*)\\.csv',f)[0] for f in files ]\n engine = create_engine('sqlite:///' + sqlite_db_file)\n for n, f_n in zip(name_df, files):\n try:\n df = pd.read_csv(f\"{f_n}\", sep=',')\n df.to_sql(f\"{n}\", engine, if_exists=\"fail\")\n\n except Exception:\n pass", "def populate_persons():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Person class')\n\n PERSON_NAME = 0\n LIVES_IN_TOWN = 1\n NICKNAME = 2\n\n people = [\n ('Andrew', 'Sultan', 'Andy'),\n ('Peter', 'Seattle', None),\n ('Susan', 'Boston', 'Beannie'),\n ('Pam', 'Coventry', 'PJ'),\n ('Steven', 'Stevens Pass', None),\n ('Ryan', 'New York', 'Private'),\n ('Pamela', 'Spokane', 'Patrol'),\n ('Monica', 'Portland', None),\n ]\n\n logger.info('Creating Person records: iterate through the list of tuples')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for person in people:\n with database.transaction():\n new_person = Person.create(\n person_name = person[PERSON_NAME],\n lives_in_town = person[LIVES_IN_TOWN],\n nickname = person[NICKNAME])\n new_person.save()\n logger.info('Database add successful')\n\n logger.info('Print the Person records we saved...')\n for saved_person in Person:\n logger.info(f'{saved_person.person_name} lives in {saved_person.lives_in_town} ' +\\\n f'and likes to be known as {saved_person.nickname}')\n\n except 
Exception as e:\n logger.info(f'Error creating = {person[PERSON_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def __init__(self, transaction_id, rows={}):\n self.rows = rows\n self._creation_transaction_id = transaction_id", "def seed_other_dataset(name: str, chunk_size: int, start=None, end=None):\n objects = []\n for chunk in pd.read_csv(name, chunksize=chunk_size, header=1):\n chunk_as_mat = chunk.to_numpy()\n chunk_start = datetime.datetime.strptime(str(chunk_as_mat[0][0]), \"%Y%m%d\")\n chunk_end = datetime.datetime.strptime(str(chunk_as_mat[-1][0]), \"%Y%m%d\")\n if start is not None and start > chunk_end:\n continue\n if end is not None and end < chunk_start:\n break\n # print(chunk.to_numpy())\n objects += insert_into_sql(chunk.to_numpy())\n return objects", "def __init__(self):\n self._db = db\n # Connect to DB\n self._db.connect()\n # Create tables\n self._db.create_tables([Teachers, Parents, Tutors, Students, Homework, Groups, StudentsGroups, Courses])\n # Create filling entries\n self.__create_dummies()\n self._db.close()", "def loadValueTableFromSqlite(): \n conn = sqlite3.connect(prefix + args.db)\n df = io.read_frame(\"SELECT * FROM value\", conn) \n return df", "def execute(self,context):\n postgres = PostgresHook(postgres_conn_id = self.postgres_conn_id)\n conn = postgres.get_conn()\n cursor = conn.cursor()\n start = datetime.now()\n logging.info(\"Clearing data for each load\")\n postgres.run(\"TRUNCATE TABLE {}\".format(self.table))\n\n logging.info(f\"Loading table {self.table}\")\n sql =f\"COPY {self.table} FROM STDIN DELIMITER ',' CSV HEADER\"\n cursor.copy_expert(sql, open(self.path, \"r\"))\n conn.commit()\n logging.info(f\"Loaded table {self.table}\")\n end = datetime.now()\n time_taken = (end-start)\n logging.info(f\"Time taken:{time_taken}\")", "def __init__(self, db_filename, flush=False):\n self.db = sqlite3.connect(db_filename)\n cursor = self.db.cursor()\n\n # Create a table for the triples, but only if it doesn't already exist\n # Put an index on the page_url column for checking whether we already\n # crawled a URL\n cursor.executescript(\"\"\"\n CREATE TABLE IF NOT EXISTS triples (\n page_url TEXT NOT NULL,\n link_type TEXT NOT NULL,\n link_url TEXT NOT NULL\n );\n\n CREATE INDEX IF NOT EXISTS triples_page_url_index ON triples(page_url);\n \"\"\")\n self.db.commit()\n if flush:\n self.flush()", "def fill_the_db(testapp):\n SessionFactory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(SessionFactory, transaction.manager)\n if len(dbsession.query(Entries).all()) == 0:\n for entry in ENTRIES:\n row = Entries(title=entry[\"title\"], creation_date=entry[\"creation_date\"], body=entry[\"body\"])\n dbsession.add(row)", "def unit_database() -> Iterator[units.UnitDatabase]:\n yield units.UnitDatabase.PushSingleton()\n units.UnitDatabase.PopSingleton()", "def __init__(self, db_filename):\n self._conn = sqlite3.connect(db_filename)\n self._conn.text_factory = str\n self._cursor = self._conn.cursor()", "def _convertDataToSQLite(self, data: TDXData\n ) -> t.Sequence[t.Mapping[t.Text, schemaconverter.SQLVal]]:\n # self.* lookup is slow so do it once only\n general_schema = self.general_schema\n\n # convert all the data to SQLite types\n convert_row = schemaconverter.convertRowToSqlite\n sql_data = [\n convert_row(general_schema, r, data_dir=self.data_dir)\n for r in data]\n return sql_data", "def __init__(self, output_db):\r\n # If the output file exists, delete it so 
that the db is\r\n # created from scratch\r\n if os.path.exists(output_db):\r\n os.unlink(output_db)\r\n self.db = create_engine(\"sqlite:///%s\" % output_db)\r\n\r\n # Python's encoding handling is reallly annoying\r\n # http://stackoverflow.com/questions/3033741/sqlalchemy-automatically-converts-str-to-unicode-on-commit\r\n self.db.raw_connection().connection.text_factory = str\r\n self.init_db()\r\n self.started_at = datetime.now()" ]
[ "0.6956785", "0.6560394", "0.5986285", "0.597718", "0.5919446", "0.5904428", "0.58205336", "0.57884705", "0.5786599", "0.57413965", "0.5737391", "0.57340336", "0.56946224", "0.5691723", "0.56570345", "0.56243944", "0.56065536", "0.5595917", "0.55688184", "0.5563457", "0.55514586", "0.5522938", "0.54994094", "0.54774976", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5434496", "0.5421928", "0.54068565", "0.540683", "0.5406747", "0.54049134", "0.5389732", "0.5382548", "0.53820765", "0.538156", "0.5378497", "0.53621286", "0.5351819", "0.53380543", "0.5337563", "0.53371626", "0.5331705", "0.53261983", "0.5322905", "0.53064436", "0.5305529", "0.53035736", "0.52760845", "0.52746934", "0.52708066", "0.52654433", "0.5264135", "0.52635497", "0.5247788", "0.5247746", "0.5228675", "0.5226595", "0.5224", "0.5223882", "0.5221255", "0.5216483", "0.52145267", "0.52141815", "0.5210309", "0.5206332", "0.5202753", "0.519498", "0.5194622", "0.51909053", "0.518935", "0.5184933", "0.51816803", "0.5177289", "0.51711655", "0.5167376", "0.51671875", "0.5161449", "0.51600105", "0.5157195", "0.51550364", "0.5152639", "0.51466376", "0.5145522", "0.51334375", "0.5128417", "0.5128093", "0.5122737", "0.5121846", "0.51199466", "0.51160365" ]
0.7079159
0
Create an index for specified columns - can speed up testing in some cases. Indexes should be added one-by-one to tune a test suite's overall performance. Creating several indexes before testing even begins could lead to worse performance, so use them with discretion.
def create_index(self, *columns):
    # Calling super() with older convention to support Python 2.7 & 2.6.
    super(SqliteSource, self).create_index(*columns)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_index():", "def build_index():\n pass", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def create_index(self, table_name, index, timeout):\n _abstract()", "def create_index(self, table_name, index, timeout):\n _abstract()", "def create_index(self, *columns):\n self._assert_columns_exist(columns)\n\n # Build index name.\n whitelist = lambda col: ''.join(x for x in col if x.isalnum())\n idx_name = '_'.join(whitelist(col) for col in columns)\n idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)\n\n # Build column names.\n col_names = [self._normalize_column(x) for x in columns]\n col_names = ', '.join(col_names)\n\n # Prepare statement.\n statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'\n statement = statement.format(idx_name, self._table, col_names)\n\n # Create index.\n cursor = self._connection.cursor()\n cursor.execute(statement)", "def create_index(self, *columns):\n self._assert_columns_exist(columns)\n\n # Build index name.\n whitelist = lambda col: ''.join(x for x in col if x.isalnum())\n idx_name = '_'.join(whitelist(col) for col in columns)\n idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)\n\n # Build column names.\n col_names = [self._normalize_column(x) for x in columns]\n col_names = ', '.join(col_names)\n\n # Prepare statement.\n statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'\n statement = statement.format(idx_name, self._table, col_names)\n\n # Create index.\n cursor = self._connection.cursor()\n cursor.execute('PRAGMA synchronous=OFF')\n cursor.execute(statement)", "def create_index(cls, engine):\n\n reg_imei = db.Index('reg_imei_index', cls.imei, postgresql_concurrently=True)\n reg_imei.create(bind=engine)\n\n reg_normalized_imei = db.Index('reg_normalized_imei_index', cls.normalized_imei, postgresql_concurrently=True)\n reg_normalized_imei.create(bind=engine)", "def test_create_index_is_called(self):\r\n assert len(self.index_calls) == 0\r\n\r\n connection._index_all_fields = False\r\n \r\n class TestIndexCreationCallTestVertex(Vertex):\r\n col1 = properties.Text(index=True)\r\n col2 = properties.Text(index=True, db_field='____column')\r\n col3 = properties.Text(db_field='____column3')\r\n\r\n assert len(self.index_calls) == 2\r\n assert 'vid' not in self.index_calls\r\n assert 'col1' in self.index_calls\r\n assert '____column' in self.index_calls\r\n assert '____column3' not in self.index_calls\r\n\r\n connection._index_all_fields = True\r\n self.index_calls = []\r\n\r\n class TestIndexCreationCallTestVertex2(Vertex):\r\n col1 = properties.Text()\r\n col2 = properties.Text(db_field='____column')\r\n\r\n assert len(self.index_calls) == 3\r\n assert 'vid' in self.index_calls\r\n assert 'col1' in self.index_calls\r\n assert '____column' in self.index_calls", "def test_creating_index_type(self):", "def build_index(self):\r\n date_time('Building indexes in citations table')\r\n self.cursor.execute('DROP INDEX IF EXISTS IDX_citations ;')\r\n self.cursor.execute('CREATE INDEX IDX_citations ON citations (citation);')\r\n self.conn.commit()\r\n gc.collect()", "def create_indices():\n conn = connect()\n c = conn.cursor()\n\n # To prevent rematch btw players\n c.execute(\n \"\"\"\n CREATE UNIQUE INDEX matches_uniq_idx ON matches\n (greatest(winner, loser), least(winner, loser));\n \"\"\")\n conn.commit()\n conn.close()", 
"def create_indexes_with_stats(self) -> float:\n query_nodes_per_cluster = self.cluster_spec.servers_by_cluster_and_role('n1ql')\n index_nodes_per_cluster = self.cluster_spec.servers_by_cluster_and_role('index')\n\n t0 = time.time()\n for cluster_query_nodes in query_nodes_per_cluster:\n self.create_indexes(query_node=cluster_query_nodes[0])\n\n # Wait for index build to complete on first cluster, and record time\n logger.info('Waiting for index build on primary cluster')\n self.wait_for_indexing(index_nodes=index_nodes_per_cluster[0])\n index_build_time = time.time() - t0\n logger.info(\"Index build completed in {} sec\".format(index_build_time))\n\n # Wait for index build to complete on remaining clusters\n logger.info('Waiting for index build to complete on remaining clusters')\n remaining_index_nodes = [node for nodes in index_nodes_per_cluster[1:] for node in nodes]\n self.wait_for_indexing(index_nodes=remaining_index_nodes)\n\n return index_build_time", "def _create_indexes(self):\r\n # WARNING: The collection will be locked during the index\r\n # creation. If the collection has a large number of\r\n # documents in it, the operation can take a long time.\r\n\r\n # TODO: The creation of indexes can be moved to a Django\r\n # management command or equivalent. There is also an option to\r\n # run the indexing on the background, without locking.\r\n self.collection.ensure_index([('time', pymongo.DESCENDING)])\r\n self.collection.ensure_index('event_type')", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def __init__(self, create_index=True, online=True):\n self.online = online\n index_exists = self.index_exists()\n if create_index and not index_exists:\n self.create_index()", "def build_index(self):\n self.rebuild_index()", "def create_indices():\n destroy_indices()\n\n ActionDocument._index.create(ignore=[400, 404])\n ClassificationDocument._index.create(ignore=[400, 404])\n FunctionDocument._index.create(ignore=[400, 404])\n PhaseDocument._index.create(ignore=[400, 404])\n RecordDocument._index.create(ignore=[400, 404])\n\n yield\n\n destroy_indices()", "def create_index(self):\n self.send_robust(self.es_index, data=self.es_meta)\n self.set_index_normal_settings()", "def create_index(args, client):\n policy = {}\n client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)", "def build_index():\n print \"building index..\"\n\n index_dir = INDEX_DIR_CODE\n if TEST_COLLECTION:\n index_dir = INDEX_DIR_TEST\n CR_DOCS_DB.drop()\n CR_DOCS_DB.ensure_index(\"code_id\", unique=True)\n if os.path.exists(index_dir):\n shutil.rmtree(index_dir)\n os.mkdir(index_dir)\n schema = get_schema()\n storage = FileStorage(index_dir)\n ix = storage.create_index(schema)\n w = ix.writer()\n print \"finding posts..\"\n posts_with_code = POSTS_DB.find({\"answers.Body\": {\"$regex\": \"/.*<code>.*/\"}}, timeout=False)\n print \"adding files..\"\n q = add_from_file(w) if TEST_COLLECTION else 0\n for i, question in enumerate(posts_with_code):\n if TEST_COLLECTION:\n q += add_one_code(w, question, q)\n if q > 999:\n break\n else:\n q += add_doc(w, question)\n if i % 1000 == 0 and not i == 0:\n print \"commit number:\", str(i/1000), \"with\", q, \"codes\"\n w.commit()\n w = ix.writer()\n\n w.commit()\n posts_with_code.close()\n print \"the index was built!\"\n return ix", "def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = 
self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))", "def migrate_9(session, **kwargs):\n session.execute(\n \"CREATE INDEX ix_{tb}_size ON {tb} ( size )\"\n .format(tb=IndexRecord.__tablename__))\n\n session.execute(\n \"CREATE INDEX index_record_hash_type_value_idx ON {tb} ( hash_value, hash_type )\"\n .format(tb=IndexRecordHash.__tablename__))", "def _apply_index_op(db, op):\n if 'createIndexes' not in op['o']:\n return\n o = op['o']\n coll_name = o['createIndexes']\n key = list(o['key'].items())\n name = o['name']\n return db[coll_name].create_index(key, name=name)", "def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )", "def create_indexes(create_func):\n\tfor set_name, index_path, index_name in zip(SET_NAMES, INDEX_PATHS, INDEX_NAMES):\n\t\tcreate_func(set_name, index_path, index_name)", "def index_time(sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,\n search_type, operation, debug):\n if debug:\n print(\"Benchmarking:\\n\\tSORT = %s\\n\\tN_THREADS = %s\\n\\tSPARSE_FORMAT =\"\n \" %s\\n\\tROWS = %s\\n\\tCOLS = %s\\n\\tNNZ = %s\\n\\tN_INDEXERS =\"\n \" %s\\n\\t\" \"SEARCH_TYPE = %s\\n\\tOPERATION = %s\"\n % (sort, n_threads, sparse_format, rows, cols, nnz, n_indexers,\n search_type, operation))\n\n # Generate matrix.\n with Timer() as t:\n M = sp.sparse.rand(rows, cols, density=nnz/(rows*cols))\n\n if debug:\n print(\"\\tTime to generate sparse matrix: %s\" % t.elapsed)\n\n # Generate indexer.\n with Timer() as t:\n indexer = {}\n idx = np.random.choice(M.nnz, n_indexers, replace=True)\n indexer['row'] = M.row[idx]\n indexer['col'] = M.col[idx]\n indexer['data'] = np.random.rand(idx.size).astype(np.float64)\n\n if debug:\n print(\"\\tTime to generate indexer: %s\" % t.elapsed)\n\n # Convert sparse matrix.\n with Timer() as t:\n if sparse_format == 'CSR':\n M = sp.sparse.csr_matrix(M)\n elif sparse_format == 'CSC':\n M = sp.sparse.csc_matrix(M)\n else:\n raise Exception(\"sparse_format must be either CSR or CSC.\")\n\n if debug:\n print(\"\\tTime to convert sparse matrix: %s\" % t.elapsed)\n\n # Sort.\n with Timer() as t:\n if sort:\n if sparse_format == 'CSR':\n # Sort indices 
according to row first\n sort_idx = np.lexsort((indexer['col'], indexer['row']))\n elif sparse_format == 'CSC':\n # Sort indices according to col first\n sort_idx = np.lexsort((indexer['row'], indexer['col']))\n else:\n sort_idx = np.arange(indexer['row'].size)\n\n unsort_idx = np.argsort(sort_idx)\n\n if debug:\n print(\"\\tTime to sort indexer: %s\" % t.elapsed)\n sort_time = t.elapsed\n\n # Time the csindexer.\n with Timer() as t:\n if search_type == 'scipy':\n ## Run the Scipy function.\n with Timer() as t:\n if operation == 'get':\n data_py = np.squeeze(np.array(M[indexer['row'][sort_idx],\n indexer['col'][sort_idx]]))\n data_py = data_py[unsort_idx]\n elif operation == 'add':\n M_sp = M.copy()\n\n idx_coo = sp.sparse.coo_matrix(\n (indexer['data'][sort_idx],\n (indexer['row'][sort_idx], indexer['col'][sort_idx])),\n shape=(rows, cols))\n\n M_sp += idx_coo\n else:\n raise Exception(\"Operation must be either get or add.\")\n\n else:\n ## Run the Cython function.\n if operation == 'get':\n ### Don't need to copy M as it doesn't get modified but do have\n ### to copy indexer['data'] as it does.\n data_cs = indexer['data'].copy()\n M_cs = M\n\n csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),\n np.array(indexer['col'][sort_idx]), data_cs,\n operation, search_type, n_threads, debug)\n\n ### Unsort to get final result.\n data_cs = data_cs[unsort_idx]\n\n elif operation == 'add':\n ### Copy M, don't copy indexer['data'].\n data_cs = indexer['data']\n M_cs = M.copy()\n csindexer.apply(M_cs, np.array(indexer['row'][sort_idx]),\n np.array(indexer['col'][sort_idx]),\n np.array(data_cs[sort_idx]), operation,\n search_type,\n n_threads, debug)\n else:\n raise Exception(\"Operation must be either get or add.\")\n\n\n if debug:\n print(\"\\tTime for indexing: %s\" % t.elapsed)\n computation_time = t.elapsed\n\n return computation_time, sort_time", "def test_create_index(self, collection):\n collection.create_index(\"hello\")\n assert collection._indexes == {\"_id_\": ((\"_id\",), {(1,)})}\n\n collection.create_index(\"hello\", unique=True)\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,)}),\n \"hello_1\": ((\"hello\",), {(\"there\",)}),\n }", "def create(excludeDecls=False):\r\n return Index(conf.lib.clang_createIndex(excludeDecls, 0))", "def create_index(self, indexname, table, columns, unique=False):\n if not isinstance(columns, list) and not isinstance(columns, tuple):\n columns = [columns]\n\n if \".\" in table:\n prefix = table.split(\".\")[0] + \".\"\n table = table.split(\".\")[1]\n else:\n prefix = \"\"\n # table = table\n\n self.LOG(\"index create \", indexname, table, columns, unique)\n if unique:\n sql = \"CREATE UNIQUE INDEX %s%s ON %s (%s);\" % (\n prefix, indexname, table, \",\".join(columns))\n else:\n sql = \"CREATE INDEX %s%s ON %s (%s);\" % (\n prefix, indexname, table, \",\".join(columns))\n self.execute(sql)", "def create_index():\n try:\n client = MongoClient(MONGO_URI,event_listeners=[CommandLogger()])\n db = client.get_database('UNSD')\n \n coll_unfcc = db.get_collection('unfcc')\n coll_ebal = db.get_collection('ebal')\n result_unfcc = coll_unfcc.create_index([('REF_AREA',ASCENDING),('TIME_PERIOD',DESCENDING)])\n result_ebal = coll_ebal.create_index([('REF_AREA',ASCENDING),('TIME_PERIOD',DESCENDING)])\n except pymongo.errors.ConnectionFailure as e:\n logger.error('PyMongo error ConnectionFailure seen: ' + str(e))\n traceback.print_exc(file = sys.stdout)", "def init_index(clear=False):\n return _run_indexer_func(\"init_index\", clear)", "def 
_SetupIndexes(self, _open=open):\n pass", "def create_index(self, db_name):\n\t\tindex_func_path = self._get_index_func_filepath(db_name)\n\t\t\n\t\tif os.path.isfile(index_func_path):\n\t\t\t# create index request payload from predefined file\t\n\t\t\twith open(index_func_path, 'r') as content_file:\n\t\t\t\tpayload = content_file.read()\n\t\t\n\t\t\tprint (\"Create index using function in: {}\".format(index_func_path))\n\t\t\turl = \"https://{}/{}/_design/view\".format(\n\t\t\t\tself.cloudanthost, db_name)\n\t\t\tresponse = self.r.put(url, data=payload)\n\t\t\tassert response.status_code == 201", "def test_integer_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, 12345)\n\t)", "def create_indexes(self) -> None:\n self.collection.create_index(\"traceId\")\n self.collection.create_index(\"process.serviceName\")", "def set_index(self, idx, rel, attrs):\n\n query = 'CREATE INDEX {} ON {} ({})'.format(idx, rel, ','.join(attrs))\n\n with self.tpch_cxn.cursor() as curs:\n try:\n curs.execute(query)\n except pg.ProgrammingError as e:\n print(e)", "def create_index(schema, index_name):\n if not os.path.exists(index_name):\n os.mkdir(index_name)\n ix = index.create_in(index_name, schema)\n print(f\"index {index_name} created successfully\")\n return ix", "def buildIndex(counters):\n route = Route((counter.probe for counter in counters))\n index = ProbeIndexFactory.cache.get(route, None)\n if not index:\n probeMap = ProbeMap()\n for i, counter in enumerate(counters):\n ProbeIndexFactory._addCounterToMap(probeMap, counter, i)\n index = ProbeIndexFactory.Index(route, probeMap)\n ProbeIndexFactory.cache.update({route : index})\n return index", "def test_index_reflection(self):\n import warnings\n def capture_warnings(*args, **kw):\n capture_warnings._orig_showwarning(*args, **kw)\n capture_warnings.warnings.append(args)\n capture_warnings._orig_showwarning = warnings.warn\n capture_warnings.warnings = []\n\n m1 = MetaData(testing.db)\n t1 = Table('party', m1,\n Column('id', String(10), nullable=False),\n Column('name', String(20), index=True), \n Column('aname', String(20))\n )\n m1.create_all()\n \n testing.db.execute(\"\"\"\n create index idx1 on party ((id || name))\n \"\"\") \n testing.db.execute(\"\"\"\n create unique index idx2 on party (id) where name = 'test'\n \"\"\")\n \n testing.db.execute(\"\"\"\n create index idx3 on party using btree\n (lower(name::text), lower(aname::text))\n \"\"\")\n \n try:\n m2 = MetaData(testing.db)\n\n warnings.warn = capture_warnings\n t2 = Table('party', m2, autoload=True)\n \n wrn = capture_warnings.warnings\n assert str(wrn[0][0]) == (\n \"Skipped unsupported reflection of expression-based index idx1\")\n assert str(wrn[1][0]) == (\n \"Predicate of partial index idx2 ignored during reflection\")\n assert len(t2.indexes) == 2\n # Make sure indexes are in the order we expect them in\n tmp = [(idx.name, idx) for idx in t2.indexes]\n tmp.sort()\n \n r1, r2 = [idx[1] for idx in tmp]\n\n assert r1.name == 'idx2'\n assert r1.unique == True\n assert r2.unique == False\n assert [t2.c.id] == r1.columns\n assert [t2.c.name] == r2.columns\n finally:\n warnings.warn = capture_warnings._orig_showwarning\n m1.drop_all()", "def create_new_index(self, index_name, value, is_cluster, check=False):\n print(f\"Creating {index_name} index started \\n\")\n add_index = \"/html//i[@id='addIndex']\"\n self.locator_finder_by_xpath(add_index).click()\n time.sleep(2)\n\n 
print(f\"selecting {index_name} from the list\\n\")\n self.locator_finder_by_select(self.select_index_type_id, value)\n\n if index_name == \"Persistent\":\n self.select_persistent_fields_id = self.locator_finder_by_hover_item_id(self.select_persistent_fields_id)\n time.sleep(1)\n self.select_persistent_fields_id.send_keys(\"pfields\").perform()\n self.select_persistent_name_id = self.locator_finder_by_hover_item_id(self.select_persistent_name_id)\n self.select_persistent_fields_id.send_keys(\"Persistent\").perform()\n time.sleep(1)\n\n if not is_cluster:\n self.select_persistent_unique_id = self.locator_finder_by_hover_item_id(\n self.select_persistent_unique_id\n )\n\n self.select_persistent_sparse_id = self.locator_finder_by_hover_item_id(self.select_persistent_sparse_id)\n self.select_persistent_duplicate_id = self.locator_finder_by_hover_item_id(\n self.select_persistent_duplicate_id\n )\n self.select_persistent_background_id = self.locator_finder_by_hover_item_id(self.select_persistent_background_id)\n time.sleep(1)\n\n elif index_name == \"Geo\":\n self.select_geo_fields_id = self.locator_finder_by_hover_item_id(self.select_geo_fields_id)\n self.select_geo_fields_id.send_keys(\"gfields\").perform()\n time.sleep(1)\n self.select_geo_name_id = self.locator_finder_by_hover_item_id(self.select_geo_name_id)\n self.select_geo_name_id.send_keys(\"Geo\").perform()\n time.sleep(1)\n self.select_geo_json_id = self.locator_finder_by_hover_item_id(self.select_geo_json_id)\n self.select_geo_background_id = self.locator_finder_by_hover_item_id(self.select_geo_background_id)\n time.sleep(1)\n self.wait_for_ajax()\n\n elif index_name == \"Fulltext\":\n self.select_fulltext_field_id = self.locator_finder_by_hover_item_id(self.select_fulltext_field_id)\n self.select_fulltext_field_id.send_keys(\"ffields\").perform()\n time.sleep(1)\n self.select_fulltext_name_id = self.locator_finder_by_hover_item_id(self.select_fulltext_name_id)\n self.select_fulltext_name_id.send_keys(\"Fulltext\").perform()\n time.sleep(1)\n self.select_fulltext_length_id = self.locator_finder_by_hover_item_id(self.select_fulltext_length_id)\n self.select_fulltext_length_id.send_keys(100)\n self.select_fulltext_background_id = self.locator_finder_by_hover_item_id(\n self.select_fulltext_background_id\n )\n time.sleep(1)\n self.wait_for_ajax()\n\n elif index_name == \"TTL\":\n self.select_ttl_field_id = self.locator_finder_by_hover_item_id(self.select_ttl_field_id)\n self.select_ttl_field_id.send_keys(\"tfields\").perform()\n time.sleep(1)\n self.select_ttl_name_id = self.locator_finder_by_hover_item_id(self.select_ttl_name_id)\n self.select_ttl_name_id.send_keys(\"TTL\").perform()\n time.sleep(1)\n self.select_ttl_expiry_id = self.locator_finder_by_hover_item_id(self.select_ttl_expiry_id)\n self.select_ttl_expiry_id.send_keys(1000)\n self.select_ttl_background_id = self.locator_finder_by_hover_item_id(self.select_ttl_background_id)\n time.sleep(1)\n self.wait_for_ajax()\n\n # experimental feature\n elif index_name == 'ZKD':\n if check:\n self.navbar_goto(\"collections\")\n print(\"Selecting computed values collections. 
\\n\")\n col = '//*[@id=\"collection_ComputedValueCol\"]/div/h5'\n self.locator_finder_by_xpath(col).click()\n self.select_index_menu()\n\n print(f\"Creating {index_name} index started \\n\")\n self.locator_finder_by_xpath(add_index).click()\n time.sleep(2)\n\n print(f\"selecting {index_name} from the list\\n\")\n self.locator_finder_by_select(self.select_index_type_id, 5)\n\n time.sleep(1)\n\n select_zkd_field_sitem = self.locator_finder_by_id('newZkdFields')\n select_zkd_field_sitem.click()\n select_zkd_field_sitem.clear()\n select_zkd_field_sitem.send_keys('x,y')\n time.sleep(1)\n else:\n select_zkd_field_sitem = self.locator_finder_by_id('newZkdFields')\n select_zkd_field_sitem.click()\n select_zkd_field_sitem.clear()\n select_zkd_field_sitem.send_keys('zkdfileds')\n time.sleep(1)\n\n select_zkd_name_sitem = self.locator_finder_by_id('newZkdName')\n select_zkd_name_sitem.click()\n select_zkd_name_sitem.clear()\n select_zkd_name_sitem.send_keys('ZKD')\n time.sleep(1)\n\n select_create_index_btn_id = \"createIndex\"\n self.locator_finder_by_id(select_create_index_btn_id).click()\n time.sleep(10)\n self.webdriver.refresh()\n\n if check:\n self.navbar_goto(\"collections\")\n self.select_collection(\"TestDoc\")\n self.select_index_menu()\n\n print(f\"Creating {index_name} index completed \\n\")", "async def create_index(self, fields, cursor=None):\n if not cursor:\n cursor = self._cursor\n param = []\n for (k, v) in fields.items():\n if v == 1:\n x = (k, pymongo.ASCENDING)\n else:\n x = (k, pymongo.DESCENDING)\n param.append(x)\n result = await cursor.create_index(param, background=True)\n return result, None", "def _create_indexes(cls, index_type: IndexType, document: dict, condition=None):\n try:\n criteria = [\n (field_name, pymongo.ASCENDING)\n for field_name in cls._get_index_fields(index_type, document, \"\")\n ]\n if criteria:\n # Avoid using auto generated index name that might be too long\n index_name = (\n f\"uidx{cls.__collection_name__}\"\n if index_type == IndexType.Unique\n else f\"idx{cls.__collection_name__}\"\n )\n cls.logger.info(\n f\"Create {index_name} {index_type.name} index on {cls.__collection_name__} using {criteria} criteria.\"\n )\n if condition is None or cls._server_version < \"3.2\":\n cls.__collection__.create_index(\n criteria, unique=index_type == IndexType.Unique, name=index_name\n )\n else:\n try:\n cls.__collection__.create_index(\n criteria,\n unique=index_type == IndexType.Unique,\n name=index_name,\n partialFilterExpression=condition,\n )\n except pymongo.errors.OperationFailure:\n cls.logger.exception(\n f\"Unable to create a {index_type.name} index.\"\n )\n cls.__collection__.create_index(\n criteria,\n unique=index_type == IndexType.Unique,\n name=index_name,\n )\n except pymongo.errors.DuplicateKeyError:\n cls.logger.exception(\n f\"Duplicate key found for {criteria} criteria \"\n f\"when creating a {index_type.name} index.\"\n )\n raise", "def create_index(collection, index):\n db[collection].create_index(index)", "def create_index(es_object, index_name):\n created = False\n # index settings\n # the reason why we need mappings is avoid corrupting your data\n settings = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n },\n \"mappings\": {\n # custom type called foods\n \"foods\": {\n \"properties\": {\n # Specify that the food_name field contains text values.\n \"food_name\": {\n \"type\": \"text\",\n \"fields\": {\n \"raw\":{ \n \"type\": \"keyword\"\n } # The food_name.raw field can be used for sorting and 
aggregations\n }\n },\n # Specify that the categories field contains text values.\n \"categories\": {\n \"type\": \"text\",\n \"fields\": {\n \"raw\":{ \n \"type\": \"keyword\"\n } # The categories.raw field can be used for sorting and aggregations\n }\n },\n # Specify that the calories field contains integer values.\n \"calories\": {\n \"type\": \"integer\"\n },\n \"protein\": {\n \"type\": \"integer\"\n },\n \"carbs\": {\n \"type\": \"integer\"\n },\n \"fat\": {\n \"type\": \"integer\"\n }\n \n },\n }\n }\n }\n try:\n if not es_object.indices.exists(index_name):\n # Ignore 400 means to ignore \"Index Already Exist\" error.\n es_object.indices.create(index=index_name, ignore=400, body=settings)\n print('Created Index')\n created = True\n except Exception as ex:\n print(str(ex))\n finally:\n return created", "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def init_index(self):\n raise NotImplementedError", "def test_create_index_swift(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n index = TroveSwiftIndexBuilder(\"short.dat\", out=indexfile)\n\n # read the index file that was created\n with open(indexfile, 'r+b') as fd:\n indextext = fd.read()\n indexlines = indextext.split('\\n')\n\n # 11 lines includes on blank line at the end\n self.assertEquals(11, len(indexlines))\n del indexlines[10]\n\n # check the first character of each line\n docs = [line[0] for line in indexlines]\n self.assertEquals(['1', '2', '3', '4', '5', '6', '7', '8', '9', '1'], docs)\n\n # check some lines from the index\n ref = \"1, 0, 31, short.dat\"\n self.assertEqual(ref, indexlines[0])\n ref = \"10, 279, 32, short.dat\"\n self.assertEqual(ref, indexlines[9])", "def create_index(es_object, index_name):\n created = False\n \"\"\" index settings \"\"\"\n settings = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n },\n \"mappings\": {\n \"physicians\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"overview\": {\n \"type\": \"text\"\n },\n \"full_name\": {\n \"type\": \"text\"\n },\n \"years_of_practice\": {\n \"type\": \"text\"\n },\n \"language\": {\n \"type\": \"text\"\n },\n \"office_location\": {\n \"type\": \"text\"\n },\n \"hospital_affiliation\": {\n \"type\": \"text\"\n },\n \"specialties\": {\n \"type\": \"text\"\n },\n \"education_and_medical_training\": {\n \"type\": \"text\"\n },\n \"certification_and_licensure\": {\n \"type\": \"text\"\n },\n }\n }\n }\n }\n\n try:\n if not es_object.indices.exists(index_name):\n # Ignore 400 means to ignore \"Index Already Exist\" error.\n es_object.indices.create(index=index_name, ignore=400, body=settings)\n print('Created Index')\n created = True\n except Exception as ex:\n print(str(ex))\n finally:\n return created", "async def build_secret_index(self):\n pass", "def test_secondary_index(self):\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n assert_invalid(session, \"CREATE INDEX ON t_by_v (v2)\",\n \"Secondary indexes are not supported on materialized views\")", "def 
_generate_index_analysis(self, query_analysis, indexes):\r\n needs_recommendation = True\r\n full_indexes = []\r\n partial_indexes = []\r\n coverage = \"unknown\"\r\n\r\n if indexes is not None:\r\n for index_key in indexes.keys():\r\n index = indexes[index_key]\r\n index_report = self._generate_index_report(index,\r\n query_analysis)\r\n if index_report['supported'] is True:\r\n if index_report['coverage'] == 'full':\r\n full_indexes.append(index_report)\r\n if index_report['idealOrder']:\r\n needs_recommendation = False\r\n elif index_report['coverage'] == 'partial':\r\n partial_indexes.append(index_report)\r\n\r\n if len(full_indexes) > 0:\r\n coverage = \"full\"\r\n elif (len(partial_indexes)) > 0:\r\n coverage = \"partial\"\r\n elif query_analysis['supported']:\r\n coverage = \"none\"\r\n\r\n # INDEX ANALYSIS\r\n return OrderedDict([('indexStatus', coverage),\r\n ('fullIndexes', full_indexes),\r\n ('partialIndexes', partial_indexes)])", "def build_index(self, dict_pg_info, list_insert):\n flag_exit = True\n if flag_exit is False:\n self.create_new_index(dict_pg_info)\n self.insert_index(dict_pg_info, list_insert)", "def init_index(self, index_name):\n return Index(self, index_name)", "def simple_index():\n examples = [\n benchmark.Example(\n inputs=[\n [12, 34, 56, 78],\n -2,\n ],\n output=56,\n ),\n ]\n constants = []\n description = 'Index into a tensor'\n target_program = 'in1[in2]'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_index')", "def index_schema_builder(table):\n conn = table.parent.parent.connection\n\n idx = OrderedDict()\n indexes = conn.execute(\"SHOW INDEXES FROM `%s`.`%s`\" % (table.parent.name, table.name))\n\n if not indexes:\n return idx\n\n for index in indexes:\n n = index['Key_name']\n if n not in idx:\n indexitem = IndexSchema(name=n, parent=table)\n indexitem.non_unique = (bool(index['Non_unique'])) # == not unique\n indexitem.table_name = index['Table']\n\n key_type = index['Index_type'].upper()\n\n if index['Key_name'].upper() == \"PRIMARY\":\n indexitem.kind = \"PRIMARY\"\n elif not indexitem.non_unique:\n indexitem.kind = \"UNIQUE\"\n elif key_type in ('FULLTEXT', 'SPATIAL'):\n indexitem.kind = key_type\n else:\n indexitem.kind = \"INDEX\"\n\n if key_type in ('BTREE', 'HASH', 'RTREE'):\n indexitem.type = key_type\n\n indexitem.collation = index['Collation']\n indexitem.comment = index['Comment']\n\n idx[n] = indexitem\n\n if index['Column_name'] not in idx[n].fields:\n idx[n].fields.insert(index['Seq_in_index'], (index['Column_name'], index['Sub_part'] or 0))\n\n return idx", "def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index", "def add_catalog_indexes(context, logger):\n if logger is None:\n logger = logging.getLogger('bungenicms.membershipdirectory')\n \n # Run the catalog.xml step as that may have defined new metadata columns. \n # We could instead add <depends name=\"catalog\"/> to the registration of our \n # import step in zcml, but doing it in code makes this method usable as \n # upgrade step as well. 
Note that this silently does nothing when there is \n # no catalog.xml, so it is quite safe.\n setup = getToolByName(context, 'portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'catalog')\n \n catalog = getToolByName(context, 'portal_catalog')\n indexes = catalog.indexes()\n \n # Specify the indexes you want, with ('index_name', 'index_type')\n wanted = (('county', 'FieldIndex'),\n ('constituency', 'FieldIndex'),\n ('priority_number', 'FieldIndex'), \n ('political_party', 'FieldIndex'),\n ('elected_nominated', 'FieldIndex'),\n ('member_status', 'FieldIndex'),\n ('special_interest', 'FieldIndex'),\n ('other_names', 'FieldIndex'),\n ('member_role', 'FieldIndex'),\n ('member_title', 'FieldIndex'),\n ('body_text', 'FieldIndex'),\n ('member_full_names', 'ZCTextIndex'),\n )\n\n indexables = []\n for (name, meta_type) in wanted:\n if meta_type and name not in indexes:\n if meta_type == 'ZCTextIndex':\n item_extras = Empty()\n item_extras.doc_attr = name\n item_extras.index_type = 'Okapi BM25 Rank'\n item_extras.lexicon_id = 'plone_lexicon'\n catalog.addIndex(name, meta_type, item_extras)\n else:\n catalog.addIndex(name, meta_type)\n \n indexables.append(name)\n logger.info('Added %s for field %s.', meta_type, name)\n if len(indexables) > 0:\n logger.info('Indexing new indexes %s.', ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def setup(self):\n collection = self._get_collection()\n\n indices = copy(self.params[\"indices\"])\n\n if \"when\" not in indices:\n indices[\"when\"] = {}\n\n for index in indices:\n self.log(DEBUG, \"Ensuring we have index for {}\".format(index))\n\n options = indices[index]\n collection.create_index(index, *options)\n self.log(DEBUG, \"Done.\")", "def define_index_field(DomainName=None, IndexField=None):\n pass", "def _generate_index_analysis(self, query_analysis, indexes):\n needs_recommendation = True\n full_indexes = []\n partial_indexes = []\n coverage = \"unknown\"\n\n if indexes is not None:\n for index_key in indexes.keys():\n index = indexes[index_key]\n index_report = self._generate_index_report(index,\n query_analysis)\n if index_report['supported'] is True:\n if index_report['coverage'] == 'full':\n full_indexes.append(index_report)\n if index_report['idealOrder']:\n needs_recommendation = False\n elif index_report['coverage'] == 'partial':\n partial_indexes.append(index_report)\n\n if len(full_indexes) > 0:\n coverage = \"full\"\n elif (len(partial_indexes)) > 0:\n coverage = \"partial\"\n elif query_analysis['supported']:\n coverage = \"none\"\n\n # INDEX ANALYSIS\n return OrderedDict([('indexStatus', coverage),\n ('fullIndexes', full_indexes),\n ('partialIndexes', partial_indexes)])", "def create_index(self):\n\n indice = client.IndicesClient(self.es)\n\n if not indice.exists(self.es_main_index):\n indice.create(\n index=self.es_main_index\n )\n\n return True", "def migrate_8(session, **kwargs):\n session.execute(\n \"CREATE INDEX ix_{tb}_baseid ON {tb} ( baseid )\"\n .format(tb=IndexRecord.__tablename__))", "def option_index(args):\n print(\"= MAKE INDEX =\")\n print()\n print(\"Database folder:\\t{}\".format(args.folder))\n if not os.path.isdir(args.folder):\n raise OSError(\"No such directory!\")\n print(\"Index file:\\t\\t{}\".format(args.indexfile))\n\n indexer.create_index_from_folder(args.folder, args.indexfile)", "def create_index(\n self,\n ) -> Callable[\n [datastore_admin.CreateIndexRequest], Awaitable[operations_pb2.Operation]\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the 
request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_index\" not in self._stubs:\n self._stubs[\"create_index\"] = self.grpc_channel.unary_unary(\n \"/google.datastore.admin.v1.DatastoreAdmin/CreateIndex\",\n request_serializer=datastore_admin.CreateIndexRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_index\"]", "def test_index_stats(self):\n #Create Index\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False)\n #Check Index Stats\n self.sleep(30)\n index_map = self.get_index_stats()\n self.log.info(index_map)\n for query_definition in self.query_definitions:\n index_name = query_definition.index_name\n for bucket in self.buckets:\n bucket_name = bucket.name\n check_keys = ['items_count', 'total_scan_duration', 'num_docs_queued',\n 'num_requests', 'num_rows_returned', 'num_docs_queued',\n 'num_docs_pending','delete_bytes' ]\n map = self._create_stats_map(items_count=2016)\n self._verify_index_stats(index_map, index_name, bucket_name, map, check_keys)", "def __create_new_index(self, index, index_id, begin_timestamp):\n index_name = index + \"_\" + str(index_id)\n target_config = index + \".target_config\"\n pipeline_id = index + \".pipeline\"\n mapping = {\n \"user_id\": {\"type\": \"long\"},\n \"client_id\": {\"type\": \"long\"},\n \"partner_id\": {\"type\": \"long\"},\n \"module\": {\"type\": \"keyword\"},\n \"page\": {\"type\": \"keyword\"},\n \"uri\": {\"type\": \"keyword\"},\n \"app_type\": {\"type\": \"keyword\"},\n \"created_at\": {\"type\": \"date\"},\n \"request_time\": {\"type\": \"date\"},\n \"duration\": {\"type\": \"long\"},\n }\n body = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 1,\n },\n \"mappings\": {\n \"docs\": {\n \"properties\": mapping\n }\n },\n }\n self.client.indices.create(index=index_name, body=body)\n self.client.index(\n index=target_config,\n doc_type=\"config\",\n id=index_id,\n body={\"index_id\": index_id, \"begin_timestamp\": begin_timestamp},\n )\n\n return index_id", "def T1(request):\n T = _get_test_table()\n if request.param:\n T.add_index(\"a\")\n return T", "def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)", "def migrate_5(session, **kwargs):\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordUrl.__tablename__))\n\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordHash.__tablename__))\n\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordMetadata.__tablename__))\n\n session.execute(\n \"CREATE INDEX {tb}_idx ON {tb} ( did )\"\n .format(tb=IndexRecordUrlMetadata.__tablename__))", "def __init__(self):\n super().__init__()\n self.index_dir = self.base_dir + \"user/\"\n self.index_schema = self.__get_index_schema()\n if not os.path.exists(self.index_dir):\n os.makedirs(self.index_dir)\n self.indexer = index.create_in(self.index_dir, self.index_schema) # creates the index\n else:\n self.indexer = index.open_dir(self.index_dir) # opens the index if it already exists", "def create_indices(self) -> None:\n self.client.indices.create(\n 
index=\"business\",\n body=BUSINESS_MAPPINGS\n )\n self.client.indices.create(\n index=\"review\",\n body=REVIEW_MAPPINGS\n )\n self.client.indices.create(\n index=\"tip\",\n body=TIP_MAPPINGS\n )", "def column_index_exists(schema_name, table_name, column_name):\n sql = \"\"\"\n with table_w_DateLstMod (\n SchemaName,\n TableName,\n ColName,\n column_id,\n DATA_TYPE,\n object_id\n ) as (\n SELECT distinct\n s.name,\n t.name,\n c.name,\n c.column_id,\n case\n when p.name = 'numeric' then 'numeric(' + cast(c.precision as varchar(5)) + ',' + cast(c.scale as varchar(5)) + ')'\n else (\n case\n when p.name = 'varchar'\n then 'varchar(' + cast(c.max_length as varchar(5)) + ')' else p.name end) end as 'data_type', t.object_id\n FROM sys.schemas s with(nolock)\n JOIN sys.tables t with(nolock) ON (s.schema_id = t.schema_id)\n JOIN sys.columns c with(nolock) ON (c.object_id = t.object_id)\n JOIN sys.types P ON C.system_type_id = P.system_type_id\n WHERE s.name = ?\n and t.name = ?\n and c.name = ?\n and t.type = 'U'\n ),\n table_indexes (\n SchemaName,\n TableName,\n ColName,\n DATA_TYPE,\n index_name,\n index_type,\n object_id ) as (\n select distinct\n td.SchemaName,\n td.TableName,\n td.ColName,\n td.DATA_TYPE,\n i.name,\n i.type_desc,\n td.object_id\n FROM table_w_DateLstMod td\n JOIN [sys].[index_columns] ic with(nolock)\n on (ic.object_id = td.object_id and ic.column_id = td.column_id and ic.index_column_id = 1)\n JOIN sys.indexes i with(nolock)\n ON (i.object_id = td.object_id and ic.index_id = i.index_id)\n )\n select\n SchemaName, TableName, ColName, DATA_TYPE, index_name, index_type, object_id\n from table_indexes;\n \"\"\"\n\n row = fetch_row(sql, [schema_name, table_name, column_name])\n return row is not None", "def buildIndex(filename, currentTime, baseDir):\n pathToFolder = baseDir + 'Collections/IndriIndices/'\n if not os.path.exists(pathToFolder):\n os.makedirs(pathToFolder)\n INDRI_BUILD_INDEX = '/mnt/bi-strg3/v/zivvasilisky/ziv/env/indri/indri/bin/IndriBuildIndex'\n CORPUS_PATH = filename\n CORPUS_CLASS = 'trectext'\n MEMORY = '1G'\n INDEX = pathToFolder + currentTime\n STEMMER = 'krovetz'\n run_bash_command(INDRI_BUILD_INDEX + ' -corpus.path='+CORPUS_PATH + ' -corpus.class='+CORPUS_CLASS + ' -index='+INDEX + ' -memory='+MEMORY + ' -stemmer.name=' + STEMMER)\n return INDEX", "def test_geo_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_geo_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_geo_index, (0.0, 0.0))\n\t)", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def create_bam_file_index(infile, outfile):\n statement = 'samtools index %(infile)s %(outfile)s'\n P.run(statement,\n job_queue = P.PARAMS['queue'],\n job_memory = P.PARAMS['memory'])", "def addCatalogIndexes(portal):\n catalog = getToolByName(portal, 'portal_catalog')\n indexes = catalog.indexes()\n wanted = (('standardTags', 'KeywordIndex'),\n ('iamTags', 'KeywordIndex'),\n ('isearchTags', 'KeywordIndex'),\n ('hiddenTags', 'KeywordIndex'))\n indexables = []\n for name, meta_type in wanted:\n if name not in indexes:\n catalog.addIndex(name, meta_type)\n indexables.append(name)\n logger.info(\"Added %s for field %s.\", meta_type, name)\n if len(indexables) > 0:\n logger.info(\"Indexing new indexes %s.\", ', 
'.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_CreateIndex(self, arg0)", "def create_index(self):\n if self.index_exists():\n logger.info('Index {} already exists'.format(self.index_name))\n logger.info('Deleting existing index')\n self.indices_client.delete(index=self.index_name)\n self.create_index_if_not_exist()", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_CreateIndex(self, arg0)", "def test_create_index(self):\n\n class Comment(Document):\n message = StringField()\n meta = {\"allow_inheritance\": True}\n\n Comment.create_index(\"message\")\n\n info = Comment.objects._collection.index_information()\n info = [\n (value[\"key\"], value.get(\"unique\", False), value.get(\"sparse\", False))\n for key, value in info.items()\n ]\n assert ([(\"_cls\", 1), (\"message\", 1)], False, False) in info", "def _make_index(self, fname, sents, words):\n for w in words:\n # word index for this file only\n findex = []\n\n for ixS, s in enumerate(sents):\n # iterate over each word in the sentencep\n for ixT, token in enumerate(s):\n # could use regex for substring matching instead\n if w == token.lower():\n findex.append((ixS, ixT))\n # keep track of word use frequency\n self._freq[w] += 1\n\n # grow the main index \n self._index[w][fname]= findex", "def test_index_keys(engine):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n other = Column(DateTime, range_key=True)\n another = Column(UUID)\n last = Column(String)\n\n by_last = GlobalSecondaryIndex(hash_key=\"another\", range_key=\"last\")\n by_another = LocalSecondaryIndex(range_key=\"last\")\n\n assert Model.by_last.hash_key is Model.another\n assert Model.by_last.range_key is Model.last\n\n assert Model.by_another.hash_key is Model.id\n assert Model.by_another.range_key is Model.last", "def createSpatialIndex(self, schema, table, column):\r\n index_name = '{}_{}_idx'.format(table, column)\r\n return self.runSql('CREATE INDEX {} ON {} USING gist ({})'.format(index_name, self.encodeTableName(schema, table), self.encodeColumnName(column)))", "def configure_index(client):\n index_name = client.index + \"-\" + _random_id()\n mappings = ANNOTATION_MAPPING\n\n if client.server_version < Version(\"7.0.0\"):\n mappings = {client.mapping_type: mappings}\n\n client.conn.indices.create(\n index_name,\n body={\n \"mappings\": mappings,\n \"settings\": {\"analysis\": ANALYSIS_SETTINGS},\n },\n )\n\n return index_name", "def _make_sample_filter_indices(\n X: np.ndarray,\n test: bool = False,\n building_types: list = None,\n htc_upper_bound: float = None,\n htc_lower_bound: float = None\n):\n all_indices = list(range(X.shape[0]))\n\n print(\">>>\", building_types)\n\n if building_types and len(building_types) > 0:\n building_types_list = _building_types_from_csv(test)\n all_indices += [i for i, bt in enumerate(building_types_list) if bt in building_types]\n\n if htc_lower_bound:\n mean_htcs = _mean_htc_from_csv(test)\n htc_lower_filtered_indices = np.where(np.array(mean_htcs) >= htc_lower_bound)[0].tolist()\n\n if len(all_indices) > 0:\n all_indices = list(set(all_indices) & set(htc_lower_filtered_indices))\n else:\n all_indices = htc_lower_filtered_indices\n\n if htc_upper_bound:\n mean_htcs = _mean_htc_from_csv(test)\n htc_upper_filtered_indices = np.where(np.array(mean_htcs) <= 
htc_upper_bound)[0].tolist()\n\n if len(all_indices) > 0:\n all_indices = list(set(all_indices) & set(htc_upper_filtered_indices))\n else:\n all_indices = htc_upper_filtered_indices\n\n return all_indices", "def instantiate_indexor(prefix, width):\n stdlib = py_ast.Stdlib()\n name = py_ast.CompVar(NAME_SCHEME[\"index name\"].format(prefix=prefix))\n add_name = py_ast.CompVar(f\"{prefix}_add\")\n cells = [\n py_ast.Cell(name, stdlib.register(width)),\n py_ast.Cell(add_name, stdlib.op(\"add\", width, signed=False)),\n ]\n\n init_name = py_ast.CompVar(NAME_SCHEME[\"index init\"].format(prefix=prefix))\n init_group = py_ast.Group(\n init_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 2 ** width - 1), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(init_name, \"done\")\n ),\n ],\n )\n\n upd_name = py_ast.CompVar(NAME_SCHEME[\"index update\"].format(prefix=prefix))\n upd_group = py_ast.Group(\n upd_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 1), py_ast.CompPort(add_name, \"left\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"out\"), py_ast.CompPort(add_name, \"right\")\n ),\n py_ast.Connect(\n py_ast.CompPort(add_name, \"out\"), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(upd_name, \"done\")\n ),\n ],\n )\n\n return (cells, [init_group, upd_group])", "def _init_index(self):\n\n if self._check_idx:\n self._index = bamnostic.bai.Bai(self._index_path)\n self.__nocoordinate = self._index.n_no_coor\n self.__mapped = sum(self._index.unmapped[mapped].n_mapped for mapped in self._index.unmapped) + self.nocoordinate\n self.__unmapped = sum(self._index.unmapped[unmapped].n_unmapped for unmapped in self._index.unmapped) + self.nocoordinate", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n if self.session is not None:\r\n self.disconnect()\r\n self.create_database()\r\n return(True)", "def get_or_create_index(self, uid: str, options: Optional[Dict[str, Any]] = None) -> Index:\n try:\n index_instance = self.get_index(uid)\n except MeiliSearchApiError as err:\n if err.error_code != 'index_not_found':\n raise err\n index_instance = self.create_index(uid, options)\n return index_instance", "def create_index(log_df, column):\n temp_list = log_df[[column]].values.tolist()\n subsec_set = {(x[0]) for x in temp_list}\n subsec_set = sorted(list(subsec_set))\n alias = dict()\n for i, _ in enumerate(subsec_set):\n alias[subsec_set[i]] = i + 1\n return alias", "def describe_index(self, table_name, timeout):\n\n _abstract()", "def describe_index(self, table_name, timeout):\n\n _abstract()", "def test_large_block_index():\n\n # TODO: It would be nice to find a way to make this test faster. 
The\n # real bottleneck here is the enormous YAML section.\n\n buff = io.BytesIO()\n\n narrays = int(io.DEFAULT_BUFFER_SIZE / 4)\n\n arrays = []\n for i in range(narrays):\n arrays.append(np.array([i], np.uint16))\n\n tree = {\"arrays\": arrays}\n\n ff = asdf.AsdfFile(tree)\n # Since we're testing with small arrays, force all arrays to be stored\n # in internal blocks rather than letting some of them be automatically put\n # inline.\n ff.write_to(buff, all_array_storage=\"internal\")\n\n buff.seek(0)\n with asdf.open(buff) as ff2:\n assert isinstance(ff2._blocks._internal_blocks[0], block.Block)\n assert len(ff2._blocks._internal_blocks) == narrays", "def build_index(self):\n # Init the HNSWLIB index\n self.create_index()\n logger.info(f\"Building HNSWLIB index, max_elements: {len(self.corpus)}\")\n logger.debug(f\"Parameters Required: M: {self.M}\")\n logger.debug(f\"Parameters Required: ef_construction: {self.ef_construction}\")\n logger.debug(f\"Parameters Required: ef(>topn): {self.ef}\")\n\n # Then we train the index to find a suitable clustering\n self.index.add_items(self.corpus_embeddings, list(range(len(self.corpus_embeddings))))", "def test(indices_to_visit = None):\n ##0 Chicago\n ##1 New York City\n ##2 Los Angeles\n ##3 Minneapolis\n ##4 Denver\n ##5 Dallas\n ##6 Seattle\n ##7 Boston\n ##8 San Francisco\n ##9 St. Louis\n ##10 Houston\n ##11 Phoenix\n ##12 Salt Lake City\n ##13 Miami\n ##14 Atlanta\n ##15 Kansas City\n home_index = 15 # Kansas city\n # 15x15 matrix with main diagonal consisting of 0s and to which data is mirrored along\n # (values are derived from external resource and multiplied by 1000 for higher accuracy)\n matrix = np.array([[0.0, 1148413.3550047704, 2813453.6297408855, 572861.4368351421, 1483440.7452179305, 1296355.2188721865, 2801269.1215845253, 1370943.3069385102, 2996683.256068982, 422589.4697157836, 1515737.0196676727, 2343639.7107855356, 2031500.319603397, 1913900.3015914203, 946854.1020487415, 665894.0336505901],\n [1148413.3550047704, 0.0, 3949451.153672887, 1642119.4792808082, 2628946.6435325537, 2212019.1209020815, 3882177.952930788, 306997.0343229422, 4144977.810718553, 1408454.3261387087, 2286054.8575902223, 3455343.3108375454, 3179102.5335818897, 1754834.3710577146, 1202616.154562711, 1766599.1336905772],\n [2813453.6297408855, 3949451.153672887, 0.0, 2455296.3791196346, 1339227.410707824, 1998182.1420783552, 1545364.434045008, 4184394.186016967, 559978.4273194656, 2560790.9591738936, 2212581.51715849, 575975.8749662543, 933602.6426595236, 3767490.41517038, 3120118.850020503, 2186473.1552241463],\n [572861.4368351421, 1642119.4792808082, 2455296.3791196346, 0.0, 1127312.7583590776, 1390159.7734006236, 2249169.1308160927, 1811513.5290266906, 2554165.8167895717, 750916.7305340832, 1701189.1538312144, 2062079.2399570548, 1590460.9488364782, 2434801.332310659, 1462408.5353501518, 662752.1291133759],\n [1483440.7452179305, 2628946.6435325537, 1339227.410707824, 1127312.7583590776, 0.0, 1067257.7993323756, 1646308.7967673023, 2852307.4164419994, 1530510.2790658756, 1283707.511393525, 1414308.8805983758, 943721.1931707633, 598728.757362067, 2779561.192116527, 1952618.0544916363, 899656.1020173575],\n [1296355.2188721865, 2212019.1209020815, 1998182.1420783552, 1390159.7734006236, 1067257.7993323756, 0.0, 2709804.112590561, 2500314.4507069485, 2390841.4329337194, 882457.80942383, 361482.7025425731, 1427995.4150203674, 1610768.421819668, 1788903.6065106322, 1161480.3557326929, 730446.8613086065],\n [2801269.1215845253, 3882177.952930788, 
1545364.434045008, 2249169.1308160927, 1646308.7967673023, 2709804.112590561, 0.0, 4018059.834330202, 1093104.7332788548, 2778905.575804111, 3046648.362755992, 1794989.6453295103, 1129464.5539648102, 4404737.747850686, 3516794.375197078, 2427457.036285458],\n [1370943.3069385102, 306997.0343229422, 4184394.186016967, 1811513.5290266906, 2852307.4164419994, 2500314.4507069485, 4018059.834330202, 0.0, 4350710.853063807, 1673216.4080939887, 2586942.3262796295, 3706392.097841614, 3382851.415271485, 2022974.6418062754, 1509585.60107986, 2015770.1390589625],\n [2996683.256068982, 4144977.810718553, 559978.4273194656, 2554165.8167895717, 1530510.2790658756, 2390841.4329337194, 1093104.7332788548, 4350710.853063807, 0.0, 2812916.3098878833, 2650547.941880299, 1053620.7288649315, 967859.8344376946, 4179636.203479384, 3448359.745690545, 2428862.4239271535],\n [422589.4697157836, 1408454.3261387087, 2560790.9591738936, 750916.7305340832, 1283707.511393525, 882457.80942383, 2778905.575804111, 1673216.4080939887, 2812916.3098878833, 0.0, 1093601.4408876144, 2050115.5214378452, 1872971.1741522516, 1708236.6189296674, 752855.8488125347, 384122.2000072272],\n [1515737.0196676727, 2286054.8575902223, 2212581.51715849, 1701189.1538312144, 1414308.8805983758, 361482.7025425731, 3046648.362755992, 2586942.3262796295, 2650547.941880299, 1093601.4408876144, 0.0, 1636770.4499809493, 1932616.2801687205, 1559260.024532222, 1130480.278513877, 1039856.4844335921],\n [2343639.7107855356, 3455343.3108375454, 575975.8749662543, 2062079.2399570548, 943721.1931707633, 1427995.4150203674, 1794989.6453295103, 3706392.097841614, 1053620.7288649315, 2050115.5214378452, 1636770.4499809493, 0.0, 812548.5062332726, 3191662.5092484164, 2564665.4531581327, 1690942.142157212],\n [2031500.319603397, 3179102.5335818897, 933602.6426595236, 1590460.9488364782, 598728.757362067, 1610768.421819668, 1129464.5539648102, 3382851.415271485, 967859.8344376946, 1872971.1741522516, 1932616.2801687205, 812548.5062332726, 0.0, 3364908.7076308434, 2551338.215149899, 1490589.7393085626],\n [1913900.3015914203, 1754834.3710577146, 3767490.41517038, 2434801.332310659, 2779561.192116527, 1788903.6065106322, 4404737.747850686, 2022974.6418062754, 4179636.203479384, 1708236.6189296674, 1559260.024532222, 3191662.5092484164, 3364908.7076308434, 0.0, 973244.7750437199, 2000112.4162614697],\n [946854.1020487415, 1202616.154562711, 3120118.850020503, 1462408.5353501518, 1952618.0544916363, 1161480.3557326929, 3516794.375197078, 1509585.60107986, 3448359.745690545, 752855.8488125347, 1130480.278513877, 2564665.4531581327, 2551338.215149899, 973244.7750437199, 0.0, 1089830.6426635552],\n [665894.0336505901, 1766599.1336905772, 2186473.1552241463, 662752.1291133759, 899656.1020173575, 730446.8613086065, 2427457.036285458, 2015770.1390589625, 2428862.4239271535, 384122.2000072272, 1039856.4844335921, 1690942.142157212, 1490589.7393085626, 2000112.4162614697, 1089830.6426635552, 0.0]])\n\n solver = FacilityOrderSolver(matrix, home_index)\n \n return solver.solve(indices_to_visit)", "def test_string_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_string_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, \"foobar\")\n\t)", "def add_index(self, column_list, name=None, unique=False):\n columns = self._join_cols(column_list)\n if not name:\n name = self.new_index_name('_'.join(column_list), unique)\n\n self.execute(self.commands.add_index(self.name, name, columns, unique))\n self.commit()" ]
[ "0.68685216", "0.64368653", "0.6305295", "0.61893433", "0.61893433", "0.61447036", "0.6129321", "0.5909839", "0.5897009", "0.5865872", "0.58181304", "0.57768124", "0.57675445", "0.57356316", "0.5727753", "0.57036835", "0.5691433", "0.5681699", "0.56654525", "0.56597537", "0.56320864", "0.5629931", "0.5624973", "0.5622942", "0.56093454", "0.5591493", "0.5580706", "0.55398023", "0.5517882", "0.54803383", "0.54765046", "0.5464431", "0.5432039", "0.5410101", "0.5407539", "0.53897315", "0.5364562", "0.535344", "0.5340103", "0.53255045", "0.5320285", "0.5312941", "0.530627", "0.5287273", "0.5266337", "0.5265745", "0.52656865", "0.52566856", "0.5250548", "0.52495056", "0.52363", "0.5219873", "0.5209664", "0.5204283", "0.51902604", "0.5189798", "0.5184978", "0.5168076", "0.5160897", "0.51553434", "0.5154451", "0.51525205", "0.5149391", "0.5112608", "0.5112127", "0.50884616", "0.50725824", "0.50600994", "0.5053504", "0.50509405", "0.5049853", "0.5041078", "0.5033773", "0.5033584", "0.49947873", "0.49931347", "0.4979854", "0.49785617", "0.49553004", "0.49507773", "0.494831", "0.49474335", "0.49452993", "0.49390262", "0.493896", "0.49374834", "0.49355578", "0.49293938", "0.4912473", "0.4909169", "0.49086195", "0.4906702", "0.49020872", "0.49020872", "0.49004954", "0.49004328", "0.48977095", "0.4895047", "0.48938578" ]
0.6015564
8
Handles the linear scaling rule and LR decay. Scales the learning rate at the epoch boundaries listed in LR_SCHEDULE by the corresponding scaling factor.
def learning_rate_schedule(current_epoch, current_batch, batches_per_epoch, batch_size):\n    del current_batch, batches_per_epoch  # not used\n    initial_learning_rate = common.BASE_LEARNING_RATE * batch_size / 128\n    learning_rate = initial_learning_rate\n    for mult, start_epoch in LR_SCHEDULE:\n        if current_epoch >= start_epoch:\n            learning_rate = initial_learning_rate * mult\n        else:\n            break\n    return learning_rate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_learning_rate_schedule(optimizer, epoch, initial_lr, decay_factor, decay_epochs):\n\n # Find the index of the current interval:\n interval_index = len([mark for mark in decay_epochs if mark < epoch])\n\n lr = initial_lr * (decay_factor ** interval_index)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):\n final_lr = initial_lr * 0.1\n init_lr = (initial_lr - final_lr)\n lr = final_lr + init_lr - (init_lr * (min(1., epoch / (float(total_num_epochs)))))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):\n lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(self, optimizer, batch, learning_rate, steps, scales, batch_size):\n lr = learning_rate\n for i in range(len(steps)):\n scale = scales[i] if i < len(scales) else 1\n if batch >= steps[i]:\n lr = lr * scale\n if batch == steps[i]:\n break\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr/batch_size\n return lr", "def lr_schedule(epoch, current_lr):\n lr = current_lr\n if epoch == 23: lr *= 0.5\n elif epoch == 21: lr *= 1e-1\n elif epoch == 16: lr *= 1e-1\n elif epoch == 11: lr *= 1e-1\n return lr", "def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):\n if not isinstance(optimizer, torch.optim.SGD):\n return\n #lr = config.lr * (0.1 ** (epoch // 10))\n if epoch and epoch % 10 == 0:\n for i, param_group in enumerate(optimizer.param_groups):\n param_group['lr'] *= decrease_rate\n logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])", "def adjust_prior_learning_rate_schedule(optimizer, epoch, initial_lr, decay_factor, decay_epochs):\n\n # Find the index of the current interval:\n interval_index = len([mark for mark in decay_epochs if mark < epoch])\n\n lr = initial_lr * (decay_factor ** interval_index)\n return lr\n # for param_group in optimizer.param_groups:\n # param_group['lr'] = lr", "def learning_rate_schedule(epoch):\n lr = 1.0\n\n if config[\"distributed_lr_warmup_epoch\"] > 0:\n warmup_epochs = config[\"distributed_lr_warmup_epoch\"]\n max_factor = config[\"distributed_lr_warmup_factor\"]\n factor = 1.0 + (max_factor - 1.0) * min(epoch / warmup_epochs, 1.0)\n lr *= factor\n\n for (milestone, factor) in config[\"lr_schedule_milestones\"]:\n if epoch >= milestone:\n lr *= factor\n else:\n return lr\n return lr", "def _adjust_learning_rate_resnet(optimizer, epoch):\n\n if epoch == 90:\n return lr_scheduler.MultiStepLR(optimizer, [30, 60, 80])\n elif epoch == 270: # autoaugment\n return lr_scheduler.MultiStepLR(optimizer, [90, 180, 240])\n else:\n raise ValueError('invalid epoch=%d for resnet scheduler' % epoch)", "def scheduler(epoch_idx, lr):\n new_lr = lr\n if (epoch_idx == 60 or epoch_idx == 120 or epoch_idx == 160\n or epoch_idx == 260 or epoch_idx == 320 or epoch_idx == 360):\n new_lr *= 0.2\n \"\"\"\n if epoch_idx == 200:\n new_lr = 0.1\n \"\"\"\n return new_lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n if args.cos: # cosine lr schedule\n lr *= 0.5 * (1. 
+ math.cos(math.pi * epoch / args.epochs))\n else: # stepwise lr schedule\n for milestone in args.schedule:\n lr *= 0.1 if epoch >= milestone else 1.\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def decay_lr(self):\n self.scheduler.step()\n for i, param_group in enumerate(self.optimizer.param_groups):\n self.lrs[i] = param_group['lr']", "def _lr_schedule(self, epoch: int):\n penultimate_activ_func = K.function([self.model.layers[0].input], [self.model.layers[-2].output])\n\n Kz = 0.\n for i in range((len(self.x_train) - 1) // self.bs + 1):\n start_i = i * self.bs\n end_i = start_i + self.bs\n xb = self.x_train[start_i:end_i]\n\n activ = np.linalg.norm(penultimate_activ_func([xb]))\n if activ > Kz:\n Kz = activ\n\n K_ = ((self.n_classes - 1) * Kz) / (self.n_classes * self.bs)\n lr = 1 / K_\n\n self.lr_history.append(lr)\n return lr", "def adjust_learning_rate(optimizer, cur_epoch, base_lr=0.1, lr_schedule=[4, 8, 12, 14, 16]):\n lr = 0\n for i, e in enumerate(lr_schedule):\n if cur_epoch < e:\n lr = base_lr * (0.1 ** i)\n break\n if lr == 0:\n lr = base_lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(initial_lr, optimizer, epoch, every_epoch):\n lr = initial_lr * (0.1 ** (epoch // every_epoch))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def _learning_rate_scheduler(max_learning_rate, \n iteration, nb_iterations, nb_warmup_iterations, nb_warmdown_iterations, \n beta2):\n warmup_scaling = lax.max(0.5 * iteration * (1. - beta2), iteration / nb_warmup_iterations)\n warmdown_scaling = (nb_iterations - iteration) / nb_warmdown_iterations\n scaling = lax.min(1., lax.min(warmup_scaling, warmdown_scaling))\n return scaling * max_learning_rate", "def scale_lambda(self, lmbd, nr_iterations, lmbd_schedule = 500000):\n self.scaled_lambda = lmbd * float(min(lmbd_schedule,nr_iterations)) / lmbd_schedule", "def adjust_learning_rate(self):\n out_base_lr = self.args.base_lr\n for param_group in self.optimizer.param_groups:\n in_lr = param_group[\"initial_lr\"]\n out_lr = in_lr\n if self.args.lr_decay_type == \"cos\": # cosine lr schedule\n out_lr *= 0.5 * (1.0 + np.cos(np.pi * self.epoch / self.args.epochs))\n else: # stepwise lr schedule\n for milestone in self.args.lr_step_schedule:\n out_lr *= 0.1 if self.epoch >= milestone else 1.0\n param_group[\"lr\"] = out_lr\n if in_lr == self.args.base_lr:\n out_base_lr = out_lr\n if self.train_logger is not None:\n self.train_logger.scalar_summary(\n \"metrics/%s/epoch\" % self.full_name, self.epoch, step=self.iteration, increment_counter=False\n )\n self.train_logger.scalar_summary(\n \"metrics/%s/lr\" % self.full_name, out_base_lr, step=self.iteration, increment_counter=False\n )\n print(\"Epoch\", self.epoch, \"Learning rate\", out_base_lr)\n return out_base_lr", "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr", "def update_learning_rate(self):\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def adjust_learning_rate(start_lr, optimizer, epoch, total_epoch_num):\n #lr = start_lr * (0.1 ** (epoch // 30))\n lr = start_lr * (0.3 ** (epoch // 5))\n if epoch==total_epoch_num:\n lr = lr * 0.3\n\n for param_group in optimizer.param_groups:\n param_group['lr'] 
= lr", "def adjust_learning_rate(optimizer, lr_factor, epoch):\n #lr = args.lr * (0.1 ** (epoch // 30))\n print('the learning rate is set to {0:.5f}'.format(lr_factor[epoch]*args.lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_factor[epoch]*args.lr", "def adjust_learning_rate(optimizer, epoch):\n lr = config.optimizer.lr\n schedule = config.lr_schedule if hasattr(config, 'lr_schedule') else 'fixed'\n if schedule == 'fixed':\n if epoch >= 0.75 * config.epochs:\n lr = config.optimizer.lr * 0.1\n if epoch >= 0.9 * config.epochs:\n lr = config.optimizer.lr * 0.01\n if epoch >= config.epochs:\n lr = config.optimizer.lr * 0.001\n # cosine schedule\n elif schedule == 'cosine':\n lr = config.optimizer.lr * 0.5 * (1 + np.cos((epoch - 1) / config.epochs * np.pi))\n elif schedule == 'search':\n if epoch >= 75:\n lr = 0.01\n if epoch >= 90:\n lr = 0.001\n else:\n raise ValueError('Unkown LR schedule %s' % schedule)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(args,optimizer, epoch):\n lr = args.lr\n schedule = args.lr_schedule\n # schedule from TRADES repo (different from paper due to bug there)\n if schedule == 'trades':\n if epoch >= 0.75 * args.epochs:\n lr = args.lr * 0.1\n # schedule as in TRADES paper\n elif schedule == 'trades_fixed':\n if epoch >= 0.75 * args.epochs:\n lr = args.lr * 0.1\n if epoch >= 0.9 * args.epochs:\n lr = args.lr * 0.01\n if epoch >= args.epochs:\n lr = args.lr * 0.001\n # cosine schedule\n elif schedule == 'cosine':\n lr = args.lr * 0.5 * (1 + np.cos((epoch - 1) / args.epochs * np.pi))\n # schedule as in WRN paper\n elif schedule == 'wrn':\n if epoch >= 0.3 * args.epochs:\n lr = args.lr * 0.2\n if epoch >= 0.6 * args.epochs:\n lr = args.lr * 0.2 * 0.2\n if epoch >= 0.8 * args.epochs:\n lr = args.lr * 0.2 * 0.2 * 0.2\n else:\n raise ValueError('Unkown LR schedule %s' % schedule)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, epoch, lr, schedule, gamma):\n if epoch in schedule:\n lr *= gamma\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, epoch, lr, schedule, gamma):\n if epoch in schedule:\n lr *= gamma\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, epoch, lr, schedule, gamma):\r\n if epoch in schedule:\r\n lr *= gamma\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n return lr", "def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio,\n num_example, batch_size, begin_epoch):\n assert lr_refactor_ratio > 0\n iter_refactor = [int(r) for r in lr_refactor_step.split(',') if r.strip()]\n if lr_refactor_ratio >= 1:\n return (learning_rate, None)\n else:\n lr = learning_rate\n epoch_size = num_example // batch_size\n for s in iter_refactor:\n if begin_epoch >= s:\n lr *= lr_refactor_ratio\n if lr != learning_rate:\n pass\n # logging.getLogger().info(\"Adjusted learning rate to {} for epoch {}\".format(lr, begin_epoch))\n steps = [epoch_size * (x - begin_epoch) for x in iter_refactor if x > begin_epoch]\n if not steps:\n return (lr, None)\n lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=lr_refactor_ratio)\n return (lr, lr_scheduler)", "def create_learning_rate_scheduler(\n base_learning_rate=0.5,\n factors='constant * linear_warmup * rsqrt_normalized_decay',\n warmup_steps=16000,\n decay_factor=0.5,\n 
steps_per_decay=50000,\n steps_per_cycle=100000):\n factors = [n.strip() for n in factors.split('*')]\n\n def step_fn(step):\n \"\"\"Step to learning rate function.\"\"\"\n ret = 1.0\n for name in factors:\n if name == 'constant':\n ret *= base_learning_rate\n elif name == 'linear_warmup':\n ret *= jnp.minimum(1.0, step / warmup_steps)\n elif name == 'rsqrt_decay':\n ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))\n elif name == 'rsqrt_normalized_decay':\n ret *= jnp.sqrt(warmup_steps)\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'decay_every':\n ret *= (decay_factor**(step // steps_per_decay))\n elif name == 'cosine_decay':\n progress = jnp.maximum(0.0,\n (step - warmup_steps) / float(steps_per_cycle))\n ret *= jnp.maximum(0.0,\n 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))\n else:\n raise ValueError('Unknown factor %s.' % name)\n return jnp.asarray(ret, dtype=jnp.float32)\n\n return step_fn", "def adjust_learning_rate_D(start_lr, optimizer, epoch):\n #lr = start_lr * (0.1 ** (epoch // 30))\n lr = start_lr * (0.3 ** (epoch // 5))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def get_scheduler(optimizer, opt):\n \n epochs_no_decay = opt.epochs - opt.lr_linear\n lr_policy = opt.lr_policy\n \n if lr_policy == 'linear':\n def lr_lambda(epoch):\n return 1. - max(0, epoch - epochs_no_decay) / float(opt.lr_linear + 1)\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)\n elif lr_policy == 'step':\n # multiply by gamma every lr_decay_steps\n # for example lr_decay_steps=50 and initial learning = .5\n # then we have \n # lr = .5 for 0 <= epoch < 50;\n # lr = .05 for 50 <= epoch < 100;\n # lr = .005 for 100 <= epoch < 150;\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_step, gamma=.1)\n elif lr_policy == 'plateau':\n # Reduce learning rate when a metric has stopped improving. \n # Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. \n # This scheduler reads a metrics quantity and if no improvement \n # is seen for a ‘patience’ number of epochs, \n # the learning rate is reduced.\n # Parameters\n # - mode (str, default=min): In `min` mode, lr will be reduced when the quantity monitored has stopped decreasing; \n # in `max` mode, lr will be reduced when the quantity monitored has stopped increasing.\n # - factor (float, default=.1): Factor by which the learning rate will be reduced. new_lr = lr * factor.\n # - patience (int, default=10): Number of epochs with no improvement after which learning rate will be reduced. \n # - threshold (float): only decrease lr if the change in the quantity monitored is smaller than threshold. 
\n # Say we have threshold=0.001, if loss is $18.0$ on epoch $n$ and loss is $17.9999$ on epoch $n+1$,\n # then multiply current learning rate by the factor.\n # On the contrary, if the loss is 17.99, lr doesn't have to be changed.\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=.2, threshold=.01, patience=5)\n else:\n return NotImplementedError(f'learning rate policy {lr_policy} is not implemented')\n return scheduler", "def create_learning_rate_scheduler(\n factors: str = 'constant * linear_warmup * rsqrt_decay',\n base_learning_rate: float = 0.5,\n warmup_steps: int = 1000,\n decay_factor: float = 0.5,\n steps_per_decay: int = 20000,\n steps_per_cycle: int = 100000,\n step_offset: int = 0,\n min_learning_rate: float = 1e-8) -> LearningRateCallable:\n factors = [n.strip() for n in factors.split('*')]\n\n def step_fn(step: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Step to learning rate function.\"\"\"\n step = jnp.maximum(0, step - step_offset)\n ret = 1.0\n for name in factors:\n if name == 'constant':\n ret *= base_learning_rate\n elif name == 'linear_warmup':\n ret *= jnp.minimum(1.0, step / warmup_steps)\n elif name == 'linear_decay':\n ret *= base_learning_rate * jnp.minimum(\n step / warmup_steps, 1.0 + decay_factor * (warmup_steps - step))\n elif name == 'rsqrt_decay':\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'rsqrt_normalized_decay':\n ret *= jnp.sqrt(warmup_steps)\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'decay_every':\n ret *= (decay_factor**(step // steps_per_decay))\n elif name == 'cosine_decay':\n progress = jnp.maximum(0.0,\n (step - warmup_steps) / float(steps_per_cycle))\n ret *= jnp.maximum(0.0,\n 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))\n else:\n raise ValueError('Unknown factor %s.' 
% name)\n ret = jnp.maximum(ret, min_learning_rate)\n return jnp.asarray(ret, dtype=jnp.float32)\n\n return step_fn", "def decay_lr(current_epoch, optimizer):\n schedule = Config.get(\"LearningRateSchedule\")\n gammas = Config.get(\"LearningRateGammas\")\n for idx, target_epoch in enumerate(schedule):\n if current_epoch == int(target_epoch):\n for params in optimizer.param_groups:\n current_lr = params['lr']\n params['lr'] = current_lr * float(gammas[idx])\n logger.info(\"Changed lr from {} to {}\".format(current_lr,\n params['lr']))", "def reschedule_learning_rate(model, epoch, scheduler):\n if epoch == 7:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.005)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 13:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.005)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 19:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.002)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 25:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.002)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n\n return model, scheduler", "def test_lr_scalers():\n # We include a cost other than SumOfParams so that data is actually\n # queried from the training set, and the expected number of updates\n # are applied.\n cost = SumOfCosts([SumOfParams(), (0., DummyCost())])\n\n scales = [.01, .02, .05, 1., 5.]\n shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]\n\n learning_rate = .001\n\n class ModelWithScalers(Model):\n def __init__(self):\n super(ModelWithScalers, self).__init__()\n self._params = [sharedX(np.zeros(shape)) for shape in shapes]\n self.input_space = VectorSpace(1)\n\n def __call__(self, X):\n # Implemented only so that DummyCost would work\n return X\n\n def get_lr_scalers(self):\n return dict(zip(self._params, scales))\n\n model = ModelWithScalers()\n\n dataset = ArangeDataset(1)\n\n sgd = SGD(cost=cost,\n learning_rate=learning_rate,\n learning_rule=Momentum(.0),\n batch_size=1)\n\n sgd.setup(model=model, dataset=dataset)\n\n manual = [param.get_value() for param in model.get_params()]\n manual = [param - learning_rate * scale for param, scale in\n zip(manual, scales)]\n\n sgd.train(dataset=dataset)\n\n assert all(np.allclose(manual_param, sgd_param.get_value())\n for manual_param, sgd_param\n in zip(manual, model.get_params()))\n\n manual = [param - learning_rate * scale\n for param, scale\n in zip(manual, scales)]\n\n sgd.train(dataset=dataset)\n\n assert all(np.allclose(manual_param, sgd_param.get_value())\n for manual_param, sgd_param\n in zip(manual, model.get_params()))", "def adjust_learning_rate_SR(optimizer, epoch):\r\n lr = opt.lr * (0.1 ** (epoch // opt.step))\r\n return lr", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = 
-dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def adjust_learning_rate(lr, decay, optimizer, cur_epoch, every_n_epochs):\n new_lr = lr * (decay ** (cur_epoch // every_n_epochs))\n\n # if cur_epoch % every_n_epochs == 0:\n # new_lr = lr * 0.1\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr", "def adjust_lr_staircase(optimizer, base_lr, ep, decay_at_epochs, factor):\n assert ep >= 1, \"Current epoch number should be >= 1\"\n\n if ep not in decay_at_epochs:\n return\n\n ind = find_index(decay_at_epochs, ep)\n for g in optimizer.param_groups:\n g['lr'] = base_lr * factor ** (ind + 1)\n print('=====> lr adjusted to {:.10f}'.format(g['lr']).rstrip('0'))", "def calculate_initial_lr(cfg: OmegaConf) -> float:\n if cfg[\"parameter\"][\"linear_schedule\"]:\n scaled_lr = cfg[\"experiment\"][\"lr\"] * cfg[\"experiment\"][\"batches\"] / 256.\n else:\n scaled_lr = cfg[\"experiment\"][\"lr\"] * np.sqrt(cfg[\"experiment\"][\"batches\"])\n return scaled_lr", "def lr_schedule(self, epoch):\n lr = self.params.learning_rate\n\n if epoch > int(self.params.num_epochs * 0.8):\n lr *= 1e-3\n elif epoch > int(self.params.num_epochs * 0.6):\n lr *= 1e-2\n elif epoch > int(self.params.num_epochs * 0.4):\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr", "def lr_schedule(epoch):\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n return lr", "def lr_schedule(epoch):\r\n lr = 1e-3\r\n if epoch > 180:\r\n lr *= 0.5e-3\r\n elif epoch > 160:\r\n lr *= 1e-3\r\n elif epoch > 120:\r\n lr *= 1e-2\r\n elif epoch > 80:\r\n lr *= 1e-1\r\n print('Learning rate: ', lr)\r\n return lr", "def lr_schedule(epoch,lr):\r\n learning_rate = lr\r\n if epoch > 10:\r\n learning_rate *= 0.1\r\n if epoch > 20:\r\n learning_rate *= 0.1\r\n if epoch > 50:\r\n learning_rate *= 0.01\r\n\r\n # tf.summary.scalar('learning rate', data=learning_rate, step=epoch)\r\n return learning_rate", "def exp_lr_scheduler(optimizer, epoch, lr_decay_epoch=100,lr_decay_factor=0.5):\n init_lr = optimizer.param_groups[0]['lr']\n if epoch > 0 and (epoch % lr_decay_epoch == 0):\n lr = init_lr*lr_decay_factor\n print('\\n LR is set to {}'.format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def test_scheduler_correct_lr():\n params = list(TEST_MODEL.parameters())\n opt = torch.optim.SGD([dict(params=params[:2], lr=1e-3), dict(params=params[2:], lr=1e-2)])\n sch = pt_clb.PhasesScheduler([{\"start\": 0, \"end\": 1, \"lr\": [0, 5]}], change_every=1)\n # should be half of max LR after half epochs\n runner = Runner(model=TEST_MODEL, optimizer=opt, criterion=TEST_CRITERION, callbacks=sch)\n runner.fit(TEST_LOADER, epochs=1)\n expected = 5 * (LOADER_LEN - 1) / (2 * LOADER_LEN)\n assert [pg[\"lr\"] for pg in runner.state.optimizer.param_groups] == [expected * 1e-3, expected * 1e-2]\n # should be full LR after full epochs\n runner.state.optimizer = torch.optim.SGD([dict(params=params[:2], lr=1e-3), dict(params=params[2:], lr=1e-2)])\n runner.fit(TEST_LOADER, epochs=2)\n expected = 5 * (2 * LOADER_LEN - 1) / (2 * LOADER_LEN)\n assert [pg[\"lr\"] for pg in runner.state.optimizer.param_groups] == [expected * 1e-3, expected * 1e-2]", "def lr_schedule(epoch):\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif 
epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr", "def lr_schedule(epoch):\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr", "def lr_schedule(epoch):\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr", "def lr_schedule(epoch):\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr", "def adjust_learning_rate(optimizer, epoch, args, step):\n lr = args.lr * (0.1 ** (epoch // step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=50):\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n return optimizer", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=50):\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n return optimizer", "def adjust_learning_rate(lr, optimizer, epoch):\n lr = lr_init * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, epoch):\n \n boundary = [args.epochs//2,args.epochs//4*3,args.epochs]\n lr = args.lr * 0.1 ** int(bisect.bisect_left(boundary, epoch))\n print('Learning rate: %f'%lr)\n #print(epoch, lr, bisect.bisect_left(boundary, epoch))\n # lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=100):\r\n\r\n if epoch % lr_decay_epoch == 0 and epoch > 1:\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = param_group['lr'] * 0.1\r\n\r\n return optimizer", "def adjust_learning_rate(optimizer, batch):\n lr = learning_rate\n for i in range(len(steps)):\n scale = scales[i] if i < len(scales) else 1\n if batch >= steps[i]:\n lr = lr * scale\n if batch == steps[i]:\n break\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr / batch_size\n return lr", "def ft_adjust_learning_rate(optimizer, intial_lr, epoch, lr_steps):\n decay = 0.3 ** (sum(epoch >= np.array(lr_steps)))\n lr = intial_lr * decay\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=100):\n\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n\n return optimizer", "def adjust_learning_rate(self, optimizer, epoch):\n if self.args.lr_mode == 'step':\n lr = self.args.lr * (0.1 ** (epoch // self.args.lr_step))\n elif self.args.lr_mode == 'poly':\n lr = self.args.lr * (1 - epoch / self.args.train_epochs) ** 0.9\n else:\n raise ValueError('Unknown lr mode {}'.format(self.args.lr_mode))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, epoch, decay, lrate):\n lr = lrate * (0.1 ** (epoch // decay))\n for param_group in optimizer.param_groups:\n 
param_group['lr'] = lr", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):\r\n lr = init_lr * (0.8**(epoch // lr_decay_epoch))\r\n print('LR is set to {}'.format(lr))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n return optimizer", "def adjust_lr(self):\n learning_rate = self.params.base_lr * (1 - float(self.epoch) / self.params.num_epoch) ** self.params.power\n for param_group in self.opt.param_groups:\n param_group['lr'] = learning_rate\n print('Change learning rate into %f' % (learning_rate))\n self.summary_writer.add_scalar('learning_rate', learning_rate, self.epoch)", "def exp_lr_scheduler(optimizer, epoch, init_lr, lr_decay_epoch):\n\n lr = init_lr * (0.1**((epoch-1) // lr_decay_epoch))\n\n if (epoch-1) % lr_decay_epoch == 0:\n print('LR is set to {}'.format(lr))\n # log_value('lr',lr,epoch)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def _create_learning_rate_scheduler(learning_rate_config, optimizer, total_step):\n lr_scheduler = None\n learning_rate_type = learning_rate_config.WhichOneof('learning_rate')\n if learning_rate_type == 'multi_phase':\n config = learning_rate_config.multi_phase\n lr_phases = []\n mom_phases = []\n for phase_cfg in config.phases:\n lr_phases.append((phase_cfg.start, phase_cfg.lambda_func))\n mom_phases.append((phase_cfg.start, phase_cfg.momentum_lambda_func))\n lr_scheduler = lsf.LRSchedulerStep(\n optimizer,total_step, lr_phases, mom_phases)\n\n if learning_rate_type == 'one_cycle':\n config = learning_rate_config.one_cycle\n lr_scheduler = lsf.OneCycle(\n optimizer, total_step, config.lr_max, list(config.moms), config.div_factor, config.pct_start)\n\n if lr_scheduler is None:\n raise ValueError('Learning_rate %s not supported.' 
% learning_rate_type)\n\n return lr_scheduler", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):\n lr = init_lr * (0.8**(epoch // lr_decay_epoch))\n print('LR is set to {}'.format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def adjust_learning_rate(lr, optimizer, epoch, decay_epoch=30):\n lr = lr * (0.1 ** (epoch // decay_epoch))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def decay_lr(self):\n if self.lr_pi_decay_type == \"linear\":\n self.lr_pi = self.lr_pi_init * \\\n (1 - self.update_counter / float(self.lr_pi_decay))\n if self.lr_pi_decay_type == \"exponential\":\n self.lr_pi = self.lr_pi_init * \\\n np.exp(-self.lr_pi_decay * self.update_counter)\n self.lr_pi = np.clip(self.lr_pi, self.lr_pi_min, self.lr_pi_init)\n for g in self.pi_optimizer.param_groups:\n g['lr'] = self.lr_pi", "def adjust_learning_rate(opt, optimizer, epoch):\n epoch = copy.deepcopy(epoch)\n lr = opt.maxlr\n wd = opt.weightDecay\n if opt.learningratescheduler == 'imagenetscheduler':\n if epoch >= 1 and epoch <= 18:\n lr = 1e-3\n wd = 5e-5\n elif epoch >= 19 and epoch <= 29:\n lr = 5e-4\n wd = 5e-5\n elif epoch >= 30 and epoch <= 43:\n lr = 1e-4\n wd = 0\n elif epoch >= 44 and epoch <= 52:\n lr = 5e-5\n wd = 0\n elif epoch >= 53:\n lr = 2e-5\n wd = 0\n if opt.optimType=='sgd':\n lr *= 10\n opt.lr = lr\n opt.weightDecay = wd\n if opt.learningratescheduler == 'decayscheduler':\n while epoch >= opt.decayinterval:\n lr = lr/opt.decaylevel\n epoch = epoch - opt.decayinterval\n lr = max(lr,opt.minlr)\n opt.lr = lr\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n param_group['weight_decay'] = wd", "def lr_schedule(epoch):\n lr = 1e-3\n if epoch > 30:\n lr *= 0.5e-3\n elif epoch > 25:\n lr *= 1e-3\n elif epoch > 20:\n lr *= 1e-2\n elif epoch > 10:\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr", "def adjust_learning_rate(lr, optimizer, epoch):\n lr = lr * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def lr_scheduler(self, lr_init, global_step):\n pass", "def on_lr_adjust(self, runner):\n if runner.scheduler:\n runner.scheduler.step()", "def lr_schedule(epoch):\n lr = 1e-3\n return lr", "def lr_schedule(num_epochs):\n return lambda epoch: 1.0", "def adjust_learning_rate(optimizer, epoch, gammas, schedule):\n lr = args.learning_rate\n assert len(gammas) == len(schedule), \"length of gammas and schedule should be equal\"\n for (gamma, step) in zip(gammas, schedule):\n if (epoch >= step):\n lr = lr * gamma\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, epoch, power):\n lr = args.lr * (0.1 ** (power*(epoch // 30)))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return", "def schedule_variant(epoch):\n\n def _linear_annealing(epoch):\n t = epoch / args.epochs\n lr_ratio = 0.01\n if t <= 0.5:\n factor = 
1.0\n elif t <= 0.9:\n factor = 1.0 - (1.0 - lr_ratio) * (t - 0.5) / 0.4\n else:\n factor = lr_ratio\n return args.lr_init * factor\n\n lr_tmp = _linear_annealing(epoch)\n if args.swag:\n if (epoch > args.swag_start) or lr_tmp <= args.swag_lr:\n return _linear_annealing(args.swag_start)\n return lr_tmp", "def lr_scheduler(optimizer, lr_decay=0.001, epoch=None, step=1):\n if epoch is None or step == 1 or (epoch+1) % step == 0:\n for param_group in optimizer.param_groups:\n param_group['lr'] *= (1 - lr_decay)\n return optimizer", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=decay):\n lr = init_lr * (0.1**(epoch // lr_decay_epoch))\n\n if epoch % lr_decay_epoch == 0:\n print('LR is set to {}'.format(lr))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def adjust_learning_rate(optimizer, epoch):\r\n lr = args.lr\r\n if epoch >= 0.5 * args.epoch:\r\n lr /= 10\r\n if epoch >= 0.75 * args.epoch:\r\n lr /= 10\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def make_lr_scheduler(lr: float, final_lr: float, n_epochs: int,\n verbose: int = 1) -> keras.callbacks.LearningRateScheduler:\n schedule = build_schedule(lr, final_lr, n_epochs)\n return LearningRateScheduler(schedule=schedule, verbose=verbose)", "def adjust_learning_rate(optimizer, epoch, args_lr, epoch_adjust):\n lr = args_lr * (0.1 ** (epoch // epoch_adjust))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(self, optimizer, epoch, args):\n lr = args.learning_rate * (0.1 ** (epoch // 30))\n # print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def rnn_learning_rate_schedule(dataset):\n model, prefix = make_rnn(non_negative=True)\n # model.set_params(**{prefix + \"val\": 0.1})\n\n # base = no schedule\n print(\"Baseline RNN\")\n # model.set_params(**{prefix + \"history_file\": \"nn_default.csv\"})\n cross_validate(dataset, model)\n # model.fit(dataset.inputs, dataset.outputs)\n\n # higher init, decay set to reach the same at 40 epochs\n print(\"RNN with decay\")\n model.set_params(**{prefix + \"lr_decay\": \"DecreasingLearningRateScheduler\"})\n # model.fit(dataset.inputs, dataset.outputs)\n cross_validate(dataset, model)", "def adjust_learning_rate_adam(optimizer, epoch):\n \n boundary = [args.epochs//5*4]\n lr = args.lr * 0.2 ** int(bisect.bisect_left(boundary, epoch))\n print('Learning rate: %f'%lr)\n #print(epoch, lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n \n return lr", "def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):\n with fluid.default_main_program()._lr_schedule_guard():\n lr = fluid.layers.tensor.create_global_var(\n shape=[1],\n value=0.0,\n dtype='float32',\n persistable=True,\n name=\"scheduled_learning_rate\")\n\n global_step = fluid.layers.learning_rate_scheduler._decay_step_counter(\n )\n\n with fluid.layers.control_flow.Switch() as switch:\n with switch.case(global_step < warmup_steps):\n warmup_lr = learning_rate * (global_step / warmup_steps)\n fluid.layers.tensor.assign(warmup_lr, lr)\n with switch.default():\n decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(\n learning_rate=learning_rate,\n decay_steps=num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n fluid.layers.tensor.assign(decayed_lr, lr)\n\n return lr", "def adjust_learning_rate(optimizer, epoch, lr, schedule, gamma, num_gpus=1, warmup=5):\n if epoch in schedule:\n lr *= gamma\n if num_gpus > 1 
and epoch < warmup:\n param_lr = lr * (epoch + 1) / warmup\n else:\n param_lr = lr\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = param_lr\n return lr", "def adjust_learning_rate(optimizer, epoch, args):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def update_learning_rate(self):\r\n self.scheduler.step(self.clock.epoch)", "def adjust_learning_rate(optimizer, lr, step, args):\n # decay = 0.1**(sum(epoch >= np.array(lr_steps)))\n lr = lr * (0.95**(step//args.lr_decay_every))\n print(\"current learning rate: {:.6f}\".format(lr))\n param_group = optimizer.param_groups\n for i in range(len(param_group)):\n param_group[i]['lr'] = lr\n\n return optimizer", "def adjust_learning_rate(self, optimizer, epoch):\n lr = self.lr\n if epoch >= 80:\n lr = self.lr * (0.1 ** ((epoch-80) // 40))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(lr, optimizer, lr_decay, epoch):\n\n if epoch >= lr_decay[0]:\n lr = lr * 0.1\n if epoch >= lr_decay[1]:\n lr = lr * 0.01\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr", "def adjust_learning_rate(optimizer, epoch, base_lr):\n lr = max(base_lr * (0.5 ** (epoch // 20)), 1e-5)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n #if (epoch + 1) == 51 or (epoch + 1) == 101 or (epoch + 1) == 151:\n # #lr = lr * 0.1\n # for param_group in optimizer.param_groups:\n # param_group['lr'] *= 0.1", "def lr_schedule(epoch):\n lr = 0.0001\n if epoch > 40:\n lr *= 1e-2\n elif epoch > 30:\n lr *= 5e-2\n elif epoch > 20:\n lr *= 1e-1\n elif epoch > 10:\n lr *= 5e-1\n print ('Learning rate: ', lr)\n return lr", "def adjust_learning_rate(args, optimizer, epoch):\n if (epoch*3==args.epochs) or (epoch*3==2*args.epochs):\n lr = args.lr * (0.1 ** (epoch*3//args.epochs))\n print(\"Changing Learning Rate to {}\".format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.3 ** (epoch // args.lr_decay))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(self, epoch):\n lr = self.lr * (0.5 ** (epoch // 2))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr", "def lr_schedule(epoch):\n lr = 1e-3\n if epoch > 500:\n lr *= 1e-1\n elif epoch > 800:\n lr *= 1e-2\n print('Learning rate: ', lr)\n return lr" ]
[ "0.6678285", "0.66325784", "0.6542716", "0.6416433", "0.6363528", "0.63295734", "0.62856907", "0.6262307", "0.62460744", "0.6218091", "0.61931515", "0.6188003", "0.61465865", "0.614502", "0.6144216", "0.6138332", "0.6137244", "0.6130743", "0.6122289", "0.6105983", "0.61007077", "0.609482", "0.60812", "0.6064145", "0.60595804", "0.60595804", "0.6047262", "0.6032348", "0.60308415", "0.6015544", "0.60145724", "0.5977983", "0.5975046", "0.5956152", "0.5941907", "0.5932328", "0.592994", "0.5924675", "0.5905882", "0.5897659", "0.5893893", "0.58839875", "0.5853032", "0.58470875", "0.584505", "0.58396584", "0.5829511", "0.5829511", "0.5829511", "0.5829511", "0.58289635", "0.58272105", "0.58272105", "0.58264756", "0.5826341", "0.58234483", "0.5823407", "0.5822458", "0.58105", "0.5806395", "0.5799565", "0.57994324", "0.5799051", "0.579803", "0.57902396", "0.5788333", "0.57878035", "0.5780763", "0.57799673", "0.57790905", "0.5775472", "0.5772758", "0.57720786", "0.57671356", "0.5766873", "0.5764522", "0.57636076", "0.5762629", "0.57502425", "0.5748263", "0.5741662", "0.5735813", "0.5733446", "0.5732168", "0.5730763", "0.57267696", "0.57218486", "0.5720921", "0.5713111", "0.57112813", "0.5709023", "0.5708601", "0.5706702", "0.57066077", "0.5699997", "0.56996375", "0.5690478", "0.5689031", "0.5686583", "0.5685172" ]
0.65202194
3
Executes before step begins.
def on_batch_begin(self, batch, logs=None):\n  lr = self.schedule(self.epochs, batch, self.steps_per_epoch, self.batch_size)\n  if not isinstance(lr, (float, np.float32, np.float64)):\n    raise ValueError('The output of the "schedule" function should be float.')\n  if lr != self.prev_lr:\n    self.model.optimizer.learning_rate = lr  # lr should be a float here\n    self.prev_lr = lr\n    logging.debug(\n        'Epoch %05d Batch %05d: LearningRateBatchScheduler '\n        'change learning rate to %s.', self.epochs, batch, lr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_before(self):\r\n pass", "def run_before(self):\n\n for path in self.hooks.get('before', []):\n self.run_module(path)", "def pre_execute(self):", "def before(self) -> None:\n pass", "def test_before(self):\n\n support.create_project(self, 'candice')\n support.add_step(self)\n support.add_step(self, position='0')\n\n project = cauldron.project.get_internal_project()\n steps = project.steps\n\n self.assertTrue(steps[0].filename.startswith('S01'))\n self.assertTrue(steps[1].filename.startswith('S02'))", "def on_before_execution(self):\n pass", "def _timestep_before_hook(self, *args, **kwargs):\n pass", "def before(self, context):\n raise NotImplementedError", "def PreExecute(self):\n return True", "def pre_step(self,status):\n self.t0 = time.time()\n pass", "def at_pre_cmd(self):\n pass", "def stepStarted(build, step):", "def do_step(self) -> None:", "def pre_start(self) -> None:\n pass", "def before_run_tests(cls):\n pass", "def perform_step(self) -> None:\n pass", "def _tidyBeforeRun (self):\n\t\tself._buildProps ()\n\t\tself._buildInput ()\n\t\tself._buildProcVars ()\n\t\tself._buildJobs ()", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def train_loop_pre(self, current_step):\r\n pass", "def step(self):\n\n pass", "def step(self):\n self.driver.step()", "def _step(self):\n pass", "def pre(self, emulator=None):\n\n # Are we using an emulator?\n if emulator is not None:\n return emulator.emulatePre(self.step)\n\n logging.info(\"Steps.Executors.%s.pre called\", self.__class__.__name__)\n return None", "def run_one_step(self):\n pass", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def TestOneStep(self):\n pass", "def pre_start_hook(self):\n\n LOG.debug(_('XManager pre_start_hook...'))\n\n pass", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def insert_before(self, request, current_step, step):\n steps = self.get_steps(request)\n\n if step not in steps:\n index = steps.index(current_step)\n steps.insert(index, step)", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\")\n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=self.templateName)", "def before_test(self, func, *args, **kwargs):\n pass", "def step(self, **kwargs):\n pass", "def __init__(self):\n super(PreProcess, self).__init__()", "def startTestRun(self):", "def before_task_start(self, task_db, task_spec):\n # No-op by default.\n pass", "def startOfTestcase(self):\n pass # nothing to do here. 
Hence pass statement is called.", "def pre_step(self):\n\n self.reward = 0", "def beginStep(self, message=''):\n if not self.initialized:\n self.start(message)", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def preloop(self):\n super(CoreCommand, self).preloop() # sets up command completion", "def before_scenario(context, scenario):\n context.resource_manager = contextlib.ExitStack()", "def _step(self) -> None:", "def pre_setup(self) -> None:\n if self.__setup_done:\n self.base_logger.error(\"pre_setup was erroneously called twice\")\n raise SetupAlreadyDoneError()", "def start_scenario(self, _, scenario, **kwargs):\n if scenario.tags and \"skip\" in scenario.tags:\n scenario.skip(\"Marked with @skip\")\n self._scenario_id = self._rp.start_test_item(\n name=scenario.name,\n start_time=timestamp(),\n item_type=\"STEP\",\n parent_item_id=self._feature_id,\n code_ref=self._code_ref(scenario),\n attributes=self._attributes(scenario),\n parameters=self._get_parameters(scenario),\n description=self._item_description(scenario),\n test_case_id=self._test_case_id(scenario),\n **kwargs,\n )\n self._log_fixtures(scenario, \"BEFORE_TEST\", self._scenario_id)\n self._log_item_id = self._scenario_id", "def on_pre_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __pre_exec_callbacks)\n for callback in __pre_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on pre-execution callback using %s\", callback)", "def _step(self, whence):\n pass", "def startTestHook(self):", "def onTimeStepStart(self, timeStep):\n pass", "def on_pre_enter(self):\n self.setup()\n self.start()", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "async def pre_action_init(self) -> None:", "def run_step(self):\n self.control_instance.run_step()", "def preloop(self):\n cmd.Cmd.preloop(self) ## sets up command completion\n self._hist = [] ## No history yet\n self._locals = {} ## Initialize execution namespace for user\n self._globals = {}", "def before_train(self, runner) -> None:\n if runner._resume:\n for action_epoch, recipe in self.schedule.items():\n if action_epoch >= runner.epoch + 1:\n break\n self._do_switch(runner, recipe,\n f' (resume recipe of epoch {action_epoch})')", "def _training_before_hook(self):\n pass", "def preProcess(self, datum):\n pass", "def do_before_job(self, dump_items):\n self.runinfo.save_dump_runinfo(\n RunInfo.report_dump_runinfo(dump_items))", "def after_step():\n raise NotImplementedError", "def before(self, location=None):\n side_effect = SideEffect(self.f, location)\n dataflow = inject_before(self.container, side_effect, location)\n return self._construct(dataflow)", "def start_fixture(self):\n pass", "def do_begin(begin):\n if begin:\n do_action(begin)", "def preloop(self):\n cmd.Cmd.preloop(self) ## sets up command completion\n self._hist = [] ## No history yet\n self._locals = {} ## Initialize execution namespace for user\n self._globals = {}", "def before_any(self) -> None:", "def _before_execute(self, db, entity):\n pass", "def _before_execute(self, db, entity):\n pass", "def step(self):\n self.function()", "def setupStarted(self, *args, **kwargs): # real signature unknown\n pass", "def step(self, step=None):\n pass", "def 
start_game(self):\n return self.do_actions('before_game')", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def before_productline_steps():\n cmd = ['phantomjs', '--webdriver', '4444']\n click.echo(\"Running command\" + subprocess.list2cmdline(cmd))\n process = subprocess.Popen(cmd)\n RUNNING_TEST_PROCESSES.append(process)", "def onPreFork(self):", "def begin(self, tests):\r\n raise NotImplementedError", "def test_run_started(self):", "def _epoch_before_hook(self):\n self._train_steps_this_epoch = 0", "def pre_build(self):\n pass", "def begin(self, pipeline: osbuild.Pipeline):", "def before(self, before: Route.Decorator):\n pass", "def ensure_preparation_started(self, study, user):\n task = self.filter(study=study, type=constants.TaskType.PREPARATION).first()\n if task:\n task.start_if_unstarted(user)", "def pre_run_hook(self, instance, private_data_dir):\n instance.log_lifecycle(\"pre_run\")\n\n # Before task is started, ensure that job_event partitions exist\n create_partition(instance.event_class._meta.db_table, start=instance.created)", "def run_starter(self, expect_to_fail=False):", "def pre_task_run(self, extra_events: Optional[dict] = None):\n\n if extra_events is None:\n extra_events = {}\n\n bound_args = self.bound_args\n default_bound_args = self.default_bound_args\n\n # Send a custom task-started-info event with the args\n if not self.request.called_directly:\n self.send_event('task-started-info',\n firex_bound_args=convert_to_serializable(bound_args),\n firex_default_bound_args=convert_to_serializable(default_bound_args),\n called_as_orig=self.called_as_orig,\n long_name=self.name_without_orig,\n log_filepath=self.task_log_url,\n from_plugin=self.from_plugin,\n code_filepath=self.code_filepath,\n retries=self.request.retries,\n task_parent_id=self.request.parent_id,\n **extra_events)\n self.send_firex_data(self.abog)\n\n # Print the pre-call header\n self.print_precall_header(bound_args, default_bound_args)\n self._log_soft_time_limit_override_if_applicable()", "def test_pre_hooks(self):\n os.makedirs('/tmp/localhost/pacha_pre')\n touch_script = open('/tmp/localhost/pacha_pre/foo.sh', 'w')\n touch_script.write('''touch /tmp/localhost/pre_got_executed.txt''')\n touch_script.close()\n run = rebuild.Rebuild(hostname='localhost') \n run.pre_hooks()\n self.assertTrue(os.path.isfile('/tmp/localhost/pre_got_executed.txt'))", "def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()", "def step_impl(context):\n pass", "def step_impl(context):\n pass", "def set_first_machine_time_step(self, first_machine_time_step):", "def postRun(self):\n pass", "def startPhase(self, phaseName):\n \n pass", "def _pre_process_hook(self, player, **kwargs):\n pass", "def setUp(self):\n self.clean_workflow_engine_state()\n self.app.plugins.install_plugins(DemoDnaSeqPlugin)\n self.has_context()" ]
[ "0.7514745", "0.7484829", "0.71251595", "0.70989174", "0.70584476", "0.70152277", "0.69902265", "0.6825404", "0.6803939", "0.6744725", "0.6697794", "0.66936266", "0.6585134", "0.65798724", "0.6415457", "0.6349697", "0.6306303", "0.6304141", "0.6304141", "0.6304141", "0.6304141", "0.6304141", "0.6273224", "0.62202334", "0.6186362", "0.6176682", "0.6149029", "0.6137524", "0.6131665", "0.61058366", "0.60955095", "0.6075924", "0.6075924", "0.60576767", "0.6056267", "0.60496765", "0.60330206", "0.6028436", "0.6023409", "0.6018395", "0.601321", "0.59984404", "0.5974248", "0.59717906", "0.59717906", "0.59666574", "0.5965236", "0.59631443", "0.5942896", "0.5942196", "0.58929884", "0.5891236", "0.5882833", "0.5879769", "0.58773947", "0.5875779", "0.5875779", "0.5875779", "0.5875779", "0.58739394", "0.5858132", "0.58521086", "0.584883", "0.58481437", "0.58438116", "0.5842254", "0.5840904", "0.5815033", "0.5806603", "0.57797223", "0.5776024", "0.576351", "0.5762094", "0.5762094", "0.5761706", "0.57596433", "0.5751198", "0.5739366", "0.5733517", "0.571379", "0.5709819", "0.57049733", "0.5701661", "0.5698948", "0.56970584", "0.56956625", "0.5693066", "0.5690945", "0.568347", "0.56790936", "0.56694967", "0.5668586", "0.56666857", "0.5661263", "0.56497145", "0.56497145", "0.56449986", "0.5644246", "0.56225103", "0.5619993", "0.5615405" ]
0.0
-1
Run ResNet Cifar10 training and eval loop using native Keras APIs.
def run(flags_obj):\n  keras_utils.set_session_config(\n      enable_xla=flags_obj.enable_xla)\n\n  # Execute flag override logic for better model performance\n  if flags_obj.tf_gpu_thread_mode:\n    keras_utils.set_gpu_thread_mode_and_count(\n        per_gpu_thread_count=flags_obj.per_gpu_thread_count,\n        gpu_thread_mode=flags_obj.tf_gpu_thread_mode,\n        num_gpus=flags_obj.num_gpus,\n        datasets_num_private_threads=flags_obj.datasets_num_private_threads)\n  common.set_cudnn_batchnorm_mode()\n\n
dtype = flags_core.get_tf_dtype(flags_obj)\n  if dtype == 'fp16':\n    raise ValueError('dtype fp16 is not supported in Keras. Use the default '\n                     'value(fp32).')\n\n  data_format = flags_obj.data_format\n  if data_format is None:\n    data_format = ('channels_first' if tf.config.list_physical_devices('GPU')\n                   else 'channels_last')\n  tf.keras.backend.set_image_data_format(data_format)\n\n
strategy = distribution_utils.get_distribution_strategy(\n      distribution_strategy=flags_obj.distribution_strategy,\n      num_gpus=flags_obj.num_gpus,\n      all_reduce_alg=flags_obj.all_reduce_alg,\n      num_packs=flags_obj.num_packs)\n\n  if strategy:\n    # flags_obj.enable_get_next_as_optional controls whether enabling\n    # get_next_as_optional behavior in DistributedIterator. If true, last\n    # partial batch can be supported.\n    strategy.extended.experimental_enable_get_next_as_optional = (\n        flags_obj.enable_get_next_as_optional\n    )\n\n  strategy_scope = distribution_utils.get_strategy_scope(strategy)\n\n
if flags_obj.use_synthetic_data:\n    synthetic_util.set_up_synthetic_data()\n    input_fn = common.get_synth_input_fn(\n        height=cifar_preprocessing.HEIGHT,\n        width=cifar_preprocessing.WIDTH,\n        num_channels=cifar_preprocessing.NUM_CHANNELS,\n        num_classes=cifar_preprocessing.NUM_CLASSES,\n        dtype=flags_core.get_tf_dtype(flags_obj),\n        drop_remainder=True)\n  else:\n    synthetic_util.undo_set_up_synthetic_data()\n    input_fn = cifar_preprocessing.input_fn\n\n
train_input_dataset = input_fn(\n      is_training=True,\n      data_dir=flags_obj.data_dir,\n      batch_size=flags_obj.batch_size,\n      parse_record_fn=cifar_preprocessing.parse_record,\n      datasets_num_private_threads=flags_obj.datasets_num_private_threads,\n      dtype=dtype,\n      # Setting drop_remainder to avoid the partial batch logic in normalization\n      # layer, which triggers tf.where and leads to extra memory copy of input\n      # sizes between host and GPU.\n      drop_remainder=(not flags_obj.enable_get_next_as_optional))\n\n
eval_input_dataset = None\n  if not flags_obj.skip_eval:\n    eval_input_dataset = input_fn(\n        is_training=False,\n        data_dir=flags_obj.data_dir,\n        batch_size=flags_obj.batch_size,\n        parse_record_fn=cifar_preprocessing.parse_record)\n    options = tf.data.Options()\n    options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA\n    eval_input_dataset = eval_input_dataset.with_options(options)\n\n
steps_per_epoch = (\n      cifar_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)\n  lr_schedule = 0.1\n  if flags_obj.use_tensor_lr:\n    initial_learning_rate = common.BASE_LEARNING_RATE * flags_obj.batch_size / 128\n    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(\n        boundaries=list(p[1] * steps_per_epoch for p in LR_SCHEDULE),\n        values=[initial_learning_rate] +\n        list(p[0] * initial_learning_rate for p in LR_SCHEDULE))\n\n
with strategy_scope:\n    optimizer = common.get_optimizer(lr_schedule)\n    model = resnet_cifar_model.resnet56(classes=cifar_preprocessing.NUM_CLASSES)\n    model.compile(\n        loss='sparse_categorical_crossentropy',\n        optimizer=optimizer,\n        metrics=(['sparse_categorical_accuracy']\n                 if flags_obj.report_accuracy_metrics else None),\n        run_eagerly=flags_obj.run_eagerly)\n\n  train_epochs = flags_obj.train_epochs\n\n  callbacks = common.get_callbacks()\n\n
if not flags_obj.use_tensor_lr:\n    lr_callback = LearningRateBatchScheduler(\n        schedule=learning_rate_schedule,\n        batch_size=flags_obj.batch_size,\n        steps_per_epoch=steps_per_epoch)\n    callbacks.append(lr_callback)\n\n  # if multiple epochs, ignore the train_steps flag.\n  if train_epochs <= 1 and flags_obj.train_steps:\n    steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch)\n    train_epochs = 1\n\n  num_eval_steps = (cifar_preprocessing.NUM_IMAGES['validation'] //\n                    flags_obj.batch_size)\n\n
validation_data = eval_input_dataset\n  if flags_obj.skip_eval:\n    if flags_obj.set_learning_phase_to_train:\n      # TODO(haoyuzhang): Understand slowdown of setting learning phase when\n      # not using distribution strategy.\n      tf.keras.backend.set_learning_phase(1)\n    num_eval_steps = None\n    validation_data = None\n\n  if not strategy and flags_obj.explicit_gpu_placement:\n    # TODO(b/135607227): Add device scope automatically in Keras training loop\n    # when not using distribution strategy.\n    no_dist_strat_device = tf.device('/device:GPU:0')\n    no_dist_strat_device.__enter__()\n\n
history = model.fit(train_input_dataset,\n                      epochs=train_epochs,\n                      steps_per_epoch=steps_per_epoch,\n                      callbacks=callbacks,\n                      validation_steps=num_eval_steps,\n                      validation_data=validation_data,\n                      validation_freq=flags_obj.epochs_between_evals,\n                      verbose=2)\n  eval_output = None\n  if not flags_obj.skip_eval:\n    eval_output = model.evaluate(eval_input_dataset,\n                                 steps=num_eval_steps,\n                                 verbose=2)\n\n  if not strategy and flags_obj.explicit_gpu_placement:\n    no_dist_strat_device.__exit__()\n\n  stats = common.build_stats(history, eval_output, callbacks)\n  return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_keras():\n epochs = 2\n strategy = tf.distribute.MirroredStrategy()\n global_batch_size = strategy.num_replicas_in_sync * 32\n train_dataset = create_dataset(global_batch_size)\n\n with strategy.scope():\n model = ResNet50(input_shape=(224, 224, 3), num_classes=1000)\n model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n model.fit(train_dataset, epochs=epochs)", "def resnet50_cifar10(pretrained=True, progress=True, use_data_parallel=False, **kwargs):\n return _model('resnet50_cifar10', resnet50, pretrained, progress, use_data_parallel, **kwargs)", "def run_crnet(model, epoch=100):\n criterion = CRLoss()\n\n optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-5)\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=50, gamma=0.1)\n\n print('start loading SCUTFBP5500 dataset...')\n train_loader, test_loader = data_loader.load_scutfbp5500_data()\n train_model_with_crloss(model=model, train_dataloader=train_loader, test_dataloader=test_loader,\n criterion=criterion, optimizer=optimizer_ft, scheduler=exp_lr_scheduler, num_epochs=epoch,\n inference=False)", "def train_cifar10():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = CIFAR10_TRAIN(path=Config.video_folder)\r\n model = LSACIFAR10(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='cifar10.txt')\r\n helper.train_one_class_classification()", "def cifar10_model_fn(features, labels, mode, params):\n features = tf.reshape(features, [-1, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS])\n\n learning_rate_fn = resnet_run_loop.learning_rate_with_decay(\n batch_size=params['batch_size'], batch_denom=128,\n num_images=_NUM_IMAGES['train'], boundary_epochs=[10, 20, 30],\n decay_rates=[1, 0.1, 0.01, 0.001])\n\n # We use a weight decay of 0.0002, which performs better\n # than the 0.0001 that was originally suggested.\n weight_decay = 2e-4\n\n # Empirical testing showed that including batch_normalization variables\n # in the calculation of regularized loss helped validation accuracy\n # for the CIFAR-10 dataset, perhaps because the regularization prevents\n # overfitting on the small data set. 
We therefore include all vars when\n # regularizing and computing loss during training.\n def loss_filter_fn(_):\n return True\n\n return resnet_run_loop.resnet_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n model_class=Model,\n resnet_size=params['resnet_size'],\n weight_decay=weight_decay,\n learning_rate_fn=learning_rate_fn,\n momentum=0.9,\n data_format=params['data_format'],\n resnet_version=params['resnet_version'],\n loss_scale=params['loss_scale'],\n loss_filter_fn=loss_filter_fn,\n dtype=params['dtype'],\n fine_tune=params['fine_tune']\n )", "def run_flower(flags_obj):\n input_function = input_fn\n resnet_run_loop.resnet_main(\n flags_obj,\n cifar10_model_fn,\n input_function,\n DATASET_NAME,\n shape=[_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS])", "def train_resnet():\n\n # load data\n training_sets = load_augmented_dataset()\n\n # build models\n model_resnet = build_resnetv2()\n\n baseWeights_t = model_resnet.get_weights()\n\n for training_set in training_sets:\n print(\" Starting training for set {}\".format(str(training_set)))\n model_resnet.set_weights(baseWeights_t) # Resets model\n train_x = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][0]))\n train_y = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][1]))\n\n early_stopping_monitor = EarlyStopping(patience=3)\n history = model_resnet.fit(train_x, train_y, batch_size=32, epochs=40, verbose=1, validation_split=0.2,\n shuffle=True,\n callbacks=[early_stopping_monitor])\n\n mpu.plot_accuracy_loss(history,\n \"./model_cache/train_data/{}_resnet_plots.png\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_resnet_plots.png\".format(str(training_set)),\n \"model_charts/{}_resnet_plots.png\".format(str(training_set)))\n\n model_resnet.save(\"./model_cache/train_data/{}_resnet.h5\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_resnet.h5\".format(str(training_set)),\n \"saved_models/{}_resnet.h5\".format(str(training_set)))", "def train(self):\n # Track initial loss/accuracy\n self.validation_epoch()\n for epoch in range(self.epochs):\n # Perform a full pass through all the training samples\n for batch_it, (X_batch, Y_batch) in enumerate(self.dataloader_train):\n # X_batch is the CIFAR10 images. Shape: [batch_size, 3, 32, 32]\n # Y_batch is the CIFAR10 image label. 
Shape: [batch_size]\n # Transfer images / labels to GPU VRAM, if possible\n X_batch = to_cuda(X_batch)\n Y_batch = to_cuda(Y_batch)\n\n # Perform the forward pass\n predictions = self.model(X_batch)\n # Compute the cross entropy loss for the batch\n loss = self.loss_criterion(predictions, Y_batch)\n\n # Backpropagation\n loss.backward()\n\n # Gradient descent step\n self.optimizer.step()\n \n # Reset all computed gradients to 0\n self.optimizer.zero_grad()\n # Compute loss/accuracy for all three datasets.\n if batch_it % self.validation_check == 0:\n self.validation_epoch()\n # Check early stopping criteria.\n if self.should_early_stop():\n print(\"Early stopping.\")\n return", "def resnet_model(images,\n is_training,\n num_classes,\n resnet_size=50,\n weight_decay=None,\n kernel_size=7,\n num_filters=64,\n return_intermediate_values=False,\n film_generator_fn=None,\n film_generator_input=None,\n pretrain_checkpoint=None):\n # For bigger models, we want to use \"bottleneck\" layers\n if resnet_size < 50:\n bottleneck = False\n else:\n bottleneck = True\n model = resnet_lib.Model(\n resnet_size=resnet_size,\n bottleneck=bottleneck,\n num_classes=num_classes,\n num_filters=num_filters,\n kernel_size=kernel_size,\n conv_stride=2,\n first_pool_size=3,\n first_pool_stride=2,\n block_sizes=_get_block_sizes(resnet_size),\n block_strides=[1, 2, 2, 2],\n resnet_version=resnet_lib.DEFAULT_VERSION,\n data_format='channels_last',\n weight_decay=weight_decay,\n dtype=resnet_lib.DEFAULT_DTYPE\n )\n final_dense = model(images, is_training,\n film_generator_fn, film_generator_input)\n if pretrain_checkpoint:\n # Initialize variables in ResNet, excluding the final dense layer and any\n # optimization-specific variables (e.g. Momentum, Adam Beta).\n # When initializing on TPUs, use AbstractT2RModel.init_from_checkpoint_fn.\n resnet_init_from_checkpoint_fn(pretrain_checkpoint)\n if return_intermediate_values:\n return resnet_endpoints(model)\n else:\n return final_dense", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def train_with_simclr_framework(builder, train_ds, eval_ds):\n num_train_examples = builder.info.splits['train'].num_examples\n main_task = {'name': 'label', 'excluded_label': 3}\n num_classes = builder.info.features[main_task['name']].num_classes - 1\n main_task['num_classes'] = num_classes \n\n lr_scheduler = tf.keras.experimental.CosineDecayRestarts(\n initial_learning_rate=0.0001, \n first_decay_steps=10*(num_train_examples//FLAGS.pretrain_bs),\n t_mul=2.0,\n m_mul=0.9,\n alpha=0.1)\n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)\n model = models_lib.Model(num_classes=num_classes)\n\n if FLAGS.ckpt: \n model, optimizer, ckpt, ckpt_manager = load_model(FLAGS.ckpt, model, optimizer)\n else: \n if FLAGS.save_model: \n ckpt = 
tf.train.Checkpoint(model=model, optimizer=optimizer)\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir+'/pretrain', \n max_to_keep=3 \n )\n else: \n ckpt=None\n ckpt_manager=None\n\n pretrain(pretrain_ds=train_ds, model=model, optimizer=optimizer, epochs=FLAGS.pretrain_epochs, \n lineareval_epochs=FLAGS.lineareval_epochs, lineareval_task=main_task, eval_ds=eval_ds, \n ckpt_manager=ckpt_manager)\n\n linear_eval(train_ds, model, main_task, 30, eval_ds)\n evaluate(eval_ds, model, main_task)", "def train():\n args = arguments()\n\n # Create output directories\n create_output_dir(args.outputDir)\n\n # Start Log File\n log_path = os.path.join(args.outputDir, LOG_DIR, time.strftime('%Y-%m-%d_%H-%M-%S.log'))\n log_file = Logger(log_path)\n\n # Log arguments\n arg_str = ''\n for arg in vars(args):\n arg_str += \"\\n\" + \"{:30} {}\".format(str(arg), getattr(args, arg))\n log_file.log_line(\"Arguments\", arg_str)\n log_file.newline()\n\n # Load Params\n configuration = config_cvppp.TrainConfig()\n\n # Log params\n log_file.log_line(\"Config Parameters\\n\", configuration.to_string())\n log_file.newline()\n\n ## Load dataset API (Already logged in the args log step)\n train_dataset, crossVal_dataset = load_datasets(args)\n\n # Init the model\n checkpoint_path = os.path.join(args.outputDir, CHECKPOINT_DIR)\n training_model = model.MaskRCNN('training', configuration, checkpoint_path)\n\n # Load weights\n if args.init == 'last':\n weights_path = training_model.find_last()\n log_file.log_line(\"Initialised with \", weights_path)\n training_model.load_weights(weights_path, by_name=True)\n\n elif args.init == 'rand':\n log_file.log_line(\"Initialised with \", \"random weights\")\n pass\n\n else:\n if not os.path.exists(args.init):\n raise OSError('No weights at: ' + args.init)\n \n log_file.log_line(\"Initialised with \", args.init)\n training_model.load_weights(args.init, by_name=True)\n\n # Train the model\n augmentation = get_augmentation_sequence()\n\n custom_callbacks = None\n\n training_model.train(train_dataset, crossVal_dataset, \n learning_rate=configuration.LEARNING_RATE, \n epochs=args.numEpochs,\n augmentation=augmentation,\n layers='all',\n custom_callbacks=custom_callbacks) # Train all layers\n\n # Close the log file\n log_file.close()", "def train(nepochs, model): \n if model == 'cnn':\n return gennet.train_cnn(nepochs, 'Resnet50')\n elif model == 'logreg':\n return gennet.train_logreg('Resnet50')", "def evaluate():\n with tf.Graph().as_default() as g:\n # Get images and labels for CIFAR-10.\n # eval_data = FLAGS.eval_data\n # images, labels = model.inputs(eval_data=eval_data, batch_size=FLAGS.eval_batch_size)\n data_obj = model.inputs()\n values = data_obj.value\n data = tf.concat(values,0)\n # print(data)\n\n data = tf.reshape(data, [data_obj.height,data_obj.width])\n # print(data)\n # Build a Graph that computes the logits predictions from the\n # inference model.\n representation, reconstruct = model.inference_fconn(data)\n # print(representation)\n # print(reconstruct)\n # print(data)\n # Calculate predictions.\n # representation_reshape = tf.reshape(representation, [FLAGS.eval_batch_size, -1])\n\n # Restore the moving average version of the learned variables for eval.\n variable_averages = tf.train.ExponentialMovingAverage(\n model.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n # images_reconstruct = tf.concat([data, tf.transpose(reconstruct)],1)\n # 
print(images_reconstruct)\n # tf.summary.image('original_reconstruct', images_reconstruct)\n # tf.image_summary('original', images, max_images=20)\n # tf.image_summary('reconstruct', reconstruct, max_images=20)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.summary.merge_all()\n\n summary_writer = tf.summary.FileWriter(eval_dir, g)\n\n\n while True:\n eval_once(saver, summary_writer, representation, summary_op, reconstruct, data)\n if FLAGS.run_once:\n break\n time.sleep(FLAGS.eval_interval_secs)", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def executeCNN(args, files, var_targets, nn_arch, batchsize, epoch, mode, n_gpu=(1, 'avolkov'), shuffle=(False, None), tb_logger=False):\n\n print '\\nEpoch Interval:\\t', epoch[0], ' - ', epoch[1], '\\n'\n\n if epoch[0] == 0:\n if nn_arch == 'DCNN':\n if args.wires in ['U', 'V']: model = create_shared_dcnn_network_2()\n elif args.wires in ['UV', 'U+V']: model = create_shared_dcnn_network_4()\n else: raise ValueError('passed wire specifier need to be U/V/UV')\n elif nn_arch == 'ResNet':\n raise ValueError('Currently, this is not implemented')\n elif nn_arch == 'Inception':\n if args.wires in ['U', 'V']: model = create_shared_inception_network_2()\n elif args.wires in ['UV', 'U+V']: model = create_shared_inception_network_4()\n else: raise ValueError('passed wire specifier need to be U/V/UV')\n elif nn_arch == 'Conv_LSTM':\n raise ValueError('Currently, this is not implemented')\n else:\n raise ValueError('Currently, only DCNN and Inception are available as nn_arch')\n else:\n model = load_trained_model(args)\n\n if mode == 'train':\n model.summary()\n try: # plot model, install missing packages with conda install if it throws a module error\n raise OSError\n ks.utils.plot_model(model, to_file=args.folderOUT + '/plot_model.png',\n show_shapes=True, show_layer_names=False)\n except OSError:\n 
save_plot_model_script(folderOUT=args.folderOUT)\n print 'could not produce plot_model.png ---- run generate_model_plot on CPU'\n\n # exit()\n\n adam = ks.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0) # Default: epsi: None, Deep NN: epsi=0.1/1.0\n optimizer = adam # Choose optimizer, only used if epoch == 0\n\n # model, batchsize = parallelize_model_to_n_gpus(model, n_gpu, batchsize) # TODO compile after restart????\n # if n_gpu[0] > 1: model.compile(loss=loss_opt[0], optimizer=optimizer, metrics=[loss_opt[1]]) # TODO check\n\n if epoch[0] == 0:\n print 'Compiling Keras model\\n'\n model.compile(\n loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n # TODO Add Precision/Recall to metric, see:\n # TODO https://stackoverflow.com/questions/43076609/how-to-calculate-precision-and-recall-in-keras\n\n print 'optimizer epsilon:', model.optimizer.epsilon\n\n print \"\\nTraining begins in Epoch:\\t\", epoch\n\n model.save(args.folderOUT + \"models/model-000.hdf5\")\n model.save_weights(args.folderOUT + \"models/weights-000.hdf5\")\n model = fit_model(args, model, files, batchsize, var_targets, epoch, shuffle, n_events=None, tb_logger=tb_logger)\n model.save_weights(args.folderOUT + \"models/weights_final.hdf5\")\n model.save(args.folderOUT + \"models/model_final.hdf5\")\n elif mode in ['valid']: #validation\n # model.summary()\n print 'Validate events'\n print model\n print model.get_layer(index=3)\n exit()\n\n args.sources = \"\".join(sorted(args.sources))\n args.position = \"\".join(sorted(args.position))\n args.folderOUT += \"0validation/\" + args.sources + \"-\" + mode + \"-\" + args.position + \"-\" + str(args.num_weights) + \"-\" + args.wires + \"/\"\n os.system(\"mkdir -p -m 770 %s \" % (args.folderOUT))\n\n EVENT_INFO = get_events(args=args, files=files, model=model,\n fOUT=(args.folderOUT + \"events_\" + str(args.num_weights) + \"_\" + args.sources + \"-\" + mode + \"-\" + args.position + \"-\" + args.wires + \".hdf5\"))\n\n # EVENT_INFO['DNNPredClass'] = EVENT_INFO['DNNPred'].argmax(axis=-1)\n # EVENT_INFO['DNNTrueClass'] = EVENT_INFO['DNNTrue'].argmax(axis=-1)\n # EVENT_INFO['DNNPredTrueClass'] = EVENT_INFO['DNNPred'][:, 1]\n\n validation_mc_plots(args=args, folderOUT=args.folderOUT, data=EVENT_INFO)\n else:\n raise ValueError('chosen mode (%s) not available. 
Choose between train/mc/data'%(mode))", "def make_NN(n_hidden, n_epoch, labelsdict, lr, device, model_name, trainloader, validloader, train_data, pretrain, finetune_whole, custom_model):\n if custom_model == 2:\n # Use custom two-layer convolution model\n print(\"Using Two-Layer CNN\")\n model = TwoLayerConvNet()\n elif custom_model == 5:\n print(\"Using Five-Layer CNN\")\n # Use custom five-layer convolution model\n model = FiveLayerConvNet()\n else:\n # Import NN model (either pretrained or not)\n model = getattr(models, model_name)(pretrained=pretrain)\n \"\"\" ===================================================================================== \"\"\"\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER FREEZE THE PARAMETERS OR NOT (WILBERT ARISTO) \"\"\"\n # If we do not need to finetune whole model, freeze parameters that we don't need to re-train\n if not finetune_whole:\n for param in model.parameters():\n param.requires_grad = False\n \"\"\" ===================================================================================== \"\"\"\n\n n_out = len(labelsdict)\n\n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Make classifier\n n_in = next(model.fc.modules()).in_features\n model.fc = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.fc.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\n else:\n # Make classifier\n n_in = next(model.classifier.modules()).in_features\n model.classifier = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.classifier.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\n \"\"\" ============================================================================================================================ \"\"\"\n\n # Define criterion\n criterion = nn.NLLLoss() \n\n model.to(device)\n start = time.time()\n\n epochs = n_epoch\n steps = 0 \n running_loss = 0\n print_every = 40\n for e in range(epochs):\n model.train()\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n\n steps += 1\n\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Eval mode for predictions\n model.eval()\n\n # Turn off gradients for validation\n with torch.no_grad():\n test_loss, accuracy = validation(model, validloader, criterion, device)\n\n print(\"Epoch: {}/{} - \".format(e+1, epochs),\n \"Training Loss: {:.3f} - \".format(running_loss/print_every),\n \"Validation Loss: {:.3f} - \".format(test_loss/len(validloader)),\n \"Validation Accuracy: 
{:.3f}\".format(accuracy/len(validloader)))\n\n running_loss = 0\n\n # Make sure training is back on\n model.train()\n \n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Add model info \n model.fc.n_in = n_in\n model.fc.n_hidden = n_hidden\n model.fc.n_out = n_out\n model.fc.labelsdict = labelsdict\n model.fc.lr = lr\n model.fc.optimizer_state_dict = optimizer.state_dict()\n model.fc.model_name = model_name\n model.fc.class_to_idx = train_data.class_to_idx\n else:\n # Add model info \n model.classifier.n_in = n_in\n model.classifier.n_hidden = n_hidden\n model.classifier.n_out = n_out\n model.classifier.labelsdict = labelsdict\n model.classifier.lr = lr\n model.classifier.optimizer_state_dict = optimizer.state_dict()\n model.classifier.model_name = model_name\n model.classifier.class_to_idx = train_data.class_to_idx\n \"\"\" ============================================================================================================================ \"\"\"\n\n print('model:', model_name, '- hidden layers:', n_hidden, '- epochs:', n_epoch, '- lr:', lr)\n print(f\"Run time: {(time.time() - start)/60:.3f} min\")\n return model\n\n# Define function to save checkpoint\ndef save_checkpoint(model, path):\n checkpoint = {'c_input': model.classifier.n_in,\n 'c_hidden': model.classifier.n_hidden,\n 'c_out': model.classifier.n_out,\n 'labelsdict': model.classifier.labelsdict,\n 'c_lr': model.classifier.lr,\n 'state_dict': model.state_dict(),\n 'c_state_dict': model.classifier.state_dict(),\n 'opti_state_dict': model.classifier.optimizer_state_dict,\n 'model_name': model.classifier.model_name,\n 'class_to_idx': model.classifier.class_to_idx\n }\n torch.save(checkpoint, path)\n \n# Define function to load model\ndef load_model(path):\n cp = torch.load(path)\n \n # Import pre-trained NN model \n model = getattr(models, cp['model_name'])(pretrained=True)\n \n # Freeze parameters that we don't need to re-train \n for param in model.parameters():\n param.requires_grad = False\n \n # Make classifier\n model.classifier = NN_Classifier(input_size=cp['c_input'], output_size=cp['c_out'], \\\n hidden_layers=cp['c_hidden'])\n \n # Add model info \n model.classifier.n_in = cp['c_input']\n model.classifier.n_hidden = cp['c_hidden']\n model.classifier.n_out = cp['c_out']\n model.classifier.labelsdict = cp['labelsdict']\n model.classifier.lr = cp['c_lr']\n model.classifier.optimizer_state_dict = cp['opti_state_dict']\n model.classifier.model_name = cp['model_name']\n model.classifier.class_to_idx = cp['class_to_idx']\n model.load_state_dict(cp['state_dict'])\n \n return model", "def train(self, network_parameters=network_model, eval_steps=100):\n #model_dir=TFFLAGS.train_dir\n #self.model_dir=model_dir\n network_parameters=buildNetwork()\n model_path='models/central/'\n save_path=\"models/cifar/clients\"\n cifar_train_file=\"data/clients/cifar\"\n cifar_test_file=\"data/clients/cifar\"\n \n batch_size = TFFLAGS.batch_size\n\n params = {\"accountant_type\": TFFLAGS.accountant_type,\n \"task_id\": 0,\n \"batch_size\": TFFLAGS.batch_size,\n \"default_gradient_l2norm_bound\":\n network_parameters.default_gradient_l2norm_bound,\n \"num_hidden_layers\": TFFLAGS.num_hidden_layers,\n \"hidden_layer_num_units\": TFFLAGS.hidden_layer_num_units,\n \"num_examples\": self.num_training_images,\n \"learning_rate\": TFFLAGS.lr,\n \"end_learning_rate\": TFFLAGS.end_lr,\n \"learning_rate_saturate_epochs\": TFFLAGS.lr_saturate_epochs\n }\n\n 
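# sigma is the Gaussian noise multiplier for the DP optimizer below; log it with the rest of the run metadata\n        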
params.update({\"sigma\": TFFLAGS.sigma})\n #saver = tf.train.Saver()\n\n with tf.Graph().as_default(), tf.Session() as sess, tf.device('/cpu:0'):\n # Create the basic Cifar model.\n # TODO: GET INPUT FOR CLIENT\n #images, labels = CifarInput(cifar_train_file, batch_size, TFFLAGS.randomize)\n images, labels = cifar10_input.distorted_inputs(\"data/clients/cifar\", self.index, TFFLAGS.batch_size)\n \n\n logits, projection, training_params = utils.BuildNetwork(\n images, network_parameters)\n \n \n cost = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=tf.one_hot(labels, 10))\n\n # The actual cost is the average across the examples.\n cost = tf.reduce_sum(cost, [0]) / batch_size\n\n priv_accountant = accountant.GaussianMomentsAccountant(self.num_training_images)\n sigma = TFFLAGS.sigma\n with_privacy = TFFLAGS.sigma > 0\n with_privacy = False\n # Note: Here and below, we scale down the l2norm_bound by\n # batch_size. This is because per_example_gradients computes the\n # gradient of the minibatch loss with respect to each individual\n # example, and the minibatch loss (for our model) is the *average*\n # loss over examples in the minibatch. Hence, the scale of the\n # per-example gradients goes like 1 / batch_size.\n gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(\n priv_accountant, \n [network_parameters.default_gradient_l2norm_bound / batch_size, True])\n\n for var in training_params:\n if \"gradient_l2norm_bound\" in training_params[var]:\n l2bound = training_params[var][\"gradient_l2norm_bound\"] / batch_size\n gaussian_sanitizer.set_option(var,\n sanitizer.ClipOption(l2bound, True))\n lr = tf.placeholder(tf.float32)\n eps = tf.placeholder(tf.float32)\n delta = tf.placeholder(tf.float32)\n\n init_ops = []\n\n # Add global_step\n global_step = tf.Variable(0, dtype=tf.int32, trainable=False,\n name=\"global_step\")\n\n if with_privacy:\n gd_op = dp_optimizer.DPGradientDescentOptimizer(\n lr,\n [eps, delta],\n gaussian_sanitizer,\n sigma=sigma,\n batches_per_lot=TFFLAGS.batches_per_lot).minimize(\n cost, global_step=global_step)\n else:\n gd_op = tf.train.GradientDescentOptimizer(lr).minimize(cost)\n\n saver = tf.train.Saver()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # We need to maintain the intialization sequence.\n #for v in tf.trainable_variables():\n # sess.run(tf.variables_initializer([v]))\n #sess.run(tf.global_variables_initializer())\n \n ##### NEED DIS TO LOAD MODEL PLS.\n saver.restore(sess, tf.train.latest_checkpoint(save_path))\n\n sess.run(init_ops)\n\n start_time = time.time()\n prev_time = start_time\n filename = \"client-\" + str(self.index) + \"-results-0.json\"\n log_path = os.path.join(save_path, filename)\n print(log_path)\n target_eps = [float(s) for s in TFFLAGS.target_eps.split(\",\")]\n max_target_eps = max(target_eps)\n\n lot_size = TFFLAGS.batches_per_lot * TFFLAGS.batch_size\n lots_per_epoch = self.num_training_images / lot_size\n for step in range(TFFLAGS.num_training_steps):\n epoch = step / lots_per_epoch\n curr_lr = utils.VaryRate(TFFLAGS.lr, TFFLAGS.end_lr,\n TFFLAGS.lr_saturate_epochs, epoch)\n curr_eps = utils.VaryRate(TFFLAGS.eps, TFFLAGS.end_eps,\n TFFLAGS.eps_saturate_epochs, epoch)\n for _ in range(TFFLAGS.batches_per_lot):\n _ = sess.run(\n [gd_op], feed_dict={lr: curr_lr, eps: curr_eps, delta: TFFLAGS.delta})\n sys.stderr.write(\"step: %d\\n\" % step)\n\n # See if we should stop training due to exceeded privacy budget:\n should_terminate = False\n 
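# holds the (eps, delta) privacy spend that triggered early termination, if the budget is ever exceeded\n            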
terminate_spent_eps_delta = None\n if with_privacy and TFFLAGS.terminate_based_on_privacy:\n terminate_spent_eps_delta = priv_accountant.get_privacy_spent(\n sess, target_eps=[max_target_eps])[0]\n # For the Moments accountant, we should always have\n # spent_eps == max_target_eps.\n if (terminate_spent_eps_delta.spent_delta > TFFLAGS.target_delta or\n terminate_spent_eps_delta.spent_eps > max_target_eps):\n should_terminate = True\n\n if (eval_steps > 0 and (step + 1) % eval_steps == 0) or should_terminate:\n if with_privacy:\n spent_eps_deltas = priv_accountant.get_privacy_spent(\n sess, target_eps=target_eps)\n else:\n spent_eps_deltas = [accountant.EpsDelta(0, 0)]\n for spent_eps, spent_delta in spent_eps_deltas:\n sys.stderr.write(\"spent privacy: eps %.4f delta %.5g\\n\" % (\n spent_eps, spent_delta))\n path = save_path + \"/model%s.ckpt\"%self.client\n saver.save(sess, save_path=path)\n train_accuracy, _ = self.Eval(cifar_train_file, network_parameters,\n num_testing_images=self.num_testing_images,\n randomize=True, load_path=TFFLAGS.save_path)\n sys.stderr.write(\"train_accuracy: %.2f\\n\" % train_accuracy)\n \n curr_time = time.time()\n elapsed_time = curr_time - prev_time\n prev_time = curr_time\n \n self.results.append({\"step\": step+1, # Number of lots trained so far.\n \"elapsed_secs\": elapsed_time,\n \"spent_eps_deltas\": spent_eps_deltas,\n \"train_accuracy\": train_accuracy})\n loginfo = {\"elapsed_secs\": curr_time-start_time,\n \"spent_eps_deltas\": spent_eps_deltas,\n \"train_accuracy\": train_accuracy,\n \"num_training_steps\": step+1, # Steps so far.\n \"result_series\": self.results}\n loginfo.update(params)\n if log_path:\n with tf.gfile.Open(log_path, \"w\") as f:\n json.dump(loginfo, f, indent=2)\n f.write(\"\\n\")\n f.close()\n if should_terminate:\n for t in tf.trainable_variables():\n weights.append(t.eval(session=sess))\n break\n for t in tf.trainable_variables():\n weights.append(t.eval(session=sess))\n \n coord.request_stop()\n coord.join(threads)", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n epoch_num = 3\n ########################\n # PUT YOUR CODE HERE #\n #######################\n model = ConvNet(3, 10)\n\n #Obtain Dataset\n train_dataset = cifar10_utils.get_cifar10()['train']\n val_dataset = cifar10_utils.get_cifar10()['validation']\n test_dataset = cifar10_utils.get_cifar10()['test']\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size = BATCH_SIZE_DEFAULT,\n drop_last = True)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters())\n for epoch in range(epoch_num):\n model.train()\n losses = []\n accs = []\n for i_iter in range(int(train_dataset.num_examples/BATCH_SIZE_DEFAULT)):\n images, labels = train_dataset.next_batch(BATCH_SIZE_DEFAULT)\n images, labels = torch.tensor(images), torch.tensor(labels, dtype = torch.long)\n pred = model(images)\n labels = torch.argmax(labels, dim=1)\n loss = criterion(pred, labels)\n pred_result = torch.argmax(pred, dim=1)\n acc = accuracy(pred_result, labels)\n accs.append(acc)\n model.zero_grad()\n loss.backward()\n optimizer.step()\n losses.append(loss)\n if i_iter % 100 ==0:\n msg = 'Epoch:[{}/{}] Iter: [{}/{}], Loss: {: .6f}, ACC:[{: .6f}]'.format(epoch, epoch_num, i_iter, int(train_dataset.num_examples/BATCH_SIZE_DEFAULT), sum(losses)/len(losses), sum(accs)/len(accs))\n print(msg)\n with open('./log.txt', 'a') as f:\n f.write(msg)\n f.write('\\n')\n msg_epoch = '--------Epoch: [{}/{}], 
Loss: {: .6f}, ACC:[{: .6f}]-------'.format(epoch, epoch_num, sum(losses)/len(losses), sum(accs)/len(accs))\n        print(msg_epoch)\n        with open('./log.txt', 'a') as f:\n            f.write(msg_epoch)\n            f.write('\\n')\n    #raise NotImplementedError\n    ########################\n    # END OF YOUR CODE    #\n    #######################", "def grid_search(train_labels: str, \n                test_labels: str, \n                output:str, \n                res:tuple=(120, 160), \n                lazy:bool=True, \n                batch_size:int=16, \n                epochs:int=20):\n\n    # Data\n    print(\"=> Loading data.\")\n    train = FLIRDataset(train_labels, res=res, batch_size=batch_size)\n    test = FLIRDataset(test_labels, res=res, batch_size=batch_size)\n\n    # In eager loading mode, train on everything.\n    if not lazy:\n        X_train, y_train = train.get_all()\n        X_test, y_test = test.get_all()\n        X_train = np.concatenate([X_train, X_test], axis=0)\n        y_train = np.concatenate([y_train, y_test], axis=0)\n\n\n    def net(x, num_classes=1):\n        x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x)\n        x = K.layers.Flatten()(x)\n        x = K.layers.Dense(num_classes, activation=\"softmax\")(x)\n        return x\n\n    print(\"\\n=> Training model.\")\n    input_tensor = K.layers.Input((160, 120, 1))\n    output_tensor = net(input_tensor, num_classes=train.num_classes())\n    model = K.Model(input_tensor, output_tensor)\n\n    model.compile(optimizer=\"sgd\",\n                  loss=\"categorical_crossentropy\",\n                  metrics=[\"accuracy\"])\n\n    # Train model\n    if lazy:\n        model.fit(x=train, \n                  epochs=epochs, \n                  validation_data=train, \n                  verbose=2)\n    else:\n        model.fit(x=X_train, \n                  y=y_train, \n                  epochs=epochs, \n                  batch_size=batch_size, \n                  verbose=2)\n\n    # Save weights\n    model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\"))", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n          model_file=None, save_path=None):\n    \n    print(\"- Loading configuration...\")\n    if model_name in models_default_params:\n        default_params = models_default_params[model_name]\n    else:\n        print(\"Error: the model '{}' has not been implemented\".format(model_name))\n        return\n    custom_objects = default_params['custom_objects']\n    patch_size = default_params['patch_size']\n    if save_path is None:\n        save_path = default_params['default_path']\n    if os.path.isfile(save_path):\n        print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n    print(\"- Configuration loaded.\")\n    \n    print(\"- Loading datasets...\")\n    train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n                                        y_directory = \"datasets/Potsdam/Training/Labels/\",\n                                        patch_size = patch_size)\n    val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n                                      y_directory = \"datasets/Potsdam/Validation/Labels/\",\n                                      patch_size = patch_size)\n    print(\"- Data loaded.\")\n    \n    print(\"- Initialising model...\")\n    if(model_file is not None): # Further train existing model\n        model = keras.models.load_model(model_file, custom_objects=custom_objects)\n    else: # Create new model\n        if model_name == 'fcn':\n            model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n                                        use_pretraining=False, freeze_base=False)\n        elif model_name == 'pspnet':\n            model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n                                        input_shape=patch_size)\n        elif model_name == 'mobilenetv2':\n            model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n        model.compile(\n            optimizer = optimizers.Adam(lr = 0.00001),\n            loss = losses.categorical_crossentropy,\n            metrics = 
[metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def __call__(self, inputs, training):\n\n self.training = training\n input_shape = inputs.shape\n if self.data_format == 'channels_first':\n img_size = (input_shape[2], input_shape[3])\n else:\n img_size = (input_shape[1], input_shape[2])\n\n with self._model_variable_scope('ssd300_model'):\n if self.data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n net = super(Model, self).__call__(inputs, training)\n\n with self._model_variable_scope('ssd300_model'):\n\n net = self._atrous_convolution_2d(net, filters=1024,\n kernel_size=3,\n atrous_rate=6, name='fc6')\n\n net = self._conv2d(net, filters=1024, kernel_size=1,\n padding='same', name='fc7')\n\n net = self._conv2d(net, filters=256, kernel_size=1,\n padding='same', name='conv6_1')\n\n net = self._conv2d(net, filters=512, kernel_size=3,\n strides=2,\n padding='same', name='conv6_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv7_1')\n\n net = self._conv2d(fixed_padding(net, 3, self.data_format),\n filters=256, kernel_size=3,\n strides=2,\n padding='valid', name='conv7_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv8_1')\n\n net = self._conv2d(net, filters=256, kernel_size=3,\n strides=2,\n padding='same', name='conv8_2')\n\n if self.data_format == 'channels_first':\n net = tf.reduce_mean(net, [2, 3])\n else:\n net = tf.reduce_mean(net, [1, 2])\n self.layers['pool6'] = net\n\n # Prediction from conv4_3\n conv4_3_norm = self._normalize(net, 20, name='conv4_3_norm')\n num_priors = 3\n x = self._conv2d(conv4_3_norm, filters=num_priors * 4, kernel_size=3,\n padding='same', name='conv4_3_norm_mbox_loc')\n self.layers['conv4_3_norm_mbox_loc_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_loc_flat')\n\n x = self._conv2d(conv4_3_norm, filters=num_priors * self.num_classes,\n kernel_size=3, padding='same',\n name='conv4_3_norm_mbox_conf')\n self.layers['conv4_3_norm_mbox_conf_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_conf_flat')\n\n prior_box = PriorBox(img_size, min_size=30.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv4_3_norm_mbox_priorbox')\n net['conv4_3_norm_mbox_priorbox'] = prior_box(conv4_3_norm)\n\n return net", "def ResNet18(input_shape = (28, 28, 1), classes = 24):\n \n # Define the input as a tensor with shape input_shape\n X = X_input = Input(input_shape)\n\n \n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n #X = 
MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, [64, 64], stage=2, block='a')\n X = identity_block(X, [64, 64], stage=2, block='b')\n\n # Stage 3\n X = convolutional_block(X, [128, 128], stage=3, block='a')\n X = identity_block(X, [128, 128], stage=3, block='b')\n\n # Stage 4\n X = convolutional_block(X, [256, 256], stage=4, block='a')\n X = identity_block(X, [256, 256], stage=4, block='b')\n\n # Stage 5\n X = convolutional_block(X, [512, 512], stage=5, block='a')\n X = identity_block(X, [512, 512], stage=5, block='b')\n\n # AVGPOOL\n # X = AveragePooling2D(pool_size=(2,2), name='avg_pool')(X)\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n \n # Create model\n model = Model(inputs = X_input, outputs = X, name='ResNet18')\n\n return model", "def run_epoch(session, model, eval_op=None, verbose=False, epoch_size=1):\n start_time = time.time()\n all_words = 0\n costs = 0.0\n predicts = []\n\n fetches = {\n \"cost\": model.cost,\n \"mask\": model.mask,\n \"predict\": model.predicts,\n \"seqlen\": model.seq_len,\n \"loss\": model.loss,\n \"label\": model.label,\n \"label_flat\": model.label_flat,\n \"not_space\": model.not_space\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n # if debug:\n # fetches[\"inputs\"] = model.Dinputs\n # fetches[\"states\"] = model.Dstates\n # fetches[\"outputs\"] = model.Doutput\n\n logging.info(\"Epoch size: %d\" % epoch_size) \n print_idx = 0\n for step in range(epoch_size):\n vals = session.run(fetches)\n cost = vals[\"cost\"]\n mask = vals[\"mask\"]\n predict = vals[\"predict\"]\n label = vals[\"label\"]\n np.set_printoptions(threshold=np.nan)\n \n if eval_op is None:\n \n # if step > 497:\n # #for i in range(len(mask)):\n # # print(mask[i])\n # print(np.sum(mask, axis=1))\n # print(vals[\"seqlen\"])\n mask = np.array(np.round(mask), dtype=np.int32)\n shape = mask.shape\n # if step > 10 and step < 20:\n # print(predict)\n # print(np.argmax(predict, 1))\n predict = np.reshape(np.argmax(predict, 1), shape).tolist()\n mask = np.sum(mask, axis=1).tolist()\n for i in range(shape[0]):\n predicts.append(predict[i][:mask[i]])\n\n costs += cost\n words = np.sum(mask)\n all_words += words\n\n if epoch_size < 100:\n verbose = False\n\n if (step * 10 / epoch_size) > print_idx and eval_op is not None:\n print_idx = step * 10 / epoch_size + 1\n logging.info(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / step),\n num_gpus * all_words / (time.time() - start_time)))\n predict = np.argmax(predict, 1)\n label_flat = np.reshape(label, [-1])\n all_label_equal = np.equal(predict, label_flat)\n not_space_label = np.not_equal(label_flat, np.zeros(np.shape(label_flat)))\n not_space_equal = all_label_equal * not_space_label\n not_space_label_count = np.sum(not_space_label)\n not_space_equal_count = np.sum(not_space_equal)\n none_space_accuracy = not_space_equal_count / not_space_label_count\n logging.info(\"not space label: %d\" % not_space_label_count)\n logging.info(\"not space correct: %d\" % not_space_equal_count)\n logging.info(\"not space accuracy: %.3f\" % none_space_accuracy)\n logging.info(\"cost: %.3f\" % (costs / step))\n if np.isnan(np.exp(costs / step)):\n print(\"perplexity is nan\")\n print(\"cost: %f step: %d\" % (costs, step))\n return np.exp(costs / step)\n\n if eval_op is None:\n predict = np.reshape(predict, [-1])\n label_flat = np.reshape(label, [-1])\n all_label_equal = 
np.equal(predict, label_flat)\n not_space_label = np.not_equal(label_flat, np.zeros(np.shape(label_flat)))\n not_space_equal = all_label_equal * not_space_label\n not_space_label_count = np.sum(not_space_label)\n not_space_equal_count = np.sum(not_space_equal)\n none_space_accuracy = not_space_equal_count / not_space_label_count\n logging.info(\"not space label: %d\" % not_space_label_count)\n logging.info(\"not space correct: %d\" % not_space_equal_count)\n logging.info(\"not space accuracy: %.3f\" % none_space_accuracy)\n logging.info(\"cost: %.3f\" % (costs / step))\n return np.exp(costs / epoch_size), predicts\n # elif get_post:\n # # Keep in mind, when get_post, num_steps=1, batch_size=1\n # return np.exp(costs / iters), posteriors\n else:\n return np.exp(costs / epoch_size)", "def train():\n init_distributed_mode(args)\n save_dir = TRAIN_CFG['save_dir']\n if not os.path.exists(save_dir) and torch.distributed.get_rank() == 0:\n os.mkdir(save_dir)\n kwargs = {}\n # If augmenting data, disable Pytorch's own augmentataion\n # This has to be done manually as augmentation is embedded\n # refer : https://github.com/pytorch/vision/issues/2263\n base_path = DATASET_CFG['base_path']\n train_set = DATASET_CFG['train']\n valid_set = DATASET_CFG['valid']\n dset_mean_std = DATASET_CFG['mean_std']\n if dset_mean_std is not None:\n dataset_mean = [i/255. for i in dset_mean_std[0]]\n dataset_std = [i/255. for i in dset_mean_std[1]]\n else:\n dataset_mean, dataset_std = compute_mean_std(base_path, train_set)\n kwargs['image_mean'] = dataset_mean\n kwargs['image_std'] = dataset_std\n kwargs['min_size'] = DATASET_CFG['min_size']\n kwargs['max_size'] = DATASET_CFG['max_size']\n kwargs['box_detections_per_img'] = 300 # increase max det to max val in our benchmark\n\n # Set benchmark related parameters\n if benchmark == 'ScutHead':\n combined_cfg = {**cfg, **sh_anchors}\n elif benchmark == 'CrowdHuman':\n combined_cfg = {**cfg, **ch_anchors}\n elif benchmark == 'Combined':\n combined_cfg = {**cfg, **combined_anchors}\n else:\n raise ValueError(\"New dataset has to be registered\")\n\n # Create Model\n default_filter = False\n model = customRCNN(cfg=combined_cfg,\n use_deform=NET_CFG['use_deform'],\n ohem=NET_CFG['ohem'],\n context=NET_CFG['context'],\n custom_sampling=NET_CFG['custom_sampling'],\n default_filter=default_filter,\n soft_nms=NET_CFG['soft_nms'],\n upscale_rpn=NET_CFG['upscale_rpn'],\n median_anchors=NET_CFG['median_anchors'],\n **kwargs).cuda() \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],\n find_unused_parameters=True)\n model_without_ddp = model.module\n\n # Create Optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=HYP_CFG['learning_rate'],\n momentum=HYP_CFG['learning_rate'],\n weight_decay=HYP_CFG['weight_decay'])\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=TRAIN_CFG['milestones'],\n gamma=HYP_CFG['gamma'])\n # Restore from checkpoint\n pt_model = TRAIN_CFG['pretrained_model']\n if pt_model:\n model_without_ddp = restore_network(model_without_ddp, pt_model,\n only_backbone=TRAIN_CFG['only_backbone'])\n \n # Create training and vaid dataset\n dataset_param = {'mean': dataset_mean, 'std':dataset_std,\n 'shape':(kwargs['min_size'], kwargs['max_size'])}\n batch_size = HYP_CFG['batch_size']\n train_dataset = HeadDataset(train_set,\n base_path,\n dataset_param,\n train=True)\n val_dataset = HeadDataset(valid_set,\n 
base_path,\n                              dataset_param,\n                              train=False)\n    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n    train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,\n                                                        batch_size,\n                                                        drop_last=True)\n    train_data_loader = torch.utils.data.DataLoader(train_dataset,\n                                                    batch_sampler=train_batch_sampler,\n                                                    num_workers=args.num_workers,\n                                                    collate_fn=collate_fn)\n\n    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)\n    val_batch_sampler = torch.utils.data.BatchSampler(val_sampler,\n                                                      batch_size,\n                                                      drop_last=True)\n    val_data_loader = torch.utils.data.DataLoader(val_dataset,\n                                                  batch_sampler=val_batch_sampler,\n                                                  num_workers=args.num_workers,\n                                                  collate_fn=collate_fn)\n    # Fastforward the LR decayer when resuming from start_epoch\n    start_epoch = TRAIN_CFG['start_epoch']\n    max_epoch = TRAIN_CFG['max_epoch']\n    for _ in range(0, start_epoch):\n        scheduler.step()\n\n    # Start training\n    print(\"======= Training for \" + str(max_epoch) + \" ===========\")\n    for epoch in range(start_epoch, int(max_epoch) + 1):\n        if epoch % TRAIN_CFG['eval_every'] == 0:\n            print(\"========= Evaluating Model ==========\")\n            result_dict = evaluate(model, val_data_loader, benchmark=benchmark)\n            if torch.distributed.get_rank() == 0:\n                logging.info('Eval score at {0} epoch is {1}'.format(str(epoch),\n                                                                 result_dict))\n            \n        train_one_epoch(model, optimizer, train_data_loader,\n                        device, epoch, print_freq=1000)\n        scheduler.step()\n        if torch.distributed.get_rank() == 0:\n            print(\"Saving model\")\n            torch.save(model.state_dict(), osp.join(save_dir,\n                        TRAIN_CFG['exp_name'] + '_epoch_' + str(epoch) + '.pth'))", "def resnet50():\n    initializer = K.initializers.he_normal(seed=None)\n\n    X = K.Input(shape=(224, 224, 3))\n\n    # conv1\n    layer = K.layers.Conv2D(filters=64,\n                            kernel_size=(7, 7),\n                            strides=(2, 2),\n                            padding='same',\n                            kernel_initializer=initializer,\n                            )(X)\n\n    layer = K.layers.BatchNormalization(axis=3)(layer)\n\n    layer = K.layers.Activation('relu')(layer)\n\n    # conv2_x\n    layer = K.layers.MaxPool2D(pool_size=(3, 3),\n                               strides=(2, 2),\n                               padding='same')(layer)\n\n    layer = projection_block(layer, [64, 64, 256], 1)\n    for _ in range(2):\n        layer = identity_block(layer, [64, 64, 256])\n\n    # conv3_x\n    layer = projection_block(layer, [128, 128, 512])\n    for _ in range(3):\n        layer = identity_block(layer, [128, 128, 512])\n\n    # conv4_x\n    layer = projection_block(layer, [256, 256, 1024])\n    for _ in range(5):\n        layer = identity_block(layer, [256, 256, 1024])\n\n    # conv5_x\n    layer = projection_block(layer, [512, 512, 2048])\n    for _ in range(2):\n        layer = identity_block(layer, [512, 512, 2048])\n\n    layer = K.layers.AveragePooling2D(pool_size=(7, 7),\n                                      padding='same')(layer)\n\n    layer = K.layers.Dense(units=1000,\n                           activation='softmax',\n                           kernel_initializer=initializer,\n                           )(layer)\n\n    model = K.models.Model(inputs=X, outputs=layer)\n    return model", "def train(self):\n        best_loss = math.inf\n        for _ in range(self.epoch, self.end_epoch):\n            self.summary_writer.add_scalar('epoch', self.epoch, self.total_steps)\n            epoch_loss, _ = self.run_epoch(self.dataloader)\n            if epoch_loss < best_loss:\n                best_loss = epoch_loss\n                # save best module as onnx format\n                dummy_input = torch.randn((10, 3, self.image_dim, self.image_dim))\n                module_path = os.path.join(self.models_dir, 'resnet.onnx')\n                self.save_module(\n                    self.resnet.module, module_path, save_onnx=True, dummy_input=dummy_input)\n            self.save_checkpoint('resnet_e{}_state.pth'.format(self.epoch))\n\n            # validate step\n            val_loss, _ = self.validate()\n\n            # update learning rates\n            self.lr_scheduler.step(val_loss)\n            
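# passing the validation loss to step() implies a metric-driven scheduler such as ReduceLROnPlateau\n            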
self.save_learning_rate(self.summary_writer, self.optimizer, self.total_steps)\n self.epoch += 1\n self.test()", "def get_cifar10_cnn():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 128\n epochs = 4\n \n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n \n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n #x._train shape: (50000, 32, 32, 3)\n #input shape (32, 32, 3)\n input_shape = x_train.shape[1:]\n\n #print('x_train shape:', x_train.shape)\n #print(x_train.shape[0], 'train samples')\n #print(x_test.shape[0], 'test samples')\n #print('input shape', input_shape)\n \n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def run(self, data, training=False):\n # Set mode\n if training:\n self._model.train()\n else:\n self._model.eval()\n # Compute\n return self._model(data)", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training 
curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def run(self):\n params = self.params\n\n # load data\n self.data = self.load_data()\n # check if loaded classification data set is using the same dict as pretrained model\n if not self.data['dico'] == self._embedder.dico:\n self.compare_dict(self.data['dico'], self._embedder.dico)\n raise Exception((\"Dictionary in evaluation data (%i words) seems different than the one \" +\n \"in the pretrained model (%i words). Please verify you used the same dictionary, \" +\n \"and the same values for max_vocab and min_count.\") % (len(self.data['dico']),\n len(self._embedder.dico)))\n\n # embedder\n self.encoder = copy.deepcopy(self._embedder)\n self.encoder.cuda()\n\n # projection layer: CHANGE 3 to your number of classes output\n self.proj = nn.Sequential(*[\n nn.Dropout(params.dropout),\n nn.Linear(self.encoder.out_dim, params.clf_output_dim)\n ]).cuda()\n\n # optimizers: use different optimizers to tune embedding layer and projection layer\n self.optimizer_e = get_optimizer(list(self.encoder.get_parameters(params.finetune_layers)), params.optimizer_e)\n self.optimizer_p = get_optimizer(self.proj.parameters(), params.optimizer_p)\n best_acc = 0\n eval_metric = \"CLF_valid_en_acc\"\n # train and evaluate the model\n for epoch in range(params.n_epochs):\n # update epoch\n self.epoch = epoch\n\n # training\n logger.info(\"CLF - Training epoch %i ...\" % epoch)\n self.train()\n\n # evaluation\n logger.info(\"CLF - Evaluating epoch %i ...\" % epoch)\n with torch.no_grad():\n scores = self.eval()\n if scores[eval_metric] > best_acc:\n logger.info('New best score for %s: %.6f' % (eval_metric, scores[eval_metric]))\n self.save_checkpoint('best-%s' % eval_metric)\n self.decrease_counts = 0\n best_acc = scores[eval_metric]\n else:\n logger.info(\"Not a better validation score (%i / %i).\"\n % (self.decrease_counts, self.decrease_counts_max))\n self.decrease_counts += 1\n if self.decrease_counts > self.decrease_counts_max:\n logger.info(\"Stopping criterion has been below its best value for more \"\n \"than %i epochs. 
Ending the experiment...\" % self.decrease_counts_max)\n exit()\n self.scores.update(scores)", "def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500):\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n mask = np.random.choice(num_training, num_dev, replace=False)\n X_dev = X_train[mask]\n y_dev = y_train[mask]\n\n # Preprocessing: reshape the image data into rows\n X_train = np.reshape(X_train, (X_train.shape[0], -1))\n X_val = np.reshape(X_val, (X_val.shape[0], -1))\n X_test = np.reshape(X_test, (X_test.shape[0], -1))\n X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n X_dev -= mean_image\n\n # add bias dimension and transform into columns\n X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])\n X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])\n X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])\n X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])\n\n return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev", "def load_CIFAR10(ROOT):\r\n xs = []\r\n ys = []\r\n for b in range(1,6):\r\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\r\n X, Y = load_CIFAR_batch(f)\r\n xs.append(X)\r\n ys.append(Y)\r\n Xtr = np.concatenate(xs)\r\n Ytr = np.concatenate(ys)\r\n del X, Y\r\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte\r\n\r\n\tdef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\r\n # Load the raw CIFAR-10 data\r\n \r\n cifar10_dir = 'Downloads/cifar-10-batches-py'\r\n \r\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\r\n\r\n # Subsample the data\r\n mask = range(num_training, num_training + num_validation)\r\n X_val = X_train[mask]\r\n y_val = y_train[mask]\r\n mask = range(num_training)\r\n X_train = X_train[mask]\r\n y_train = y_train[mask]\r\n mask = range(num_test)\r\n X_test = X_test[mask]\r\n y_test = y_test[mask]\r\n\r\n x_train = X_train.astype('float32') \r\n x_test = X_test.astype('float32')\r\n \r\n x_train = x_train.reshape(-1, 32, 32, 3)\r\n x_test = x_test.reshape(-1, 32, 32, 3)\r\n x_train /= 255\r\n x_test /= 255\r\n\r\n return x_train, y_train, X_val, y_val, x_test, y_test", "def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' 
% (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)", "def modified_resnet10(self) -> torch.nn.Module:\n # initialize a Resnet-10 instance\n net = torchvision.models.resnet._resnet(arch=\"resnet10\", block=torchvision.models.resnet.BasicBlock, layers=[1, 1, 1, 1], pretrained=False, progress=False)\n\n # the first layer will be a lazy convolutional layer with any input channels\n net.conv1 = torch.nn.LazyConv2d(\n out_channels=64,\n kernel_size=(7, 7),\n stride=(2, 2),\n padding=(3, 3),\n bias=not self.bn_affine\n )\n\n # modify batch-norm layer to have momentum 1 and no tracking statistics\n net.bn1 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer1[0].bn1 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer1[0].bn2 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer2[0].bn1 = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer2[0].bn2 = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer2[0].downsample[1] = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer3[0].bn1 = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer3[0].bn2 = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer3[0].downsample[1] = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer4[0].bn1 = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer4[0].bn2 = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer4[0].downsample[1] = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n # last layer\n if self.dim_output is not None:\n net.fc = torch.nn.LazyLinear(out_features=self.dim_output)\n else:\n net.fc = torch.nn.Identity()\n\n # add dropout-2d after layers 1, 2, and 3\n net.maxpool.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer1[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer1[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer1.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer2[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer2[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer2.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer3[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer3[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer3.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer4[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer4[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer4.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n return net", "def resnet101_classifier(num_rois, num_classes, base_model = None, weight_regularizer=None, bias_regularizer=None):\n roi_input = Input(shape=(None, 4), 
name='roi_input')\n\n pooling_input = base_model.output if base_model else Input(shape=(None, None, FINAL_CONV_FILTERS))\n model_input = base_model.input if base_model else pooling_input\n resize_out = RoiResizeConv(POOLING_REGIONS, num_rois)([pooling_input, roi_input])\n\n out = td_conv_block(resize_out, 3, [512, 512, 2048], stage=5, block='a', strides=(1,1),\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n td_input_shape=(num_rois, POOLING_REGIONS, POOLING_REGIONS, 1024),\n use_conv_bias=False, separate_scale=True)\n out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='b',\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='c',\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n out = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(out)\n\n out = TimeDistributed(Flatten(name='flatten'))(out)\n\n gaussian_initializer_cls = TruncatedNormal(stddev=0.01)\n gaussian_initializer_bbreg = TruncatedNormal(stddev=0.001)\n\n out_class = TimeDistributed(Dense(num_classes, activation='softmax',\n kernel_initializer=gaussian_initializer_cls,\n kernel_regularizer=weight_regularizer,\n bias_regularizer=bias_regularizer\n ),\n name='dense_class_{}'.format(num_classes))(out)\n out_reg = TimeDistributed(Dense(4 * (num_classes - 1), activation='linear',\n kernel_initializer=gaussian_initializer_bbreg,\n kernel_regularizer=weight_regularizer,\n bias_regularizer=bias_regularizer\n ),\n name='dense_reg_{}'.format(num_classes))(out)\n\n cls_model = Model(inputs=[model_input, roi_input], outputs=[out_class, out_reg])\n\n this_dir = os.path.dirname(__file__)\n weights_path = os.path.join(this_dir, '../models/resnet101_weights_tf.h5')\n cls_model.load_weights(weights_path, by_name=True)\n\n return cls_model", "def eval(self):\n self.train(mode=False)", "def plscorr_eval(train_fmri_ts, train_feat_ts, val_fmri_ts, val_feat_ts,\n out_dir, mask_file):\n train_feat_ts = train_feat_ts.reshape(-1, train_feat_ts.shape[3]).T\n val_feat_ts = val_feat_ts.reshape(-1, val_feat_ts.shape[3]).T\n train_fmri_ts = train_fmri_ts.T\n val_fmri_ts = val_fmri_ts.T\n\n # Iteration loop for different component number\n #for n in range(5, 19):\n # print '--- Components number %s ---' %(n)\n # plsca = PLSCanonical(n_components=n)\n # plsca.fit(train_feat_ts, train_fmri_ts)\n # pred_feat_c, pred_fmri_c = plsca.transform(val_feat_ts, val_fmri_ts)\n # pred_fmri_ts = plsca.predict(val_feat_ts) \n # # calculate correlation coefficient between truth and prediction\n # r = corr2_coef(val_fmri_ts.T, pred_fmri_ts.T, mode='pair')\n # # get top 20% corrcoef for model evaluation\n # vsample = int(np.rint(0.2*len(r)))\n # print 'Sample size for evaluation : %s' % (vsample)\n # r.sort()\n # meanr = np.mean(r[-1*vsample:])\n # print 'Mean prediction corrcoef : %s' %(meanr)\n \n # model generation based on optimized CC number\n cc_num = 10\n plsca = PLSCanonical(n_components=cc_num)\n plsca.fit(train_feat_ts, train_fmri_ts)\n from sklearn.externals import joblib\n joblib.dump(plsca, os.path.join(out_dir, 'plsca_model.pkl'))\n plsca = joblib.load(os.path.join(out_dir, 'plsca_model.pkl'))\n\n # calculate correlation coefficient between truth and prediction\n pred_fmri_ts = plsca.predict(val_feat_ts)\n fmri_pred_r = corr2_coef(val_fmri_ts.T, pred_fmri_ts.T, mode='pair')\n mask = 
vutil.data_swap(mask_file)\n vxl_idx = np.nonzero(mask.flatten()==1)[0]\n tmp = np.zeros_like(mask.flatten(), dtype=np.float64)\n tmp[vxl_idx] = fmri_pred_r\n tmp = tmp.reshape(mask.shape)\n vutil.save2nifti(tmp, os.path.join(out_dir, 'pred_fmri_r.nii.gz'))\n pred_feat_ts = pls_y_pred_x(plsca, val_fmri_ts)\n pred_feat_ts = pred_feat_ts.T.reshape(96, 14, 14, 540)\n np.save(os.path.join(out_dir, 'pred_feat.npy'), pred_feat_ts)\n\n # get PLS-CCA weights\n feat_cc, fmri_cc = plsca.transform(train_feat_ts, train_fmri_ts)\n np.save(os.path.join(out_dir, 'feat_cc.npy'), feat_cc)\n np.save(os.path.join(out_dir, 'fmri_cc.npy'), fmri_cc)\n feat_weight = plsca.x_weights_.reshape(96, 14, 14, cc_num)\n #feat_weight = plsca.x_weights_.reshape(96, 11, 11, cc_num)\n fmri_weight = plsca.y_weights_\n np.save(os.path.join(out_dir, 'feat_weights.npy'), feat_weight)\n np.save(os.path.join(out_dir, 'fmri_weights.npy'), fmri_weight)\n fmri_orig_ccs = get_pls_components(plsca.y_scores_, plsca.y_loadings_)\n np.save(os.path.join(out_dir, 'fmri_orig_ccs.npy'), fmri_orig_ccs)", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def train():\n\t# 1、make dataloader\n\ttrain_loader, val_loader, num_query, num_class = make_data_loader(cfg)\n\t#print(\"num_query:{},num_class:{}\".format(num_query,num_class))\n\n\t# 2、make model\n\tmodel = build_model(cfg, num_class)\n\n\t# model.eval()\n\t# x = model(img_tensor)\n\t# print(x.shape)\n\t# 3、 make optimizer\n\toptimizer = make_optimizer(cfg, model)\n\n\t# 4、 make lr_scheduler\n\tscheduler = make_lr_scheduler(cfg, optimizer)\n\n\t# 5、 make loss_func\n\tif cfg.MODEL.PCB_NECK:\n\t\t# make loss specificially for pcb \n\t\tloss_func = get_softmax_triplet_loss_fn(cfg, num_class)\n\telse:\n\t\tloss_func = make_loss(cfg, num_class)\n\n\t# get paramters\n\tlog_period = cfg.OUTPUT.LOG_PERIOD \n\tckpt_period =cfg.OUTPUT.CHECKPOINT_PERIOD\n\teval_period = cfg.OUTPUT.EVAL_PERIOD\n\toutput_dir = cfg.OUTPUT.ROOT_DIR\n\tdevice = cfg.MODEL.DEVICE\n\tepochs = cfg.SOLVER.MAX_EPOCHS\n\tuse_gpu = device == \"cuda\"\n\tuse_neck = cfg.MODEL.NECK or cfg.MODEL.LEARN_REGION \n\t# how many batch for each log\n\tbatch_size = cfg.SOLVER.IMGS_PER_BATCH\n\tbatch_num = len(train_loader) \n\t\n\tlog_iters = batch_num // log_period\n\tpretrained = cfg.MODEL.PRETRAIN_PATH != ''\n\tparallel = cfg.MODEL.PARALLEL \t\n\tgrad_clip = cfg.DARTS.GRAD_CLIP \n\n\tfeat_norm = cfg.TEST.FEAT_NORM \n\tckpt_save_path = cfg.OUTPUT.ROOT_DIR + cfg.OUTPUT.CKPT_DIR\n\tif not os.path.exists(ckpt_save_path):\n\t\tos.makedirs(ckpt_save_path)\n\n\n\t# create *_result.xlsx\n\t# save the result for analyze\n\tname = (cfg.OUTPUT.LOG_NAME).split(\".\")[0] + \".xlsx\"\n\tresult_path = cfg.OUTPUT.ROOT_DIR + name\n\n\twb = xl.Workbook()\n\tsheet = wb.worksheets[0]\n\ttitles = ['size/M','speed/ms','final_planes', 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss',\n\t\t\t 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss','acc', 'mAP', 'r1', 'r5', 'r10', 'loss']\n\tsheet.append(titles)\n\tcheck_epochs = [40, 80, 120, 160, 200, 240, 280, 320, 360, epochs]\n\tvalues = []\n\n\tlogger = logging.getLogger('MobileNetReID.train')\n\t\n\t# count parameter\n\tsize = count_parameters(model)\n\tlogger.info(\"the param number of the model is {:.2f} M\".format(size))\n\t\n\tvalues.append(format(size, 
'.2f'))\n\tvalues.append(model.final_planes)\n\n\tlogger.info(\"Start training\")\n\t\n\t#count = 183, x, y = batch -> 11712 for train\n\tif pretrained:\n\t\tstart_epoch = model.start_epoch\n\n\tif parallel:\n\t\tmodel = nn.DataParallel(model)\n\n\tif use_gpu:\n\t\t# model = nn.DataParallel(model)\n\t\tmodel.to(device)\n\t\n\t# save the best model\n\tbest_mAP, best_r1 = 0., 0.\n\tis_best = False\n\t# batch : img, pid, camid, img_path\n\tavg_loss, avg_acc = RunningAverageMeter(), RunningAverageMeter()\n\tavg_time, global_avg_time = AverageMeter(), AverageMeter()\n\tglobal_avg_time.reset()\n\tfor epoch in range(epochs):\n\t\tscheduler.step()\n\n\t\tif pretrained and epoch < start_epoch - 1:\n\t\t\tcontinue\n\t\n\t\tmodel.train()\n\t\t# sum_loss, sum_acc = 0., 0.\n\t\tavg_loss.reset()\n\t\tavg_acc.reset()\n\t\tavg_time.reset()\n\t\tfor i, batch in enumerate(train_loader):\n\n\t\t\tt0 = time.time()\n\t\t\timgs,labels = batch\n\n\t\t\tif use_gpu:\n\t\t\t\timgs = imgs.to(device)\n\t\t\t\tlabels = labels.to(device)\n\n\t\t\tres = model(imgs)\n\t\t\t# score, feat = model(imgs)\n\t\t\t# loss = loss_func(score, feat, labels)\n\t\t\tloss, acc = compute_loss_acc(use_neck, res, labels, loss_func)\n\t\t\t\n\t\t\tloss.backward()\n\t\t\tif grad_clip != 0:\n\t\t\t\tnn.utils.clip_grad_norm(model.parameters(), grad_clip)\n\n\t\t\toptimizer.step()\n\n\t\t\toptimizer.zero_grad()\n\n\t\t\t# acc = (score.max(1)[1] == labels).float().mean()\n\n\t\t\t# sum_loss += loss\n\t\t\t# sum_acc += acc \n\t\t\tt1 = time.time()\n\t\t\tavg_time.update((t1 - t0) / batch_size)\n\t\t\tavg_loss.update(loss)\n\t\t\tavg_acc.update(acc)\n\n\t\t\t#log the info \n\t\t\tif (i+1) % log_iters == 0:\n\n\t\t\t\tlogger.info(\"epoch {}: {}/{} with loss is {:.5f} and acc is {:.3f}\".format(\n\t\t\t\t\t epoch+1, i+1, batch_num, avg_loss.avg, avg_acc.avg))\n\n\t\tlr = optimizer.state_dict()['param_groups'][0]['lr']\n\t\tlogger.info(\"end epochs {}/{} with lr: {:.5f} and avg_time is {:.3f} ms\".format(epoch+1, epochs, lr, avg_time.avg * 1000))\n\t\tglobal_avg_time.update(avg_time.avg)\n\t\t# change the lr \n\n\t\t# eval the model \n\t\tif (epoch+1) % eval_period == 0 or (epoch + 1) == epochs :\n\t\t\t\n\t\t\tmodel.eval()\n\t\t\tmetrics = R1_mAP(num_query, use_gpu = use_gpu, feat_norm = feat_norm)\n\n\t\t\twith torch.no_grad():\n\n\t\t\t\tfor vi, batch in enumerate(val_loader):\n\t\t\t\t\t\n\t\t\t\t\timgs, labels, camids = batch\n\n\t\t\t\t\tif use_gpu:\n\t\t\t\t\t\timgs = imgs.to(device)\n\n\t\t\t\t\tfeats = model(imgs)\n\t\t\t\t\tmetrics.update((feats,labels, camids))\n\n\t\t\t\t#compute cmc and mAP\n\t\t\t\tcmc, mAP = metrics.compute()\n\t\t\t\tlogger.info(\"validation results at epoch:{}\".format(epoch + 1))\n\t\t\t\tlogger.info(\"mAP:{:.2%}\".format(mAP))\n\t\t\t\tfor r in [1,5,10]:\n\t\t\t\t\tlogger.info(\"CMC curve, Rank-{:<3}:{:.2%}\".format(r,cmc[r-1]))\t\n\n\t\t\t\t# determine whether cur model is the best \n\t\t\t\tif mAP > best_mAP:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_mAP = mAP\n\t\t\t\t\tlogger.info(\"Get a new best mAP\")\n\t\t\t\tif cmc[0] > best_r1:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_r1 = cmc[0]\n\t\t\t\t\tlogger.info(\"Get a new best r1\")\n\n\t\t\t\t# add the result to sheet\n\t\t\t\tif (epoch + 1) in check_epochs:\n\t\t\t\t\tval = [avg_acc.avg, mAP, cmc[0], cmc[4], cmc[9]]\n\t\t\t\t\tchange = [format(v * 100, '.2f') for v in val]\n\t\t\t\t\tchange.append(format(avg_loss.avg, '.3f'))\n\t\t\t\t\tvalues.extend(change)\n\n\n\t\t# we hope that eval_period == ckpt_period or eval_period == k* ckpt_period where k is 
int\t\t\t\n\t\t# whether to save the model\n\t\tif (epoch+1) % ckpt_period == 0 or is_best:\n\n\t\t\tif parallel:\n\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\t\t\telse:\n\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\n\t\t\tlogger.info(\"checkpoint {} saved !\".format(epoch + 1))\n\n\t\t\tif is_best:\n\t\t\t\tif parallel:\n\t\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\telse:\n\t\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\tlogger.info(\"best checkpoint was saved\")\n\t\t\t\tis_best = False\n\t\n\tvalues.insert(1, format(global_avg_time.avg * 1000, '.2f'))\n\tsheet.append(values)\n\twb.save(result_path)\n\n\tlogger.info(\"training is end, time for per imgs is {} ms\".format(global_avg_time.avg *1000))", "def run_net(self,\n pre_trained_chckpnt_dir ='' #for resuming training, load the model from this directory\n ):\n\n _rd = _read_data(data=self.data)\n\n self.alpha_coeff=1\n\n #read path of the images for train, test, and validation\n train_CTs, train_GTVs, train_Torso, train_penalize, train_surface,\\\n validation_CTs, validation_GTVs, validation_Torso, validation_penalize, validation_surface,\\\n test_CTs, test_GTVs, test_Torso, test_penalize,test_surface=_rd.read_data_path(fold=self.fold)\n self.img_width = self.img_width\n self.img_height = self.img_height\n # ======================================\n #validation instances\n bunch_of_images_no=20\n _image_class_vl = image_class(validation_CTs, validation_GTVs, validation_Torso,validation_penalize,validation_surface\n , bunch_of_images_no=bunch_of_images_no, is_training=0,\n patch_window=self.patch_window)\n _patch_extractor_thread_vl = _patch_extractor_thread(_image_class=_image_class_vl,\n sample_no=self.sample_no, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=0,vl_sample_no=self.validation_samples\n )\n _fill_thread_vl = fill_thread(validation_CTs,\n validation_GTVs,\n validation_Torso,\n validation_penalize,\n validation_surface,\n _image_class_vl,\n sample_no=self.sample_no,\n total_sample_no=self.validation_samples,\n patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width, img_height=self.img_height,\n mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=0,\n patch_extractor=_patch_extractor_thread_vl,\n fold=self.fold)\n\n\n _fill_thread_vl.start()\n _patch_extractor_thread_vl.start()\n _read_thread_vl = read_thread(_fill_thread_vl, mutex=settings.mutex,\n validation_sample_no=self.validation_samples, is_training=0)\n _read_thread_vl.start()\n # ======================================\n #training instances\n bunch_of_images_no = 24\n _image_class = image_class(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface\n , bunch_of_images_no=bunch_of_images_no,is_training=1,patch_window=self.patch_window\n )\n patch_extractor_thread = _patch_extractor_thread(_image_class=_image_class,\n sample_no=240, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=1)\n _fill_thread = fill_thread(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface,\n _image_class,\n sample_no=self.sample_no,total_sample_no=self.sample_no,\n 
patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width,\n img_height=self.img_height,mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=1,\n patch_extractor=patch_extractor_thread,\n fold=self.fold)\n\n _fill_thread.start()\n patch_extractor_thread.start()\n\n _read_thread = read_thread(_fill_thread,mutex=settings.mutex,is_training=1)\n _read_thread.start()\n # ======================================\n\n image = tf.placeholder(tf.float32, shape=[None, None, None, None, 1])\n label = tf.placeholder(tf.float32, shape=[None, None, None, None, 2])\n penalize = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n surf_map = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n loss_coef = tf.placeholder(tf.float32, shape=[None, 2]) # shape: batchno * 2 values for each class\n alpha = tf.placeholder(tf.float32, name='alpha') # background coeff\n beta = tf.placeholder(tf.float32, name='beta') # tumor coeff\n\n ave_vali_acc=tf.placeholder(tf.float32)\n ave_loss_vali=tf.placeholder(tf.float32)\n ave_dsc_vali=tf.placeholder(tf.float32)\n\n dropout=tf.placeholder(tf.float32,name='dropout')\n is_training = tf.placeholder(tf.bool, name='is_training')\n is_training_bn = tf.placeholder(tf.bool, name='is_training_bn')\n dense_net_dim = tf.placeholder(tf.int32, name='dense_net_dim')\n\n _dn = _densenet_unet(self.densnet_unet_config,self.compression_coefficient,self.growth_rate) #create object\n y=_dn.dens_net(image=image,is_training=is_training,dropout_rate1=0,dropout_rate2=0,dim=dense_net_dim,is_training_bn=is_training_bn)\n # y = _dn.vgg(image)\n\n y_dirX = ((y[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis]))\n label_dirX = (label[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis])\n penalize_dirX = (penalize[:,16,:,:,0,np.newaxis])\n surf_map_dirX = (surf_map[:,16,:,:,0,np.newaxis])\n image_dirX = ((image[:, int(self.patch_window / 2), :, :, 0, np.newaxis]))\n\n show_img=tf.nn.softmax(y)[:, int(self.GTV_patchs_size / 2) , :, :, 0, np.newaxis]\n tf.summary.image('outprunut',show_img , 3)\n tf.summary.image('output without softmax',y_dirX ,3)\n tf.summary.image('groundtruth', label_dirX,3)\n tf.summary.image('penalize', penalize_dirX,3)\n tf.summary.image('surf_map', surf_map_dirX,3)\n tf.summary.image('image',image_dirX ,3)\n\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n devices = sess.list_devices()\n print(devices)\n\n print(device_lib.list_local_devices())\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n\n train_writer = tf.summary.FileWriter(self.LOGDIR + '/train' ,graph=tf.get_default_graph())\n validation_writer = tf.summary.FileWriter(self.LOGDIR + '/validation' , graph=sess.graph)\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n saver=tf.train.Saver(tf.global_variables(), max_to_keep=1000)\n\n\n\n #define the loss function\n with tf.name_scope('cost'):\n penalize_weight=0\n [ penalized_loss,\n soft_dice_coef,logt,lbl]=self.loss_instance.dice_plus_distance_penalize(logits=y, labels=label,penalize=penalize)\n surface_loss= self.loss_instance.surface_loss(logits=y, labels=label, surf_map=surf_map)\n cost = tf.reduce_mean((1.0 - soft_dice_coef[1])+penalize_weight*penalized_loss+surface_loss, 
name=\"cost\")\n\n #Setup the Tensorboard plots\n tf.summary.scalar(\"cost\", cost)\n f1_measure = self.loss_instance.f1_measure(logits=y, labels=label)\n tf.summary.scalar(\"dice_bakground\", f1_measure[0])\n tf.summary.scalar(\"dice_tumor\", f1_measure[1])\n\n pwc = self.loss_instance.PWC(y, label)\n tf.summary.scalar(\"pwc_bakground\", pwc[0])\n tf.summary.scalar(\"pwc_tumor\", pwc[1])\n\n recall = self.loss_instance.Recall(y, label)\n tf.summary.scalar(\"recall_bakground\", recall[0])\n tf.summary.scalar(\"recall_tumor\", recall[1])\n\n precision = self.loss_instance.Precision(y, label)\n tf.summary.scalar(\"precision_bakground\", precision[0])\n tf.summary.scalar(\"precision_tumor\", precision[1])\n\n fpr = self.loss_instance.FPR(y, label)\n tf.summary.scalar(\"FPR_bakground\", fpr[0])\n tf.summary.scalar(\"FPR_tumor\", fpr[1])\n\n fnr = self.loss_instance.FNR(y, label)\n tf.summary.scalar(\"FNR_bakground\", fnr[0])\n tf.summary.scalar(\"FNR_tumor\", fnr[1])\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n optimizer_tmp = tf.train.AdamOptimizer(self.learning_rate,epsilon=0.001)\n optimizer = optimizer_tmp.minimize(cost)\n\n with tf.name_scope('validation'):\n average_validation_accuracy=ave_vali_acc\n average_validation_loss=ave_loss_vali\n average_dsc_loss=ave_dsc_vali\n tf.summary.scalar(\"average_validation_accuracy\",average_validation_accuracy)\n tf.summary.scalar(\"average_validation_loss\",average_validation_loss)\n tf.summary.scalar(\"average_dsc_loss\",average_dsc_loss)\n\n with tf.name_scope('accuracy'):\n accuracy=self.loss_instance.accuracy_fn(y, label)\n\n tf.summary.scalar(\"accuracy\", accuracy)\n\n sess.run(tf.global_variables_initializer())\n logging.debug('total number of variables %s' % (\n np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))\n summ=tf.summary.merge_all()\n\n point = 0 # starting point, starts from a value > 0 if training is resumed\n itr1 = 0 # number of iterations\n if len(pre_trained_chckpnt_dir):\n ckpt = tf.train.get_checkpoint_state(pre_trained_chckpnt_dir)\n saver.restore(sess, ckpt.model_checkpoint_path)\n point=int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n itr1=point\n\n\n # patch_radius = 49\n '''loop for epochs'''\n\n for epoch in range(self.total_epochs):\n while self.no_sample_per_each_itr*int(point/self.no_sample_per_each_itr)<self.sample_no:\n print('0')\n print(\"epoch #: %d\" %(epoch))\n startTime = time.time()\n step = 0\n self.beta_coeff=1+1 * np.exp(-point/2000)\n # =============start validation================\n if itr1 % self.display_validation_step ==0:\n '''Validation: '''\n loss_validation = 0\n acc_validation = 0\n validation_step = 0\n dsc_validation=0\n while (validation_step * self.batch_no_validation <settings.validation_totalimg_patch):\n [validation_CT_image, validation_GTV_image,validation_Penalize_patch,validation_Surface_patch] = _image_class_vl.return_patches_validation( validation_step * self.batch_no_validation, (validation_step + 1) *self.batch_no_validation)\n if (len(validation_CT_image)<self.batch_no_validation) | (len(validation_GTV_image)<self.batch_no_validation) | (len(validation_Penalize_patch)<self.batch_no_validation) | (len(validation_Surface_patch)<self.batch_no_validation) :\n _read_thread_vl.resume()\n time.sleep(0.5)\n continue\n\n validation_CT_image_patchs = validation_CT_image\n validation_GTV_label = validation_GTV_image\n tic=time.time()\n\n [acc_vali, 
loss_vali,dsc_vali,surface_loss1] = sess.run([accuracy, cost,f1_measure,surface_loss],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali:-1,\n dense_net_dim: self.patch_window,\n is_training_bn:False,\n alpha:1,\n beta:1,\n surf_map:validation_Surface_patch,\n })\n elapsed=time.time()-tic\n\n acc_validation += acc_vali\n loss_validation += loss_vali\n dsc_validation+=dsc_vali[1]\n validation_step += 1\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):\n print('nan problem')\n process = psutil.Process(os.getpid())\n\n print(\n '%d - > %d: elapsed_time:%d acc_validation: %f, loss_validation: %f, memory_percent: %4s' % (\n validation_step,validation_step * self.batch_no_validation\n , elapsed, acc_vali, loss_vali, str(process.memory_percent()),\n ))\n\n settings.queue_isready_vl = False\n acc_validation = acc_validation / (validation_step)\n loss_validation = loss_validation / (validation_step)\n dsc_validation = dsc_validation / (validation_step)\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):\n print('nan problem')\n _fill_thread_vl.kill_thread()\n print('******Validation, step: %d , accuracy: %.4f, loss: %f*******' % (\n itr1, acc_validation, loss_validation))\n\n [sum_validation] = sess.run([summ],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: acc_validation,\n ave_loss_vali: loss_validation,\n ave_dsc_vali:dsc_validation,\n dense_net_dim: self.patch_window,\n is_training_bn: False,\n alpha: 1,\n beta: 1,\n surf_map: validation_Surface_patch,\n\n })\n validation_writer.add_summary(sum_validation, point)\n print('end of validation---------%d' % (point))\n\n #loop for training batches\n while(step*self.batch_no<self.no_sample_per_each_itr):\n [train_CT_image_patchs, train_GTV_label, train_Penalize_patch,loss_coef_weights,train_Surface_patch] = _image_class.return_patches( self.batch_no)\n\n if (len(train_CT_image_patchs)<self.batch_no)|(len(train_GTV_label)<self.batch_no)\\\n |(len(train_Penalize_patch)<self.batch_no)|(len(train_Surface_patch)<self.batch_no):\n time.sleep(0.5)\n _read_thread.resume()\n continue\n\n tic=time.time()\n [acc_train1, loss_train1, optimizing,out,dsc_train11] = sess.run([accuracy, cost, optimizer,y,f1_measure],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n # loss_coef: loss_coef_weights,\n dropout: self.dropout_keep,\n is_training: True,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali: -1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n elapsed=time.time()-tic\n dsc_train1=dsc_train11[1]\n\n self.x_hist=self.x_hist+1\n # np.hstack((self.x_hist, [np.ceil(\n\n [sum_train] = sess.run([summ],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n dropout: self.dropout_keep, is_training: True,\n ave_vali_acc: acc_train1,\n ave_loss_vali: loss_train1,\n ave_dsc_vali: dsc_train1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n train_writer.add_summary(sum_train,point)\n step = step + 1\n\n 
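# Note: the training step and the summary evaluation above are two separate
# sess.run calls over the same feed_dict, so the forward pass runs twice per
# batch. A single combined fetch avoids the extra pass -- a sketch, with the
# caveat that the summaries would then reflect the pre-update weights rather
# than the post-update ones:
#
#   [acc_train1, loss_train1, _, out, dsc_train11, sum_train] = sess.run(
#       [accuracy, cost, optimizer, y, f1_measure, summ],
#       feed_dict={...})  # identical feed_dict to the train step above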
process = psutil.Process(os.getpid())\n\n                    print(\n                        'point: %d, elapsed_time: %d, step*self.batch_no: %f, LR: %.15f, acc_train1: %f, loss_train1: %f, memory_percent: %4s' % (\n                            int(point), elapsed,\n                            step * self.batch_no, self.learning_rate, acc_train1, loss_train1,\n                            str(process.memory_percent())))\n\n                    point = int(point)\n                    if point % 100 == 0:\n                        '''saving the model mid-epoch'''\n                        chckpnt_path = os.path.join(self.chckpnt_dir,\n                                                    ('densenet_unet_inter_epoch%d_point%d.ckpt' % (epoch, point)))\n                        saver.save(sess, chckpnt_path, global_step=point)\n                    itr1 = itr1 + 1\n                    point = point + 1\n                endTime = time.time()\n\n                # ==============\n                '''saving the model after each epoch'''\n                chckpnt_path = os.path.join(self.chckpnt_dir, 'densenet_unet.ckpt')\n                saver.save(sess, chckpnt_path, global_step=epoch)\n                print(\"End of epoch----> %d, elapsed time: %d\" % (epoch, endTime - startTime))", "def train_model_with_crloss(model, train_dataloader, test_dataloader, criterion, optimizer, scheduler, num_epochs=25,\n                             inference=False):\n    model = model.float()\n    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n    if torch.cuda.device_count() > 1:\n        print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n        model = nn.DataParallel(model)\n    model = model.to(device)\n\n    if not inference:\n        model.train()\n        print('Start training CRNet...')\n        for epoch in range(num_epochs):\n            scheduler.step()\n\n            running_loss = 0.0\n            for i, data in enumerate(train_dataloader, 0):\n                inputs, scores, classes = data['image'], data['label'], data['class']\n\n                inputs = inputs.to(device)\n                scores = scores.to(device)\n                classes = classes.to(device)\n\n                optimizer.zero_grad()\n\n                inputs = inputs.float()\n\n                reg_out, cls_out = model(inputs)\n                loss = criterion(cls_out, classes, reg_out, scores.float().unsqueeze(-1))\n                loss.backward()\n                optimizer.step()\n\n                # print statistics\n                running_loss += loss.item()\n                if i % 10 == 9:  # print every 10 mini-batches\n                    print('[%d, %5d] loss: %.3f' %\n                          (epoch + 1, i + 1, running_loss / 10))\n                    
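# (running_loss is reset below, so each printed value is the average loss
# over the most recent 10 mini-batches rather than a running total)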
running_loss = 0.0\n\n if epoch % 10 == 9:\n model.eval()\n with torch.no_grad():\n tmp_y_pred = []\n tmp_y_true = []\n tmp_filenames = []\n\n for data in test_dataloader:\n images, labels, classes, filename = data['image'], data['label'], data['class'], data[\n 'filename']\n\n images = images.to(device)\n labels = labels.to(device)\n\n reg_outputs, cls_outputs = model(images)\n # _, predicted = torch.max(outputs.data, 1)\n\n tmp_y_true += labels.to(\"cpu\").detach().numpy().tolist()\n tmp_y_pred += reg_outputs.to(\"cpu\").detach().numpy().tolist()\n tmp_filenames += filename\n\n model_path_dir = './model'\n mkdirs_if_not_exist(model_path_dir)\n if torch.cuda.device_count() > 1:\n torch.save(model.module.state_dict(),\n os.path.join(model_path_dir, model.__class__.__name__ + '-epoch-%d.pth' % (epoch + 1)))\n else:\n torch.save(model.state_dict(),\n os.path.join(model_path_dir, model.__class__.__name__ + '-epoch-%d.pth' % (epoch + 1)))\n\n rmse_lr = round(np.math.sqrt(mean_squared_error(np.array(tmp_y_true), np.array(tmp_y_pred).ravel())), 4)\n mae_lr = round(mean_absolute_error(np.array(tmp_y_true), np.array(tmp_y_pred).ravel()), 4)\n pc = round(np.corrcoef(np.array(tmp_y_true), np.array(tmp_y_pred).ravel())[0, 1], 4)\n print('RMSE of {0} on test set: {1} '.format(model.__class__.__name__, rmse_lr))\n print('MAE of {0} on test set: {1} '.format(model.__class__.__name__, mae_lr))\n print('PC of {0} on test set: {1} '.format(model.__class__.__name__, pc))\n\n model.train()\n\n print('Finished training CRNet...\\n')\n print('Saving trained model...')\n model_path_dir = './model'\n mkdirs_if_not_exist(model_path_dir)\n if torch.cuda.device_count() > 1:\n torch.save(model.module.state_dict(), os.path.join(model_path_dir, '%s.pth' % model.__class__.__name__))\n else:\n torch.save(model.state_dict(), os.path.join(model_path_dir, '%s.pth' % model.__class__.__name__))\n\n print('CRNet has been saved successfully~')\n\n else:\n print('Loading pre-trained model...')\n model.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model.__class__.__name__)))\n\n model.eval()\n\n print('Start testing CRNet...')\n predicted_labels = []\n gt_labels = []\n filenames = []\n for data in test_dataloader:\n images, scores, classes, filename = data['image'], data['label'], data['class'], data['filename']\n images = images.to(device)\n\n reg_out, cls_out = model.forward(images)\n\n # bat_list = []\n # for out in F.softmax(cls_out).to(\"cpu\"):\n # tmp = 0\n # for i in range(0, 3, 1):\n # tmp += out[i] * (i - 1)\n # bat_list.append(float(tmp.detach().numpy()))\n\n # predicted_labels += (0.6 * reg_out.to(\"cpu\").detach().numpy() + 0.4 * np.array(bat_list)).tolist()\n\n predicted_labels += reg_out.to(\"cpu\").detach().numpy().tolist()\n gt_labels += scores.to(\"cpu\").detach().numpy().tolist()\n filenames += filename\n\n mae_lr = round(mean_absolute_error(np.array(gt_labels), np.array(predicted_labels).ravel()), 4)\n rmse_lr = round(np.math.sqrt(mean_squared_error(np.array(gt_labels), np.array(predicted_labels).ravel())), 4)\n pc = round(np.corrcoef(np.array(gt_labels), np.array(predicted_labels).ravel())[0, 1], 4)\n\n print('===============The Mean Absolute Error of CRNet is {0}===================='.format(mae_lr))\n print('===============The Root Mean Square Error of CRNet is {0}===================='.format(rmse_lr))\n print('===============The Pearson Correlation of CRNet is {0}===================='.format(pc))\n\n col = ['filename', 'gt', 'pred']\n df = pd.DataFrame([[filenames[i], gt_labels[i], 
predicted_labels[i][0]] for i in range(len(gt_labels))],\n columns=col)\n df.to_excel(\"./output.xlsx\", sheet_name='Output', index=False)\n print('Output Excel has been generated~')", "def build_resnet(self):\r\n\r\n # INPUTS\r\n inputs_data = Input((self.data_rows, self.data_cols, 1),name='inputs_data')\r\n\r\n\r\n def residual_block(input, output_channels=64, kernel_size=(3, 3), stride=(1, 1)):\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(input)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(x)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Add()([x, input])\r\n\r\n residual_block.counter += 1\r\n return x\r\n\r\n residual_block.counter = 0\r\n\r\n conv1=Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu')(inputs_data)\r\n res_block1=residual_block(conv1,output_channels=64)\r\n res_block2 =residual_block(res_block1, output_channels=64)\r\n res_block3 =residual_block(res_block2, output_channels=64)\r\n conv2=Conv2D(1,(3,3),strides=(1,1),padding='same')(res_block3)\r\n outputs=Add()([conv2,inputs_data])\r\n\r\n\r\n model = Model(inputs=inputs_data, outputs=outputs)\r\n\r\n\r\n return model", "def training_loop(self, train_data, train_lbls, validate_data,\n validate_lbls):\n self.net.compile(\n optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n self.net.fit(\n train_data,\n train_lbls,\n epochs=self.epochs,\n batch_size=self.batch_size)\n train_loss, train_acc = self.net.evaluate(train_data, train_lbls)\n validate_loss, validate_acc = self.net.evaluate(validate_data, validate_lbls)", "def define_model(input_shape=(32,32,3), depth=110, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return 
train_elbo, val_elbo", "def main():\n setup_keras()\n\n args = parse()\n\n train_settings = common.load_settings(args.settings_path, default_conf_name='train.yml')\n train_settings['store'] = args.store\n\n feature_settings = common.load_settings(args.settings_path, default_conf_name='feature.yml')\n model_settings = common.load_settings(args.settings_path, default_conf_name=train_settings['model_conf'])\n\n train_df, val_df = load_training_data(dict(train_settings, **feature_settings))\n assert train_df.shape[0] > val_df.shape[0] * 4.5, f'training data {train_df.shape[0]} should be much larger than validation {val_df.shape[0]}'\n\n sample_featurizer = AudioFeature(feature_settings)\n\n if args.load_name:\n model_name = args.load_name\n print('Loading existing model', model_name)\n m = keras.models.load_model(model_name)\n else:\n t = datetime.datetime.now().strftime('%Y%m%d-%H%M')\n model_name = f\"model-{model_settings['model']}_hop{feature_settings['hop_length']}_{t}\"\n m = models.build(dict(model_settings, **feature_settings))\n m.summary()\n\n output_dir = os.path.join(args.model_store, model_name)\n\n print(f\"Training model: '{model_name}'\", json.dumps(train_settings, indent=1))\n\n combined_settings = dict(train_settings, **model_settings, **feature_settings)\n\n h = train_model(output_dir, train_df, val_df,\n model=m,\n sample_featurizer=sample_featurizer,\n settings=combined_settings)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def main():\n nn = CarsClassifierModel()\n train_x, train_y, test_x, test_y = nn.load_data_preprocess()\n history = nn.run(train_x,train_y)\n nn.evaluate(test_x, test_y)\n nn.save(\"keras_nn_5\")\n #nn.plots(history)\n #print(train_x.shape)\n #plt.imshow(train_x[52])\n #plt.title(\"Car\")\n #plt.show()\n #print(train_y[52])", "def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n 
elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)", "def build_resnet101(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 4):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b3_feats = temp\n \n res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 23):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b22_feats = temp\n\n res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def run_epoch(data, session, model, train_op=None, verbose=False):\n start_time = time.time()\n costs = 0.0\n num_steps = model['num_steps']\n num_batches = (data.shape[1] - 1) // num_steps\n\n # initialize RNN cell states to be all zero\n state = session.run(model['initial_state'])\n\n fetches = {\n \"cost\": model['cost'],\n \"final_state\": model['final_state'],\n }\n\n # train model\n if train_op is not None:\n fetches[\"train_op\"] = train_op\n\n for batch in range(num_batches):\n feed_dict = {\n model['user_inputs']: data[:, batch * num_steps: (batch + 1) * 
num_steps],\n model['targets']: data[:, batch * num_steps + 1: (batch + 1) * num_steps + 1],\n }\n for i, (c, h) in enumerate(model['initial_state']):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n costs += cost\n\n if verbose and batch % (num_batches // 10) == 10:\n iters = num_steps * (batch + 1)\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (batch * 1.0 / num_batches, np.exp(costs / iters),\n iters * data.shape[0] * 1 /\n (time.time() - start_time)))\n\n return np.exp(costs / (data.shape[1] - 1))", "def ResNet20(inputShape):\n inputs = Input(shape=inputShape)\n x = resLayer(inputs) # resLayer1\n\n # resBlocks\n for nStage in range(3):\n for nBlock in range(3):\n x = resBlock(x, nStage, nBlock)\n\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(10, activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Generate model\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def train(self, mode=True):\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()", "def train():\r\n with tf.Graph().as_default():\r\n global_step = tf.train.get_or_create_global_step()\r\n # Get images and labels for CIFAR-10.\r\n # Force input pipeline to CPU:0 to avoid operations sometimes ending up on\r\n # GPU and resulting in a slow down.\r\n with tf.device('/cpu:0'):\r\n signals, labels = cnnHAR.distorted_inputs()\r\n print('<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>')\r\n \r\n # Build a Graph that computes the logits predictions from the\r\n # inference model.\r\n #training = tf.placeholder(tf.bool)\r\n \r\n pool11=cnnHAR.inference_cov11(signals,'_01')\r\n reshape1=cnnHAR.inference_cov21(pool11,'_01')\r\n local21=cnnHAR.inference_local21(reshape1,'_01')\r\n local31=cnnHAR.inference_local31(local21,'_01')\r\n local41=cnnHAR.inference_local41(local31,'_01')\r\n logits1=cnnHAR.inference_output1(local41,'_01')\r\n \r\n \r\n pool12=cnnHAR.inference_cov11(signals,'_02')\r\n reshape2=cnnHAR.inference_cov21(pool12,'_02')\r\n local22=cnnHAR.inference_local21(reshape2,'_02')\r\n local32=cnnHAR.inference_local31(local22,'_02')\r\n local42=cnnHAR.inference_local41(local32,'_02')\r\n logits2=cnnHAR.inference_output1(local42,'_02')\r\n \r\n \r\n pool13=cnnHAR.inference_cov11(signals,'_03')\r\n reshape3=cnnHAR.inference_cov21(pool13,'_03')\r\n local23=cnnHAR.inference_local21(reshape3,'_03')\r\n local33=cnnHAR.inference_local31(local23,'_03')\r\n local43=cnnHAR.inference_local41(local33,'_03')\r\n logits3=cnnHAR.inference_output1(local43,'_03')\r\n \r\n \r\n pool14=cnnHAR.inference_cov11(signals,'_04')\r\n reshape4=cnnHAR.inference_cov21(pool14,'_04')\r\n local24=cnnHAR.inference_local21(reshape4,'_04')\r\n local34=cnnHAR.inference_local31(local24,'_04')\r\n local44=cnnHAR.inference_local41(local34,'_04')\r\n logits4=cnnHAR.inference_output1(local44,'_04')\r\n\r\n \r\n pool15=cnnHAR.inference_cov11(signals,'_05')\r\n reshape5=cnnHAR.inference_cov21(pool15,'_05')\r\n local25=cnnHAR.inference_local21(reshape5,'_05')\r\n 
local35=cnnHAR.inference_local31(local25,'_05')\r\n local45=cnnHAR.inference_local41(local35,'_05')\r\n logits5=cnnHAR.inference_output1(local45,'_05')\r\n\r\n pool16=cnnHAR.inference_cov11(signals,'_06')\r\n reshape6=cnnHAR.inference_cov21(pool16,'_06')\r\n local26=cnnHAR.inference_local21(reshape6,'_06')\r\n local36=cnnHAR.inference_local31(local26,'_06')\r\n local46=cnnHAR.inference_local41(local36,'_06')\r\n logits6=cnnHAR.inference_output1(local46,'_06')\r\n \r\n\r\n loss1=cnnHAR.loss(logits1, labels,'_01')\r\n loss2=cnnHAR.loss(logits2, labels,'_02')\r\n loss3=cnnHAR.loss(logits3, labels,'_03')\r\n loss4=cnnHAR.loss(logits4, labels,'_04')\r\n loss5=cnnHAR.loss(logits5, labels,'_05')\r\n loss6=cnnHAR.loss(logits6, labels,'_06')\r\n \r\n train_op1 = cnnHAR.train(loss1, global_step,'_01')\r\n train_op2 = cnnHAR.train(loss2, global_step,'_02')\r\n train_op3 = cnnHAR.train(loss3, global_step,'_03')\r\n train_op4 = cnnHAR.train(loss4, global_step,'_04')\r\n train_op5 = cnnHAR.train(loss5, global_step,'_05')\r\n train_op6 = cnnHAR.train(loss6, global_step,'_06')\r\n \r\n \r\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n class _LoggerHook(tf.train.SessionRunHook):\r\n \"\"\"Logs loss and runtime.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n self._start_time = time.time()\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n #print('~~~~~~~~~~~~~~~~before run1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n #tmp = tf.concat([labels,signals],1)\r\n \r\n index=int(self._step%(num*7)/7)\r\n if index==0:\r\n return tf.train.SessionRunArgs(loss1)\r\n elif index==1:\r\n return tf.train.SessionRunArgs(loss2)\r\n elif index==2:\r\n return tf.train.SessionRunArgs(loss3)\r\n elif index==3:\r\n return tf.train.SessionRunArgs(loss4)\r\n elif index==4:\r\n return tf.train.SessionRunArgs(loss5)\r\n elif index==5:\r\n return tf.train.SessionRunArgs(loss6)\r\n \r\n # Asks for loss value.\r\n\r\n def after_run(self, run_context, run_values):\r\n# if self._step == 1000:\r\n# #tf.Session().run(tf.global_variables_initializer())\r\n# ndar = np.array(run_values.results)\r\n# np.savetxt(\"logits.csv\", ndar.reshape(128,256), delimiter=\",\")\r\n \r\n if int((self._step-1) / log_frequency)%10==0 and (self._step%(num*7)+1)%7==0 and int(self._step%(num*7)/7)==0:#(self._step-1) % (log_frequency)== 0:\r\n #print('~~~~~~~~~~~~~~~~after run1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n '''\r\n current_time = time.time()\r\n duration = current_time - self._start_time\r\n self._start_time = current_time\r\n \r\n loss_value = run_values.results\r\n examples_per_sec = log_frequency * batch_size / duration\r\n sec_per_batch = float(duration / log_frequency)\r\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; %.3f '\r\n 'sec/batch)')\r\n '''\r\n format_str = ('%s: step %d loss%d=%0.8f')\r\n print(format_str % (datetime.now(), self._step+1, int(self._step%(num*7)/7)+1,run_values.results))\r\n \r\n class _LoggerHook2(tf.train.SessionRunHook):\r\n \"\"\"Logs signals.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n #print('~~~~~~~~~~~~~~~~before run2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n return tf.train.SessionRunArgs(logits) # Asks for logits.\r\n\r\n def after_run(self, run_context, run_values):\r\n if self._step == max_steps-1:#:\r\n print('~~~~~~~~~~~~~~~~after run2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n ndar = np.array(run_values.results)\r\n np.savetxt(\"logits\"+str(self._step)+\".csv\", ndar.reshape(batch_size,NUM_CLASSES), 
delimiter=\",\")\r\n\r\n class _LoggerHook3(tf.train.SessionRunHook):\r\n \"\"\"Logs labels.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n return tf.train.SessionRunArgs(labels) # Asks for labels.\r\n\r\n def after_run(self, run_context, run_values):\r\n if self._step == max_steps-1:\r\n ndar = np.array(run_values.results)\r\n np.savetxt(\"labels\"+str(self._step)+\".csv\", ndar.reshape(batch_size,NUM_CLASSES), delimiter=\",\")\r\n\r\n class _LoggerHook4(tf.train.SessionRunHook):\r\n \"\"\"Logs signals.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n #print('~~~~~~~~~~~~~~~~before run4~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n #return tf.train.SessionRunArgs(signals) # Asks for signals.\r\n\r\n def after_run(self, run_context, run_values):\r\n if (self._step+1)% (50*log_frequency) == 0:\r\n #if self._step == max_steps-1:#:\r\n #print('~~~~~~~~~~~~~~~~after run4~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n cnnHAR_eval.main()\r\n\r\n with tf.train.MonitoredTrainingSession(\r\n checkpoint_dir=train_dir,\r\n hooks=[tf.train.StopAtStepHook(last_step=max_steps),\r\n #tf.train.NanTensorHook(loss),\r\n _LoggerHook(),\r\n #_LoggerHook2(),\r\n _LoggerHook4()],#,save_checkpoint_steps=5000\r\n config=tf.ConfigProto(\r\n log_device_placement=log_device_placement),save_checkpoint_steps=50*log_frequency) as mon_sess:\r\n ''',save_checkpoint_steps=10*log_frequency'''\r\n i=0\r\n while not mon_sess.should_stop():\r\n# mon_sess = tfdbg.LocalCLIDebugWrapperSession(mon_sess)\r\n #mon_sess.run([train_op1,extra_update_ops])\r\n #print('~~~~~~~~~~~~~~~~%d step:'%i)\r\n \r\n index=int(i%(num*7)/7)\r\n if index==0:\r\n #print('~~~~~~~~~~~~~~~~train_op1')\r\n mon_sess.run([train_op1,extra_update_ops])\r\n elif index==1:\r\n #print('~~~~~~~~~~~~~~~~train_op2')\r\n mon_sess.run([train_op2,extra_update_ops])\r\n elif index==2:\r\n #print('~~~~~~~~~~~~~~~~train_op3')\r\n mon_sess.run([train_op3,extra_update_ops])\r\n elif index==3:\r\n #print('~~~~~~~~~~~~~~~~train_op4')\r\n mon_sess.run([train_op4,extra_update_ops])\r\n elif index==4:\r\n #print('~~~~~~~~~~~~~~~~train_op5')\r\n mon_sess.run([train_op5,extra_update_ops])\r\n elif index==5:\r\n #print('~~~~~~~~~~~~~~~~train_op6')\r\n mon_sess.run([train_op6,extra_update_ops])\r\n '''\r\n elif index==6:\r\n #print('~~~~~~~~~~~~~~~~train_op1')\r\n mon_sess.run([train_op7,extra_update_ops])\r\n elif index==7:\r\n #print('~~~~~~~~~~~~~~~~train_op2')\r\n mon_sess.run([train_op8,extra_update_ops])\r\n elif index==8:\r\n #print('~~~~~~~~~~~~~~~~train_op3')\r\n mon_sess.run([train_op9,extra_update_ops])\r\n elif index==9:\r\n #print('~~~~~~~~~~~~~~~~train_op4')\r\n mon_sess.run([train_op10,extra_update_ops])\r\n elif index==10:\r\n #print('~~~~~~~~~~~~~~~~train_op5')\r\n mon_sess.run([train_op11,extra_update_ops])\r\n elif index==11:\r\n #print('~~~~~~~~~~~~~~~~train_op6')\r\n mon_sess.run([train_op12,extra_update_ops])\r\n '''\r\n i=i+1\r\n \r\n #print('~~~~~~~~~~~~~~~~one session ends~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def train(dataset_path: str) -> NoReturn:\n model_output_path = os.path.join(\n \"outputs\", \"models\", os.path.basename(dataset_path)\n )\n os.makedirs(model_output_path, 
exist_ok=True)\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n print(\"Running training on device {}\".format(device))\n train_dataset = get_coco_dataset(dataset_path, train=True)\n validation_dataset = get_coco_dataset(dataset_path, train=False)\n indices = torch.randperm(len(train_dataset)).tolist()\n train_samples = math.ceil(RATIO_TRAINING_SPLIT * len(train_dataset))\n train_dataset = Subset(train_dataset, indices[:train_samples])\n validation_dataset = Subset(validation_dataset, indices[train_samples:])\n training_dataloader = DataLoader(\n train_dataset,\n batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS,\n shuffle=True,\n collate_fn=collate_fn,\n )\n validation_dataloader = DataLoader(\n validation_dataset,\n batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS,\n shuffle=False,\n collate_fn=collate_fn,\n )\n model = get_fasterrcnn_resnet50_fpn(\n trainable_backbone_layers=TRAINABLE_BACKBONE_LAYERS,\n number_classes=get_number_of_classes(train_dataset.dataset),\n )\n model.to(device)\n # construct an optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = SGD(\n params, lr=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY\n )\n # and a learning rate scheduler\n lr_scheduler_training = lr_scheduler.StepLR(\n optimizer, step_size=STEP_SIZE, gamma=GAMMA\n )\n\n for epoch in range(NUM_EPOCHS):\n train_one_epoch(\n model,\n optimizer,\n training_dataloader,\n device,\n epoch,\n print_freq=PRINT_FREQUENCY,\n )\n torch.save(\n {\n \"state_dict\": model.state_dict(),\n \"categories\": get_model_categories_metadata(train_dataset.dataset),\n },\n os.path.join(model_output_path, f\"epoch_{epoch}.pth\"),\n )\n lr_scheduler_training.step()\n # evaluate on the test dataset\n evaluate(model, validation_dataloader, device=device)", "def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + 
CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, 
is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def catlin_run(workdir, cycles, cyclesize, gpuid = 0, max_testimages = 100000000000):\n\n # load val-data for evaluation\n pyparams = pload(osp.join(workdir, 'valpyparams.pkl'))\n imlist = [line.rstrip() for line in open(osp.join(workdir, 'vallist.txt'))][:max_testimages]\n with open(osp.join(workdir, 'valdict.json')) as f:\n imdict = json.load(f)\n\n # run\n for i in range(cycles):\n bct.run(workdir, gpuid = gpuid, nbr_iters = cyclesize)\n _ = bct.classify_from_patchlist_wrapper(imlist, imdict, pyparams, workdir, gpuid = gpuid, save = True, net_prototxt = 'valnet.prototxt')\n\n # find and load optimal model\n bestiter, _ = find_best_iter(workdir)\n caffemodel = 'snapshot_iter_{}.caffemodel'.format(bestiter)\n net = bct.load_model(workdir, caffemodel, gpuid = gpuid, net_prototxt = 'testnet.prototxt')\n\n # load test-data\n pyparams = pload(osp.join(workdir, 'testpyparams.pkl'))\n imlist = [line.rstrip() for line in open(osp.join(workdir, 'testlist.txt'))][:max_testimages]\n with open(osp.join(workdir, 'testdict.json')) as f:\n imdict = json.load(f)\n\n # run on test-data\n (gtlist, estlist, scorelist) = bct.classify_from_patchlist(imlist, imdict, pyparams, net)\n psave((gtlist, estlist, scorelist), osp.join(workdir, 'predictions_on_test.p'))", "def load_cifar10_data(img_rows, img_cols):\n\n # Load cifar10 training and validation sets\n (X_train, Y_train), (X_valid, Y_valid) = cifar10.load_data()\n\n # Resize training images\n X_train = np.array([cv2.resize(img, (img_rows, \\\n img_cols)) for img in X_train[:, :, :, :]])\n\n X_valid = np.array([cv2.resize(img, (img_rows, \\\n img_cols)) for img in X_valid[:, :, :, :]])\n\n # Check the data type of X_train or X_valid\n for each in X_train:\n print(type(each))\n\n # Transform targets to keras compatible format\n Y_train = np_utils.to_categorical(Y_train, num_classes)\n Y_valid = np_utils.to_categorical(Y_valid, num_classes)\n\n X_train = X_train.astype('float32')\n X_valid = X_valid.astype('float32')\n\n # Data normalization\n X_train = X_train / 255.0\n X_valid = X_valid / 255.0\n\n return X_train, Y_train, X_valid, Y_valid", "def main():\n\n # Load the data and scale\n x_train = 
np.load(\"../data/audio/ESC-10/esc10_raw_train_audio.npy\")[:,:,0]\n y_train = np.load(\"../data/audio/ESC-10/esc10_raw_train_labels.npy\")\n x_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_audio.npy\")[:,:,0]\n y_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_labels.npy\")\n\n x_train = (x_train.astype('float32') + 32768) / 65536\n x_test = (x_test.astype('float32') + 32768) / 65536\n\n # Train and test the models\n train(x_train, y_train, x_test, y_test)", "def train_and_eval(config, babas_data):\n\n if config.resume_from_checkpoint is not None:\n try:\n if config.augment_background == 'background':\n bg = config.augment_background\n else:\n bg = None\n rfc = config.resume_from_checkpoint\n ic = config.include_validation\n print 'Loading saved config: %s' % config.saved_config\n config = np.load(config.saved_config).item()\n config.resume_from_checkpoint = rfc\n config.include_validation = ic\n if not hasattr(config, 'augment_background'):\n config.augment_background = 'constant'\n if not hasattr(config, 'background_folder'):\n config.background_folder = 'backgrounds'\n if bg is not None:\n print 'Overriding saved config to add kinect backgrounds to training.'\n config.augment_background = bg\n results_dir = rfc\n except:\n print 'Relying on default config file.'\n\n if babas_data: # Shitty naive training method\n config.tfrecord_dir = '/media/data_cifs/monkey_tracking/data_for_babas/tfrecords_from_babas'\n config.babas_tfrecord_dir = config.tfrecord_dir\n config.steps_before_validation = 20\n config.epochs = 2000\n config.convert_labels_to_pixel_space = False\n config.augment_background = 'constant'\n\n # Import your model\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n model_file = import_cnn(config.model_type)\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = '%s_%s' % (config.model_type, dt_stamp)\n if config.selected_joints is not None:\n dt_dataset = '_%s' % (config.selected_joints) + dt_dataset\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, dt_dataset)\n results_dir = os.path.join(config.npy_dir, dt_dataset)\n print 'Saving Dmurphy\\'s online updates to: %s' % results_dir\n dir_list = [config.train_checkpoint, config.summary_dir, results_dir]\n [tf_fun.make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, config.train_tfrecords)\n if config.babas_tfrecord_dir is not None:\n train_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.train_tfrecords)\n if config.include_validation or config.include_validation is None:\n val_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.val_tfrecords)\n else:\n train_babas_tfrecord_dir = None\n val_babas_tfrecord_dir = None\n\n if isinstance(config.include_validation, basestring):\n validation_data = config.include_validation\n elif config.include_validation == True:\n validation_data = os.path.join(\n config.tfrecord_dir,\n config.val_tfrecords)\n else:\n validation_data = None\n\n print 'Using training set: %s' % train_data\n print 'Using validation set: %s' % validation_data\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_data_dict = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n im_size=config.resize,\n 
target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background=config.augment_background,\n background_folder=config.background_folder,\n randomize_background=config.randomize_background,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=train_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n train_data_dict['deconv_label_size'] = len(config.labels)\n\n val_data_dict = inputs(\n tfrecord_file=validation_data,\n batch_size=config.validation_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background='none',\n background_folder=config.background_folder,\n randomize_background=None,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=val_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n val_data_dict['deconv_label_size'] = len(config.labels)\n\n # Check output_shape\n if config.selected_joints is not None:\n print 'Targeting joint: %s' % config.selected_joints\n joint_shape = len(config.selected_joints) * config.keep_dims\n if (config.num_classes // config.keep_dims) > (joint_shape):\n print 'New target size: %s' % joint_shape\n config.num_classes = joint_shape\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n print 'Creating training graph:'\n model = model_file.model_struct(\n weight_npy_path=config.weight_npy_path)\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n rgb=train_data_dict['image'],\n target_variables=train_data_dict,\n train_mode=train_mode,\n batchnorm=config.batch_norm)\n train_mu, train_var = tf.nn.moments(train_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"train image mean\", train_mu)\n tf.summary.histogram(\"train image std\", tf.sqrt(train_var))\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv train', model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(model.deconv, axis=3), tf.float32), 3))\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n print 'Creating validation graph:'\n val_model = 
model_file.model_struct()\n val_model.build(\n rgb=val_data_dict['image'],\n target_variables=val_data_dict)\n\n # Calculate validation accuracy\n val_mu, val_var = tf.nn.moments(val_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"validation image mean\", val_mu)\n tf.summary.histogram(\"validation image std\", tf.sqrt(val_var))\n if 'label' in val_data_dict.keys():\n # val_score = tf.reduce_mean(\n # tf_fun.l2_loss(\n # val_model.output, val_data_dict['label']))\n if config.keep_dims == 3:\n z_mask = tf.expand_dims(tf.tile([1, 1, 0], [int(val_data_dict['label'].get_shape()[-1]) // 3]), axis=0)\n z_mask = tf.cast(z_mask, tf.float32)\n val_model.output = val_model.output * z_mask\n val_data_dict['label'] = val_data_dict['label'] * z_mask \n val_score = tf.reduce_mean(tf.nn.l2_loss(val_model.output - val_data_dict['label']))\n tf.summary.scalar(\"validation mse\", val_score)\n if 'fc' in config.aux_losses:\n tf.summary.image('FC val activations', val_model.final_fc)\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv val', val_model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(val_model.deconv, axis=3),\n tf.float32), 3))\n tf.summary.image(\n 'validation images',\n tf.cast(val_data_dict['image'], tf.float32))\n\n # Prepare the loss functions:::\n loss_list, loss_label = [], []\n if 'label' in train_data_dict.keys():\n # 1. Joint localization loss\n if config.calculate_per_joint_loss == 'thomas':\n label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton and joint':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n loss_label += ['skeleton loss']\n delta = model['output'] - train_data_dict['label']\n proc_weights = np.asarray(\n config.dim_weight)[None,:].repeat(\n len(config.joint_names), axis=0).reshape(1, -1)\n delta *= proc_weights\n # label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n # model=model,\n # train_data_dict=train_data_dict,\n # config=config,\n # y_key='label',\n # yhat_key='output')\n # loss_list += [label_loss]\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n else:\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n loss_label += ['combined head']\n for al in loss_helper.potential_aux_losses():\n loss_list, loss_label = loss_helper.get_aux_losses(\n loss_list=loss_list,\n loss_label=loss_label,\n train_data_dict=train_data_dict,\n model=model,\n aux_loss_dict=al,\n domain_adaptation=train_babas_tfrecord_dir)\n loss = tf.add_n(loss_list)\n\n # Add wd if necessary\n if config.wd_penalty is not None:\n _, l2_wd_layers = tf_fun.fine_tune_prepare_layers(\n tf.trainable_variables(), config.wd_layers)\n l2_wd_layers = [\n x for x in l2_wd_layers if 'biases' not in x.name]\n if config.wd_type == 'l1':\n loss += (config.wd_penalty * tf.add_n(\n [tf.reduce_sum(tf.abs(x)) for x in l2_wd_layers]))\n elif config.wd_type == 'l2':\n loss += (config.wd_penalty * 
tf.add_n(\n [tf.nn.l2_loss(x) for x in l2_wd_layers]))\n\n optimizer = loss_helper.return_optimizer(config.optimizer)\n optimizer = optimizer(config.lr)\n\n if hasattr(config, 'fine_tune_layers') and config.fine_tune_layers is not None:\n print 'Finetuning learning for: %s' % config.fine_tune_layers\n train_op, grads = tf_fun.finetune_learning(\n loss,\n trainables=tf.trainable_variables(),\n fine_tune_layers=config.fine_tune_layers,\n config=config\n )\n else:\n # Op to calculate every variable gradient\n grads = optimizer.compute_gradients(\n loss, tf.trainable_variables())\n # Op to update all variables according to their gradient\n train_op = optimizer.apply_gradients(\n grads_and_vars=grads)\n\n # Summarize all gradients and weights\n [tf.summary.histogram(\n var.name + '/gradient', grad)\n for grad, var in grads if grad is not None]\n # train_op = optimizer.minimize(loss)\n\n # Summarize losses\n [tf.summary.scalar(lab, il) for lab, il in zip(\n loss_label, loss_list)]\n\n # Summarize images and l1 weights\n tf.summary.image(\n 'train images',\n tf.cast(train_data_dict['image'], tf.float32))\n tf_fun.add_filter_summary(\n trainables=tf.trainable_variables(),\n target_layer='conv1_1_filters')\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n tf.add_to_collection('output', model.output)\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Create list of variables to run through training model\n train_session_vars = {\n 'train_op': train_op,\n 'loss_value': loss,\n 'im': train_data_dict['image'],\n 'yhat': model.output,\n 'ytrue': train_data_dict['label']\n }\n if hasattr(model, 'deconv'):\n train_session_vars['deconv'] = model.deconv\n if hasattr(model, 'final_fc'):\n train_session_vars['fc'] = model.final_fc\n\n # Create list of variables to run through validation model\n val_session_vars = {\n 'val_acc': val_score,\n 'val_pred': val_model.output,\n 'val_ims': val_data_dict['image'],\n 'val_true': val_data_dict['label'],\n }\n\n # Create list of variables to save to numpys\n save_training_vars = [\n 'im',\n 'yhat',\n 'ytrue'\n ]\n\n for al in loss_helper.potential_aux_losses():\n if al.keys()[0] in train_data_dict.keys():\n y_key = '%s' % al.keys()[0]\n train_session_vars[y_key] = train_data_dict[al.values()[0]['y_name']]\n save_training_vars += [y_key]\n\n yhat_key = '%s_hat' % al.keys()[0]\n train_session_vars[yhat_key] = model[al.values()[0]['model_name']]\n save_training_vars += [yhat_key]\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, losses = 0, []\n num_joints = int(\n train_data_dict['label'].get_shape()[-1]) // config.keep_dims\n normalize_vec = tf_fun.get_normalization_vec(config, num_joints)\n if config.resume_from_checkpoint is not None:\n if '.ckpt' in config.resume_from_checkpoint:\n ckpt = config.resume_from_checkpoint\n print 'Restoring specified checkpoint: %s' % config.resume_from_checkpoint\n else:\n ckpt = tf.train.latest_checkpoint(config.resume_from_checkpoint)\n print 'Evaluating checkpoint: %s' % ckpt\n 
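 # Load the checkpointed weights into the live session so the training loop below resumes from the saved step rather than starting fresh.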
saver.restore(sess, ckpt)\n try:\n while not coord.should_stop():\n start_time = time.time()\n train_out_dict = sess.run(train_session_vars.values())\n train_out_dict = {k: v for k, v in zip(\n train_session_vars.keys(), train_out_dict)}\n losses.append(train_out_dict['loss_value'])\n duration = time.time() - start_time\n assert not np.isnan(\n train_out_dict['loss_value']), 'Model diverged with loss = NaN'\n if step % config.steps_before_validation == 0:\n if validation_data is not False:\n val_out_dict = sess.run(\n val_session_vars.values())\n val_out_dict = {k: v for k, v in zip(\n val_session_vars.keys(), val_out_dict)}\n # if config.normalize_labels:\n # val_out_dict['val_pred'] *= normalize_vec\n # val_out_dict['val_true'] *= normalize_vec\n np.savez(\n os.path.join(\n results_dir, '%s_val_coors' % step),\n val_pred=val_out_dict['val_pred'],\n val_ims=val_out_dict['val_ims'],\n val_true=val_out_dict['val_true'],\n normalize_vec=normalize_vec)\n with open(\n os.path.join(\n results_dir, '%s_config.p' % step), 'wb') as fp:\n pickle.dump(config, fp)\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy attach 9177\n format_str = (\n '%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch) | '\n 'Validation l2 loss = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, train_out_dict['loss_value'],\n config.train_batch / duration, float(duration),\n val_out_dict['val_acc'],\n config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if config.normalize_labels:\n train_out_dict['yhat'] *= normalize_vec\n train_out_dict['ytrue'] *= normalize_vec\n [save_training_data(\n output_dir=results_dir,\n data=train_out_dict[k],\n name='%s_%s' % (k, step)) for k in save_training_vars]\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch)')\n print (format_str % (\n datetime.now(),\n step,\n train_out_dict['loss_value'],\n config.train_batch / duration,\n float(duration)))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (config.epochs, step))\n finally:\n coord.request_stop()\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%s_training_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def train(epoch, net, device, train_data, optimizer, batches_per_epoch, vis=False):\n results = {\n 'loss': 0,\n 'losses': {\n }\n }\n\n net.train()\n\n batch_idx = 0\n # Use batches per epoch to make training on different sized datasets (cornell/jacquard) more equivalent.\n while batch_idx < batches_per_epoch:\n for x, y, _, _, _ in train_data:\n batch_idx += 1\n if batch_idx >= batches_per_epoch:\n break\n\n xc = x.to(device)\n yc = [yy.to(device) for yy in y]\n lossd = net.compute_loss(xc, yc)\n\n loss = lossd['loss']\n\n if batch_idx % 100 == 0:\n logout('Epoch: {}, Batch: {}, Loss: {:0.4f}'.format(epoch, batch_idx, loss.item()))\n\n results['loss'] += loss.item()\n for ln, l in lossd['losses'].items():\n if ln not in results['losses']:\n results['losses'][ln] = 0\n results['losses'][ln] += l.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Display the images\n if vis:\n imgs = []\n n_img = min(4, x.shape[0])\n for idx in range(n_img):\n imgs.extend([x[idx,].numpy().squeeze()] + [yi[idx,].numpy().squeeze() for yi in y] + [\n x[idx,].numpy().squeeze()] + [pc[idx,].detach().cpu().numpy().squeeze() for pc in lossd['pred'].values()])\n gridshow('Display', imgs,\n [(xc.min().item(), xc.max().item()), (0.0, 1.0), (0.0, 1.0), (-1.0, 1.0), (0.0, 1.0)] * 2 * n_img,\n [cv2.COLORMAP_BONE] * 10 * n_img, 10)\n cv2.waitKey(2)\n\n results['loss'] /= batch_idx\n for l in results['losses']:\n results['losses'][l] /= batch_idx\n\n return results", "def resnet_v1(input_shape, depth, num_classes=100):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(optimizer=SGD(lr=0.1), loss='categorical_crossentropy', metrics = ['accuracy'])\n return model", "def resnet_v1(input_shape, depth, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n inputs = Input(shape=input_shape)\n num_filters = 16\n num_sub_blocks = int((depth - 2) / 6)\n\n x = resnet_block(inputs=inputs)\n # Instantiate convolutional base (stack of blocks).\n for i in 
range(3):\n for j in range(num_sub_blocks):\n strides = 1\n is_first_layer_but_not_first_block = j == 0 and i > 0\n if is_first_layer_but_not_first_block:\n strides = 2\n y = resnet_block(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_block(inputs=y,\n num_filters=num_filters,\n activation=None)\n if is_first_layer_but_not_first_block:\n x = resnet_block(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters = 2 * num_filters\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_path',\n type=str,\n default='../Datasets2/cifar100Dataset.npy',\n help='location of the dataset in numpy format', )\n parser.add_argument('--train_steps',\n type=int,\n default=150000,\n help='training steps', )\n parser.add_argument('--measuring_step_size',\n type=float,\n default=0.1,\n help='step size to where a second loss is determined to approximate the loss function '\n 'in the direction of the gradient by a parabola', )\n parser.add_argument('--momentum',\n type=float,\n default=0.4,\n help='momentum term', )\n parser.add_argument('--batch_size',\n type=int,\n default=128,\n help='batch_size', )\n parser.add_argument('--experiment_name',\n type=str,\n default=\"testmodel\",\n help='the name of the experiment', )\n parser.add_argument('--loose_approximation_factor',\n type=float,\n default=1.0,\n help='intentionally approximate the function with less or more curvature. 
= 1/ step size adaptation '\n 'less curvature <1 more curvature >1', )\n parser.add_argument('--train_data_size',\n type=int,\n default=45000,\n help='train data size,remaining elements define the evaluation_Res_Net set', )\n parser.add_argument('--random_seed',\n type=int,\n default=1,\n help='random number seed for numpy and tensorflow to get same results for multiple runs', )\n parser.add_argument('--max_stepsize',\n type=float,\n default=3.6,\n help='max stepsize in direction of the gradient', )\n parser.add_argument('--decay',\n type=float,\n default=1,\n help='max stepsize and measurment stepsize decay rate', )\n parser.add_argument('--additional',\n type=float,\n default=100,\n help='additional parameter', )\n parser.add_argument('--num_gpus',\n type=int,\n default=1,\n help='num gpus to train on', )\n parser.add_argument('--optimizer',\n type=str,\n default=\"SLS\",\n help='the optimizer to use', )\n\n FLAGS, unparsed = parser.parse_known_args()\n for k, v in vars(FLAGS).items():\n k, v = str(k), str(v)\n print('%s: %s' % (k, v))\n FLAGS.dataset_path = os.path.expanduser(FLAGS.dataset_path)\n print(\"DatasetPath: \" + str(FLAGS.dataset_path))\n\n workpath = os.path.dirname(os.path.dirname(sys.argv[0])) + '/' # double dir name to get parent\n\n print(\"workpath: \" + workpath)\n\n # check gpus\n local_device_protos = device_lib.list_local_devices()\n num_available_gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])\n assert num_available_gpus >= FLAGS.num_gpus\n print(\"GPUs available: {1:d} \\t GPUs used: {1:d}\".format(num_available_gpus, FLAGS.num_gpus))\n\n learning_rate_pf = lambda global_step, learning_rate: tf.train.piecewise_constant(global_step,\n [75000.0, 112500.0],\n [float(learning_rate),\n float(learning_rate / 10),\n float(learning_rate / 100),\n ])\n\n if FLAGS.optimizer == \"PAL\":\n optimizer = PAL(None, FLAGS.measuring_step_size, FLAGS.momentum, FLAGS.loose_approximation_factor,\n FLAGS.max_stepsize, False)\n elif FLAGS.optimizer == \"SLS\":\n optimizer = SLS(n_batches_per_epoch=FLAGS.train_data_size // FLAGS.batch_size,\n init_step_size=FLAGS.measuring_step_size, c=FLAGS.momentum,\n beta_b=FLAGS.loose_approximation_factor, gamma=FLAGS.max_stepsize)\n elif FLAGS.optimizer == \"OL\":\n #optimizer = OptimalLineSearch(initial_search_step=FLAGS.measuring_step_size,\n # max_num_of_steps=FLAGS.max_stepsize, momentum=FLAGS.momentum)\n optimizer = OptimalLineSearch(initial_search_step=1.0,\n max_num_of_steps=20.0, momentum=0.0)\n\n elif FLAGS.optimizer == \"RMSP\":\n optimizer = TfOptimizer(tf.train.RMSPropOptimizer, learning_rate_pf,\n {\"learning_rate\": FLAGS.measuring_step_size, \"decay\": FLAGS.momentum,\n \"epsilon\": FLAGS.loose_approximation_factor})\n elif FLAGS.optimizer == \"ADAM\":\n optimizer = TfOptimizer(tf.train.AdamOptimizer, learning_rate_pf,\n {\"learning_rate\": FLAGS.measuring_step_size, \"beta1\": FLAGS.momentum,\n \"beta2\": FLAGS.loose_approximation_factor, \"epsilon\": FLAGS.max_stepsize})\n elif FLAGS.optimizer == \"SGD\":\n optimizer = TfOptimizer(tf.train.MomentumOptimizer, learning_rate_pf,\n {\"learning_rate\": FLAGS.measuring_step_size, \"momentum\": FLAGS.momentum,\n \"use_nesterov\": True})\n elif FLAGS.optimizer == \"SGDHD\":\n optimizer = TfOptimizer(SGDHD, None,\n {\"learning_rate\": FLAGS.measuring_step_size, \"hyper_gradient_learning_rate\": FLAGS.momentum})\n elif FLAGS.optimizer == \"ALIG\":\n optimizer = TfOptimizer(AliGwithMomentum, None,\n {\"max_lr\": FLAGS.measuring_step_size, \"momentum\": 
FLAGS.momentum})\n elif FLAGS.optimizer == \"COCOB\":\n optimizer = TfOptimizer(COCOB, None,\n {\"alpha\": FLAGS.measuring_step_size})\n\n else:\n raise ValueError(\"unknown optimizer flag:\" + FLAGS.optimizer)\n\n # Uncomment the network and dataset to use!\n\n # net_type= tolstoi_rnn.TolstoiRNN\n\n # net_type= simple_mnist_net.SimpleMnistNet\n\n # net_type=efficient_net_cifar10.EfficientNet\n # net_type=mobile_net_v2_cifar10.MobileNetV2\n net_type = resnet_32_cifar10.ResNet\n # net_type=dense_net_cifar10.DenseNet\n # net_type=resnet_34_IN_style_cifar.ResNet\n\n # net_type=efficient_net_cifar100.EfficientNet\n # net_type=mobile_net_v2_cifar100.MobileNetV2\n # net_type=resnet_32_cifar100.ResNet\n # net_type=dense_net_cifar100.DenseNet\n\n # net_type=efficient_net_IM.EfficientNet\n # net_type=mobile_net_v2_IM.MobileNetV2\n # net_type=resnet_101_IM.ResNet\n # net_type=resnet_50_IM.ResNet\n # net_type=dense_net_IM.DenseNet\n\n # data_set_loader = ImageNetLoader\n #data_set_loader = Cifar10Loader # also uncomment is_augment\n #data_set_loader.is_augment = True\n data_set_loader = Cifar100Loader\n # data_set_loader = TolstoiLoader\n # data_set_loader= MNISTLoader\n\n sys.stdout.flush()\n\n if FLAGS.optimizer == \"OL\":\n net = net_frame_ol.NetFrame(net_type, data_set_loader, optimizer, FLAGS.num_gpus, FLAGS.random_seed,\n FLAGS.train_data_size,\n FLAGS.batch_size, FLAGS.dataset_path, workpath, FLAGS.experiment_name,\n is_calc_angle=False)\n else:\n net = net_frame.NetFrame(net_type, data_set_loader, optimizer, FLAGS.num_gpus, FLAGS.random_seed,\n FLAGS.train_data_size,\n FLAGS.batch_size, FLAGS.dataset_path, workpath, FLAGS.experiment_name,\n is_calc_angle=False) # 100. 0.001 # problem 1,1 or 20,1 -> very steep descent!\n\n is_failed = False\n try:\n mean_train_losses_per_interval, evaluation_accuracies, train_losses_for_each_step, step_sizes_for_each_step, \\\n angles_for_each_step, grad_norms_for_each_step, train_time_for_each_step, tran_acc_per_interval, \\\n eval_losses, avg_test_acc, avg_test_loss, all_first_derivatives, all_second_derivatives \\\n = net.train(FLAGS.train_steps)\n except Exception as e:\n print(e.__doc__)\n is_failed = True\n print(\"FAILED\")\n\n if is_failed:\n eval_data_wrapper = fu.EvalDataWrapper(FLAGS.experiment_name, FLAGS.random_seed, FLAGS.optimizer,\n FLAGS.train_data_size,\n FLAGS.train_steps, FLAGS.batch_size, FLAGS.measuring_step_size,\n FLAGS.momentum,\n FLAGS.loose_approximation_factor, FLAGS.max_stepsize, FLAGS.decay,\n FLAGS.additional, [], [], [], [], None, None, is_failed)\n else:\n eval_data_wrapper = fu.EvalDataWrapper(FLAGS.experiment_name, FLAGS.random_seed, FLAGS.optimizer,\n FLAGS.train_data_size,\n FLAGS.train_steps, FLAGS.batch_size, FLAGS.measuring_step_size,\n FLAGS.momentum,\n FLAGS.loose_approximation_factor, FLAGS.max_stepsize, FLAGS.decay,\n FLAGS.additional, mean_train_losses_per_interval, tran_acc_per_interval,\n evaluation_accuracies, eval_losses, avg_test_acc, avg_test_loss, is_failed,\n angles_for_each_step, step_sizes_for_each_step, grad_norms_for_each_step,\n all_first_derivatives, all_second_derivatives\n )\n\n fu.save_eval_data_wrapper(eval_data_wrapper, net.model_dir)", "def __call__(self, ens_x_input, vgg_x_input, inc_x_input, tcd_x_input):\n reuse = True if self.built else None\n logits = None\n aux_logits = None\n weights = [[0.7, 0.1], [0.2, 0.1]]\n all_inputs = [[ens_x_input, tcd_x_input], [inc_x_input, tcd_x_input]]\n scopes = [inception_resnet_v2.inception_resnet_v2_arg_scope(), inception.inception_v3_arg_scope()]\n 
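 # The two arg scopes above pair with the two backbone models iterated below; logits from each (model, input) pair are blended using the weights defined earlier.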
reuse_flags = [reuse, True]\n for model_idx, model in enumerate([inception_resnet_v2.inception_resnet_v2, inception.inception_v3]):\n with slim.arg_scope(scopes[model_idx]):\n for idx, inputs in enumerate(all_inputs[model_idx]):\n result = model(inputs, num_classes=self.num_classes, is_training=False, reuse=reuse_flags[idx])\n weight = weights[model_idx][idx]\n # :1 is for slicing out the background class\n if logits == None:\n logits = result[0][:, 1:] * weight\n aux_logits = result[1]['AuxLogits'][:, 1:] * weight\n else:\n logits += result[0][:, 1:] * weight\n aux_logits += result[1]['AuxLogits'][:, 1:] * weight\n\n with slim.arg_scope(vgg.vgg_arg_scope()):\n weight = 0.1\n result = vgg.vgg_16(vgg_x_input, num_classes=1000, is_training=False)\n logits += result[0] * weight\n\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n weight = 0.05\n result = resnet_v2.resnet_v2_152(vgg_x_input, num_classes=self.num_classes, reuse=reuse)\n logits += tf.squeeze(result[0])[:, 1:] * weight\n\n self.built = True\n aux_weight = 0.8\n logits += aux_logits * aux_weight\n\n predictions = layers_lib.softmax(logits)\n return predictions", "def configure_training_centralized(\n task_spec: training_specs.TaskSpecCentralized,\n *, # Caller passes below args by name.\n resnet_layers: int = 18,\n num_classes: int = 100,\n l2_weight_decay: float = 1e-4,\n) -> training_specs.RunnerSpecCentralized:\n\n return _Cifar100ImageTask(\n task_spec,\n resnet_layers=resnet_layers,\n num_classes=num_classes,\n l2_weight_decay=l2_weight_decay).build_centralized_runner_spec()", "def NN(train_df, val_df, test_df, sub_path):\n logging.info('Neural Network preprocessing')\n \n if train_df is not None: \n y_train = train_df['is_attributed'].values\n train_df = train_df.drop('is_attributed', axis = 1)\n train_df = train_df.drop('attributed_time', axis = 1) \n #train_df = train_df.drop('click_time', axis = 1) #only if no preprocessing\n gc.collect()\n if val_df is not None:\n y_val = val_df['is_attributed'].values \n val_df = val_df.drop(['is_attributed'], axis = 1)\n val_df = get_keras_data(val_df)\n \n list_variables = get_values(train_df)\n print(list_variables)\n \n logging.info('Model is creating...') \n \n max_var = []\n if test_df is not None:\n for i, var in enumerate(list_variables):\n max_var.append(np.max([train_df[var].max(), test_df[var].max()])+1) \n train_df = get_keras_data(train_df)\n else:\n for i, var in enumerate(list_variables):\n max_var.append(train_df[var].max()+1) \n train_df = get_keras_data(train_df)\n \n emb_n = 50\n dense_n = 1000\n \n in_var = []\n emb_var = [] \n for i, var in enumerate(list_variables):\n in_var.append(Input(shape=[1], name = var))\n emb_var.append(Embedding(max_var[i], emb_n)(in_var[i]))\n \n fe = concatenate([emb for emb in emb_var])\n s_dout = SpatialDropout1D(0.2)(fe)\n fl1 = Flatten()(s_dout)\n #conv = Conv1D(100, kernel_size=4, strides=1, padding='same')(s_dout)\n dl = Dense(100)(s_dout)\n fl2 = Flatten()(dl)\n concat = concatenate([(fl1), (fl2)])\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(concat))\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(x))\n outp = Dense(1,activation='sigmoid')(x)\n \n model = Model(inputs=[var for var in in_var], outputs=outp)\n \n logging.info('Model is compiling...')\n \n batch_size = 50000\n epochs = 2 #12 for sample_train\n exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1\n steps = int(len(list(train_df)[0]) / batch_size) * epochs\n lr_init, lr_fin = 0.002, 0.0002\n lr_decay = exp_decay(lr_init, lr_fin, 
steps)\n optimizer_adam = Adam(lr=lr_init, decay=lr_decay)\n \n model.compile(loss='binary_crossentropy',optimizer=optimizer_adam,metrics=['accuracy'])\n model.summary()\n \n logging.info('Model is training...')\n \n model.fit(train_df, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, validation_split=0.1)\n del train_df, y_train; gc.collect()\n \n if val_df is not None:\n logging.info('Prediction on validation set')\n predictions_NN_prob = model.predict(val_df, batch_size=batch_size, verbose=2)\n del val_df; gc.collect()\n predictions_NN_prob = predictions_NN_prob[:,0]\n \n predictions_NN = np.where(predictions_NN_prob > 0.5, 1, 0)\n acc_NN = accuracy_score(y_val, predictions_NN)\n print('Overall accuracy of Neural Network model:', acc_NN)\n \n if test_df is not None:\n logging.info('Prediction on test set')\n sub = pd.DataFrame()\n sub['click_id'] = test_df['click_id'].astype('int')\n test_df = test_df.drop(['click_id'], axis=1)\n test_df = get_keras_data(test_df)\n \n sub['is_attributed'] = model.predict(test_df, batch_size=batch_size, verbose=2)\n del test_df; gc.collect()\n logging.info(\"Writing....\")\n with file_io.FileIO(sub_path, mode='wb') as fout:\n sub.to_csv(fout,index=False)\n logging.info(\"Done...\")\n logging.info(sub.info())", "def trainNet():", "def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)", "def train_Convnet(DIM, SHIPS, device):\n agent = ModelConvnet(\"Vikram\", DIM, len(SHIPS), device)\n agent.to(device)\n env = Environment(DIM, SHIPS, 
\"Vikram\")\n batch_size = 1024\n num_episodes = 1000\n max_running_avg = 64\n\n batch = 0\n total_moves = 0\n\n inputs = np.empty([batch_size, 1, DIM, DIM])\n labels = np.empty([batch_size, DIM, DIM])\n\n for e in range(num_episodes):\n env.reset()\n state = env.get_state()\n done = False\n episode_moves = 0\n\n for time in range(DIM*DIM):\n action = agent.move(state)\n episode_moves += 1\n reward, next_state = env.step(action)\n next_input, open_locations, hit, sunk, done = next_state\n inputs[batch, :, :] = next_input[0, :, :]\n labels[batch, :, :] = env.get_ground_truth()\n\n if done == True:\n total_moves += episode_moves\n episode_moves = 0\n if e % max_running_avg == 0 and e != 0:\n print(\"Episodes: {}, Avg Moves: {}\".format(e,float(total_moves)/float(max_running_avg)))\n total_moves = 0\n\n break\n \n \n batch += 1\n\n if batch == batch_size:\n agent.replay(inputs, labels)\n batch = 0\n \n state = next_state\n\n if done == False:\n print(env.placement)\n print(inputs,actions, hits)\n # break\n\n return agent", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader_MultiClass_Loss(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n RANDOM_SEED,\n False, # No random scale.\n False, # No random mirror.\n coord)\n image, l2_catg, binary_catg, hinge_catg = reader.image, reader.l2_catg, reader.binary_catg, reader.hinge_catg\n image_batch = tf.expand_dims(image, dim=0)\n binary_catg_batch = tf.expand_dims(binary_catg, dim=0)\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc1_voc12']\n\n # Do the global average pooling\n raw_output_bcgd_rmvd = raw_output[:,:,:,1:]\n g_avg_pool = tf.reduce_mean(tf.reduce_mean(raw_output_bcgd_rmvd, axis=1, keep_dims=True),\\\n axis=2, keep_dims=True) # Avg across the width and height dimension -> [Bx21]\n g_avg_pool_sqzd = tf.squeeze(g_avg_pool, axis=[1, 2])\n pred = tf.nn.softmax(g_avg_pool_sqzd)\n\n # Get the class activation map\n raw_output_up = tf.image.resize_bilinear(raw_output_bcgd_rmvd, tf.shape(image_batch)[1:3,])\n raw_output_up = raw_output_up - tf.reduce_min(tf.reduce_min(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True) + EPSILON\n raw_output_up = raw_output_up / tf.reduce_max(tf.reduce_max(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True)\n cam_m_1 = tf.argmax(raw_output_up, dimension=3) + 1\n raw_output_catgs_rmvd = raw_output_up * tf.expand_dims(tf.expand_dims(binary_catg_batch, 1), 2)\n cam_m_2 = tf.argmax(raw_output_catgs_rmvd, dimension=3) + 1\n cam = tf.cast(tf.equal(cam_m_1, cam_m_2), tf.int64) * cam_m_1\n\n cam_batch = tf.expand_dims(cam, dim=3)\n\n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n \n # Iterate over training steps.\n for step in range(args.num_steps):\n preds, images, cams, bin_catg = sess.run([pred, image_batch, cam_batch, binary_catg])\n \"\"\"\n print(bin_catg)\n print(np.unique(np.unique(cams)))\n \"\"\"\n img = inv_preprocess(images)\n attMap = decode_labels(cams)\n output_dir = './output_maps_binary_without_norm/'\n img_name = output_dir + str(step) + '.jpg'\n map_name = output_dir + str(step) + '.png'\n misc.imsave(img_name, img[0,:,:,:])\n misc.imsave(map_name, attMap[0,:,:,:])\n coord.request_stop()\n coord.join(threads)", "def compile_train(self, epochs):\n self.model.compile(\n optimizer=Adam(0.001),\n loss=SparseCategoricalCrossentropy(from_logits=True),\n metrics='accuracy'\n )\n\n self.model.fit(\n train_x,\n train_y,\n batch_size=50,\n epochs=epochs,\n validation_data=(val_x, val_y),\n verbose=1\n )", "def __init__(self, **kwargs):\n super(CIFAR10Classifier, self).__init__() #pylint: disable=super-with-arguments\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)\n\n self.scheduler = None\n self.optimizer = None\n self.args = kwargs\n\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n self.preds = []\n self.target = []\n self.example_input_array = torch.rand((1, 3, 64, 64))", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def train(self, epochs=5):\n x_train, y_train, x_test, y_test = self._load_data()\n x_train = tf.keras.utils.normalize(x_train, axis=1) # Scale between 0-1\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n model = tf.keras.models.Sequential()\n # 28 x 28 (digits dimensions) -> flat 784\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n # neurons -> number of classification\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n dtnow = datetime.now().strftime(\"%Y-%m-%dT%H:%M\")\n tb_logs = self._artifact_repo.artifact_path(self._TENSORBOARD_LOGS)\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir='{}/{}'.format(tb_logs, dtnow))\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n model.fit(x_train, y_train, epochs=int(epochs), validation_data=(x_test, y_test), callbacks=[tensorboard])\n\n # val_loss, val_acc = model.evaluate(x_test, y_test)\n\n # self._logger.info(\"Evaluation on test dataset: Loss: %s, Accuracy: %s\", val_loss, val_acc)\n\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model.save(path)", "def resnet50_classifier(num_rois, num_classes, base_model = 
None, weight_regularizer=None, bias_regularizer=None):\n roi_input = Input(shape=(None, 4), name='roi_input')\n\n pooling_input = base_model.output if base_model else Input(shape=(None, None, FINAL_CONV_FILTERS))\n model_input = base_model.input if base_model else pooling_input\n resize_out = RoiResizeConv(POOLING_REGIONS, num_rois)([pooling_input, roi_input])\n\n out = td_conv_block(resize_out, 3, [512, 512, 2048], stage=5, block='a', strides=(1,1),\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n td_input_shape=(num_rois, POOLING_REGIONS, POOLING_REGIONS, 1024))\n out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='b',\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='c',\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n out = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(out)\n\n out = TimeDistributed(Flatten(name='flatten'))(out)\n\n gaussian_initializer_cls = TruncatedNormal(stddev=0.01)\n gaussian_initializer_bbreg = TruncatedNormal(stddev=0.001)\n\n out_class = TimeDistributed(Dense(num_classes, activation='softmax',\n kernel_initializer=gaussian_initializer_cls,\n kernel_regularizer=weight_regularizer,\n bias_regularizer=bias_regularizer\n ),\n name='dense_class_{}'.format(num_classes))(out)\n out_reg = TimeDistributed(Dense(4 * (num_classes - 1), activation='linear',\n kernel_initializer=gaussian_initializer_bbreg,\n kernel_regularizer=weight_regularizer,\n bias_regularizer=bias_regularizer\n ),\n name='dense_reg_{}'.format(num_classes))(out)\n\n cls_model = Model(inputs=[model_input, roi_input], outputs=[out_class, out_reg])\n\n # not sure if needed - bn layers should already be frozen\n for layer in cls_model.layers:\n if isinstance(layer, TimeDistributed) and isinstance(layer.layer, BatchNormalization):\n layer.layer.trainable = False\n\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n cls_model.load_weights(weights_path, by_name=True)\n\n return cls_model", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n\n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n False, # No random scale.\n False, # No random mirror.\n args.ignore_label,\n IMG_MEAN,\n coord)\n image, label = reader.image, reader.label\n image_batch, label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0) # Add one batch dimension.\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False, num_classes=args.num_classes)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n\n # Predictions.\n raw_output = net.layers['fc1_voc12']\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n #raw_output = tf.argmax(raw_output, dimension=3)\n #pred = tf.expand_dims(raw_output, dim=3) # Create 4-d tensor.\n pred = raw_output\n\n # Set up TF session and initialize variables.\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n\n sess.run(init)\n sess.run(tf.local_variables_initializer())\n\n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not 
None:\n load(loader, sess, args.restore_from)\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n with open(args.data_list) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n\n for index, value in enumerate(content):\n print(\"outputting \"+str(index))\n img = tf.image.decode_png(tf.read_file(value.split()[0]), channels=3)\n raw_img = misc.imread(value.split()[0])\n print(type(raw_img))\n # Convert RGB to BGR.\n img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)\n img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)\n # Extract mean.\n img -= IMG_MEAN\n # Predictions.\n raw_output = net.layers['fc1_voc12']\n\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img)[0:2,])\n #pred = raw_output_up\n probabilities = tf.nn.softmax(raw_output_up)\n pred = tf.argmax(raw_output_up, dimension=3)\n pred = tf.expand_dims(pred, dim=3)\n # Perform inference.\n preds, probs = sess.run([pred, probabilities])\n print(preds.shape)\n print(probs.shape)\n print(\"probs\")\n print(probs)\n softmax = probs[0, :, :, :]\n print(\"softmax\")\n print(softmax)\n print(softmax.shape)\n print(type(softmax))\n processed_probabilities = softmax.transpose((2, 0, 1))\n print(processed_probabilities.shape)\n print(type(processed_probabilities))\n crf_processed = performCRF(processed_probabilities, raw_img)\n\n im_preds = Image.fromarray(np.uint8(preds[0, :, :, 0]))\n\n print(\"preds shape\", preds.shape)\n msk = decode_labels(preds, num_classes=args.num_classes)\n im = Image.fromarray(msk[0])\n\n print(\"crf_processed shape\", crf_processed.shape)\n crf_processed = crf_processed.reshape(1, crf_processed.shape[0], crf_processed.shape[1], 1)\n msk_crf = decode_labels(crf_processed, num_classes=args.num_classes)\n im_crf = Image.fromarray(msk_crf[0])\n\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n #im_preds.save(args.save_dir +str(index).zfill(8) +'_predlabels_'+args.train_set+'.png')\n im.save(args.save_dir +str(index).zfill(8) +'_pred_'+args.train_set+'.png')\n im_crf.save(args.save_dir +str(index).zfill(8) +'_predcrf_'+args.train_set+'.png')
def train_calibrator(cfg, src_net_file):\n\n ###########################\n # Setup cuda and networks #\n ###########################\n\n # setup cuda\n if torch.cuda.is_available():\n kwargs = {'num_workers': cfg['TRAIN']['NUM_WORKERS'], 'pin_memory': True}\n else:\n kwargs = {}\n\n # setup network\n # calibrator net contains source domain model, calibrator and domain discriminators\n net = get_model('CalibratorNet',cfg, src_weights_init = src_net_file)\n\n src = cfg['DATASET']['SRC']\n tgt = cfg['DATASET']['TGT']\n datadir = cfg['TRAIN']['DATA_DIR']\n src_net = cfg['SRC_NET']['ARCH']\n # print network and arguments\n print(net)\n print('Training calibrator with pretrained {} model for {}->{}'.format(src_net, src, tgt))\n\n #######################################\n # Setup data for training and testing #\n #######################################\n\n # define dataset properties\n num_channels = net.src_net.module.num_channels if hasattr(net.src_net,'module') else net.src_net.num_channels\n image_size = net.src_net.module.image_size if hasattr(net.src_net,'module') else net.src_net.image_size\n\n batch = cfg['CALIBRATOR']['BATCH_SIZE']\n\n train_src_data = load_data(src, 'train', batch=batch,\n rootdir=join(datadir,src), num_channels=num_channels,\n image_size=image_size, 
download=True, kwargs=kwargs)\n\n test_src_data = load_data(src, 'test', batch=batch,\n rootdir=join(datadir,src), num_channels=num_channels,\n image_size=image_size, download=True, kwargs=kwargs)\n \n\n train_tgt_data = load_data(tgt, 'train', batch=batch,\n rootdir=join(datadir,tgt), num_channels=num_channels,\n image_size=image_size, download=True, kwargs=kwargs)\n\n\n test_tgt_data = load_data(tgt, 'test', batch=batch,\n rootdir=join(datadir,tgt), num_channels=num_channels,\n image_size=image_size, download=True, kwargs=kwargs)\n \n\n \n ######################\n # Optimization setup #\n ######################\n\n # net_param = net.tgt_net.parameters()\n # opt_net = optim.Adam(net_param, lr=lr, weight_decay=weight_decay, betas=betas)\n\n\n lr = cfg['CALIBRATOR']['CALI_LR']\n\n weight_decay = cfg['OPTIMIZER']['ADAM']['WEIGHT_DECAY']\n betas = cfg['OPTIMIZER']['ADAM']['BETAS']\n\n\n \n \n \n opt_dis = optim.Adam(net.discriminator.parameters(), lr=lr,\n weight_decay=weight_decay, betas=betas)\n\n opt_p = optim.Adam(net.pixel_discriminator.parameters(), lr=lr,\n weight_decay=weight_decay, betas=betas)\n\n opt_cali = optim.Adam(net.calibrator_T.parameters(), lr=lr, weight_decay=weight_decay, betas=betas)\n\n\n \n lr_decay_step = 10\n lr_decay_rate = 0.5\n \n lr_scheduler_cali = optim.lr_scheduler.StepLR(opt_cali, step_size=lr_decay_step, gamma=lr_decay_rate)\n lr_scheduler_p_dis = optim.lr_scheduler.StepLR(opt_p, step_size=lr_decay_step, gamma=lr_decay_rate)\n lr_scheduler_dis = optim.lr_scheduler.StepLR(opt_dis, step_size=lr_decay_step, gamma=lr_decay_rate)\n \n lr_schedulers = [lr_scheduler_cali, lr_scheduler_p_dis, lr_scheduler_dis]\n\n \n ##############\n # Train Adda #\n ##############\n\n\n eventer = Eventer(cfg, 'PATCH_SIZE')\n \n for epoch in range(cfg['CALIBRATOR']['CALI_EPOCH']):\n \n err = train(train_src_data, train_tgt_data, net, opt_dis, opt_p, opt_cali, lr_schedulers,epoch,cfg)\n \n #test(test_src_data, net) for source performance test\n print('{} test'.format(tgt))\n \n net.pixel_discriminator.eval()\n net.discriminator.eval()\n net.calibrator_T.eval()\n net.src_net.eval() \n test_acc = test(test_tgt_data, net,cfg)\n\n eventer.add_scalar('test_acc', test_acc, epoch)\n eventer.write_gradients(net.calibrator_T,epoch)\n #eventer.write_gradients(net.pixel_discriminator,epoch)\n eventer.write_gradients(net.discriminator,epoch) \n\n ##############\n # Save Model #\n ##############\n os.makedirs(cfg['PATH']['OUTDIR'], exist_ok=True)\n\n outfile = cfg['PATH']['CALIBRATOR_NET_PATH']\n \n print('Saving calibrator net to', outfile)\n net.save(outfile)", "def resnet_v1(input_shape, depth, num_classes=10,input_tensor=None):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n if (input_tensor == None):\n inputs = Input(shape=input_shape)\n else:\n inputs = input_tensor\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n kernel_size=30,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n kernel_size=20,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = 
resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=10,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling1D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def train(self):\n if not os.path.isdir(self.C.log_path):\n os.mkdir(self.C.log_path)\n \n callback = TensorBoard(self.C.log_path, write_graph=True, write_images=True)\n callback.set_model(self.model_all)\n\n epoch_length = self.C.epoch_length\n epochs = self.C.epochs\n iter_num = 0\n train_step = 0\n\n losses = np.zeros((epoch_length, 5))\n rpn_accuracy_monitor = []\n rpn_accuracy_epoch = []\n best_loss = np.Inf\n start_time = time.time()\n\n for epoch in range(epochs):\n progbar = generic_utils.Progbar(epoch_length)\n print('Epoch {}/{}'.format(epoch + 1, epochs))\n\n while True:\n if len(rpn_accuracy_monitor) == epoch_length and self.C.verbose:\n mean_overlapping_bboxes = float(sum(rpn_accuracy_monitor)) / len(rpn_accuracy_monitor)\n rpn_accuracy_monitor = []\n print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))\n if mean_overlapping_bboxes == 0:\n print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')\n\n # data generator\n X, Y, img_data = next(self.data_gen_train)\n\n loss_rpn = self.model_region_proposal.train_on_batch(X, Y)\n self._write_log(callback, ['rpn_cls_loss', 'rpn_reg_loss'], loss_rpn, train_step)\n\n P_rpn = self.model_region_proposal.predict_on_batch(X)\n\n R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], self.C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300)\n # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format\n X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, self.C, self.class_mapping)\n\n if X2 is None:\n rpn_accuracy_monitor.append(0)\n rpn_accuracy_epoch.append(0)\n continue\n \n # sampling positive/negative samples\n neg_samples = np.where(Y1[0, :, -1] == 1)\n pos_samples = np.where(Y1[0, :, -1] == 0)\n\n if len(neg_samples) > 0:\n neg_samples = neg_samples[0]\n else:\n neg_samples = []\n\n if len(pos_samples) > 0:\n pos_samples = pos_samples[0]\n else:\n pos_samples = []\n\n rpn_accuracy_monitor.append(len(pos_samples))\n rpn_accuracy_epoch.append((len(pos_samples)))\n\n if self.C.num_roi > 1:\n if len(pos_samples) < self.C.num_roi//2:\n selected_pos_samples = pos_samples.tolist()\n else:\n selected_pos_samples = np.random.choice(pos_samples, \n self.C.num_roi//2, \n replace=False).tolist()\n try:\n selected_neg_samples = np.random.choice(neg_samples, \n self.C.num_roi-len(selected_pos_samples), \n replace=False).tolist()\n except:\n selected_neg_samples = np.random.choice(neg_samples, \n self.C.num_roi-len(selected_pos_samples), \n replace=True).tolist()\n sel_samples = selected_pos_samples + selected_neg_samples\n \n else:\n # in the extreme case where num_roi = 1, we pick a random pos or neg sample\n selected_pos_samples = pos_samples.tolist()\n selected_neg_samples = neg_samples.tolist()\n if np.random.randint(0, 2):\n sel_samples = random.choice(neg_samples)\n else:\n sel_samples = random.choice(pos_samples)\n\n loss_class = 
self.model_classifier.train_on_batch([X, X2[:, sel_samples, :]], \n [Y1[:, sel_samples, :], \n Y2[:, sel_samples, :]])\n self._write_log(callback, \n ['detection_cls_loss', 'detection_reg_loss', 'detection_acc'], \n loss_class, train_step)\n train_step += 1\n\n losses[iter_num, 0] = loss_rpn[1]\n losses[iter_num, 1] = loss_rpn[2]\n losses[iter_num, 2] = loss_class[1]\n losses[iter_num, 3] = loss_class[2]\n losses[iter_num, 4] = loss_class[3]\n\n iter_num += 1\n\n progbar.update(iter_num, \n [('rpn_cls', np.mean(losses[:iter_num, 0])), \n ('rpn_regr', np.mean(losses[:iter_num, 1])),\n ('det_cls', np.mean(losses[:iter_num, 2])), \n ('det_regr', np.mean(losses[:iter_num, 3]))])\n\n if iter_num == epoch_length:\n loss_rpn_cls = np.mean(losses[:, 0])\n loss_rpn_regr = np.mean(losses[:, 1])\n loss_class_cls = np.mean(losses[:, 2])\n loss_class_regr = np.mean(losses[:, 3])\n class_acc = np.mean(losses[:, 4])\n\n mean_overlapping_bboxes = float(sum(rpn_accuracy_epoch)) / len(rpn_accuracy_epoch)\n rpn_accuracy_epoch = []\n\n if self.C.verbose:\n print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))\n print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))\n print('Loss RPN classifier: {}'.format(loss_rpn_cls))\n print('Loss RPN regression: {}'.format(loss_rpn_regr))\n print('Loss Detector classifier: {}'.format(loss_class_cls))\n print('Loss Detector regression: {}'.format(loss_class_regr))\n print('Elapsed time: {}'.format(time.time() - start_time))\n\n curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr\n iter_num = 0\n start_time = time.time()\n\n self._write_log(callback,\n ['Elapsed_time', 'mean_overlapping_bboxes', 'mean_rpn_cls_loss', \n 'mean_rpn_reg_loss', 'mean_detection_cls_loss', 'mean_detection_reg_loss', \n 'mean_detection_acc', 'total_loss'],\n [time.time() - start_time, mean_overlapping_bboxes, loss_rpn_cls, \n loss_rpn_regr, loss_class_cls, loss_class_regr, class_acc, curr_loss],\n epoch)\n\n if curr_loss < best_loss:\n if self.C.verbose:\n print('Total loss decreased from {} to {}, saving weights'.format(best_loss,curr_loss))\n best_loss = curr_loss\n self.model_all.save_weights(self.C.model_path)\n break\n \n print(\"traing completed.\")", "def model_CNN(x_train, y_train, x_test=None, y_test=None, kwargs={}):\n \"\"\"\n Notes on Input shape\n 4D tensor with shape (batch_size, timesteps, features, `colors`).\n 4D tensor with shape: (samples, rows, cols, channels)\n `channels_last` (default)\n Output 4D tensor with shape: (samples, new_rows, new_cols, filters)\n \"\"\"\n ######## CNN for stocks\n # create and fit CNN\n # input_shape = StockDate x Lookback x Features\n from keras.layers import Conv2D, MaxPooling2D\n from keras.optimizers import SGD\n\n\n layers = kwargs.get('layers', 10 ) #TODO\n nodes = kwargs.get('nodes', None) #TODO\n\n if nodes is None or nodes==0 or nodes==[0]:\n nodes = [np.shape(x_train)[1]*3]\n elif isinstance(nodes, (int, np.integer)): # turn int to list\n nodes = [nodes]\n\n if layers > 1 and len(nodes) < layers:\n nodes = list(np.pad(nodes,[0,layers-len(nodes)], mode='constant',constant_values=nodes[-1]))\n\n ndim = np.max([2,len(np.shape(x_train))]) # Min 2D\n if ndim==2:\n input_shape=(x_train.shape[1],)\n elif ndim==3:\n input_shape=(x_train.shape[1],x_train.shape[2])\n elif ndim==4:\n input_shape=(x_train.shape[1],x_train.shape[2],x_train.shape[3])\n else:\n input_shape=x_train.shape[1:]\n if kwargs.get('learning_rate', False):\n lr = 
kwargs.get('learning_rate')\n else:\n lr = False\n\n if False:\n conv = (3, 3)\n else:\n conv = (2, 2)\n n_conv = 5\n\n if np.ndim(y_train)==1:\n n_out = 1 #e.g. forecast y as float, just 1 step ahead.\n else:\n n_out = np.shape(y_train)[1] #e.g. onehot encoded, or n-steps ahead.\n\n dropout = kwargs.get('dropout',0) # dropout rate between 0 and 1.\n #stateful = kwargs.get('stateful',True)\n actvn = 'relu' #kwargs.get('actvn','relu')\n actvl = kwargs.get('actvl','sigmoid')\n model=[]\n model = Sequential() # https://keras.io/models/sequential/\n model.reset_states()\n # input: 100x100 images with 3 channels -> (100, 100, 3) tensors.\n # this applies 32 convolution filters of size 3x3 each.\n model.add(Conv2D(n_conv, conv, activation=actvn, input_shape=input_shape))\n #model.add(Conv2D(n_conv, conv, activation=actvn))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(dropout ))\n\n model.add(Conv2D(n_conv*2, conv, activation=actvn))\n #model.add(Conv2D(n_conv*2, conv, activation=actvn))\n #model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(dropout ))\n\n model.add(Flatten())\n model.add(Dense(np.min(input_shape), activation=actvn))\n model.add(Dropout(dropout*2))\n model.add(Dense(n_out, activation=actvl))\n\n if hasattr(kwargs,'optimizer'):\n optimizer = kwargs['optimizer']\n elif lr:\n optimizer = SGD(lr=lr, decay=1e-6, momentum=0.01, nesterov=True)\n else:\n optimizer = 'Nadam' #keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)\n\n if is_bool_dtype(y_train):\n model.compile(loss='binary_crossentropy', optimizer=optimizer)\n if is_categorical_dtype(y_train) or kwargs.get('onehot',False):\n #TODO Multiple Category\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n else:\n #model.compile(loss='mean_squared_error', optimizer=optimizer)\n model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[r2_keras])\n\n\n if kwargs.get('verbose',False) > 1:\n model.summary()\n print(\"Inputs: {}\".format(model.input_shape))\n print(\"Outputs: {}\".format(model.output_shape))\n print(\"Actual input: {}\".format(x_train.shape))\n print(\"Actual output: {}\".format(y_train.shape))\n print('Model Loss: ' + model.loss)\n\n # For compatability with other models;\n model.score = model.evaluate\n\n return model #self.model=model", "def resnet50(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n model = ResNet(Bottleneck, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet(Bottleneck, [3, 4, 6, 3]) #, **kwargs)\n return model", "def _resnet_model_fn(features, labels, mode, params):\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n weight_decay = params.weight_decay\n momentum = params.momentum\n\n tower_features = features\n tower_labels = labels\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. 
The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = \"channels_last\"\n else:\n data_format = \"channels_first\"\n\n if num_gpus == 0:\n num_devices = 1\n device_type = \"cpu\"\n else:\n num_devices = num_gpus\n device_type = \"gpu\"\n\n for i in range(num_devices):\n worker_device = \"/{}:{}\".format(device_type, i)\n if variable_strategy == \"CPU\":\n device_setter = cifar10_utils.local_device_setter(\n worker_device=worker_device\n )\n elif variable_strategy == \"GPU\":\n device_setter = cifar10_utils.local_device_setter(\n ps_device_type=\"gpu\",\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn\n ),\n )\n with tf.variable_scope(\"resnet\", reuse=bool(i != 0)):\n with tf.name_scope(\"tower_%d\" % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n is_training,\n weight_decay,\n tower_features[i],\n tower_labels[i],\n data_format,\n params.num_layers,\n params.batch_norm_decay,\n params.batch_norm_epsilon,\n )\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(\n tf.GraphKeys.UPDATE_OPS, name_scope\n )\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope(\"gradient_averaging\"):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(\n tf.add_n(grads), 1.0 / len(grads)\n )\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = (\n \"/gpu:0\" if variable_strategy == \"GPU\" else \"/cpu:0\"\n )\n with tf.device(consolidation_device):\n # Suggested learning rate scheduling from\n # https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar10-resnet.py#L155\n num_batches_per_epoch = cifar10.Cifar10DataSet.num_examples_per_epoch(\n \"train\"\n ) // (\n params.train_batch_size * num_workers\n )\n boundaries = [\n num_batches_per_epoch * x\n for x in np.array([80, 120, 160], dtype=np.int64)\n ]\n staged_lr = [\n params.learning_rate * x for x in [1, 0.1, 0.01, 0.001]\n ]\n\n learning_rate = tf.train.piecewise_constant(\n tf.train.get_global_step(), boundaries, staged_lr\n )\n\n loss = tf.reduce_mean(tower_losses, name=\"loss\")\n\n # examples_sec_hook = cifar10_utils.ExamplesPerSecondHook(\n # params.train_batch_size, every_n_steps=10\n # )\n\n # tensors_to_log = {\"learning_rate\": learning_rate, \"loss\": loss}\n\n # logging_hook = tf.train.LoggingTensorHook(\n # tensors=tensors_to_log, every_n_iter=100\n # )\n\n # train_hooks = [logging_hook, examples_sec_hook]\n train_hooks = []\n\n # Hyper-parameter \"momentum\" is only used for the Momentum Optimizer\n # Other optimizers use their default parameters.\n if params.optimizer == \"momentum\":\n optimizer = 
tf.train.MomentumOptimizer(\n learning_rate=learning_rate, momentum=momentum\n )\n elif params.optimizer == \"adam\":\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n elif params.optimizer == \"adagrad\":\n optimizer = tf.train.AdagradOptimizer(\n learning_rate=learning_rate\n )\n elif params.optimizer == \"adadelta\":\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate\n )\n elif params.optimizer == \"sgd\":\n optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=learning_rate\n )\n elif params.optimizer == \"rmsprop\":\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate=learning_rate\n )\n else:\n raise ValueError(\"unrecognized optimizer name\")\n # TODO: RAdam is implemented in tensorflow-addons v0.6, which requires tf 2.0\n # Upgrade code by removing tf.contrib modules.\n # optimizer = tfa.optimizers.RectifiedAdam(lr=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers\n )\n sync_replicas_hook = optimizer.make_session_run_hook(\n params.is_chief\n )\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step()\n )\n ]\n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n \"classes\": tf.concat(\n [p[\"classes\"] for p in tower_preds], axis=0\n ),\n \"probabilities\": tf.concat(\n [p[\"probabilities\"] for p in tower_preds], axis=0\n ),\n }\n stacked_labels = tf.concat(labels, axis=0)\n metrics = {\n \"accuracy\": tf.metrics.accuracy(\n stacked_labels, predictions[\"classes\"]\n )\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics,\n )", "def train_and_test(resume_training=False, tensorboard_debug=False, cli_debug=False):\r\n if tensorboard_debug:\r\n # Open tf debug session connected to tensor board, this only really works well on linux\r\n k.set_session(TensorBoardDebugWrapperSession(tf.Session(), '127.0.0.1:6064'))\r\n elif cli_debug:\r\n # Open tf debug session with local cli, run manually via ssh\r\n k.set_session(LocalCLIDebugWrapperSession(tf.Session()))\r\n\r\n if resume_training:\r\n checkpoint_dir = latest_checkpoint(\"colorizer\")\r\n print(f\"Latest checkpoint: {checkpoint_dir}\")\r\n model = load_model(str(checkpoint_dir)) if checkpoint_dir is not None else None\r\n else:\r\n model = None\r\n\r\n # Initialize image generators\r\n data_generator = ImageDataGenerator(validation_split=0.3)\r\n\r\n train_generator = BinnedImageGenerator(\r\n str(Config.data_folder),\r\n data_generator,\r\n target_size=(256, 256),\r\n batch_size=Config.batch_size,\r\n shuffle=True,\r\n subset=\"training\")\r\n\r\n test_generator = BinnedImageGenerator(\r\n str(Config.data_folder),\r\n data_generator,\r\n target_size=(256, 256),\r\n batch_size=Config.batch_size,\r\n subset=\"validation\")\r\n\r\n # Start training\r\n train_model(train_generator, test_generator, model)", "def main_train(lr, bs, cuda_id, not_distrib, fp16, loss_scale):\r\n torch.backends.cudnn.benchmark = True\r\n if fp16: assert torch.backends.cudnn.enabled, \"missing cudnn\"\r\n stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))\r\n sz=32\r\n PATH = Path(\"../../data/cifar10/\")\r\n tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomCrop(sz), RandomFlip()], pad=sz//8)\r\n data1 = 
ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)\r\n m = wrn_22().cuda()\r\n if not not_distrib: m = nn.parallel.DistributedDataParallel(m, device_ids=[cuda_id], output_device=cuda_id)\r\n learn = ConvLearner.from_model_data(m, data1)\r\n learn.crit = nn.CrossEntropyLoss()\r\n learn.metrics = [accuracy]\r\n trn_tfms = CustomTfm(0.5, 4, 32, 1)\r\n val_tfms = None\r\n data = DataBunch.from_files(PATH, trn_tfms, val_tfms, stats, torch.device('cuda', cuda_id), distrib=not not_distrib, val_name='test', bs=bs)\r\n learn.data.trn_dl, learn.data.val_dl = data.trn_dl, data.val_dl\r\n if fp16: learn.half()\r\n x,y = next(iter(data.trn_dl))\r\n opt_fn = get_opt_fn('Adam', 0.95, 0.99, False)\r\n learn.opt_fn = opt_fn\r\n cyc_len, pct = 30, 0.075\r\n nbs = [cyc_len * (1-pct) / 2, cyc_len * (1-pct) / 2, cyc_len * pct]\r\n phases = get_phases(lr, (0.95,0.85), opt_fn, 10, nbs, 0.1, True, False)\r\n #print_lr = PrintLR(learn)\r\n learn.fit_opt_sched(phases, loss_scale=loss_scale)", "def _run_epoch(sess, model, args, data, index=0, tb_summaries=None,\n id_to_word=None, train_op=None, verbose=False):\n epoch_start_time = time.time()\n # total cost and number of words evaluated in this epoch\n costs, total_words = 0.0, 0.0\n # epoch size is number of batches in each epoch\n epoch_size = (len(data[index]) - 1) // model.config['batch_size']\n state = sess.run(model.initial_state)\n\n # iterate through batches\n for step, (x, y) in enumerate(data_reader.batch_iterator(\n data[index], model.config['batch_size'])):\n # return these parameters after running TF session\n fetches = {\n 'cost': model.cost[index],\n 'final_state': model.final_state,\n 'seq_len': model.seq_len\n }\n # only train model has optimizer operation\n if train_op is not None:\n fetches['train_op'] = train_op[index]\n\n # create dict to feed input, targets, and rnn into TF session\n feed_dict = utils.create_feed_dict(model, args, x, y, state)\n # run all parameters in fetches dict\n vals = sess.run(fetches, feed_dict)\n\n costs += vals['cost']\n # number of words evaluated\n total_words += np.sum(vals['seq_len'])\n # use perplexity to evaluate language models\n perplexity = np.exp(costs / total_words)\n\n if verbose and step % (epoch_size // 2) == 1:\n # display perplexity and top word predictions for sequence\n _display_epoch_metrics(step, epoch_size, perplexity, total_words,\n epoch_start_time, args, model, sess,\n index, feed_dict, vals, id_to_word, y)\n\n # generate sample text while training to monitor progress\n if args.display_text == 'True' and model.name == 'Train':\n generate.generate_text(sess, model, id_to_word, train_ind=index)\n\n # write TensorBoard summaries for Train/Valid\n if args.save_path != '' and model.name != 'Test':\n summary = sess.run(tb_summaries.summary_op,\n {tb_summaries.ppl_summary: perplexity})\n model.file_writer.add_summary(summary, get_or_create_global_step().eval())\n\n return perplexity", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n\n return model", "def get_cifar10():\n\n from keras.datasets import cifar10\n\n # input image dimensions\n img_rows, img_cols = 32, 32\n n_channels = 3\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], n_channels, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 
n_channels, img_rows, img_cols)\n input_shape = (n_channels, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, n_channels)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, n_channels)\n input_shape = (img_rows, img_cols, n_channels)\n\n return (x_train, y_train), (x_test, y_test)", "def epoch_recon(models,datapath,img_size=100,adjust=False,epochs=120,max_diff=0.1,step_size=0.01):\n target = set_creation(datapath,img_size=img_size,nrand=1000,adjust=adjust) # Load 1000 images\n out,results = success_rate(model,target,img_size,args.discrepancy)\n acc_size = numpy.arange(0,max_diff,step_size) # Define discrepancy ranges\n results = numpy.zeros((epochs,len(acc_size))) # Initialize results array (epoch vs. reconstruction accuracy)\n for epoch in range(epochs): # Loop over epochs\n model_epoch = models[epoch+1] # Load epoch model\n model_epoch.eval() # Set model to evaluation mode\n out, _ = model_epoch(target.float()) # Execute trained model to data\n for j in range(len(out)): # Loop over all output data\n out[j][0] = (out[j][0]-out[j][0].min())/(out[j][0].max()-out[j][0].min()) # Normalized outputs\n diff = abs(out-target).reshape(len(out),img_size,img_size).data.numpy() # Calculate difference between original and output images\n acc = numpy.array([[len(var[numpy.where((i<=var)&(var<i+step_size))]) for var in diff] for i in acc_size]) # Find how many pixels are found in each discrepancy range \n acc = acc/img_size**2*100 # Convert the values to percentages\n results[epoch] = numpy.mean(acc,axis=1) # Calculate mean percentage accross all images\n plt.style.use('seaborn') # Set seaborn style\n fig = plt.figure(figsize=(10,6),dpi=80) # Initialize figure\n ax1 = fig.add_axes([0.10,0.10,0.83,0.69]) # Main plot\n ax2 = fig.add_axes([0.95,0.10,0.03,0.69]) # Colorbar\n ax3 = fig.add_axes([0.10,0.82,0.83,0.15],sharex=ax1) # Histogram\n img = ax1.imshow(results.T[::-1],aspect='auto',cmap='summer',extent=[0,epochs,0,max_diff])\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Discrepancy threshold')\n plt.colorbar(img,label='Percentage of pixels',cax=ax2) # Plot colorbar\n y = [sum(results[i]) for i in range(epochs)] # Sum all percentages for each epoch\n x = numpy.arange(epochs)\n ax3.bar(x,y,width=1,align='edge',color='lightgrey')\n ax3.set_facecolor('white')\n ax3.set_ylim(min(y)-1,max(y)+1)\n ax3.set_title('Reconstruction accuracy')\n plt.setp(ax3.get_xticklabels(), visible=False)\n plt.show()", "def build(self):\n input_shape_img = (None, None, 3)\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n \n output_region_proposal = self.region_proposal_net(shared_layers, num_anchors)\n output_classifier = self.classifier(shared_layers,\n self.cnn_model.classifier_layers, \n roi_input, self.C.num_roi, \n num_class=len(self.class_count), trainable=True)\n \n self.model_region_proposal = Model(img_input, output_region_proposal[:2])\n self.model_classifier = Model([img_input, roi_input], output_classifier)\n self.model_all = Model([img_input, roi_input], output_region_proposal[:2] + output_classifier)\n\n optimizer = Adam(lr=1e-5)\n self.model_region_proposal.compile(optimizer=optimizer, \n loss=[losses.rpn_loss_cls(num_anchors), \n losses.rpn_loss_regr(num_anchors)])\n self.model_classifier.compile(optimizer=optimizer, \n loss=[losses.class_loss_cls, \n 
losses.class_loss_regr(len(self.class_count)-1)], \n metrics={'dense_class_{}'.format(len(self.class_count)): 'accuracy'})\n self.model_all.compile(optimizer='sgd', loss='mae')\n\n # print(self.model_all.summary())\n plot_model(self.model_region_proposal, show_shapes=True, to_file='./frcnn/images/region_proposal.png')\n plot_model(self.model_classifier, show_shapes=True, to_file='./frcnn/images/classifier.png')\n plot_model(self.model_all, show_shapes=True, to_file='./frcnn/images/model_all.png')" ]
[ "0.6698724", "0.6544087", "0.64400065", "0.640361", "0.63593155", "0.63541394", "0.6300235", "0.6192498", "0.61555034", "0.6152484", "0.61209786", "0.60836124", "0.6074417", "0.605095", "0.60388404", "0.6032587", "0.6015973", "0.6003886", "0.59807396", "0.59580404", "0.59542066", "0.59390336", "0.5925847", "0.59107965", "0.5908761", "0.58984566", "0.5890044", "0.58875597", "0.5883409", "0.5881315", "0.58706605", "0.58671147", "0.5856021", "0.58504415", "0.5836468", "0.58166456", "0.5812679", "0.5811903", "0.58039", "0.5799228", "0.57960063", "0.5795398", "0.57922655", "0.57760954", "0.57706636", "0.5767298", "0.5760102", "0.575771", "0.575771", "0.5748956", "0.574012", "0.5722571", "0.57181275", "0.57016695", "0.5696625", "0.5686351", "0.5685785", "0.56735", "0.56672096", "0.56667495", "0.5666043", "0.56539434", "0.56532073", "0.5650845", "0.56505054", "0.5649409", "0.5649334", "0.5648685", "0.56474644", "0.56456393", "0.56395704", "0.5634692", "0.56303656", "0.56279284", "0.5627286", "0.5625585", "0.5624667", "0.56217843", "0.5618968", "0.561848", "0.56154525", "0.56141233", "0.5604709", "0.5598402", "0.55963606", "0.55957186", "0.55908203", "0.5584298", "0.5582925", "0.5581266", "0.5577324", "0.55756474", "0.55723757", "0.5571139", "0.5565069", "0.55630356", "0.5559328", "0.5554188", "0.5553244", "0.55525106" ]
0.60113895
17
Parse the auditbeat log file to generate an audit event model and write it to the result file (optional)
def parse(self, output=True):\n if not self.type == LogType.audit:\n log.error(\"LogParser doesn't support non-audit log types yet.\")\n return\n\n stashes = list()\n\n with open(self.path_log, 'r') as f:\n for line in f.readlines():\n event: Dict = json.loads(line)\n keys = event.keys()\n\n # drop irrelevant keys of dict\n for key in DROPS:\n if key in event.keys():\n event.pop(key)\n\n # retrieve json info\n timestamp, process, file = None, None, None\n if \"@timestamp\" in event.keys():\n timestamp = event[\"@timestamp\"]\n if \"process\" in event.keys():\n process = event[\"process\"]\n if \"file\" in event.keys():\n file = event[\"file\"]\n\n try:\n audit: Dict = event[\"auditd\"]\n except KeyError:\n raise KeyError(f\"line: {line} does not have audit field, parse failed.\")\n\n # reconstruct audit unit\n paths, session = None, None\n if \"paths\" in audit.keys():\n paths = audit[\"paths\"]\n if \"session\" in audit.keys():\n session = audit[\"session\"]\n\n try:\n msg_type, result, sequence, data = \\\n audit[\"message_type\"], audit[\"result\"], audit[\"sequence\"], audit[\"data\"]\n except KeyError:\n raise KeyError(f\"Audit {audit} does not have certain keys, parse failed.\")\n\n auditd = Auditd(paths, msg_type, sequence, result, data, session)\n beat_state = BeatState(timestamp, process, file, auditd)\n\n # # TODO: the current code is to add dict format data\n # self.events.append(beat_state)\n stashes.append(beat_state)\n\n return stashes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()", "def parse_log_file(filename, job_name):\n\n time_re = \"(\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2})\"\n time_pat = re.compile(time_re)\n pat = re.compile(time_re + \".*RUM\\.Workflow.*(START|FINISH)\\s+(.*)\")\n\n time_fmt = \"%Y/%m/%d %H:%M:%S\"\n\n first_time = None\n \n with open(filename) as f:\n for line in f:\n if first_time is None:\n m = time_pat.match(line)\n if m is None:\n raise Exception(\"Couldn't parse time from \" + line)\n tm = m.group(1)\n print \"TM is \" + str(tm)\n first_time = time.strptime(tm, time_fmt)\n print \"First time is \" + str(first_time)\n\n yield Event(first_time, 'START', 'log', job_name, filename)\n m = pat.match(line)\n if (m is not None):\n (tm, type, step) = m.groups()\n t = time.strptime(tm, time_fmt)\n e = Event(t, type, step, job_name, filename)\n yield e", "def generate_audit(self, output_path):\n \n with open(output_path, 'wb') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow([entry[0] for entry in self.fields])\n for trade, bo_error in self.errors.items():\n values = self.get_values(trade)\n csvwriter.writerow(values)\n \n print('Output written to %s' % output_path)", "def process_log_file(cur, filepath):\n \n # open log file\n df = pd.read_json(filepath,lines=True)\n\n # filter by NextSong action - i.e. 
get only listening music events from the logs\n df = df[(df.page == \"NextSong\")]\n\n # insert time records\n __insert_time_data(cur, df)\n \n # insert user records\n __insert_user_data(cur, df)\n \n # insert songplay records\n __insert_songplay_data(cur, df)\n \n # erase dataframe\n df = df.iloc[0:0]", "def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2", "def events(self):\n for line_num, line in enumerate(self.file_handler):\n if not line:\n break\n # process line input to dictionary\n data = json.loads(line)\n # add id information\n data['id'] = line_num\n # update timestamp history\n timestamp = self._get_timestamp(data)\n self.last_two_timestamps = [self.last_two_timestamps[-1], timestamp]\n self.event_timestamps[line_num] = timestamp\n\n self.alarms.append(0) # add field for alarms\n self.users.append(data['user']) # add field for user\n self.anomalies.append(data.get('is_anomaly', 0)) # add field for anomalies\n if 'is_anomaly' in data:\n del data['is_anomaly'] # remove anomaly information from data for contestants\n\n # return line id and serialized JSON as string representing one event\n str_dump = json.dumps(data)\n logger.info(self._get_inner_time() + ' > ' + str_dump)\n yield line_num, str_dump", "def export_log(self):\r\n if self.log[\"datetime\"] is not None and not self.log[\"datetime\"] == \"\":\r\n logs_dir = ''\r\n user = 'default'\r\n program_data = 'data\\program_data.json5'\r\n with open(program_data) as f:\r\n config = json.load(f)\r\n logs_dir = config.get(\"logs_records_path\", \"\")\r\n user = config.get(\"user\", \"default\")\r\n file_name = user+\" \"+self.log[\"datetime\"].replace(\"/\", \"\")\r\n file_name = file_name.replace(\" \", \"_\")\r\n file_name = file_name.replace(\":\", \"\")\r\n cwd = os.getcwd()\r\n if not logs_dir == \"\" and os.path.exists(logs_dir):\r\n if not user in os.listdir(logs_dir):\r\n os.makedirs(os.path.join(logs_dir, user))\r\n logs_dir = os.path.join(logs_dir, user)\r\n file_name = os.path.join(logs_dir, file_name)\r\n self.save_records(file_name)\r\n elif \"logs\" in os.listdir(cwd):\r\n folder = os.path.join(cwd, \"logs\")\r\n file_name = os.path.join(folder, file_name)\r\n self.save_records(file_name)\r\n self.reset_values()", "def read_game_logs(file_path):\n\n if os.path.isfile(file_path):\n with open(file_path, \"r\") as read_file:\n log = json.load(read_file)\n # event_type = set([e[\"event\"] for e in log ])\n # the event types: command, text_message, set_attribute, join\n # print(\"event types\", event_type)\n\n # sort all messages chronologically\n log.sort(key=lambda x: x[\"date_modified\"])\n\n start = None\n end = None\n real_end = None # WHen The came master says COngrats or you die, because rest of the messages looks like bugs...\n episode_list = []\n length = len(log)\n game_finished = False\n # Episode are being searched between 2 starts commands\n # only the one where the command done has been issued is kept\n for i, l in enumerate(log):\n if \"command\" in 
l.keys():\n if l[\"command\"] == \"start\":\n if start == None:\n start = i\n elif end == None:\n end = i\n if l[\"command\"] == \"done\":\n game_finished = True\n\n if l[\"user\"][\"id\"] == 1 and l[\"event\"] == \"text_message\" and type(l[\"message\"]) is str and (\n l[\"message\"].startswith(\"Congrats\") or l[\"message\"].startswith(\n \"The rescue robot has not reached you\")):\n real_end = i + 1 # +1 because we want to include this message in the log slice...\n if start is not None and end is not None:\n if game_finished:\n episode_list.append(log[start:real_end])\n start = end\n end = None\n real_end = None\n game_finished = False\n\n if i + 1 == length:\n if start is not None and end is None and game_finished:\n episode_list.append(log[start:real_end])\n\n score_list = {}\n for i, e in enumerate(episode_list):\n # the number of answers the avatar utters gives us the number of question asked\n # num_questions = sum(\n # [1 for m in e if m[\"user\"][\"name\"] == \"Avatar\" and m[\"event\"] == \"text_message\"])\n\n # Just sum every messages ending with a question mark issueed by the user...\n num_questions = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].endswith(\"?\")])\n\n # user id 1 is alway the game master, we are looping here on the messages of the \"real\" player\n # when we tell the avatar to change location, we don't get an answer, this is why the substraction gives the number of orders\n # this does not include the order \"done\"\n # num_orders = sum(\n # [1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n # \"event\"] == \"text_message\"]) - num_questions\n\n # Just sum every order of type \"go west\". 
Describe orders are not counted.\n num_orders = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and (\n \"east\" in m[\"message\"].lower() or \"north\" in m[\"message\"].lower() or \"west\" in m[\n \"message\"].lower() or \"south\" in m[\"message\"].lower() or \"back\" in m[\"message\"].lower())])\n\n game_won = sum([1 for m in e if m[\"user\"][\"id\"] == 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].startswith(\"Congrats\")]) > 0\n\n # Work-Around - the final reward giving +1.0 on success and -1.0 on loss happens after the messages\n # Saying \"congratulations\" or \"you die horribly\" just repeating the message when the game starts.\n # We had to exclude that message to segment finished games but this is why we have to add these rewards here manually...\n\n final_reward = -1.0\n if game_won:\n final_reward = 1.0\n score_list[i] = {\"score\": sum([m[\"message\"][\"observation\"][\"reward\"] for m in e if\n \"message\" in m.keys() and type(m[\"message\"]) is dict])+final_reward,\n \"num_questions\": num_questions, \"num_orders\": num_orders, \"game_session\": e,\n \"game_won\": game_won}\n\n return score_list\n\n else:\n raise Exception(f\"{file_path} is not a correct file path.\")", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def __parse(self):\n lines = self.file.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n tokens = line.split()\n 
if tokens[0] == \"#start\":\n trial_name = tokens[1]\n trial = Trial(trial_name)\n self.trials[trial_name] = trial\n elif tokens[0] == \"#end\":\n continue\n else:\n date_str = tokens[0] + \" \" + tokens[1]\n date = datetime.strptime(date_str, \"%m/%d/%y %H:%M:%S\")\n sound_file = line[18:-1].strip()\n event = Event(date, sound_file, 0)\n trial.addevent(event)", "def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type # print now only for degug\n\n for i in self.ignore:\n if i in event.src_path or os.path.isdir(event.src_path):\n print \"Ignoring...\"\n return\n\n mod_file = event.src_path.split(self.source)[1]\n for r in self.rules:\n mod_file = mod_file.replace(r[0], r[1])\n\n print \"Writing:\", (self.destination + mod_file)\n \n input_file = utils.readFile(event.src_path)\n\n file_type = mod_file.split(\".\")[-1]\n reverted = utils.revert( input_file, \"(*\", \"*)\" ) if file_type == \"thy\" else utils.revert( input_file, \"/*\", \"*/\" )\n \n if len( reverted ) == 0 and len( input_file ) != 0:\n print \"Something might be wrong??\"\n else: utils.writeFile( self.destination + mod_file, reverted )", "def processEventLog(log):\n pass", "def parse_log(path_to_log):\n regex_iteration = re.compile('Iteration (\\d+), loss = ([\\.\\deE+-]+)')\n regex_train_output = re.compile('Train net output #(\\d+): (\\S+) = ([\\.\\deE+-]+)')\n regex_learning_rate = re.compile('lr = ([\\.\\deE+-]+)')\n regex_test_output = re.compile('Test net output #(\\d+): detection_eval = ([\\.\\deE+-]+)')\n\n\n # Pick out lines of interest\n iteration = 0\n loss = -1\n learning_rate = 0.001\n train_dict_list = []\n train_row = None\n test_score=0.0\n\n logfile_year = extract_seconds.get_log_created_year(path_to_log)\n with open(path_to_log) as f:\n start_time = extract_seconds.get_start_time(f, logfile_year)\n last_time = start_time\n\n for line in f:\n iteration_match = regex_iteration.search(line)\n if iteration_match:\n iteration = float(iteration_match.group(1))\n loss = float(iteration_match.group(2))\n try:\n time = extract_seconds.extract_datetime_from_line(line,\n logfile_year)\n except:\n # Skip lines with bad formatting, for example when resuming solver\n continue\n\n # if it's another year\n if time.month < last_time.month:\n logfile_year += 1\n time = extract_seconds.extract_datetime_from_line(line, logfile_year)\n last_time = time\n\n seconds = (time - start_time).total_seconds()\n\n learning_rate_match = regex_learning_rate.search(line)\n\n if learning_rate_match:\n learning_rate = float(learning_rate_match.group(1))\n\n test_score_match = regex_test_output.search(line)\n if test_score_match:\n test_score = float(test_score_match.group(2))\n\n train_dict_list, train_row = parse_line_for_net_output(\n regex_train_output, train_row, train_dict_list,\n line, iteration, seconds, learning_rate,loss,test_score\n )\n\n\n return train_dict_list", "def parse(self, **kwargs):\n output_filename = self.node.get_option('output_filename')\n jobname = self.node.get_option('jobname')\n if jobname is not None:\n output_filename = \"log-\" + jobname + \".yaml\"\n # Check that folder content is as expected\n files_retrieved = self.retrieved.list_object_names()\n files_expected = [output_filename]\n # Note: set(A) <= set(B) checks whether A is a subset of B\n if not set(files_expected) <= set(files_retrieved):\n self.logger.error(\"Found files '{}', expected to find '{}'\".format(\n files_retrieved, files_expected))\n return self.exit_codes.ERROR_MISSING_OUTPUT_FILES\n\n # add 
output file\n self.logger.info(\"Parsing '{}'\".format(output_filename))\n# print(self.retrieved._repository._get_base_folder().get_abs_path(output_filename))\n output = BigDFTLogfile(self.retrieved._repository._get_base_folder().\n get_abs_path(output_filename))\n try:\n output.store()\n except ValidationError:\n self.logger.info(\"Impossible to store LogFile - ignoring '{}'\".\n format(output_filename))\n\n# with self.retrieved.open(output_filename, 'rb') as handle:\n# output_node = SinglefileData(file=handle)\n# output_dict_aiida=orm.Dict(dict=output_dict)\n# output_dict_aiida.store()\n# output_log_aiida=BigDFTLogfile(output)\n self.out('bigdft_logfile', output)\n\n return ExitCode(0)", "def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s [%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )", "def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")", "def _readin_evtx(file):\n\tcontent = []\n\tunparsed_entries = 0\n\twith evtx.Evtx(file) as log:\n\t\tc = 0\n\t\tsources = []\n\t\tfor record in log.records():\n\t\t\tc += 1\n\t\t\t_print_progress(c)\n\t\t\ttry:\n\t\t\t\tobj = untangle.parse(record.xml())#untangle can produce an OSError on Windows, since Windows uses a different format for timestamps\n\t\t\texcept OSError:\n\t\t\t\tc -= 1\n\t\t\t\tunparsed_entries += 1\n\t\t\t\tcontinue\n\t\t\tcurr_obj = obj.Event.System\n\t\t\tdate = curr_obj.TimeCreated['SystemTime']\n\t\t\tif '.' in date:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\telse:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S\")\n\t\t\tfull_line = record.xml()\n\t\t\tif hasattr(curr_obj,'Provider'):\n\t\t\t\tsource = curr_obj.Provider['Name']\n\t\t\telse:\n\t\t\t\tsource = ''\n\t\t\tif ( (not source in sources) and (not sources == '')):\n\t\t\t\tsources.append(source)\n\t\t\tline_nr = curr_obj.EventRecordID.cdata\n\t\t\tcontent.append(logfile_entry(int(line_nr), file, curr_obj.EventID.cdata, full_line, date, curr_obj.Computer.cdata, source))\n\t\t_delete_print()\n\tif unparsed_entries > 0:\n\t\tprint('Unfortunately, {} entries could not be parsed. 
Please see the documentation'.format(unparsed_entries))\n\t\tprint()\n\treturn logfile(file, len(content), 'evtx', content, sources)", "async def _record_logs(self, report):\n\t\tif report.action == Frame.Report.PARSE:\n\t\t\t# Collects the tests parsing log for further writing to Test_Parser.log\n\t\t\tif report.success:\n\t\t\t\tself._parse_logs[\"success\"] += [report.log]\n\t\t\telse:\n\t\t\t\tself._parse_logs[\"failure\"] += [report.log]\n\t\telif report.action == Frame.Report.EXECUTE:\n\t\t\t# Writes a test log and dump to the results directory\n\t\t\ttest_log = (\"EXECUTE STATUS: SUCCESS\\n\\n\" if report.success else \"EXECUTE STATUS: FAILURE\\n\\n\") + report.log\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, FileSystem.dump_to, \n\t\t\t\t self._result_directory_name + \"/Log/\" + report.test_name + \".log\", test_log)]):\n\t\t\t\tawait task\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, TestLogger._write_test_dump, \n\t\t\t\t self._result_directory_name + \"/Dump/\" + report.test_name + \".pcap\", report.dump)]):\n\t\t\t\tawait task", "def test_fortify_parse_class_audit(fortify_tool_plugin):\n package = Package('test', os.path.dirname(__file__))\n tree = etree.parse(os.path.join(os.path.dirname(__file__),\n 'class_audit.fvdl'))\n root = tree.getroot()\n issues = fortify_tool_plugin.parse_output(root, package)\n assert len(issues) == 1\n assert issues[0].filename\n assert issues[0].line_number == '542'\n assert issues[0].tool == 'fortify'\n assert issues[0].issue_type == 'structural'\n assert issues[0].severity == '3'\n assert issues[0].message", "def process(self, event):\n # the file will be processed there\n print (event.src_path, event.event_type) # print now only for degug", "def ParseLogFile(log_file, test_data_dict, failure_dict, test, builder,\n build_num, build_link):\n\n lines = []\n with open(log_file, 'r') as infile:\n lines = infile.readlines()\n\n passed = {}\n failed = {}\n not_run = {}\n date = ''\n status = ''\n board = ''\n num_provision_errors = 0\n build_ok = True\n afe_line = ''\n\n for line in lines:\n if line.rstrip() == '<title>404 Not Found</title>':\n print('Warning: File for %s (build number %d), %s was not found.' 
%\n (builder, build_num, test))\n build_ok = False\n break\n if '[ PASSED ]' in line:\n test_name = line.split()[0]\n if test_name != 'Suite':\n passed[test_name] = True\n elif '[ FAILED ]' in line:\n test_name = line.split()[0]\n if test_name == 'provision':\n num_provision_errors += 1\n not_run[test_name] = True\n elif test_name != 'Suite':\n failed[test_name] = True\n elif line.startswith('started: '):\n date = line.rstrip()\n date = date[9:]\n date_obj = time.strptime(date, '%a %b %d %H:%M:%S %Y')\n int_date = (\n date_obj.tm_year * 10000 + date_obj.tm_mon * 100 + date_obj.tm_mday)\n date = time.strftime('%a %b %d %Y', date_obj)\n elif not status and line.startswith('status: '):\n status = line.rstrip()\n words = status.split(':')\n status = words[-1]\n elif line.find('Suite passed with a warning') != -1:\n status = 'WARNING'\n elif line.startswith('@@@STEP_LINK@Link to suite@'):\n afe_line = line.rstrip()\n words = afe_line.split('@')\n for w in words:\n if w.startswith('http'):\n afe_line = w\n afe_line = afe_line.replace('&amp;', '&')\n elif 'INFO: RunCommand:' in line:\n words = line.split()\n for i in range(0, len(words) - 1):\n if words[i] == '--board':\n board = words[i + 1]\n\n test_dict = test_data_dict[test]\n test_list = test_dict['tests']\n\n if build_ok:\n for t in test_list:\n if not t in passed and not t in failed:\n not_run[t] = True\n\n total_pass = len(passed)\n total_fail = len(failed)\n total_notrun = len(not_run)\n\n else:\n total_pass = 0\n total_fail = 0\n total_notrun = 0\n status = 'Not found.'\n if not build_ok:\n return [], date, board, 0, ' '\n\n build_dict = dict()\n build_dict['id'] = build_num\n build_dict['builder'] = builder\n build_dict['date'] = date\n build_dict['build_link'] = build_link\n build_dict['total_pass'] = total_pass\n build_dict['total_fail'] = total_fail\n build_dict['total_not_run'] = total_notrun\n build_dict['afe_job_link'] = afe_line\n build_dict['provision_errors'] = num_provision_errors\n if status.strip() == 'SUCCESS':\n build_dict['color'] = 'green '\n elif status.strip() == 'FAILURE':\n build_dict['color'] = ' red '\n elif status.strip() == 'WARNING':\n build_dict['color'] = 'orange'\n else:\n build_dict['color'] = ' '\n\n # Use YYYYMMDD (integer) as the build record key\n if build_ok:\n if board in test_dict:\n board_dict = test_dict[board]\n else:\n board_dict = dict()\n board_dict[int_date] = build_dict\n\n # Only keep the last 5 records (based on date)\n keys_list = board_dict.keys()\n if len(keys_list) > MAX_SAVE_RECORDS:\n min_key = min(keys_list)\n del board_dict[min_key]\n\n # Make sure changes get back into the main dictionary\n test_dict[board] = board_dict\n test_data_dict[test] = test_dict\n\n if len(failed) > 0:\n RecordFailures(failure_dict, board, test, builder, int_date, log_file,\n build_num, failed)\n\n summary_result = '[%2d/ %2d/ %2d]' % (total_pass, total_fail, total_notrun)\n\n return summary_result, date, board, int_date, build_dict['color']", "def main():\n \n Y1, Y2 = 2005, 2017 ### range with coordinates supplied in pre-2018 generated archive\n\n if len(sys.argv) > 1 and int(sys.argv[1]) > 0:\n Y1 = int(sys.argv[1])\n \n if len(sys.argv) > 2 and int(sys.argv[2]) > Y1:\n Y2 = int(sys.argv[2])\n \n with open('data/audit.log','w') as output:\n for Y in range(Y1, Y2):\n df = pd.read_csv('data/{}.csv'.format(Y), low_memory = False)\n output.write('\\n--- {} --------------------\\n'.format(Y))\n\n # remove `deleted` records\n df['deleted'] = df['deleted'].apply(yes_no)\n df = df[df['deleted'] == 0]\n\n # 
remove misc misdemeanors\n df = df[~df['category'].isin(drop)]\n\n # validate date and expand into Y,N,D,W,H\n df['dt'] = df['incident_date'].apply(extract)\n df = df[~df['dt'].isnull()]\n\n # convert from plane state to longitude-latitude\n df['ll'] = df.apply(to_lnglat, axis = 1)\n\n # init features\n features = df.loc[:,['category','stat','address','city','zip']]\n features['id'] = df['incident_id']\n dt = ['year','month','day','weekday','hour']\n for i in range(len(dt)):\n features[dt[i]] = df['dt'].apply(lambda x: x[i] )\n\n features['lng'] = df['ll'].apply(lambda x: x[0])\n features['lat'] = df['ll'].apply(lambda x: x[1])\n\n features['gang'] = df['gang_related'].apply(yes_no)\n features['category'] = df['category'].apply(collapse)\n cat = set(features.groupby(['category']).size().reset_index(name='count')['category'].tolist())\n output.write('Categories: {}\\n'.format(len(cat)))\n\n output.write('Date miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['year'] > 2000) & (~features['weekday'].isnull())])/len(features))))\n output.write('Location miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['zip'] > 0) | (features['lat'] > 0)])/len(features))))\n\n # keep records with valid date\n features['date'] = df['dt'].apply(lambda x: datetime.date(x[0], x[1], x[2]))\n features = features[(features['year'] > 2000) & (~features['weekday'].isnull())]\n output.write('Time miss: {:.4f}%\\n'.format(100 * len(features[features['hour'] == -1])/len(features)))\n\n # potential `time-unknown` issue\n output.write('Hour ZERO: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 0])/len(features)))\n output.write('Hour NOON: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 12])/len(features)))\n\n features = features[(features['zip'] > 0) | (features['lat'] > 0)]\n\n # get the best possible coordinates + zipcode assessment\n features[['zip','lng','lat']] = features[['zip','lng','lat']].apply(fix_location, axis = 1)\n output.write('Failed location: {:.4f}%\\n'.format(100 * len(features[features['zip'].isnull()])/len(features)))\n features = features[~features['zip'].isnull()]\n features['zip'] = df['zip'].apply(lambda x: str(x)[:5])\n \n # normalize city attr\n features = features.join(zipcodes[['zip','city']].set_index('zip'), on = 'zip', lsuffix = '_orig', rsuffix = '')\n features.loc[features['city'].isnull(), 'city'] = features.loc[features['city'].isnull(), 'city_orig']\\\n .apply(lambda x: x if type(x) == float else ' '.join([l[0].upper() + l[1:] for l in x.split()]))\n\n # reduce to LA bounding-box\n features = features[(features['lng'] > -119) & (features['lng'] < -116)]\n features = features[(features['lat'] > 32) & (features['lat'] < 35)]\n\n # save csv\n features[fields].to_csv('data/F{}.csv'.format(Y), index = False)\n features[fields].to_json('data/F{}.json'.format(Y), orient = 'records')\n output.close()", "def parse_log_file(self, compute_stress=False):\n output_filename = self.node.get_option('output_filename')\n output_txt = self.retrieved.get_object_content(output_filename)\n try:\n output_data = read_log_file(output_txt, compute_stress=compute_stress)\n except Exception:\n traceback.print_exc()\n return None, self.exit_codes.ERROR_LOG_PARSING\n return output_data, None", "def parse_cutadapt_logs(self, f):\n fh = f['f']\n regexes = {\n 'bp_processed': \"Total basepairs processed:\\s*([\\d,]+) bp\",\n 'bp_written': \"Total written \\(filtered\\):\\s*([\\d,]+) bp\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'r_processed': \"Total 
reads processed:\\s*([\\d,]+)\",\n 'r_with_adapters': \"Reads with adapters:\\s*([\\d,]+)\"\n }\n s_name = None\n for l in fh:\n # New log starting\n if l.startswith('This is cutadapt'):\n s_name = None\n \n # Get sample name from end of command line params\n if l.startswith('Command line parameters'):\n s_name = l.split()[-1]\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name in self.cutadapt_data:\n log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n self.cutadapt_data[s_name] = dict()\n self.cutadapt_length_counts[s_name] = dict()\n self.cutadapt_length_exp[s_name] = dict()\n self.cutadapt_length_obsexp[s_name] = dict()\n \n if s_name is not None:\n # Search regexes for overview stats\n for k, r in regexes.items():\n match = re.search(r, l)\n if match:\n self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))\n\n if 'length' in l and 'count' in l and 'expect' in l:\n # Nested loop to read this section while the regex matches\n for l in fh:\n r_seqs = re.search(\"^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)\", l)\n if r_seqs:\n a_len = int(r_seqs.group(1))\n self.cutadapt_length_counts[s_name][a_len] = int(r_seqs.group(2))\n self.cutadapt_length_exp[s_name][a_len] = float(r_seqs.group(3))\n if float(r_seqs.group(3)) > 0:\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))\n else:\n # Cheating, I know. Infinity is difficult to plot.\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2))\n else:\n break\n \n # Calculate a few extra numbers of our own\n for s_name in self.cutadapt_data.keys():\n if 'bp_processed' in self.cutadapt_data[s_name] and 'bp_written' in self.cutadapt_data[s_name]:\n self.cutadapt_data[s_name]['percent_trimmed'] = (float(self.cutadapt_data[s_name]['bp_processed'] - self.cutadapt_data[s_name]['bp_written']) / self.cutadapt_data[s_name]['bp_processed']) * 100", "def inner():\n for line in file_obj:\n logdata = tilak_haproxylog.parse_line(line)\n if logdata is not None:\n logdata[\"hits\"] = 1\n for value_key in value_keynames:\n if value_key not in logdata:\n logdata[value_key] = 0\n status_code = int(logdata[\"status_code\"])\n if 100 <= status_code <= 199:\n logdata[\"rsp_1xx\"] = 1\n elif 200 <= status_code <= 299:\n logdata[\"rsp_2xx\"] = 1\n elif 300 <= status_code <= 399:\n logdata[\"rsp_3xx\"] = 1\n elif 400 <= status_code <= 499:\n logdata[\"rsp_4xx\"] = 1\n elif 500 <= status_code <= 599:\n logdata[\"rsp_5xx\"] = 1\n else:\n logdata[\"rsp_other\"] = 1\n ret_data = dict(zip(index_keynames, (logdata[index_key] for index_key in index_keynames)))\n ret_data.update(dict(zip(value_keynames, (logdata[value_key] for value_key in value_keynames))))\n yield (logdata[\"ts\"], ret_data)", "def parseLog(self, log_lines):\n abstract", "def parseMonitorLog(log_file, attack_props):\n if not os.path.exists(log_file):\n return\n report = open(log_file, 'r')\n lines = report.readlines()\n #print lines\n report.close()\n \n readingStations = False\n readingAps = False\n for line in lines:\n line = line.strip()\n #print line\n if not readingStations and not readingAps:\n if line.startswith(\"BSSID\"):\n readingAps = True\n continue\n elif line.startswith(\"Station\"):\n readingStations = True\n continue\n elif readingAps:\n if len(line) < 4:\n readingAps =False\n else:\n fields = line.split(',')\n #print fields\n ap_mac = fields[0].strip()\n if attack_props.hasAP(ap_mac):\n ap = attack_props.getActiveAP(ap_mac)\n else:\n ap = AccessPoint(ap_mac, attack_props.log_path)\n 
attack_props.addActiveAP(ap)\n ap.update(fields)\n elif readingStations and len(line) > 4:\n fields = line.split(',')\n station_mac = fields[0].strip()\n ap_mac = fields[5].strip()\n if attack_props.hasAP(ap_mac):\n ap = attack_props.getAP(ap_mac) \n if ap.stations.has_key(station_mac):\n station = ap.stations[station_mac]\n else:\n station = Station(station_mac)\n ap.stations[station_mac] = station\n station.ap = station\n station.update(fields)", "def run(self):\n if self.log_file: # if path of SSH-log file is valid\n # Rotate & parse the log file\n self.parse_log_file()\n # Analyze the log for deviating algorithm\n self.check_manipulation()", "def parse_log(self,filename,log_year):\n\n \n download_filename=os.sep.join([self.source_dir,filename])\n my_logger.debug(\"parsing log file: %s\" % download_filename)\n try:\n f = open(download_filename,mode='rt')\n except IOError:\n my_logger.debug( \"can't open file %s\" % download_filename)\n return\n\n #\n # return list of report objects\n L=[]\n\n #\n # parse & extract fields into new report object\n # parse to determine exact category\n # parse to determine geoscope\n state = STATE_INIT\n new_state = STATE_INIT\n current_crime_category=None\n line_index = 0\n previous_report_index=0\n for line in f:\n line_index=line_index+1\n #\n # state machine:\n # transition from init -> find_category \n # transition from find_category to find_report after finding first category\n\n if state==STATE_INIT:\n new_state = STATE_FIND_CATEGORY\n\n elif state==STATE_FIND_CATEGORY:\n #\n # find first instance of crime category heading\n match_crime_header = CATEGORY_REGEXP.search(line)\n match_report=REPORT_DATE_REGEXP.search(line)\n \n if match_crime_header and (match_report==None):\n #\n # found crime header\n my_logger.debug(\"========== TRANSITION TO FIND_REPORT\\n\")\n my_logger.debug('%d %s' % (line_index,line))\n new_state = STATE_FIND_REPORT\n\n #\n # remember where this category occurred\n category_line_index=line_index\n\n current_crime_category = self.extract_crime_category(match_crime_header)\n \n elif match_crime_header and match_report:\n #\n # error: both detectors triggered by this line\n my_logger.debug('match_crime_header and match_report triggered by (%s)' % line)\n raise ValueError\n elif (match_crime_header==None) and (match_report):\n #\n # error: found report line before first category\n my_logger.debug(\"found report prematurely in (%s)\\n\" % line)\n raise ValueError\n else:\n #\n # neither crime header nor crime report, so ignore it\n pass\n\n elif state==STATE_FIND_REPORT:\n my_logger.debug('%d %s' % (line_index,line[0:-1])) # -1 to avoid extra LF\n \n #\n # sanity check:\n # \"run\" of valid reports is too long\n if (category_line_index-line_index) > 20:\n my_logger.debug(\"run of reports too long: skipped category?\")\n raise ValueError\n\n match_crime_header = CATEGORY_REGEXP.search(line)\n match_report=REPORT_DATE_REGEXP.search(line)\n\n if match_crime_header and (match_report==None):\n #\n # came across new crime category\n current_crime_category = self.extract_crime_category(match_crime_header)\n new_state = STATE_FIND_REPORT\n\n category_line_index=line_index\n\n elif (match_crime_header==None) and match_report:\n #\n # found report\n new_state = STATE_FIND_REPORT\n\n report=self.parse_report_line(line)\n report['category']=current_crime_category\n report['line_num']=line_index\n report['date_year']=log_year\n L.append(report)\n\n #\n # sanity check\n # reports should be <= 2 lines apart\n if (line_index - 
max([category_line_index,previous_report_index])) > 2:\n my_logger.debug('WARNING: possible skipped report')\n my_logger.debug('current line: %d' % line_index)\n my_logger.debug('last report or category: %d' %\n max([category_line_index,previous_report_index]))\n\n # remember this line index\n previous_report_index=line_index\n\n else:\n #\n # neither regexp matched, so ignore it\n pass\n\n state=new_state\n\n f.close()\n return L", "def process_log(log_file):\n result_list = []\n file_root = get_fname_root(log_file)\n\n with open(log_file) as l_file:\n reading_steps = False\n result_dict = {}\n for line in l_file:\n line = line.strip()\n if STEP_PAT.match(line):\n reading_steps = True\n result_dict[FILE_NAME] = file_root\n result_dict[TIMESTEP] = int(line.split()[2])\n elif reading_steps:\n if len(line) == 0:\n break\n s_line = line.split()\n if s_line[0] == TOTENG:\n for key_id, key in enumerate([TOTENG, KINENG, TEMP]):\n result_dict[key] = float(s_line[2 + key_id * 3])\n elif s_line[0] == POTENG:\n for key_id, key in enumerate([POTENG, E_BOND, E_ANGL]):\n result_dict[key] = float(s_line[2 + key_id * 3])\n elif s_line[0] == E_DIHED:\n for key_id, key in enumerate([E_DIHED, E_IMPRO, E_VDWL]):\n result_dict[key] = float(s_line[2 + key_id * 3])\n elif s_line[0] == E_COUL:\n for key_id, key in enumerate([E_COUL, E_LONG, PRESS]):\n result_dict[key] = float(s_line[2 + key_id * 3])\n result_list.append(dict(result_dict))\n else:\n # when stop matching, done reading file (either by normal or abnormal termination)\n break\n\n return result_list", "def gen_parse_log(file_info, fails_percent):\n logging.info(\"parsing log %s started\", file_info.path)\n url_time_re = re.compile(r'.*?(?:GET|POST)\\s+(.*?)\\s+.*?(\\d+\\.\\d*)(?:$|\\n|\\r)')\n log_file = open(file_info.path, \"r\") if not file_info.ext else gzip.open(file_info.path, \"r\")\n\n fails = 0\n count = 0\n urls = {}\n all_time = 0\n\n for log_line in log_file:\n if not isinstance(log_line, str):\n log_line = log_line.decode()\n count += 1\n match = url_time_re.match(log_line)\n if match:\n if match.group(1).strip() in urls:\n urls[match.group(1).strip()].append(float(match.group(2)))\n else:\n urls[match.group(1).strip()] = [float(match.group(2))]\n all_time += float(match.group(2))\n else:\n fails += 1\n\n MAIN_LOGGER.info(\"%f fails\", round(fails / count * 100, 2))\n\n if (fails / count * 100) > float(fails_percent):\n MAIN_LOGGER.error(\"can not parse file\")\n return None\n\n log_file.close()\n res_list = []\n for url in urls:\n entries = urls[url]\n res_list.append(\n {\"url\": url,\n \"count\": len(entries),\n \"entries\": entries,\n \"time_sum\": round(sum(entries), 3),\n }\n )\n\n sorted_res = list(sorted(res_list, key=lambda string: string[\"time_sum\"], reverse=True))\n for log_line in sorted_res:\n len_line = len(log_line['entries'])\n sum_line = sum(log_line['entries'])\n log_line[\"count_perc\"] = round(float(len_line) / float(count) * 100, 3)\n log_line[\"time_avg\"] = round(float(sum_line) / float(len_line), 3)\n log_line[\"time_max\"] = round(max(log_line['entries']), 3)\n log_line[\"time_med\"] = round(median(log_line['entries']), 3)\n log_line[\"time_perc\"] = round(float(sum_line) / float(all_time) * 100, 3)\n del log_line['entries']\n yield log_line", "def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 
'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log", "def _dump_test_parser_log(self):\n\t\tFileSystem.dump_to(self._result_directory_name + \"/\" + \"Test_Parser.log\", self._form_test_parser_log())", "def testProcess(self):\n plugin = skydrivelog.SkyDriveLog1TextPlugin()\n storage_writer = self._ParseTextFileWithPlugin(['skydrive_v1.log'], plugin)\n\n number_of_event_data = storage_writer.GetNumberOfAttributeContainers(\n 'event_data')\n self.assertEqual(number_of_event_data, 18)\n\n number_of_warnings = storage_writer.GetNumberOfAttributeContainers(\n 'extraction_warning')\n self.assertEqual(number_of_warnings, 1)\n\n number_of_warnings = storage_writer.GetNumberOfAttributeContainers(\n 'recovery_warning')\n self.assertEqual(number_of_warnings, 0)\n\n expected_event_values = {\n 'added_time': '2013-08-01T21:22:28.999+00:00',\n 'data_type': 'skydrive:log:entry',\n 'detail': '17.0.2011.0627 (Ship)',\n 'log_level': 'DETAIL',\n 'module': None,\n 'source_code': 'global.cpp:626!logVersionInfo'}\n\n event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)\n self.CheckEventData(event_data, expected_event_values)", "def parse(file):\n logger.info('parsing DL7 dive log data')\n log = Log()\n content = file.readline()\n while not content == '':\n __parse_line(log, content)\n content = file.readline()\n return log", "def validate_file(self, log_file):\n line_number = 1\n for line in open(log_file.get_path()):\n invalid_file_info = []\n errormessage = \"\"\n timestamp = \"\"\n #check if timestamp exists.\n style_num = 0\n for expression in self.expressions:\n timestamp = re.search(expression,line)\n if timestamp is not None:\n timestamp = timestamp.group(0)\n log_entry_time = dt.strptime(timestamp, self.date_time_styles[style_num])\n #compare timestamp to user timestamp\n if not (self.start_date <= log_entry_time <= self.end_date):\n errormessage = \"Timestamp invalid.\"\n break\n style_num = style_num + 1\n if not timestamp:\n errormessage = \"Timestamp does not exist.\"\n if errormessage:\n invalid_file_info.append(line_number)\n invalid_file_info.append(errormessage)\n log_file.add_errors(invalid_file_info)\n log_file.mark_invalid()\n line_number = line_number + 1\n\n return log_file", "def test_log(self):\r\n # expected result when no result_path is provided\r\n self.default_app(\r\n seq_path=self.tmp_seq_filepath,\r\n result_path=None,\r\n log_path=self.tmp_log_filepath,\r\n )\r\n\r\n # open the actual log file and the expected file, and pass into lists\r\n with open(self.tmp_log_filepath) as f:\r\n obs = [l.strip() for l in list(f)]\r\n exp = rdp_test1_log_file_contents.split('\\n')\r\n # sort the lists as the entries are written from a dict,\r\n # so order may vary\r\n obs.sort()\r\n exp.sort()\r\n self.assertEqual(obs, exp)", "def 
dump2txt(cls, argv):\n try:\n for filename in argv[1:]:\n parser = cls(filename)\n invalid_str = r\"[\\\\/:*?\\\"<>|]\" # Not allowed to use filename\n # Remove invalid text\n subject = re.sub(invalid_str, \"\", parser.subject)\n # Remove local time \"+09:00\", \"-\"\n title_date = parser.date[:-len(\"+09:00\")].replace(\"-\", \"\")\n # Remove invalid strings\n date = re.sub(invalid_str, \"\", title_date)\n result = parser.get_attr_data()\n # Overwrite same date+subject eml\n with open(f'{date}_{subject}.txt', 'w',\n encoding='utf-8') as _f:\n _f.write(result)\n except BaseException as e:\n with open('eml2ext_error.txt', 'w', encoding='utf-8') as _f:\n print(f'error {e}')\n # _f.write(e)", "def transform_log(filepath):\n cols = ['song', 'artist', 'userId', 'firstName', 'lastName',\n 'gender', 'level', 'sessionId', 'location', 'userAgent']\n\n result = ''\n with open(filepath, 'rt') as f:\n for line in f:\n jf = json.loads(line)\n if jf['userId'] and (jf['page'] == 'NextSong'):\n jf['userAgent'] = jf['userAgent'].strip('\"')\n \n temp1 = '\\t'.join([str(v) if (v := jf[k]) else '' for k in cols])\n\n t = round(jf['ts']/1000) # UNIX timestamp, ignore ms\n x = datetime.datetime.fromtimestamp(t)\n \n temp2 = '\\t'.join([x.strftime(\"%Y-%m-%d %H:%M:%S\"),\n str(x.hour),\n str(x.day),\n str(x.isocalendar()[1]),\n str(x.month),\n str(x.year),\n str(x.weekday() not in [5, 6])])\n\n result += temp1 + '\\t' + temp2 + '\\n'\n return result", "def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values", "def analysis(N):\n\n http_log_paths = get_http_logs()\n\n httplogs = []\n\n for path in http_log_paths:\n file = path+'/http.log'\n if os.path.isfile(file):\n httplogs.append(file)\n else:\n pass #print(path)\n\n fields = []\n\n for log in httplogs:\n with open(log) as f:\n lines = f.readlines()\n rows = len(lines)\n filesize = sum([len(line) for line in lines])\n\n tss = [] # time series\n methods = []\n uris = []\n uas = []\n request_body_lens = []\n response_body_lens = []\n status_codes = []\n filenames = []\n\n tmp = []\n\n for line in lines[8:len(lines)-1]:\n fs = line.strip().split('\\t')\n\n \"\"\"\n ts = fileds[0]\n uid = fileds[1]\n orig_h = fileds[2]\n orig_p = fileds[3]\n resp_h = fileds[4]\n resp_p = fileds[5]\n trans_depth = fileds[6]\n method = fileds[7]\n host = fileds[8]\n uri = fileds[9]\n referrer = fileds[10]\n user_agent = fileds[11]\n request_body_len = fileds[12]\n response_body_len = fileds[13]\n status_code = fileds[14]\n status_msg = fileds[15]\n info_code = fileds[16]\n info_msg = fileds[17]\n filename = fileds[18]\n tags = fileds[19]\n username = fileds[20]\n password = fileds[21]\n proxied = fileds[22]\n orig_fuids = fileds[23]\n orig_mime_types = fileds[24]\n resp_fuids = fileds[25]\n resp_mime_types = fileds[26]\n\n tss.append(ts)\n methods.append(method)\n uris.append(uri)\n uas.append(user_agent)\n request_body_lens.append(request_body_len)\n response_body_lens.append(response_body_len)\n status_codes.append(status_code)\n filenames.append(filename)\n \"\"\"\n\n tmp.append(fs[N])\n\n #print(log, rows, ','.join(methods))\n\n # time intervals\n 
#tss_sorted = sorted(map(float,tmp))\n #tss_sorted = map(float, tmp)\n #intervals = map(int,[tss_sorted[i+1]-tss_sorted[i] for i in range(len(tss_sorted)-1)])\n #print('%s %s' % (log, ' '.join(map(str,intervals))))\n #file = urlparse(fs[N]).path.split('/')[-1].split('.')\n #if len(file)>1:\n # tmp.append(file[-1])\n #tmp.append(urlparse(fs[N]).path.split('/')[-1])\n #tmp.append(urlparse(fs[N]).path)\n\n #fields.append(set(tmp))\n #fields.append(intervals)\n fields.append(tmp)\n\n\n dic = {}\n for i in fields:\n for j in i:\n if j in dic:\n dic[j] += 1\n else:\n dic[j] = 1\n ls = sorted(dic.items(), key=lambda x: x[1], reverse=True)\n for i in range(len(ls)):\n print('%s\\t%s' %(ls[i][0], ls[i][1]))\n #print('%s' % join(ls[i][1]))\n\n\n \"\"\"\n col = []\n for i in fields:\n for j in i:\n col.append(j)\n print('%s' % ' '.join(map(str,col)))\n \"\"\"\n\n\n \"\"\"\n dic = {}\n for i in fields:\n for j in i:\n sub = j.split('.')\n if sub[0] in dic:\n dic[sub[0]] += 1\n else:\n dic[sub[0]] = 1\n\n\n if len(sub) > 1:\n if sub[-2]+'.'+sub[-1] in dic:\n dic[sub[-2]+'.'+sub[-1]] += 1\n else:\n dic[sub[-2]+'.'+sub[-1]] = 1\n\n\n ls = sorted(dic.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)\n for i in range(len(ls)):\n print('%s\\t%s' % (ls[i][0], ls[i][1]))\n # print('%s' % join(ls[i][1]))\n\n \"\"\"", "def read_stats_in_log(model_tarfile):\n # log file is YYYY0101.fms.out\n log_filename = os.path.basename(model_tarfile).replace(\"ascii_out.tar\", \"fms.out\")\n year = int(os.path.basename(model_tarfile).replace(\"0101.ascii_out.tar\", \"\"))\n\n if not log_filename.startswith('./'):\n log_filename = './' + log_filename\n\n with tarfile.open(model_tarfile, \"r:\") as f:\n logdump = f.extractfile(log_filename).read().decode('utf-8')\n\n lines = logdump.split(sep='\\n')\n\n linestart_stats = [lines.index(line) for line in lines if line.find('Total runtime') != -1 ][-1]\n lineend_stats = [lines.index(line) for line in lines if line.find('high water mark') != -1 ][-1]\n\n raw_stats = lines[linestart_stats-1:lineend_stats]\n\n # infer the number of data columns\n columns = raw_stats[0].split()\n ncol_data = len(columns)\n # add a column for model component\n columns = ['model'] + columns\n formatted_lines = []\n for line in raw_stats[1:]:\n data = line.split()[-ncol_data:]\n data_num = []\n for item in data:\n data_num.append(float(item))\n line = line.replace(item, '')\n modelname = line.strip()\n formatted_line = [modelname] + data_num\n formatted_lines.append(formatted_line)\n\n df = pd.DataFrame(formatted_lines, columns = columns)\n df['year'] = year\n\n return df", "def audit(self, message):\n channel = self.config.get('AUDIT_CHANNEL', False)\n log_file = self.config.get('AUDIT_FILE', False)\n if channel: outputs.append([channel, message])\n if log_file:\n with open(log_file, 'a') as f: f.write(message)\n logging.warning('AUDIT: ' + message)", "def __init__(self, fname):\n self.format = 2\n self.target = {}\n self.filters = {}\n self.comment = {}\n\n try:\n rec = re.compile('file\\s+object\\s+filter', re.I)\n rre = re.compile('(run\\d\\d\\d\\d?)(.*)')\n old = re.compile('\\s*(\\S+)\\s+(\\S+)\\s+(.*)$')\n oldii = re.compile('\\s*(\\S+)\\s*$')\n with open(fname) as f:\n for line in f:\n m = rec.search(line)\n if m:\n self.format = 1\n if len(self.comment):\n raise Exception('Error in night log = ' + fname + ', line = ' + line)\n\n mr = rre.match(line)\n if mr:\n run = mr.group(1)\n if self.format == 2:\n self.comment[run] = mr.group(2).strip()\n else:\n m = old.search(mr.group(2))\n 
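The frequency count in the `analysis` snippet above (a manual dict increment followed by a sort on the count, now fixed to use a Python 3 `key=` argument in place of the removed Python 2 `cmp`) is the hand-rolled form of `collections.Counter.most_common()`. A sketch with hypothetical input:

```python
from collections import Counter

fields = [["GET", "POST", "GET"], ["GET"]]  # hypothetical parsed field lists
counts = Counter(v for row in fields for v in row)
for value, n in counts.most_common():       # sorted by count, descending
    print("%s\t%s" % (value, n))
```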
if m:\n self.target[run] = m.group(1)\n self.filters[run] = m.group(2)\n self.comment[run] = m.group(3)\n else:\n m = oldii.search(mr.group(2))\n if m:\n self.target[run] = m.group(1)\n except FileNotFoundError:\n sys.stdout.write(f'Night log = {fname} does not exist\\n')\n except Exception as err:\n sys.stdout.write(f'Problem on night log = {fname}:' + str(err) + '\\n')", "def main():\n\n # parses arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', action='store', dest='start_index', type=int,\n help='The starting index for events. Default is 0')\n\n parser.add_argument('-e', action='store', dest='end_index', type=int,\n help='The starting index for events. Default is 5,000')\n\n results = parser.parse_args()\n\n start_index = results.start_index or 0\n\n end_index = results.end_index or 5000\n\n scraper = Scraper()\n\n # these are the event column titles from the sample import csv given by localist\n event_column_titles = [\n 'Title','Description','Date From','Date To','Recurrence','Start Time','End Time',\n 'Location','Address','City','State','Event Website','Room','Keywords','Tags',\n 'Photo URL','Ticket URL','Cost','Hashtag','Facebook URL','Group','Department',\n 'Allow User Activity','Allow User Attendance','Visibility','Featured Tabs',\n 'Sponsored','Venue Page Only','Exclude From Trending','Event Types','Invited Audience', 'Original URL',\n 'Location Details'\n ]\n\n out_stream = open('event_import.csv', 'w')\n\n writer = Writer(event_column_titles, out_stream)\n\n writer.write_headers()\n\n # iterates through the specified event numbers and scrapes each one and writes\n # it to the output file\n for i in range(start_index, end_index + 1):\n current_url = 'http://test-ucscevents.pantheonsite.io/event/' + str(i)\n print(\"processing url: \" + current_url)\n r = requests.get(current_url)\n if r.status_code != requests.codes.ok:\n print(' 404')\n else:\n soup = get_soup_from_url(current_url)\n events = scraper.scrape_event(soup)\n for event in events:\n event['Original URL'] = current_url\n\n writer.write_object(event) # event written to output file here\n\n out_stream.close()", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def process_log_file(cur, filepath):\n \n # open log file\n \n df = pd.read_json(filepath, lines = True)\n \n # filter by NextSong action\n df = df[df['page']=='NextSong']\n # convert timestamp column to datetime\n t = pd.to_datetime(df.ts, unit='ms')\n df.ts = t\n \n # insert time data records\n time_data = [t, t.dt.hour, t.dt.day, t.dt.weekofyear,\n t.dt.month, t.dt.year, t.dt.weekday]\n \n # column_labels = ['timestamp','Hour', \n # 'Day','Month','Year''Weekday']'\n column_labels = ['timestamp','hour','day','weekofyear','month','year','weekday']\n time_df = pd.DataFrame(dict(zip(column_labels, time_data)))\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n \n # load user table\n user_df = df[['userId','firstName', \n 'lastName','gender','level']]\n\n # insert user records\n for i, row in user_df.iterrows(): \n cur.execute(user_table_insert, row)\n \n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist,\n row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert 
songplay record\n songplay_data = (index, row.ts, row.userId, row.level,\n songid, artistid, row.sessionId, \n row.location, row.userAgent)\n \n \n cur.execute(songplay_table_insert, songplay_data)", "def main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-i\", \"--input\",\n dest='input',\n required=True,\n help=\"Output file from mutalyzer\")\n\n parser.add_argument(\"-f\", \"--fasta\",\n dest='fasta_file',\n required=True,\n help=\"FASTA file\")\n\n parser.add_argument(\"-o\", \"--out\",\n dest='output',\n default='mutalyzer.out',\n help=\"outfile name\")\n\n parser.add_argument(\"-v\", \"--verbose\",\n dest=\"logLevel\",\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n default=\"INFO\",\n help=\"Set the logging level\")\n\n args = parser.parse_args()\n logging.basicConfig(stream=sys.stderr, level=args.logLevel,\n format='%(name)s (%(levelname)s): %(message)s')\n\n logger = logging.getLogger(__name__)\n logger.setLevel(args.logLevel)\n fasta_file = Fasta(args.fasta_file)\n o = open(args.output, 'w')\n\n # Read in mutalyzer file\n with open(args.input, 'r') as f:\n for line in f:\n if line.startswith('Input'):\n continue\n submitted_variant, errors, returned_variant = parse_line(line)\n if returned_variant is None:\n logger.warning('Line does not have enough columns. Skipping: {}'.format(line))\n continue\n logger.debug('Var: {}'.format(submitted_variant))\n event_type, parse_function = get_event_type_function(returned_variant)\n chr, start, ref, alt = parse_function(returned_variant, fasta_file)\n outline = [submitted_variant, errors, returned_variant, event_type, chr, start, ref, alt]\n outline = '\\t'.join([str(x) for x in outline])\n o.write(outline + '\\n')\n\n o.close()\n return 0", "def save_to_base(log_file, logons_err=None):\n source_file = open(log_file)\n\n if logons_err:\n if os.path.isfile(logons_err):\n os.remove(logons_err)\n output_err = open(logons_err, \"a\", encoding='utf-8')\n else:\n output_err = open(logons_err, \"a\", encoding='utf-8')\n \n lines = source_file.readlines()\n \n #get max last logon date from ADLogonFromComputer\n last_logon_date = ADLogonFromComputer.objects.all().aggregate(Max('logon_date'))['logon_date__max']\n if last_logon_date:\n begin_date = last_logon_date-datetime.timedelta(2)\n else:\n begin_date = None\n import_err = []\n for line in lines:\n try:\n s_line = line.split('; ')\n if len(s_line) == 7:\n l_date = datetime.datetime.strptime(s_line[0], \"%d.%m.%Y\").date()\n \n if begin_date == None:\n #l_time = datetime.datetime.strptime(s_line[1], \"%H:%M:%S,%f\").time()\n l_time = datetime.datetime.strptime(s_line[1], \"%H:%M:%S\").time()\n comp = s_line[3]\n login = s_line[4]\n #l_time = datetime.datetime.strptime(s_line[1][-3], \"%H:%M:%S\").time()\n \n print('_____________________')\n print(s_line)\n #print('Date: %s' %(s_line[0]))\n print('Date: %s' %(l_date))\n #print('Time: %s' %(s_line[1]))\n print('Time: %s' %(l_time))\n print('Computer: %s' % (comp))\n print('Login: %s' % (login))\n \n try:\n ad_logon = ADLogonFromComputer.objects.get(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n except:\n ad_logon = ADLogonFromComputer(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n ad_logon.save()\n \n try:\n ldap_obj=LdapInfo.objects.get(samaccountname__iexact=ad_logon.login_name)\n ad_logon.person=ldap_obj.person\n ad_logon.save()\n except:\n import_err.append(ad_logon.login_name)\n\n elif l_date > begin_date:\n l_time = 
datetime.datetime.strptime(s_line[1], \"%H:%M:%S\").time()\n comp = s_line[3]\n login = s_line[4]\n #l_time = datetime.datetime.strptime(s_line[1][-3], \"%H:%M:%S\").time()\n \n print('_____________________')\n print(s_line)\n #print('Date: %s' %(s_line[0]))\n print('Date: %s' %(l_date))\n #print('Time: %s' %(s_line[1]))\n print('Time: %s' %(l_time))\n print('Computer: %s' % (comp))\n print('Login: %s' % (login))\n \n try:\n ad_logon = ADLogonFromComputer.objects.get(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n except:\n ad_logon = ADLogonFromComputer(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n ad_logon.save()\n \n try:\n ldap_obj=LdapInfo.objects.get(samaccountname__iexact=ad_logon.login_name)\n ad_logon.person=ldap_obj.person\n ad_logon.save()\n except:\n import_err.append(ad_logon.login_name)\n elif logons_err:\n output_err.write(line)\n except:\n if logons_err:\n output_err.write(line)\n if logons_err:\n output_err.close()\n source_file.close()\n print('Error looking up ADLogonFromComputer.login_name in LdapInfo.samaccountname: ',import_err)", "def readtxt(obslog):\n\n logger = log.getLogger('obslog.readtxt')\n\n if not os.path.exists(obslog):\n logger.error('Cannot access %s', obslog)\n raise SystemExit\n\n logger.info('Reading %s', obslog)\n\n with open(obslog) as f: # Since we will have to go through the data twice, read the whole file at once.\n data = f.readlines()\n\n header = ['Observation ID', 'Data Labels', 'File Numbers', 'Dataset UT', 'Target Name', 'Filters', 'Slit',\n 'Grating/Wavelength', 'Camera/Prism', 'ExpTime/LNR/Coadds', 'ACQ']\n\n pattern = dict() # Enforce formatting rules to avoid parsing comments as data:\n pattern['Observation ID'] = re.compile(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}-[0-9]+$')\n pattern['Data Labels'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['File Numbers'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['Dataset UT'] = re.compile(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}$') # 09:58:15\n pattern['Target Name'] = re.compile(r'[a-zA-Z0-9_-]+') # Match any string\n pattern['Filters'] = re.compile(r'[A-Z0-9\\-]+') # H, XD, H2, X, J, H\n pattern['Slit'] = re.compile(r'[a-zA-Z0-9]+') # 0.675, ACQ, LgPin\n pattern['Grating/Wavelength'] = re.compile(r'[0-9]{2,3}/[0-9]\\.[0-9]{2}') # 32/1.65, 111/1.68\n pattern['Camera/Prism'] = re.compile(r'[A-Z]{2}/[A-Z]{3}') # LB/MIR, SB/SXD\n pattern['ExpTime/LNR/Coadds'] = re.compile(r'[0-9]+\\.[0-9]/[0-9]+/[0-9]+') # 0.2/1/25, 300.0/32/1\n pattern['ACQ'] = re.compile(r'^Y*$') # Y or ''\n\n indx = {}\n for line in data:\n if 'Electronic Observing Log' in line:\n date = line.split()[-1][7:]\n logger.debug('Log date: %s', date)\n if line[0:14] == 'Observation ID': # This defines the start of the header row\n for h in header:\n indx[h] = line.find(h) # Find where each column starts\n break # No need to go farther\n\n width = {} # Find the width of each row\n for i in range(len(header) - 1): # This requires that 'header' be an ordered array (not a dictionary)\n width[header[i]] = indx[header[i + 1]] - indx[header[i]]\n width[header[i+1]] = 1 # The ACQ field is either 'Y' or blank\n\n val = {}\n match = {}\n info = {}\n for line in data:\n logger.debug('\\n%s', line)\n files = []\n for h in header:\n val[h] = line[indx[h]: indx[h] + width[h]].strip()\n match[h] = re.match(pattern[h], val[h])\n logger.debug('%s: \"%s\" %s' % (h, val[h], match[h]))\n\n # Maybe throw a warning if only match 1 fails; indicating a 
likely bad pattern specification?\n\n if None in match.values():\n logger.debug('Failed to match all patterns -> This is a comment')\n continue\n\n if '-' in val['File Numbers']:\n start, stop = val['File Numbers'].split('-')\n for i in range(int(start), int(stop)+1):\n files.append(i)\n else:\n files.append(int(val['File Numbers']))\n\n for filenum in files:\n f = 'N%sS%04d.fits' % (date, filenum)\n logger.debug('File: %s', f)\n info[f] = {}\n for h in [header[0]] + header[3:]: # Skip 'Data Labels' and \"File Numbers'\n info[f][h] = val[h]\n\n logger.debug('info: %s', info)\n return info", "def analyze(self, database, line, filename): #pylint: disable-msg=R0912,R0915\n \n the_type = Analyzer.mapper[filename]\n \n if the_type == 'xferlog' and self._accepting_new:\n result = Analyzer.x_parser.parse_one_line(line)\n \n #file push and action complete\n if result.get('action', None) == 'push' and result.get('completion_status', None) == 'c':\n \n # a file could have been before because there could be a dirmon message before that one\n records = database(filename = result['file'])\n \n #add file in db\n now = datetime.datetime.utcnow()\n if len(records) == 0:\n database.insert(filename = result['file'], \\\n size = long(result['file_size']), \\\n uplinked = result['time'], \\\n metadata = result['metadata'], \\\n created = now, \\\n last_update= now)\n else:\n for rec in records: \n #update filename info\n database.update(rec, \\\n size = long(result['file_size']), \\\n uplinked = result['time'], \\\n metadata = result['metadata'], \\\n last_update= now) \n \n \n elif result.get('action', None) == 'delete':\n \n #look for a file name x in the db in order to delete it\n records = database(filename = result['file'])\n for rec in records:\n #simply delete it at the moment\n database.delete(rec)\n analyze_utils.print_rec_in_logfile(rec)\n else:\n # warn in log file\n Analyzer.LOG.warning(\"Ignore line %s because it is a not COMPLETE push or it is a delete file\" % (line))\n \n elif the_type == 'dirmon':\n result = Analyzer.d_parser.parse_one_line(line)\n \n if result.get('job_status', None) == 'created':\n \n dirmon_dir = result['metadata']['dirmon_dir']\n #special case for DWD (should hopefully disappear in the future\n if dirmon_dir == 'wmo-ra6' or dirmon_dir.startswith('DWD'):\n records = self.get_dwd_record(database, result, dirmon_dir)\n \n else: \n records = database(filename = result['file'])\n \n if self._accepting_new and len(records) == 0:\n #no file created so it means that the xferlog message has not been received\n # add it in the table\n now = datetime.datetime.utcnow()\n \n database.insert(filename = result['file'], \\\n jobname = result['job'], \\\n queued = result['time'], \\\n metadata = result['metadata'], created = now, last_update= now)\n else:\n for rec in records:\n \n r_job = database(jobname = result['job'])\n \n #if job reconcile both info\n if r_job and not r_job[0]['filename']:\n \n #update filename info\n database.update(rec, queued = result['time'], \\\n jobname = r_job[0]['jobname'], \\\n announced = r_job[0]['announced'], \\\n finished = r_job[0]['finished']) \n \n #delete job record\n database.delete(r_job[0])\n \n else:\n #no other record with job name, update record\n database.update(rec, jobname = result['job'], \\\n queued = result['time'], \\\n last_update = datetime.datetime.utcnow()) \n \n elif result.get('job_status', None) == 'file_deleted': \n #file has been deleted before to be treated\n # TO Be added in warnings db\n records = database(filename = 
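The `readtxt` snippet above slices fixed-width columns at the offsets where each header word starts. A toy sketch of that offset arithmetic; the header, row, and column names here are invented:

```python
header = "Name      Size   Date"
row    = "foo.fits  1024   2021-01-12"
cols = ["Name", "Size", "Date"]

# Locate each column by where its header word begins, then slice the row.
start = {c: header.find(c) for c in cols}
bounds = list(start.values()) + [None]
record = {c: row[bounds[i]:bounds[i + 1]].strip() for i, c in enumerate(cols)}
assert record == {"Name": "foo.fits", "Size": "1024", "Date": "2021-01-12"}
```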
result['file'])\n for rec in records:\n #simply delete it at the moment\n database.delete(rec)\n Analyzer.LOG.warning(\"file %s deleted before to be jobbed\" % (rec['filename']))\n analyze_utils.print_rec_in_logfile(rec) \n \n elif the_type == 'tc-send':\n result = Analyzer.s_parser.parse_one_line(line)\n \n # look for job_status == job_announced\n if result.get('job_status', None) == 'announced' :\n \n # get all records concerned by this job\n records = database(jobname = result.get('job', None))\n \n if self._accepting_new and len(records) == 0:\n now = datetime.datetime.utcnow()\n # add a line in the to print table\n database.insert(jobname = result.get('job', None), \\\n announced = result['time'], created = now, last_update= now, channel = result['channel'])\n else:\n for rec in records:\n #found a job so update this line in db\n database.update(rec, jobname = result.get('job', None), \\\n announced = result['time'],last_update= datetime.datetime.utcnow(), channel = result['channel']) \n \n elif result.get('job_status', None) == 'blocked':\n \n #get all records concerned by this job\n records = database(jobname = result.get('job', None))\n \n if self._accepting_new and len(records) == 0:\n now = datetime.datetime.utcnow()\n # no dirmon message received so check in the waiting list and update it or add it in the waiting list if not present\n database.insert(jobname = result.get('job', None), \\\n blocked = result['time'], created = now, last_update= now, channel = result['channel'])\n else:\n for rec in records:\n # update info with blocked time\n database.update(rec, blocked = result['time'], channel = result['channel']) \n elif result.get('job_status', None) == 'aborted':\n # flag it as aborted and if it is aborted again update the update time\n #get all records concerned by this job\n records = database(jobname = result.get('job', None))\n \n if self._accepting_new and len(records) == 0:\n now = datetime.datetime.utcnow()\n \n # no dirmon message received so check in the waiting list and update it or add it in the waiting list if not present\n # put it in finish at the moment but it should be treated differently\n database.insert(jobname = result.get('job', None), \\\n aborted = result['time'], \\\n created = now, \\\n last_update= now, channel = result['channel'])\n else:\n for rec in records:\n # update info with finished time\n database.update(rec, aborted = result['time'], \\\n last_update= datetime.datetime.utcnow(), channel = result['channel'])\n \n elif result.get('job_status', None) == 'finished':\n \n #get all records concerned by this job\n records = database(jobname = result.get('job', None))\n \n if self._accepting_new and len(records) == 0:\n now = datetime.datetime.utcnow()\n # no dirmon message received so check in the waiting list and update it or add it in the waiting list if not present\n database.insert(jobname = result.get('job', None), \\\n finished = result['time'], \\\n finished_time_insert = datetime.datetime.utcnow(), \\\n created = now, \\\n last_update= now, channel = result['channel'])\n \n else:\n for rec in records:\n # update info with finished time\n database.update(rec, finished = result['time'], \\\n finished_time_insert = datetime.datetime.utcnow(), \\\n last_update= datetime.datetime.utcnow(), channel = result['channel']) \n else:\n \n Analyzer.LOG.debug(\"Ignored record = %s \\n\" % (result))\n \n # no status so it should be WRN or ERR\n self.warn_err_db.insert(lvl = result.get('lvl', None), \\\n msg = result.get('msg', None), \\\n created = 
datetime.datetime.utcnow())\n \n #Analyzer.LOG.info(\"Insert message in error or warn db %s\" %(result))", "def _assess(self):\n # get eruptions\n with open(os.sep.join(getfile(currentframe()).split(os.sep)[:-2]+['data','eruptive_periods.txt']),'r') as fp:\n self.tes = [datetimeify(ln.rstrip()) for ln in fp.readlines()]\n # check if data file exists\n self.exists = os.path.isfile(self.file)\n if not self.exists:\n t0 = datetime(2011,1,1)\n t1 = datetime(2011,1,2)\n self.update(t0,t1)\n # check date of latest data in file\n self.df = pd.read_csv(self.file, index_col=0, parse_dates=[0,], infer_datetime_format=True)\n self.ti = self.df.index[0]\n self.tf = self.df.index[-1]", "def file_parser(filename):\n LOG_FORMAT = \"%a %l %u %t \\\"%r\\\" %>s %b %D\"\n\n line_parser = apache_log_parser.make_parser(LOG_FORMAT)\n\n parsed_entries = []\n\n with open(filename) as f:\n for line in f:\n parsed_entries.append(line_parser(line))\n\n # Sort the parsed log entries by timestamp. Some of the log entries in the\n # provided example take a long time to process so they are not in order,\n # this messes up splitting the entries into minute chunks for processing.\n parsed_entries.sort(key=lambda x: x.get('time_received_utc_datetimeobj'))\n\n return parsed_entries", "def extract(self):\n self.logger.info(f'Opening {self.file}')\n try:\n f = open(self.file, 'r')\n except FileNotFoundError:\n self.logger.info('Could not open the .vrt file')\n return\n\n result = {}\n self.logger.info('Extracting')\n for line in f:\n # Only attempt to extract if line is correct\n if self.is_target(line):\n # Add the closing text tag to make the line have proper XML\n xml_attributes = XML.fromstring(line.rstrip()+'</text>').attrib\n # Extract wanted parameters\n month = xml_attributes['date'][5:7] # Extract month\n # Add month to the keys\n if month not in result.keys():\n result[month] = []\n result[month].append({\n 'title': xml_attributes['title'], # Extract thread title\n 'thread_id': xml_attributes['thread_id'], # Extract thread id\n 'datetime': xml_attributes['datetime'] # Extract datetime\n })\n self.logger.info('Extraction complete')\n self.db = result\n # self.db[self.year] = result # Assign results to year\n self.save_result() # Save the results", "def clean_log(path):\n # Read the experiment log from a csv file\n assert len(glob.glob(path + \"/*.csv\")) == 1, 'problem with number of .csv files'\n log = pd.read_csv(glob.glob(path + \"/*.csv\")[0],parse_dates = True, index_col = 0, skiprows = 1, skipfooter = 1, engine='python')\n include = ['time']\n exclude = ['response', 'psychopy' , 'start_time']\n # Select the columns where the timestamp of the event was written\n # event_time_columns = [col_name for col_name in log.columns if any(substring in col_name for substring in include)\n # and not any(substring in col_name for substring in exclude)]\n \n event_time_columns = [col_name for col_name in log.columns if 'time' in col_name and 'response' not in col_name and 'psychopy' not in col_name and 'start_time' not in col_name]\n \n events_log = log[event_time_columns]\n # Event types have to be encoded with ints starting from 1 for MNE\n event_id = {event_name : idx + 1 for idx, event_name in enumerate(events_log.columns)} \n return events_log, event_id", "def _analyzeFile(self, filename):\n date = os.path.basename(filename)[:10]\n if filename.endswith('gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n lines = f.read().splitlines()\n for line in lines:\n if re.search('joined the game', line):\n 
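`_analyzeFile` above (like `gen_parse_log` earlier) picks `gzip.open` or plain `open` based on the file extension. A reusable sketch of that opener selection; `open_log` is a name introduced here for illustration, not one from the snippets:

```python
import gzip

def open_log(path):
    """Open a plain or gzip-compressed log file for text reading."""
    opener = gzip.open if path.endswith(".gz") else open
    return opener(path, "rt")
```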
self._analyzeLine(line, date, self._start_times)\n elif re.search('left the game', line) or re.search('lost connection',\n line):\n self._analyzeLine(line, date, self._end_times)\n elif re.search('Stopping server', line):\n self._server_stop_times.append(ConvertTime(date, line))", "def preprocess(file):\n valid_logs = list()\n with open(file, 'r') as f:\n prev_log = None\n for log in f:\n log = log.strip()\n if not find_match(log):\n continue\n if VIEW_PATTERN.match(log):\n log_info = extract_info(log)\n if 'handle' in log_info['content']:\n continue\n if prev_log is None:\n prev_log = log\n valid_logs.append(log)\n else:\n if LOCATION_LISTENER_PATTERN.match(log) or SENSOR_LISTENER_PATTERN.match(log):\n valid_logs.append(log)\n prev_log = log\n else:\n if compare_logs(prev_log, log):\n continue\n valid_logs.append(log)\n prev_log = log\n save_path = save_file(file, 'preprocess', '\\n'.join(valid_logs))\n return save_path", "def parse(self, filename):\n def invalid_line(line, reason):\n stats.count_lines_invalid.increment()\n if config.options.debug >= 2:\n logging.debug('Invalid line detected (%s): %s' % (reason, line))\n\n if filename == '-':\n filename = '(stdin)'\n file = sys.stdin\n else:\n if not os.path.exists(filename):\n print >> sys.stderr, \"\\n=====> Warning: File %s does not exist <=====\" % filename\n return\n else:\n if filename.endswith('.bz2'):\n open_func = bz2.BZ2File\n elif filename.endswith('.gz'):\n open_func = gzip.open\n else:\n open_func = open\n file = open_func(filename, 'r')\n\n if config.options.show_progress:\n print 'Parsing log %s...' % filename\n\n if config.format:\n # The format was explicitely specified.\n format = config.format\n\n if isinstance(format, W3cExtendedFormat):\n format.create_regex(file)\n\n if format.regex is None:\n return fatal_error(\n \"File is not in the correct format, is there a '#Fields:' line? \"\n \"If not, use the --w3c-fields option.\"\n )\n else:\n # If the file is empty, don't bother.\n data = file.read(100)\n if len(data.strip()) == 0:\n return\n try:\n file.seek(0)\n except IOError:\n pass\n\n format = self.detect_format(file)\n if format is None:\n return fatal_error(\n 'Cannot guess the logs format. 
Please give one using '\n 'either the --log-format-name or --log-format-regex option'\n )\n # Make sure the format is compatible with the resolver.\n\n if config.options.dump_log_regex:\n logging.info(\"Using format '%s'.\" % format.name)\n if format.regex:\n logging.info(\"Regex being used: %s\" % format.regex.pattern)\n else:\n logging.info(\"Format %s does not use a regex to parse log lines.\" % format.name)\n logging.info(\"--dump-log-regex option used, aborting log import.\")\n os._exit(0)\n\n hits = []\n for lineno, line in enumerate(file):\n try:\n line = line.decode(config.options.encoding)\n except UnicodeDecodeError:\n invalid_line(line, 'invalid encoding')\n continue\n\n stats.count_lines_parsed.increment()\n if stats.count_lines_parsed.value <= config.options.skip:\n continue\n\n match = format.match(line)\n if not match:\n invalid_line(line, 'line did not match')\n continue\n\n hit = Hit(\n filename=filename,\n lineno=lineno,\n status=format.get('status'),\n full_path=format.get('path'),\n is_download=False,\n is_robot=False,\n is_error=False,\n is_redirect=False,\n args={},\n )\n\n if config.options.regex_group_to_page_cvars_map:\n self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)\n\n if config.options.regex_group_to_visit_cvars_map:\n self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)\n\n if config.options.regex_groups_to_ignore:\n format.remove_ignored_groups(config.options.regex_groups_to_ignore)\n\n try:\n hit.query_string = format.get('query_string')\n hit.path = hit.full_path\n except BaseFormatException:\n hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)\n\n # W3cExtendedFormat detaults to - when there is no query string, but we want empty string\n if hit.query_string == '-':\n hit.query_string = ''\n\n hit.extension = hit.path.rsplit('.')[-1].lower()\n\n try:\n hit.referrer = format.get('referrer')\n\n if hit.referrer.startswith('\"'):\n hit.referrer = hit.referrer[1:-1]\n except BaseFormatException:\n hit.referrer = ''\n if hit.referrer == '-':\n hit.referrer = ''\n\n try:\n hit.user_agent = format.get('user_agent')\n\n # in case a format parser included enclosing quotes, remove them so they are not\n # sent to Piwik\n if hit.user_agent.startswith('\"'):\n hit.user_agent = hit.user_agent[1:-1]\n except BaseFormatException:\n hit.user_agent = ''\n\n hit.ip = format.get('ip')\n try:\n hit.length = int(format.get('length'))\n except (ValueError, BaseFormatException):\n # Some lines or formats don't have a length (e.g. 
304 redirects, W3C logs)\n hit.length = 0\n\n try:\n hit.generation_time_milli = float(format.get('generation_time_milli'))\n except BaseFormatException:\n try:\n hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000\n except BaseFormatException:\n try:\n hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000\n except BaseFormatException:\n hit.generation_time_milli = 0\n\n if config.options.log_hostname:\n hit.host = config.options.log_hostname\n else:\n try:\n hit.host = format.get('host').lower().strip('.')\n\n if hit.host.startswith('\"'):\n hit.host = hit.host[1:-1]\n except BaseFormatException:\n # Some formats have no host.\n pass\n\n # Add userid\n try:\n hit.userid = None\n\n userid = format.get('userid')\n if userid != '-':\n hit.args['uid'] = hit.userid = userid\n except:\n pass\n\n # add event info\n try:\n hit.event_category = hit.event_action = hit.event_name = None\n\n hit.event_category = format.get('event_category')\n hit.event_action = format.get('event_action')\n\n hit.event_name = format.get('event_name')\n if hit.event_name == '-':\n hit.event_name = None\n except:\n pass\n\n # add session time\n try:\n hit.session_time = None\n\n session_time = format.get('session_time')\n hit.session_time = int(session_time)\n except:\n pass\n\n # Check if the hit must be excluded.\n if not all((method(hit) for method in self.check_methods)):\n continue\n\n # Parse date.\n # We parse it after calling check_methods as it's quite CPU hungry, and\n # we want to avoid that cost for excluded hits.\n date_string = format.get('date')\n try:\n hit.date = datetime.datetime.strptime(date_string, format.date_format)\n except ValueError:\n invalid_line(line, 'invalid date')\n continue\n\n # Parse timezone and substract its value from the date\n try:\n timezone = float(format.get('timezone'))\n except BaseFormatException:\n timezone = 0\n except ValueError:\n invalid_line(line, 'invalid timezone')\n continue\n\n if timezone:\n hit.date -= datetime.timedelta(hours=timezone/100)\n\n if config.options.replay_tracking:\n # we need a query string and we only consider requests with piwik.php\n if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):\n invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php')\n continue\n\n query_arguments = urlparse.parse_qs(hit.query_string)\n if not \"idsite\" in query_arguments:\n invalid_line(line, 'missing idsite')\n continue\n\n try:\n hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())\n except UnicodeDecodeError:\n invalid_line(line, 'invalid encoding')\n continue\n\n hits.append(hit)\n if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):\n Recorder.add_hits(hits)\n hits = []\n if len(hits) > 0:\n Recorder.add_hits(hits)", "def read_linelog():", "def get_and_append_log_events(self):\n\n log_events = self.get_log_events()\n\n # Write log events to file.\n if len(log_events) > 0:\n self.write_log_events(log_events)", "def _ParseAndAnalyzeFile(\n self, path_segments, parser, plugin, knowledge_base_values=None):\n knowledge_base_object = self._SetUpKnowledgeBase(\n knowledge_base_values=knowledge_base_values)\n\n storage_writer = self._ParseFile(\n path_segments, parser, knowledge_base_object)\n\n mediator = analysis_mediator.AnalysisMediator(\n storage_writer, knowledge_base_object)\n\n for event in 
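The generation-time handling in the `parse` snippet above falls through millisecond, microsecond, then second fields before defaulting to zero. A sketch of the same fallback chain; note this version catches `KeyError`/`ValueError`, where the original catches its own `BaseFormatException`:

```python
def generation_time_milli(get):
    """Try each known field in turn and convert to milliseconds."""
    for key, factor in (("generation_time_milli", 1.0),
                        ("generation_time_micro", 1e-3),
                        ("generation_time_secs", 1e3)):
        try:
            return float(get(key)) * factor
        except (KeyError, ValueError):
            continue
    return 0.0

print(generation_time_milli({"generation_time_secs": "0.5"}.__getitem__))  # 500.0
```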
storage_writer.GetSortedEvents():\n event_data = None\n event_data_identifier = event.GetEventDataIdentifier()\n if event_data_identifier:\n event_data = storage_writer.GetEventDataByIdentifier(\n event_data_identifier)\n\n event_data_stream = None\n if event_data:\n event_data_stream_identifier = event_data.GetEventDataStreamIdentifier()\n if event_data_stream_identifier:\n event_data_stream = storage_writer.GetEventDataStreamByIdentifier(\n event_data_stream_identifier)\n\n plugin.ExamineEvent(mediator, event, event_data, event_data_stream)\n\n analysis_report = plugin.CompileReport(mediator)\n storage_writer.AddAnalysisReport(analysis_report)\n\n return storage_writer", "def parse(file_name, user, agenda_type, db):\n db.execute('''select section from roll where onyen=%(onyen)s''', dict(onyen=user))\n row = db.fetchone()\n section = None if row is None else row.section\n\n # Get Recitation zoom\n db.execute(\"\"\"select url from zoom where type='recitation'\"\"\")\n row = db.fetchone()\n recitation_zoom_url = row.url if row else None\n\n # Get lecture zoom\n lecture_zoom_urls = []\n if section in ['001', '003']:\n db.execute(\"\"\"select url from zoom where type='lecture' and section='001'\"\"\")\n lecture_zoom_urls.append(db.fetchone().url)\n if section in ['002', '003']:\n db.execute(\"\"\"select url from zoom where type='lecture' and section='002'\"\"\")\n lecture_zoom_urls.append(db.fetchone().url)\n\n # Get checklist information\n checklist_info = get_checklist_info(db, user, agenda_type)\n\n if agenda_type == 'la':\n first_day_of_class = date(2021, 1, 12)\n else:\n first_day_of_class = date(2021, 1, 19)\n last_day_of_classes = date(2021, 5, 5)\n today = date.today()\n with open(file_name, \"rt\") as fp:\n agenda = fp.read().split(\"\\n| \")\n day = first_day_of_class\n result = []\n for one_days_details in agenda:\n lines = one_days_details.split(\"\\n\")\n title = lines[0]\n output_lines = []\n for line in lines[1:]:\n if line.startswith(\"S \"):\n line = slide_line(line)\n elif line.startswith(\"#\"):\n line = comment_line(line, user)\n elif line.startswith(\"Z\"):\n line = zoom_line(line, day, section, lecture_zoom_urls, recitation_zoom_url)\n elif line.startswith(\"CL\"):\n line = checklist_line(line, day, checklist_info)\n output_lines.append(line)\n when = set_when(day, today)\n\n result.append(\n {\"date\": day, \"title\": title, \"when\": when,\n \"body\": renderMarkdown(renderTemplate(\"\\n\".join(output_lines)))})\n day = increment_day(day, last_day_of_classes, result, agenda_type)\n return result", "def handle(self, *args, **options):\n today = datetime.datetime.today()\n print '****** Processing data analysis at %s.*******\\n' % today\n\n #print analysis.calculate_summary_stats()\n #print analysis.calculate_action_stats()\n #print analysis.calculate_user_stats()\n\n outfile = open('user_timestamps.csv', 'w')\n analysis.user_timestamps(None, \"2012-09-04\", \"2012-10-01\", outfile)\n outfile.close()\n\n outfile = open('user_point_timestamps.csv', 'w')\n analysis.user_point_timestamps(\"2012-09-04\", \"2012-10-01\", outfile)\n outfile.close()\n\n outfile = open('energy_goal_timestamps.csv', 'w')\n analysis.energy_goal_timestamps(\"2012-09-04\", \"2012-10-01\", outfile)\n outfile.close()\n\n today = datetime.datetime.today()\n print '****** End processing data analysis at %s.*******\\n' % today", "def log_extract(log_info):\n \n #Handle file names, strings and open file-like objects equivalently\n with uber_open_rmode(log_info) as log_info:\n \n headers = []\n footers = 
[]\n i = 0\n \n #for all lines in file/output\n for line in log_info:\n \n #skip blank lines\n if len(line.split()) == 0:\n continue\n \n #This is listed before both run and minimize simulations \n if 'Memory usage per processor =' in line:\n headers.append(i+1)\n \n #This follows both run and minimize simulations\n elif 'Loop time of' in line:\n footers.append(i-1)\n \n i += 1\n \n #Add last line to footers for incomplete logs\n footers.append(i)\n \n log_info.seek(0)\n \n #Create DataModelDict root\n log_dict = DM()\n log_dict['LAMMPS-log-thermo-data'] = DM()\n \n #for all lines in file/output\n for header, footer in zip(headers, footers):\n\n #Read thermo data\n df = pd.read_csv(log_info, header=header, nrows=footer-header, sep='\\s+', engine='python', skip_blank_lines=True)\n log_info.seek(0) \n\n #Convert to DataModelDict\n thermo = DM()\n for j in df:\n thermo[str(j)] = df[j].values.tolist()\n \n #Append simulation results to DataModelDict root\n simulation = DM([('thermo', thermo)])\n log_dict['LAMMPS-log-thermo-data'].append('simulation', simulation)\n \n return log_dict", "def parse(self):\n\n coverage_data = {\n 'packages': {},\n 'summary': {'lines-total': 0, 'lines-covered': 0,\n 'branches-total': 0, 'branches-covered': 0},\n 'timestamp': str(int(time.time()))\n }\n package = None\n current_file = None\n file_lines_total = 0\n file_lines_covered = 0\n file_lines = {}\n file_methods = {}\n file_branches_total = 0\n file_branches_covered = 0\n\n for line in self.lcov_data.split('\\n'):\n if line.strip() == 'end_of_record':\n if current_file is not None:\n package_dict = coverage_data['packages'][package]\n package_dict['lines-total'] += file_lines_total\n package_dict['lines-covered'] += file_lines_covered\n package_dict['branches-total'] += file_branches_total\n package_dict['branches-covered'] += file_branches_covered\n file_dict = package_dict['classes'][current_file]\n file_dict['lines-total'] = file_lines_total\n file_dict['lines-covered'] = file_lines_covered\n file_dict['lines'] = dict(file_lines)\n file_dict['methods'] = dict(file_methods)\n file_dict['branches-total'] = file_branches_total\n file_dict['branches-covered'] = file_branches_covered\n coverage_data['summary']['lines-total'] += file_lines_total\n coverage_data['summary']['lines-covered'] += file_lines_covered\n coverage_data['summary']['branches-total'] += file_branches_total\n coverage_data['summary']['branches-covered'] += file_branches_covered\n\n line_parts = line.split(':')\n input_type = line_parts[0]\n\n if input_type == 'SF':\n # Get file name\n file_name = line_parts[-1].strip()\n relative_file_name = os.path.relpath(file_name, self.base_dir)\n package = '.'.join(relative_file_name.split(os.path.sep)[0:-1])\n class_name = file_name.split(os.path.sep)[-1]\n if package not in coverage_data['packages']:\n coverage_data['packages'][package] = {\n 'classes': {}, 'lines-total': 0, 'lines-covered': 0,\n 'branches-total': 0, 'branches-covered': 0\n }\n coverage_data['packages'][package]['classes'][\n relative_file_name] = {\n 'name': class_name, 'lines': {}, 'lines-total': 0,\n 'lines-covered': 0, 'branches-total': 0,\n 'branches-covered': 0\n }\n package = package\n current_file = relative_file_name\n file_lines_total = 0\n file_lines_covered = 0\n file_lines.clear()\n file_methods.clear()\n file_branches_total = 0\n file_branches_covered = 0\n elif input_type == 'DA':\n # DA:2,0\n (line_number, line_hits) = line_parts[-1].strip().split(',')\n line_number = int(line_number)\n if line_number not in 
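`log_extract` above hands each thermo block between its header and footer markers to `pandas.read_csv` with a whitespace separator. A minimal runnable sketch of that step with an invented two-row block:

```python
import io
import pandas as pd

# Hypothetical thermo block as it would appear between header and footer.
text = "Step Temp\n0 300.0\n100 305.2\n"
df = pd.read_csv(io.StringIO(text), sep=r"\s+")
print(df["Temp"].tolist())  # [300.0, 305.2]
```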
file_lines:\n file_lines[line_number] = {\n 'branch': 'false', 'branches-total': 0,\n 'branches-covered': 0\n }\n file_lines[line_number]['hits'] = line_hits\n # Increment lines total/covered for class and package\n if int(line_hits) > 0:\n file_lines_covered += 1\n file_lines_total += 1\n elif input_type == 'BRDA':\n # BRDA:1,1,2,0\n (line_number, block_number, branch_number, branch_hits) = line_parts[-1].strip().split(',')\n line_number = int(line_number)\n if line_number not in file_lines:\n file_lines[line_number] = {\n 'branch': 'true', 'branches-total': 0,\n 'branches-covered': 0, 'hits': 0\n }\n file_lines[line_number]['branch'] = 'true'\n file_lines[line_number]['branches-total'] += 1\n file_branches_total += 1\n if branch_hits != '-' and int(branch_hits) > 0:\n file_lines[line_number]['branches-covered'] += 1\n file_branches_covered += 1\n elif input_type == 'BRF':\n file_branches_total = int(line_parts[1])\n elif input_type == 'BRH':\n file_branches_covered = int(line_parts[1])\n elif input_type == 'FN':\n # FN:5,(anonymous_1)\n function_name = line_parts[-1].strip().split(',')[1]\n function_name = self.demangle_function_name(function_name)\n file_methods[function_name] = '0'\n elif input_type == 'FNDA':\n # FNDA:0,(anonymous_1)\n (function_hits, function_name) = line_parts[-1].strip().split(',')\n function_name = self.demangle_function_name(function_name)\n file_methods[function_name] = function_hits\n\n # Exclude packages\n excluded = [x for x in coverage_data['packages'] for e in self.excludes\n if re.match(e, x)]\n for package in excluded:\n del coverage_data['packages'][package]\n\n # Compute line coverage rates\n for package_data in list(coverage_data['packages'].values()):\n package_data['line-rate'] = self._percent(\n package_data['lines-total'],\n package_data['lines-covered'])\n package_data['branch-rate'] = self._percent(\n package_data['branches-total'],\n package_data['branches-covered'])\n\n return coverage_data", "def test_log_analyze_parse(self):\n # Log the project.\n build_cmd = f\"{self.compiler_bin} -c {self.source_file_path}\"\n build_file_path = self._log(build_cmd)\n\n self.assertTrue(os.path.exists(build_file_path))\n with open(build_file_path, 'r',\n encoding=\"utf-8\", errors=\"ignore\") as f:\n build_actions = json.load(f)\n self.assertEqual(len(build_actions), 1)\n\n # Analyze the project.\n report_dir = os.path.join(self.test_workspace, \"report_dir\")\n process = subprocess.Popen(\n [\n self.codechecker_bin, \"analyze\",\n build_file_path,\n \"-o\", report_dir\n ],\n encoding=\"utf-8\",\n errors=\"ignore\")\n process.communicate()\n self.assertEqual(process.returncode, 0)\n\n # Parse the results.\n process = subprocess.Popen(\n [\n self.codechecker_bin, \"parse\",\n report_dir,\n \"-e\", \"json\"\n ],\n stdout=subprocess.PIPE,\n encoding=\"utf-8\",\n errors=\"ignore\")\n out, _ = process.communicate()\n self.assertEqual(process.returncode, 2)\n reports = json.loads(out)\n self.assertTrue(len(reports) > 0)", "def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if SENDING in line:\n self._req_set.add(self._get_request(line, True))\n line = file.readline()\n except Exception as err:\n print(\"Failed to read garbage collector log. 
Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException", "def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass", "def process_log_file(cur, filepath):\n # open log file\n data_frame = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n data_frame = data_frame[data_frame.page == 'NextSong']\n\n # convert timestamp column to datetime\n time_value = pd.to_datetime(data_frame['ts'])\n\n # insert time data records\n time_data = (time_value, time_value.dt.year, time_value.dt.month,\n time_value.dt.isocalendar().week, time_value.dt.dayofweek,\n time_value.dt.day, time_value.dt.hour)\n\n column_labels = ('timestamp', 'year', 'month', 'week', 'weekday', 'day', 'hour')\n\n time_df = pd.DataFrame.from_dict(dict(zip(column_labels, time_data)))\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = data_frame[['userId', 'firstName', 'lastName', 'gender', 'level']]\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in data_frame.iterrows():\n\n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = row[['ts', 'userId', 'level', 'sessionId', 'location', 'userAgent']]. \\\n append(pd.Series([songid,\n artistid],\n index=[\n 'songid',\n 'artistid']))\n songplay_data['ts'] = pd.to_datetime(songplay_data['ts'])\n\n cur.execute(songplay_table_insert, songplay_data)", "def process_files(user, application, complete_path, init_es, tool, scan_name, user_host, to_name,hook_log=None):\n try:\n application = Application.objects.get(id=application)\n scan = Scan.objects.get(name=scan_name)\n scan.scanlog.status = 'Initiated'\n scan.scanlog.save()\n scan_log = scan.scanlog\n scan_log.status = 'In Progress'\n scan_log.save()\n try:\n if tool == 'Burp': \n parse_burp(complete_path,user,init_es)\n elif tool == 'ZAP':\n ext = complete_path.split('.')[-1]\n if ext == 'json':\n parse_zap_json(complete_path,user,init_es)\n elif ext == 'xml':\n parse_zap(complete_path,user,init_es)\n elif tool == 'AppSpider':\n parse_appspider(complete_path,user,init_es)\n elif tool == 'Arachni':\n parse_arachni(complete_path,user,init_es)\n elif tool == 'Bandit':\n parse_bandit(complete_path,user,init_es)\n elif tool == 'Checkmarx':\n parse_checkmarx(complete_path,user,init_es)\n elif tool == 'AppScan - DAST':\n parse_appscan_dast(complete_path,user,init_es)\n elif tool == 'AppScan - SAST':\n parse_appscan_sast(complete_path,user,init_es)\n elif tool == 'OWASP Dependency Checker':\n parse_owasp_dep_checker(complete_path,user,init_es)\n elif tool == 'w3af':\n w = W3afParser(complete_path,user,init_es,tool)\n w.parse_xml()\n elif tool == \"HP Fortify\":\n parse_hp_fortify(complete_path,user,init_es)\n elif tool == \"Xanitizer\":\n parse_xanitizer(complete_path,user,init_es)\n elif tool == \"FindSecBugs\":\n parser_findsecbug(complete_path,user,init_es) \n info_debug_log(ip=user_host,user=user,event='XML Parsing',status='success')\n if hook_log:\n hook_log.scan_process_event = True\n hook_log.scan_process_exception = ''\n hook_log.scan_process_datetime = timezone.now()\n hook_log.scan_id = scan.name\n hook_log.vul_process_event = 
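The `process_log_file` snippet above expands a millisecond epoch column into calendar features through the pandas `.dt` accessor. A compact sketch with made-up timestamps:

```python
import pandas as pd

ts = pd.Series([1541105830796, 1541106106796])  # hypothetical ms epochs
t = pd.to_datetime(ts, unit="ms")
time_df = pd.DataFrame({
    "timestamp": t, "hour": t.dt.hour, "day": t.dt.day,
    "week": t.dt.isocalendar().week, "month": t.dt.month,
    "year": t.dt.year, "weekday": t.dt.dayofweek,
})
print(time_df.head())
```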
True\n hook_log.vul_process_exception = ''\n hook_log.vul_process_datetime = timezone.now()\n hook_log.save()\n scan_log.status = 'Completed'\n scan_log.save()\n except BaseException as e:\n scan_log.status = 'Killed'\n scan_log.save()\n scan.delete()\n log_exception(e)\n if hook_log:\n hook_log.vul_process_event = False\n hook_log.vul_process_exception = e\n hook_log.vul_process_datetime = timezone.now()\n hook_log.scan_process_event = False\n hook_log.scan_process_exception = e\n hook_log.scan_process_datetime = timezone.now()\n hook_log.scan_id = ''\n hook_log.save()\n # general_error_messages.delay(path='process_files function',msg=log_exception(e))\n critical_debug_log(ip=user_host,user=user,event=e,status='failure')\n except BaseException as e:\n log_exception(e)\n scan_log.status = 'Killed'\n scan_log.save()\n critical_debug_log(ip=user_host,user=user,event=e,status='failure')\n if hook_log:\n hook_log.scan_process_event = False\n hook_log.scan_process_exception = e\n hook_log.scan_process_datetime = timezone.now()\n hook_log.scan_id = ''\n hook_log.save() \n finally:\n info_debug_log(ip=user_host,user=user,event='Remove file after XML parsing',status='success')\n remove_file(complete_path)", "def processLine(self,line,logger=None):\n line=line.rstrip()\n # see where we are in the execution\n if self.stage == 'initialise':\n if AthenaLogChecker._startOfExecuteRE.match(line):\n if logger: logger.info(\"Athena execute()...\")\n self.stage = 'execute'\n return None\n elif self.stage == 'execute':\n if AthenaLogChecker._startOfFinaliseRE.match(line):\n if logger: logger.info(\"Athena finalise()...\")\n self.stage = 'finalise'\n self.event = None\n return None\n match = AthenaLogChecker._eventNumberRE.match(line)\n if match:\n self.event = match.group('event')\n if logger: logger.debug( \"Athena event %s\" , self.event )\n return None\n if AthenaLogChecker._noMoreEventNumberRE.match(line):\n oldEvent = self.event\n self.event = None\n if logger and oldEvent is not None:\n logger.debug( \"No more event numbers available\" )\n return None\n # match ignore patterns\n ignore = AtlasErrorCodes.matchIgnorePattern(line,self.release)\n if ignore:\n if ignore.re.pattern == r'.*?\\s+?INFO .+':\n return None\n self.ignoreCount += 1\n if logger:\n logger.debug(\"ignoring error in line: \\\"%s\\\"\", line)\n logger.debug(\" because it matched: \\\"%s\\\"\", ignore.re.pattern)\n return None\n # then match known error patterns\n match, err = AtlasErrorCodes.matchErrorPattern(line,self.release)\n if err:\n self.processError(err)\n if logger:\n logger.debug(\"matched error category %s in line: %s\", err.category.acronym, line)\n logger.debug(\" because it matched: \\\"%s\\\"\", match.re.pattern)\n return err\n # finally, perform generic error match\n err = self.extractError(line)\n if err:\n self.processError(err)\n if logger:\n logger.verbose(\"non-matched error in line: %s\", line)\n return err\n return None", "def __init__(self):\r\n self.file_object = './ExecutionLogs/PredictFromModel.log'\r\n\r\n \"\"\" Initialize logger class for log writing \"\"\"\r\n self.log_writer = logger.logger(self.file_object)", "def post(self):\n incidentType = None\n incidentTypeNames = [incident.name.lower() for incident in IncidentType.all()]\n \n city = None\n cityNames = [city.name.lower() for city in City.all()]\n \n lines = self.request.get('report').split('\\n')\n lineNum = 0\n \n startDate = None\n endDate = None\n \n for strippedLine in [line.strip() for line in lines if line.strip() != '']:\n try:\n if 0 == 
lineNum:\n startDate = strippedLine\n elif 1 == lineNum:\n endDate = strippedLine\n else:\n if strippedLine.lower() in incidentTypeNames:\n incidentType = IncidentType.all().filter('name =', strippedLine.lower()).get()\n city = None\n elif strippedLine.lower() in cityNames:\n city = City.all().filter('name =', strippedLine.lower()).get()\n else:\n match = re.search('^[0-9]+', strippedLine)\n lineItemTokens = strippedLine.split(',')\n if None != match and 3 == len(lineItemTokens):\n timeToken = lineItemTokens[0]\n descToken = lineItemTokens[1].title()\n addressToken = lineItemTokens[2].title()\n \n # Check for date, anything on or after 3 pm is the start date\n # anything on or after 12 am is the end date\n dateString = endDate\n if int(match.group(0)) >= 3 and int(match.group(0)) < 12 and -1 != timeToken.find('p.m.'):\n dateString = startDate\n \n # Now make sure lines such as 3 p.m = 3:00 p.m.\n if (-1 == timeToken.find(':')):\n timeToken = timeToken.replace(' ', ':00 ')\n \n # Now replace AM or PM with a.m or p.m.\n dateOfIncident = datetime.strptime(\n '%s %s' %(dateString, timeToken.replace('a.m.', 'AM').replace('p.m.', 'PM')), \n '%m/%d/%Y %I:%M %p')\n \n # We've parsed out all informaton so create a record...just bail on this record\n # if we're in a bad state\n assert(None != incidentType and None != city)\n incidentReport = IncidentReport(\n incidentType = incidentType,\n description = descToken,\n time = dateOfIncident,\n city = city,\n address = addressToken)\n incidentReport.save()\n \n # Add background task for getting geo coordinates of the address\n taskqueue.add(url='/retrieve-incident-latlong', params={'id': incidentReport.key().id()})\n #\n else:\n incidentType = None\n city = None\n raise Exception('Unrecognized line')\n #\n #\n #\n # \n except (Exception, AssertionError) as e:\n if isinstance(e, AssertionError):\n e = 'Incident type or city was null.'\n self.response.out.write(\"Error processing line: %s <br />\" %(strippedLine))\n self.response.out.write('<div style=\"margin:5px;\">%s</div>' %(str(e)))\n self.response.out.write('<br />')\n #\n lineNum += 1\n #", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def process_log_file(cur, filepath):\r\n df=pd.read_json(filepath,lines=True)\r\n df2=df\r\n df=df[df['page']=='NextSong']\r\n ser=pd.to_datetime(df['ts'],unit='ms')\r\n times=[]\r\n for i in ser:\r\n times.append([i,i.hour,i.day,i.week,i.month,i.year,i.day_name()])\r\n for i in times:\r\n cur.execute(time_table_insert,i)\r\n df=df[['userId','firstName','lastName','gender','level']]\r\n for i,row in df.iterrows():\r\n cur.execute(users_table_insert,list(row))\r\n for i, row in df2.iterrows():\r\n cur.execute(song_select, (row.song, row.artist, row.length))\r\n res = cur.fetchone()\r\n if res:\r\n song_id, artist_id = res\r\n else:\r\n song_id, artist_id = None, None\r\n\r\n songplay_data = (\r\n i, pd.to_datetime(row.ts, unit='ms'),int(row.userId), row.level, song_id, artist_id, row.sessionId,\r\n row.location, row.userAgent)\r\n cur.execute(songplays_table_insert, songplay_data)", "def 
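`__logtofile` above attaches one `logging.FileHandler` per named log, all sharing a timestamped format. A self-contained sketch of that setup; `make_file_logger` is an illustrative name:

```python
import logging

def make_file_logger(name, path):
    """Create a named logger that writes DEBUG-level records to `path`."""
    logger = logging.getLogger(name)
    handler = logging.FileHandler(path, mode="w")
    handler.setFormatter(logging.Formatter(
        "<%(asctime)s> %(levelname)-8s %(message)s",
        datefmt="%y-%m-%d %H:%M:%S"))
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger
```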
parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def run(self) -> int:\n if self.model_name:\n _ = self.is_exluded_model(self.model_name)\n logger.info(f\"Running audit of model [bold magenta]{self.model_name}.[/bold magenta]\\n\")\n path_file, schema_exists, _ = self.find_model_schema_file(self.model_name)\n if not path_file:\n logger.info(f\"Could not find {self.model_name} in the project at {self.dbt_path}\")\n return 1\n if not schema_exists:\n logger.info(\"The model is not documented.\")\n return 1\n self.model_content = open_yaml(path_file)\n self.derive_model_coverage()\n else:\n logger.info(f\"Running audit of dbt project in {self.dbt_path}.\\n\")\n self.derive_project_coverage()\n return 0", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()", "def load(logFile):\n pass #TODO", "def parselog(filen, progress=0):\n\n # Process a file and return a populated logfile object\n #\n # Maximum size of text buffer to use\n bufsize = 50\n # Initial size of chunks to process\n chunksize = 50\n # Regular expression object\n regex = patternmatch()\n # Buffer objects\n buff = buffer(bufsize)\n tablebuff = tablebuffer()\n linecount = 0\n # New (empty) logfile object\n log = logfile(filen)\n prog = False\n summary = None\n # Open the file for reading\n f = open(filen, \"r\")\n # Read line-by-line\n for line in f:\n linecount += 1\n # Progress indicator (if requested)\n # Report reaching \"progress\" number of lines\n if progress:\n if not linecount % progress:\n print(\"Processed \" + str(linecount) + \" lines\")\n # Append line to buffers\n buff.append(line)\n tablebuff.append(line)\n # Get a chunk of text to process\n bufftext = buff.tail(chunksize)\n # Test the line for matches\n #\n # Data line i.e. 
CCP4 program keywords\n result = regex.isdataline(line)\n if result:\n if not prog or not prog.isprogram():\n # Found a data line outside the context\n # of a program\n # Assume that we are now inside a program\n prog = log.addprogram()\n # Set the start line to be immediately\n # after the previous fragment\n try:\n previous_fragment = log.fragment(log.nfragments() - 2)\n start = previous_fragment.get_endline() + 1\n except IndexError:\n # Failed to get end line of previous\n # fragment\n start = 0\n log.set_fragment_start(start)\n # Remove any html tags and store\n data_line = strip_logfile_html(result[\"data_line\"])\n prog.addkeyword(data_line)\n # File opening report line i.e. logical name/filename pairs\n result = regex.isfileopen(line)\n if result:\n if not prog or not prog.isprogram():\n # Found a file opening report outside the context\n # of a program\n # Assume that we are now inside a program\n prog = log.addprogram()\n # Set the start line to be immediately\n # after the previous fragment\n try:\n previous_fragment = log.fragment(log.nfragments() - 2)\n start = previous_fragment.get_endline() + 1\n except IndexError:\n # Failed to get end line of previous\n # fragment\n start = 0\n log.set_fragment_start(start)\n # Store the logical name/filename pair\n prog.addlogicalname(result[\"logical_name\"], result[\"filename\"])\n # Start of a summary block i.e. <!--SUMMARY_BEGIN-->\n result = regex.issummary_begin(line)\n if result:\n summary = log.addsummary(linecount)\n # End of a summary block i.e. <!--SUMMARY_END-->\n result = regex.issummary_end(line)\n if result:\n if not summary:\n # Make a new summary with no start\n summary = log.addsummary()\n # Close out the current summary\n summary.set_end(linecount)\n # Test the buffer for matches\n #\n # CCP4 program banner\n result = regex.isccp4banner(bufftext)\n if result:\n ##print \"Found CCP4 program banner\"\n ##print \"Result = \"+str(result)\n prog = log.addprogram()\n prog.set_isccp4(True)\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_start(linecount)\n buff.clear()\n tablebuff.clear()\n continue\n # SHELX program banner\n result = regex.isshelxbanner(bufftext)\n if result:\n ##print \"Found SHELX program banner\"\n ##print \"Result = \"+str(result)\n prog = log.addprogram()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_start(linecount)\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4 program termination\n result = regex.isccp4termination(bufftext)\n if result:\n ##print \"Found CCP4 program termination\"\n ##print \"Result = \"+str(result)\n if not prog:\n # Outside the context of any fragment, and\n # found the end of a program before its start\n log.set_fragment_end(offsetline(linecount, result))\n prog = log.addprogram()\n elif not prog.isprogram():\n # Within the context of a fragment which\n # is not a program and found the end of a\n # program before its start\n log.set_fragment_end(offsetline(linecount, result))\n prog = log.addprogram()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_end(linecount)\n prog.set_termination(True)\n # Clear the current pointer\n prog = False\n buff.clear()\n tablebuff.clear()\n continue\n # SHELX program termination\n result = regex.isshelxtermination(bufftext)\n if result:\n ##print \"Found SHELX program termination\"\n ##print \"Result = \"+str(result)\n if not prog:\n # Found the end of a program before its start\n prog = log.addprogram()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_end(linecount)\n 
prog.set_termination(True)\n # Clear the current pointer\n prog = False\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4 table\n if tablebuff.complete():\n if not prog:\n # Found a table outside the context of a program\n ##print \"Adding table as a fragment\"\n prog = log.newfragment()\n log.set_fragment_start(linecount)\n table_error = False\n table = prog.addtable(tablebuff.all())\n if not table:\n print(\"*** Failed to extract table data ***\")\n table_error = True\n elif table.parse_error():\n print(\"*** Failed to parse table data ***\")\n table_error = True\n if table_error:\n print(\"\\tLogfile: \" + str(log.filename()))\n print(\"\\tTable start: L\" + str(linecount - len(tablebuff) + 1))\n print(\"\\tTable end : L\" + str(linecount))\n # Add the table to the log, regardless of status\n log.addtable(table)\n # clear the buffers\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4 keytext message\n result = regex.isccp4keytext(bufftext)\n if result:\n ##print \"Found CCP4 keytext\"\n ##print \"Result = \"+str(result)\n if not prog:\n # Found a message outside the context of a program\n ##print \"Adding keytext as a fragment\"\n prog = log.newfragment()\n log.set_fragment_start(linecount)\n keytext = prog.addkeytext(\n result[\"name\"], result[\"junk_text\"], result[\"message\"]\n )\n log.addkeytext(keytext)\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4i header\n result = regex.isccp4iheader(bufftext)\n if result:\n ##print \"Found CCP4i header\"\n ##print \"Result = \"+str(result)\n log.append_ccp4i_header(result)\n buff.clear()\n continue\n # CCP4i tail\n result = regex.isccp4itail(bufftext)\n if result:\n ##print \"Found CCP4i tail\"\n ##print \"Result = \"+str(result)\n log.append_ccp4i_tail(result)\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4i information\n result = regex.isccp4i_information(bufftext)\n if result:\n ##print \"Found CCP4i information\"\n ##print \"Result = \"+str(result)\n # Make a new fragment - these messages shouldn't\n # appear inside the context of another program\n prog = log.addccp4i_info()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_start(linecount)\n log.set_fragment_end(linecount)\n # Clear the current context\n prog = False\n buff.clear()\n tablebuff.clear()\n continue\n # Ensure that the endline of the last fragment\n # is assigned\n log.set_fragment_end(linecount)\n # Close the file\n f.close()\n return log", "def carve(self, bs, dataFile, verbose=False):\n _bs = bs\n records = []\n headers = []\n\n i = 0\n # Find all occurrences of the magic string\n found = _bs.findall(evt_header.MagicString, bytealigned=False)\n readSoFarBits = 0\n for idx in found:\n _bs.pos = idx\n r = EvtRecord()\n r.setPathname(dataFile)\n r.setPosition(_bs.pos)\n\n # Read an EVT header field:\n # The algorithm here is to find the message separator \n # and use that as a basis for locating the other fields.\n # Since we split large input files, \"offset\" fields are\n # invalid. 
\n\n # Message length\n fieldBits = 32\n lenIdx = idx - fieldBits # Set position to idx of length\n _bs.pos = lenIdx\n recordLength = _bs.read(fieldBits).uintle\n r.setField(\"length\", recordLength)\n readSoFarBits += fieldBits\n\n # Calculate size of variable data at end of record \n varDataSize = evt_record.FixedSize - recordLength \n # When reading the size in a header\n if varDataSize < 0: \n varDataSize = 0\n\n # Reset stream position\n _bs.pos = idx\n\n # Message separator\n fieldBits = 32 \n # Check to see if we are reading past end of stream\n data = self.carveField(_bs, \"reserved\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"reserved\", data)\n\n # Record number\n fieldBits = 32 \n data = self.carveField(_bs, \"recordNumber\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"recordNumber\", data)\n\n # Date created\n fieldBits = 32 \n data = self.carveField(_bs, \"timeGenerated\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"timeGenerated\", data)\n\n # Date written\n fieldBits = 32 \n data = self.carveField(_bs, \"timeWritten\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"timeWritten\", data)\n\n # Event ID\n fieldBits = 16 \n data = self.carveField(_bs, \"eventID\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventID\", data)\n \n # Event RVA offset\n fieldBits = 16 \n data = self.carveField(_bs, \"eventRVA\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventRVA\", data)\n\n # Event type\n fieldBits = 16 \n data = self.carveField(_bs, \"eventType\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventType\", data)\n\n # Num strings\n fieldBits = 16 \n data = self.carveField(_bs, \"numStrings\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"numStrings\", data)\n\n # Category\n fieldBits = 16 \n data = self.carveField(_bs, \"eventCategory\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventCategory\", data)\n\n # Reserved flags \n fieldBits = 16 \n data = self.carveField(_bs, \"reservedFlags\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"reservedFlags\", data)\n\n # Closing record number\n fieldBits = 32 \n data = self.carveField(_bs, \"closingRecordNumber\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"closingRecordNumber\", data)\n\n # String offset\n fieldBits = 32 \n data = self.carveField(_bs, \"stringOffset\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"stringOffset\", data)\n\n # User SID length\n fieldBits = 32\n data = self.carveField(_bs, \"userSidLength\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"userSidLength\", data)\n\n # User SID offset\n fieldBits = 32 \n data = self.carveField(_bs, \"userSidOffset\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"userSidOffset\", data)\n\n # Data length\n fieldBits = 32 \n data = self.carveField(_bs, \"dataLength\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n 
r.setField(\"dataLength\", data)\n\n # Data offset\n fieldBits = 32\n data = self.carveField(_bs, \"dataOffset\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"dataOffset\", data)\n\n # Variable data\n # FIXME: dont rely on peek() to avoid reading past end of stream\n fieldBits = int(r.getField(\"length\"))\n try:\n data = _bs.peek(\"bytes\" + \":\" + str(fieldBits))\n except bitstring.ReadError:\n if verbose:\n print \"[EVT]: Unable to read EVT data field; \"\\\n \"it would be truncated\"\n break\n data = self.carveField(_bs, \"varData\", \"bytes\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"varData\", data)\n\n # SID\n # FIXME: find out why sidLength is so weird\n #sidLength = r.getField(\"userSidLength\")\n #if sidLength > 0:\n # sidOffset = r.getField(\"userSidOffset\")\n # if sidOffset <= _bs.length:\n # _bs.pos = sidOffset\n # fieldBits = sidLength\n # if readSoFarBits + fieldBits >= _bs.len:\n # fieldBits = _bs.len - _bs.pos\n # sid = _bs.read(fieldBits).uint\n # r.setField(\"sid\", sid)\n # break\n # sid = _bs.read(fieldBits).uint\n # r.setField(\"sid\", sid)\n #readSoFarBits += fieldBits\n records.append(r)\n return (headers, records)", "def parser(path):\n # Initialize empty arrays\n total_arr_10 = []\n docker_arr_10 = []\n orchestration_arr_10 = []\n total_arr_100 = []\n docker_arr_100 = []\n orchestration_arr_100 = []\n total_arr_200 = []\n docker_arr_200 = []\n orchestration_arr_200 = []\n total_arr_300 = []\n docker_arr_300 = []\n orchestration_arr_300 = []\n\n # Search for log files and parse the results\n for entry in os.listdir(path):\n if entry.endswith('.log'):\n if '_10_' in entry:\n with open(path + '/' + entry, 'r') as f_10:\n data = f_10.readlines()\n # Last three lines are of interest\n stripped_data = data[-3:]\n for e,item in enumerate(stripped_data):\n # Split newlins\n result_10 = item.replace('\\n','')\n # Get the timestamp and return in seconds\n before_comma_10 = result_10[-12:]\n result_10 = return_seconds(before_comma_10)\n # Append to total array\n if e == 0:\n total_arr_10.append(result_10)\n # Append to docker array\n elif e == 1:\n docker_arr_10.append(result_10)\n # Append to orchestration array\n elif e == 2:\n orchestration_arr_10.append(result_10)\n elif '_100_' in entry:\n with open(path + '/' + entry, 'r') as f_100:\n data = f_100.readlines()\n stripped_data = data[-3:]\n for e,item in enumerate(stripped_data):\n result_100 = item.replace('\\n','')\n before_comma_100 = result_100[-12:]\n result_100 = return_seconds(before_comma_100)\n if e == 0:\n total_arr_100.append(result_100)\n elif e == 1:\n docker_arr_100.append(result_100)\n elif e == 2:\n orchestration_arr_100.append(result_100)\n elif '_200_' in entry:\n with open(path + '/' + entry, 'r') as f_200:\n data = f_200.readlines()\n stripped_data = data[-3:]\n for e,item in enumerate(stripped_data):\n result_200 = item.replace('\\n','')\n before_comma_200 = result_200[-12:]\n result_200 = return_seconds(before_comma_200) \n if e == 0:\n total_arr_200.append(result_200)\n elif e == 1:\n docker_arr_200.append(result_200)\n elif e == 2:\n orchestration_arr_200.append(result_200)\n elif '_300_' in entry:\n with open(path + '/' + entry, 'r') as f_300:\n data = f_300.readlines()\n stripped_data = data[-3:]\n for e,item in enumerate(stripped_data):\n result_300 = item.replace('\\n','')\n before_comma_300 = result_300[-12:]\n result_300 = return_seconds(before_comma_300) \n if e == 0:\n 
total_arr_300.append(result_300)\n elif e == 1:\n docker_arr_300.append(result_300)\n elif e == 2:\n orchestration_arr_300.append(result_300)\n\n return total_arr_10, docker_arr_10, orchestration_arr_10, total_arr_100, docker_arr_100, orchestration_arr_100, total_arr_200, docker_arr_200, orchestration_arr_200, total_arr_300, docker_arr_300, orchestration_arr_300", "def annotate_file(self, infile):\n with open(infile, 'r') as f:\n raw = f.read()\n parser = yatapi.trigger.TrigEditParser()\n trigs = parser.extract_triggers(raw)\n for trig in trigs:\n for condition in trig['conditions']:\n self._annotate_statement(parser.parse_statement(condition), CONDITION)\n for action in trig['actions']:\n self._annotate_statement(parser.parse_statement(action), ACTION)", "def process_log_file(cur, filepath):\n\n df = pd.read_json(filepath, lines=True)\n\n df = df[df[\"page\"] == \"NextSong\"]\n\n t = df['ts'] = pd.to_datetime(df['ts'], unit='ms')\n\n accessor = t.dt\n time_data = (t, accessor.hour, accessor.day, accessor.week,\n accessor.month, accessor.year, accessor.weekday)\n\n time_df = pd.DataFrame.from_dict({\n \"timestamp\": t,\n \"hour\": accessor.hour,\n \"day\": accessor.day,\n \"week\": accessor.week,\n \"month\": accessor.month,\n \"year\": accessor.year,\n \"weekday\": accessor.weekday\n })\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']]\n\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n for index, row in df.iterrows():\n\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n start_time = row[\"ts\"]\n user_id = row[\"userId\"]\n level = row[\"level\"]\n song_id = songid\n artist_id = artistid\n session_id = row['sessionId']\n location = row['location']\n user_agent = row['userAgent']\n\n songplay_data = (start_time, user_id, level, song_id, artist_id, session_id,\n location, user_agent)\n cur.execute(songplay_table_insert, songplay_data)", "def process_file(self):\n self._processing_logger.log_info('Start processing')\n self.parsing_start_time = datetime.datetime.now()\n if os.path.exists(self.tmp_stat_file_path) \\\n and not HcsParsingUtils.active_processing_exceed_timeout(self.tmp_stat_file_path):\n self._processing_logger.log_info('This file is processed by another parser, skipping...')\n return 2\n self.create_tmp_stat_file()\n hcs_index_file_path = self.hcs_root_dir + MEASUREMENT_INDEX_FILE_PATH\n time_series_details = self._extract_time_series_details(hcs_index_file_path)\n self.generate_ome_xml_info_file()\n xml_info_tree = ET.parse(self.ome_xml_info_file_path).getroot()\n plate_width, plate_height = self._get_plate_configuration(xml_info_tree)\n wells_tags = self.read_wells_tags()\n if wells_tags:\n self._processing_logger.log_info(\"Tags \" + str(wells_tags))\n if not TAGS_PROCESSING_ONLY and not EVAL_PROCESSING_ONLY:\n if not self._localize_related_files():\n self._processing_logger.log_info('Some errors occurred during copying files from the bucket, exiting...')\n return 1\n else:\n self._processing_logger.log_info('Localization is finished.')\n local_preview_dir = os.path.join(self.tmp_local_dir, 'preview')\n hcs_local_index_file_path = get_path_without_trailing_delimiter(self.tmp_local_dir) \\\n + MEASUREMENT_INDEX_FILE_PATH\n for sequence_id, timepoints in time_series_details.items():\n 
self._processing_logger.log_info('Processing sequence with id={}'.format(sequence_id))\n sequence_index_file_path = self.extract_sequence_data(sequence_id, hcs_local_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {}'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_index_file_path, local_preview_dir, sequence_id))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful...')\n return 1\n sequence_overview_index_file_path, wells_grid_mapping = self.build_sequence_overview_index(sequence_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {} \"{}\"'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_overview_index_file_path, local_preview_dir,\n sequence_id, 'overview_data.ome.tiff'))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful: well preview generation failure')\n return 1\n self.write_dict_to_file(os.path.join(local_preview_dir, sequence_id, 'wells_map.json'),\n self.build_wells_map(sequence_id, wells_grid_mapping, wells_tags))\n if LOCALIZE_USE_PIPE == \"true\":\n cloud_transfer_result = os.system('pipe storage cp -f -r \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n else:\n cloud_transfer_result = os.system('aws s3 sync \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n if cloud_transfer_result != 0:\n self._processing_logger.log_info('Results transfer was not successful...')\n return 1\n self._write_hcs_file(time_series_details, plate_width, plate_height)\n if not EVAL_PROCESSING_ONLY:\n tags_processing_result = self.try_process_tags(xml_info_tree, wells_tags)\n if TAGS_PROCESSING_ONLY:\n if wells_tags:\n for sequence_id, timepoints in time_series_details.items():\n path = os.path.join(self.hcs_img_service_dir, sequence_id, 'wells_map.json')\n self.write_dict_to_file(path, self.update_wells_json(path, wells_tags))\n return tags_processing_result\n if not TAGS_PROCESSING_ONLY:\n eval_processing_result = self.try_process_eval()\n if EVAL_PROCESSING_ONLY:\n return eval_processing_result\n self.create_stat_file()\n return 0", "def _forward_log(self):\n\n if self.log is None:\n return\n\n fd = None\n try:\n fd = os.open(\"%s.out\" % self.vm_log_path, os.O_RDONLY)\n data = \"\"\n while True:\n new_data = os.read(fd, 4096)\n if new_data == \"\":\n self._log_to_file(data)\n return\n\n data += new_data\n lines = data.split(\"\\n\")\n for line in lines[:-1]:\n self._log_to_file(line)\n data = lines[-1]\n\n finally:\n if fd is not None:\n os.close(fd)", "def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"time\" | id | temp/C | power/W \n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" \n % (self.ts('now'), self.ts('cc'), self.id, self.temp, self.watts))\n finally:\n f.close()", "def _parse(self, max_seq):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n # Find the start of a new sequence\n if GENERATION in line and RENDERING_SEQUENCE in line:\n # extract the sequence length\n num_reqs = int(line.split(GENERATION)[1].split(': ')[0])\n seq_num = int(line.split(RENDERING_SEQUENCE)[1])\n\n if max_seq > 0 and seq_num > max_seq:\n return\n\n seq = ParsedSequence([])\n # Add each request in the sequence to the sequence object\n for i in range(num_reqs):\n while line and SENDING not in line:\n line = file.readline()\n if REPLAY_START in line:\n 
self._skip_replay(file)\n\n seq += self._get_request(line, True)\n line = file.readline()\n\n # Extend the list of sequences in this log\n self._seq_list += [seq]\n elif CHECKER_START in line:\n self._handle_checker(seq, line, file)\n line = file.readline()\n\n except Exception as err:\n print(\"Failed to read fuzzing log. Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException", "def parse(self):\n try:\n self.open_file()\n lines = list(self._file)\n\n if len(lines) > 0:\n text = ''.join(lines)\n regex = 'Song \\d+\\nStart (\\d+:\\d+:\\d+)\\nEnd (\\d+:\\d+:\\d+)\\nLength (\\d+.\\d+)'\n match = re.findall(regex, text)\n if len(match):\n starts = []\n ends = []\n lengths = []\n\n for i in range(len(match)):\n starts.append(match[i][0])\n ends.append(match[i][1])\n lengths.append(float(match[i][2]))\n\n for i in range(len(match)):\n self.debug_data.append({\n 'start':starts[i],'end':ends[i],'length':lengths[i]})\n\n match = re.search('T\\d_S(\\d{4})_.*.txt', self._filepath)\n if match:\n self._experiment_metadata['session_id'] = int(match.groups()[0])\n else:\n raise EIMParsingError(\"No valid session id found in filename %s\" % self._filepath)\n\n finally:\n if self._file and not self._file.closed:\n self.close_file()", "def parse_log_file(self):\n # Open log file\n log_file_data = utils.open_file(self.log_file)\n for line in log_file_data:\n algo = line.strip(\"\\n\").split(\":\")[1]\n if len(algo) > 3:\n hash_algo = algo.split(\"$\")[1]\n if hash_algo not in self.used_algo:\n self.used_algo.append(hash_algo)", "def process_log_file(cur, filepath):\n # open log file\n df = get_file_df(filepath)\n\n # filter by NextSong action\n df = df[df['page'] == 'NextSong']\n\n # convert timestamp column to datetime\n df['ts'] = df['ts'].apply(lambda x: datetime.datetime.fromtimestamp(x/1000)) \n t = df\n \n time_data = []\n for td in t['ts']:\n wd = True if td.weekday() <=6 else False\n time_data.append([str(td.time()), td.hour, td.day, td.week, td.month, td.year, wd])\n column_labels = ('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday')\n\n # insert time data records\n time_df = pd.DataFrame(time_data, columns=column_labels)\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']].copy()\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n #user_id, level, song_id, artist_id, session_id, location, user_agent\n songplay_data = [row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent]\n cur.execute(songplay_table_insert, songplay_data)", "def preProcess(self,filename,fileoutput):\t\n\tdata=[]\n\tval =set()\n\tfo = open(fileoutput, \"wb\")\n\twith open(filename) as data_file:\n \tfor tags in data_file:\n\t\t\tif \"timestamp\" not in tags: \n \t \t continue\n\t\t\tts = re.search('timestamp: (.+?)\\)', tags).group(1)\n\t\t\tval =set()\n\t\t\tval.update({tag for tag in tags.split() if tag.startswith(\"#\")})\n\t\t\t#print val\n\t\t\tif len(val) 
>1:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tdata.append((ts,val))\n\t\t\t\tself.createAdjList(val,\"add\")\n\t\t\t\tprint(\"***\")\n\t\t\telse:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tprint(\"@@@@\")\n\t\t\tresult = self.calculateRollingAverages() \n\t\t\tfo.write(result+\"\\n\")\n fo.close()\n data_file.close()", "def process_logs(logs):\n all_data = {}\n for log in logs:\n with open(log) as f:\n data = json.load(f)\n scenario = data[0].get(\"scenario\", None)\n if scenario is None:\n # No scenario name, no way to organize the data\n continue\n\n # Use the log's date as the run identifier\n # This assumes the format is SCENARIO-YYYY-MM-DD.json\n # NOTE: This may not match the GitHub Action run dates due to tests taking\n # a very long time.\n day = datetime.strptime(log[1+len(scenario):-5], \"%Y-%m-%d\").strftime(\"%Y%m%d\")\n if day not in all_data:\n all_data[day] = {}\n\n # Group them by scenario, assume each file is from one scenario per day\n all_data[day][scenario] = data\n return all_data", "def process_log_file(cur, filepath):\n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = df[df['page']=='NextSong']\n\n # convert timestamp column to datetime\n t = pd.to_datetime(df['ts'], unit='ms')\n \n # insert time data records\n #timestamp, hour, day, week of year, month, year, and weekday\n hour = t.dt.hour\n day = t.dt.day\n weekofyear = t.dt.weekofyear\n month = t.dt.month\n year = t.dt.year\n weekday = t.dt.weekday\n\n time_data = [df['ts'], hour, day, weekofyear, month, year, weekday]\n column_labels = ['timestamp', 'hour', 'day', 'week of year', 'month', 'year', 'weekday']\n time_df = pd.DataFrame.from_dict(dict(zip(column_labels, time_data)))\n\n for i, row in time_df.iterrows():\n try:\n cur.execute(time_table_insert, list(row))\n except psycopg2.Error as e:\n print(\"Error: Unable to insert record in time table in row number : {}\".format(i))\n print(e)\n\n # load user table\n user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']]\n\n # insert user records\n for i, row in user_df.iterrows():\n try:\n cur.execute(user_table_insert, row)\n except psycopg2.Error as e:\n print(\"Error: Unable to insert record in users table in row number : {}\".format(i))\n print(e)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n try:\n cur.execute(song_select, (row.song, row.artist, row.length))\n except psycopg2.Error as e:\n print(\"Error: Unable to execute song_select query to join songs and artists table in row number : {}\".format(index))\n print(e)\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n #timestamp, user ID, level, song ID, artist ID, session ID, location, and user agent\n songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)\n try:\n cur.execute(songplay_table_insert, songplay_data)\n except psycopg2.Error as e:\n print(\"Error: Unable to insert record in songplays table in row number : {}\".format(i))\n print(e)", "def process_log_file(cur, filepath):\r\n\r\n # open log file\r\n df = pd.read_json(filepath, lines=True)\r\n df2 = pd.read_json(filepath, lines=True)\r\n\r\n # filter by NextSong action for missing data\r\n df2 = df2[df2['page']=='NextSong']\r\n\r\n\r\n\r\n # insert missing records into Song and Artist Table\r\n for i, row in df2.iterrows():\r\n 
cur.execute(artist_table_insert, (row.artist + str(i), row.artist, row.location, 0, 0))\r\n for i, row in df2.iterrows():\r\n cur.execute(song_table_insert, (row.song + str(i), row.song, row.artist + str(i), 0, row.length))\r\n\r\n # filter by NextSong action\r\n df = df[df['page']=='NextSong']\r\n\r\n # convert timestamp column to datetime\r\n t = pd.to_datetime(df['ts'], unit='ms')\r\n\r\n # extract time data from timestamp\r\n time_data = {'start_time': t,'hour': pd.Series(t).dt.hour, 'day':pd.Series(t).dt.day,\r\n 'month': pd.Series(t).dt.month, 'year': pd.Series(t).dt.year,\r\n 'weekday': pd.Series(t).dt.dayofweek}\r\n #column_labels = []\r\n # insert time data records\r\n time_df = pd.DataFrame(time_data)\r\n\r\n for i, row in time_df.iterrows():\r\n cur.execute(time_table_insert, list(row))\r\n\r\n # load user table\r\n user_df = df[['userId', 'firstName','lastName','gender','level']]\r\n user_df.drop_duplicates(subset='userId',keep ='first',inplace = True)\r\n\r\n # insert user records\r\n for i, row in user_df.iterrows():\r\n cur.execute(user_table_insert, row)\r\n\r\n # insert songplay records\r\n for index, row in df.iterrows():\r\n\r\n # get songid and artistid from song and artist tables\r\n print(cur.mogrify(song_select, (row.song, row.artist, row.length)))\r\n cur.execute(song_select, (row.song, row.artist, row.length))\r\n results = cur.fetchone()\r\n\r\n if results:\r\n songid, artistid = results[0],results[1]\r\n else:\r\n songid, artistid = \"None\" + str(index), \"None\" + str(index)\r\n\r\n # insert songplay record\r\n songplay_data = (df[['ts', 'userId', 'level', 'sessionId','location','userAgent' ]])\r\n songplay_data['ts'] = pd.to_datetime(df['ts'], unit='ms')\r\n cur.execute(songplay_table_insert, (index, row.ts, row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent))\r\n #conn.commit()\r", "def seed_movie_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"|\")\n\n #get data from split line\n id = int(data[0])\n release = data[2]\n url = data[4]\n\n #titles might have accented characters so test for this and decode\n #them if so\n title = data[1]\n try:\n title = unicode(title)\n except UnicodeError:\n title = title.decode(\"utf-8\")\n\n #if there's a date there, parse it\n if release:\n release = datetime.strptime(data[2], \"%d-%b-%Y\")\n #otherwise, set release to None so it will become NULL in the database\n else:\n release = None\n\n #create a new record and add it to the queue\n new_movie = Movie(movie_id=id, title=title, \n released_at=release, imdb_url=url)\n db.session.add(new_movie)\n\n #commit changes\n db.session.commit()", "def log_results(self, filename=None):\n\n self.ad_log['train_auc'] = self.diag['train']['auc'][-1]\n self.ad_log['train_accuracy'] = self.diag['train']['acc'][-1]\n self.ad_log['train_time'] = self.train_time\n\n self.ad_log['test_auc'] = self.diag['test']['auc'][-1]\n self.ad_log['test_accuracy'] = self.diag['test']['acc'][-1]\n self.ad_log['test_time'] = self.test_time\n\n self.ad_log.save_to_file(filename=filename)" ]
[ "0.61394227", "0.6064906", "0.59986395", "0.5834477", "0.5793847", "0.57524127", "0.5733288", "0.5686557", "0.5646877", "0.55704165", "0.55531", "0.5498819", "0.5413356", "0.53873146", "0.5380587", "0.5363391", "0.5344947", "0.53289014", "0.53170085", "0.5265918", "0.5260259", "0.524851", "0.5238336", "0.52189004", "0.52171785", "0.5207587", "0.5203746", "0.5198268", "0.51955444", "0.5180761", "0.5178855", "0.5169392", "0.51677823", "0.51456916", "0.5144302", "0.5135181", "0.5127614", "0.51260287", "0.51244456", "0.51171184", "0.5114685", "0.51086676", "0.510863", "0.5096245", "0.5090605", "0.50754297", "0.50698316", "0.50681627", "0.5060257", "0.50551814", "0.5045247", "0.50397867", "0.50285053", "0.5026171", "0.502142", "0.50199383", "0.501637", "0.49991396", "0.49979606", "0.49915066", "0.49899343", "0.49780688", "0.49779153", "0.4976384", "0.49683774", "0.49567276", "0.49563462", "0.49539492", "0.4951", "0.4945037", "0.49424642", "0.4937958", "0.49329746", "0.4931248", "0.49309304", "0.49217555", "0.4920302", "0.49182284", "0.49175754", "0.49105048", "0.49077192", "0.48891678", "0.48873428", "0.48862287", "0.48845112", "0.48842603", "0.48793545", "0.48758298", "0.48747382", "0.48746318", "0.48728687", "0.48687792", "0.48683527", "0.48626068", "0.48592374", "0.48521575", "0.48451024", "0.48395386", "0.4838085", "0.48333505" ]
0.63416183
0
Initialise clusters by alternating the bins to which the vectors are assigned.
def alternating_bins_initialisation(self, pixel_data, a=None, b=None):
    if not a or not b:
        a = 0
        b = len(pixel_data)
    clusters = defaultdict(list)
    for i in range(a, b):  # selecting sevens as data set
        clusters[i % self.K].append(pixel_data[i])
    return clusters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center", "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1", "def populate_grid(self):\n from cemc_cpp_code import hoshen_kopelman\n self.bins[:, :, :] = 0\n for atom in self.atoms:\n if atom.symbol in self.track_elements:\n n = self.get_bin(atom.index)\n self.bins[n[0], n[1], n[2]] += 1\n\n # Run the Hoshen-Kopelman algorithm to label the \n # bins into clusters\n self.clusters = hoshen_kopelman(self.bins)", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def _initial_clusters(self):\n clusters = []\n for i in range(self.point_count):\n clusters.append(self._create_cluster_from_index(i))\n return clusters", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def initialize(self):\n self.SIZE = self.vectors.shape[0]\n # todo can use max distance to allocation farthest apart points\n self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]", "def atlas_clusters():\n pass", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]", "def initialize_dom(img: np.ndarray):\n\n channels = img.shape[2]\n\n for cluster in range(numclusters):\n for channel in range(channels):\n cmin = np.amin(img[:,:,channel]) # channel's min\n cmax = np.amax(img[:,:,channel]) # channel's max\n current_cluster_centers[cluster, 0, channel] = np.random.uniform(cmin, cmax)\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def 
denseBinsToClusters(candidates, plot=False, debug=False):\n    graph = np.identity(len(candidates))\n    for i in range(len(candidates)):\n        for j in range(len(candidates)):\n            graph[i, j] = int(neighbour(candidates[i], candidates[j]))\n    # Find connected components in order to merge neighbouring bins\n    nbConnectedComponents, components = scipy.sparse.csgraph.connected_components(\n        graph, directed=False)\n    if debug:\n        print(graph)\n        print(nbConnectedComponents, components)\n    candidates = np.array(candidates)\n    clusterAssignment = -1 * np.ones(data.shape[0])\n    # For every cluster\n    for i in range(nbConnectedComponents):\n        # Get dense units of the cluster\n        cluster_dense_units = candidates[np.where(components == i)[0]]\n        if debug:\n            for v in cluster_dense_units:\n                for z in v:\n                    print(z)\n        clusterDimensions = {}\n        for j in range(len(cluster_dense_units)):\n            for k in range(len(cluster_dense_units[j])):\n                if cluster_dense_units[j][k].dimension not in clusterDimensions:\n                    clusterDimensions[cluster_dense_units[j][k].dimension] = []\n                clusterDimensions[cluster_dense_units[j][k].dimension].extend(cluster_dense_units[j][k].points)\n        points = reduce(np.intersect1d, list(clusterDimensions.values()))\n        clusterAssignment[points] = i\n        if plot:\n            pred = -1 * np.ones(data.shape[0])\n            pred[points] = i\n            plt.figure()\n            plt.title(f'In yellow, clusters in {list(clusterDimensions.keys())} dimensions ')\n            plt.scatter(data[:, 0], data[:, 1], c=pred)\n            for g in grid[0]:\n                plt.axvline(x=g, c='red', linestyle='--')\n            for g in grid[1]:\n                plt.axhline(y=g, c='red', linestyle='--')\n            plt.show()\n        if debug:\n            print(clusterDimensions.keys(), points)\n    return clusterAssignment", "def _cluster_into_bins(eval_data, ref_data, num_clusters):\r\n\r\n  cluster_data = np.vstack([eval_data, ref_data])\r\n  kmeans = sklearn.cluster.MiniBatchKMeans(n_clusters=num_clusters, n_init=10)\r\n  labels = kmeans.fit(cluster_data).labels_\r\n\r\n  eval_labels = labels[:len(eval_data)]\r\n  ref_labels = labels[len(eval_data):]\r\n\r\n  eval_bins = np.histogram(eval_labels, bins=num_clusters,\r\n                           range=[0, num_clusters], density=True)[0]\r\n  ref_bins = np.histogram(ref_labels, bins=num_clusters,\r\n                          range=[0, num_clusters], density=True)[0]\r\n  return eval_bins, ref_bins", "def _init_cluster(self):\n        self._Init_Cluster()", "def clusters_allocate_cells(self):\n        for cluster in self.clusters:\n            cluster.cells[:] = []\n        for cell in self.block_proc:\n            wdists = []\n            for cluster in self.clusters:\n                s = cluster.size\n                d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +\n                      (cell.z-cluster.z)**2 )\n                d = numpy.sqrt(d)\n                c = self.c\n                # TODO: choose a better distance function below\n                r = d*(c+(1-c)*numpy.exp(-s/d))\n                r = numpy.clip(r,0,r)\n                wdists.append(r)\n            self.clusters[numpy.argmin(wdists)].cells.append(cell)", "def __init__(self, count):\n\n        self.clusters_count = count\n        self._leaders = [i for i in range(count)]\n        self._ranks = [0] * count", "def __init__(self):\n        ## self.clusters[cluster] = list of coordinates\n        self.clusters = {}\n        ## self.centroids[cluster] = centroid\n        self.centroids = {}", "def _init_centroid(self, seed: int):\n        random.seed(seed)\n        self.centroid_info = dict()\n        self.cluster_result = dict()\n        self.centroid_stable_flag = dict()\n        for key_index, chosen_value in enumerate(\n                random.sample(self.list_data, self.n_cluster)):\n            self.centroid_info.setdefault(\"c\" + str(key_index), float(chosen_value))\n            self.cluster_result.setdefault(\"c\" + str(key_index), list())\n            self.centroid_stable_flag.setdefault(\"c\" + str(key_index), False)",
"def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters", "def __init__(self, bins):\n self.bins = bins", "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def makeCluster(self):\n for i in range(self.k):\n #vector of length total users, pick random number 1-5\n self.centroids.append(np.random.uniform(low=1,high=5,size=len(self.user)))\n memberList = []\n self.membership.append(memberList)\n self.centroids = np.round(self.centroids)\n\n for movie in self.dictionary.keys():\n #Finds the index of the closest centroid\n closest = np.argmin(self.calculateDistance(self.dictionary[movie]))\n newVector = []\n newVector.append(movie)\n #Add the movie to the list of members of the closest centroid\n self.membership[closest].append(newVector)\n self.recalculateCentroid(self.membership[closest], closest)", "def make_all_zero(curr_clusters, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n curr_clusters[i][j] = 0", "def _10x10_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]", "def initial_clusters(self, points):\n groups = {}\n d = int(256 / (self.initial_k))\n for i in range(self.initial_k):\n j = i * d\n groups[(j, j, j)] = []\n for i, p in enumerate(points):\n # if i%100000 == 0:\n # print('processing pixel:', i)\n go = min(groups.keys(), key=lambda c: euclidean_distance(p, c)) \n groups[go].append(p)\n return [g for g in groups.values() if len(g) > 0]", "def initializeClusters(numClusters: int, numPrototypes: int) -> ndarray:\n result: ndarray = np.empty((numClusters, numPrototypes), dtype=int)\n for i in range(numClusters):\n result[i, :] = [j for j in range(i * numPrototypes, (i + 1) * numPrototypes)]\n return result", "def __grow_cluster(self, init_loc, thresh):\n cluster = np.zeros_like(self.__array, dtype=bool)\n cluster[init_loc[0], init_loc[1]] = True\n pocket = [init_loc]\n adjacent = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n m, n = self.__array.shape\n while pocket:\n pt = pocket.pop(0)\n neighbors_in_cluster = [\n (pt[0] - i, pt[1] - j) for (i, j) in adjacent\n if 0 <= pt[0] - i < m and 0 <= pt[1] - j < n and\n not cluster[pt[0] - i, pt[1] - j] and\n np.absolute(self.__array[pt[0], pt[1]]\n - self.__array[pt[0] - i, pt[1] - j])\n < thresh]\n for nbr in neighbors_in_cluster:\n pocket.append(nbr)\n 
cluster[nbr[0], nbr[1]] = True\n return cluster", "def assign_clusters(self):\n running_perts = {}\n for name in self.tensor_info:\n item = self.tensor_info[name]\n pert_list = item[1]\n pert_names = []\n prob_list = []\n if pert_list is not None:\n for pert in pert_list:\n pert_names.append(pert.__class__.__name__)\n prob_list.append(pert.p)\n pert_names = '_'.join(pert_names)\n if pert_names not in running_perts:\n running_perts[pert_names] = [(name, prob_list)]\n else:\n running_perts[pert_names].append((name, prob_list))\n\n running_perts.pop('')\n\n assert len(running_perts) <= len(self.clusters), \"More different perturbations than clusters available, cannot assign tensors to clusters\"\n\n # ONLY BITWISEPERT FOR THE TIME BEING\n bitwises = running_perts['BitwisePert']\n bitwise_probs = [item[1][0] for item in bitwises]\n centers, _ = kmeans(bitwise_probs, len(self.clusters))\n groups, _ = vq(bitwise_probs, centers)\n\n for tensor, cluster in zip(bitwises, groups):\n name = tensor[0]\n tensor_ref = self.tensor_info[name][0]\n repr = self.tensor_info[name][2]\n self.clusters[cluster].add_tensor(tensor_ref, repr)\n\n for cluster, rate in zip(self.clusters, centers):\n pert_dict = {\n \"name\": \"BitwisePert\",\n \"p\": rate}\n pert = P.construct_pert(pert_dict)\n cluster.set_perturb([pert])", "def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()", "def _init_homolog_centers(self, method=\"kmeans\", min_spot_num=2, axis_infos=Axis3D_infos):\n if hasattr(self, 'chr_2_homolog_centers') and not self.overwrite:\n if self.verbose:\n print(f\"- directly return chr_2_homolog_centers\")\n return\n if method == 'kmeans':\n from sklearn.cluster import KMeans\n # chr_2_init_centers\n self.chr_2_homolog_centers = {}\n self.chr_2_cand_hzxys = {}\n self.chr_2_cand_ids = {}\n # loop through chrs\n for _chr_name, _exp_num in self.chr_2_copyNum.items():\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n # if not spots exists, skip\n if len(_chr_coords_df) < min_spot_num:\n continue\n # get coordinates\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n # append\n self.chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n self.chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate weights\n _uinds, _uind_counts = np.unique(_chr_ids, return_counts=True)\n _ind_2_weight = {_i:1/_c for _i,_c in zip(_uinds, _uind_counts)}\n _chr_weights = np.array([_ind_2_weight[_i] for _i in _chr_ids])\n # K-means\n if method =='kmeans':\n _model = KMeans(n_clusters=_exp_num, random_state=0)\n _model.fit(_chr_hzxys[:,1:], sample_weight=_chr_weights)\n #_init_labels = _model.labels_\n _init_centers = _model.cluster_centers_\n # save for now\n self.chr_2_homolog_centers[_chr_name] = _init_centers", "def _2x3_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(2)\n for j in range(3)]", "def clusters(self,rng):\n #clusts = subclust(normalize(self.training_data),0.4,0.5)\n if self.extended:\n dat = self.training_data / rng\n else:\n dat = self.training_data[:,0:-1] / rng[0:-1]\n\n clusts = subclust(normalize(dat))\n\n print len(clusts),\"initial clusters for 
class\",self.name\n if self.extended:\n return np.array([self.training_data[i] for i in clusts])\n else:\n return np.array([self.training_data[i,0:-1] for i in clusts])", "def customNcuts(self):\n # computing neighboors graph\n A = kneighbors_graph(self.values, self.k, mode='distance', include_self=False).toarray()\n\n for i in range(self.values.shape[0]):\n for j in range(self.values.shape[0]):\n if A[i][j] > 0:\n\n v1 = (self.values[i][3], self.values[i][4], self.values[i][5])\n v2 = (self.values[j][3], self.values[j][4], self.values[j][5])\n\n magnitude1 = np.sqrt(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2])\n magnitude2 = np.sqrt(v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2])\n ang = np.arccos(np.dot(v1, v2) / (magnitude1 * magnitude2))\n\n A[i][j] = max(self.values[i][7], self.values[j][7]) * A[i][j]\n\n # init SpectralClustering\n sc = SpectralClustering(4, affinity='precomputed', n_init=10, assign_labels = 'discretize')\n\n # cluster\n labels = sc.fit_predict(A)\n\n return labels", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def gen_clusters(links, posts):\n\n clusters = list(iter_clusters(links, posts))\n return clusters", "def gen_clusters(self, proc_cells=None, proc_num_particles=None, **kwargs):\n cell_np = {}\n for tmp_cells_np in self.proc_block_np:\n cell_np.update(tmp_cells_np)\n self.cell_np = cell_np\n if proc_cells is None:\n proc_cells, proc_num_particles = LoadBalancer.distribute_particles_geometric(\n self.cell_np, self.num_procs)\n self.np_req = numpy.average(proc_num_particles)\n self.clusters = [Cluster(cells, cell_np, self.np_req, **kwargs) \n for cells in proc_cells]\n self.calc()", "def __init__(self,\n n_clusters=0,\n centroids=None,\n data=None,\n labels=[],\n distance='cov',\n threshold=0.38,\n dimension=128,\n update_centroids=True):\n self.n_clusters = n_clusters\n self.threshold = threshold\n self.distance = distance\n self.dimension = dimension\n self.update_centroids = update_centroids\n if centroids is None:\n self.centroids = np.zeros((n_clusters, 1, dimension))\n else:\n self.centroids = np.array(centroids)\n\n # if data is None:\n # self.data = np.zeros((n_clusters, 1, dimension))\n # else:\n # self.data = np.array(data)\n self.labels = np.array(labels, dtype=np.int32)", "def _5x5_grid_clusters():\n return [mn(mean=np.array([i, j]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def __init__(self, centroids = None, n_clusters = None, n_features = None, alpha=1.0, **kwargs):\n \n super(ClusteringLayer, self).__init__(**kwargs)\n self.alpha = alpha\n 
self.initial_centroids = centroids\n\n if centroids is not None:\n n_clusters, n_features = centroids.shape\n\n self.n_features, self.n_clusters = n_features, n_clusters\n\n assert self.n_clusters is not None\n assert self.n_features is not None", "def get_centers_from_bins(bins):\n return 0.5 * (bins[:-1] + bins[1:])", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def init_centroids(self, data_points):\n # print(\"Init centroid\")\n # return list(map(lambda x: x[1], random.sample(labelled_data, self.k)))\n\n # Project the data: this step will take several seconds\n\n centroids_scaled = self.naive_sharding(data_points, self.k)\n return list(centroids_scaled)\n\n #sample = np.random.permutation(len(labelled_data))[:self.k]\n\n #return list(map(lambda x: labelled_data[x][1], sample))", "def _prep_buckets(buckets, len_x):\n if isinstance(buckets, int):\n lims = np.linspace(0, len_x-1, buckets+1, dtype=int)\n else:\n lims = buckets\n buckets = len(lims)-1\n\n # Determine center of each bucket\n mids = np.rint(np.convolve(lims, np.ones(2), 'valid') / 2).astype(int)\n mids[0] = 0\n mids[-1] = len_x - 1\n\n return lims, mids", "def clusters(self):\n raise NotImplementedError", "def __init__(self, init_centers):\n\n assert len(init_centers.shape) == 2, f\"init_centers should be a KxD matrix. Got: {init_centers.shape}\"\n (self.K, self.D) = init_centers.shape\n assert self.K > 1, f\"There must be at least 2 clusters. Got: {self.K}\"\n\n # Shape: K x D\n self.centers = np.copy(init_centers)", "def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))", "def _10x10_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]", "def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]", "def _get_brown_clusters(self, a_feats, a_toks1, a_toks2):\n bcluster_str = \"\"\n for w1, _ in a_toks1:\n if w1 not in BROWN_CLUSTERS:\n continue\n for w2, _ in a_toks2:\n if w2 not in BROWN_CLUSTERS:\n continue\n bcluster_str = \"BrownCluster-\" + BROWN_CLUSTERS[w1] + '%' + \\\n BROWN_CLUSTERS[w2]\n a_feats[bcluster_str] = 1.", "def find_clusters(self, order=\"Vup-Hup\", plot_step=0):\n self.graph.numbuckets = int((self.graph.size * (self.graph.size // 2 - 1) * 2) * (1 + straightness_par))\n self.graph.buckets 
= [[] for _ in range(self.graph.numbuckets)]\n self.graph.wastebasket = []\n self.graph.maxbucket = 0\n\n cID, s = 0, self.graph.size\n\n if order == \"Vup-Hup\":\n vertices = self.graph.V.values()\n if order == \"Vdo-Hdo\":\n vertices = [self.graph.V[(t, y, x)]\n for x in reversed(range(s))\n for y in reversed(range(s))\n for t in range(2)\n ]\n elif order == \"Hup-Vdo\":\n vertices = [self.graph.V[(t, y, x)]\n for y in reversed(range(s))\n for x in range(s)\n for t in range(2)\n ]\n elif order == \"Hdo-Vdo\":\n vertices = [self.graph.V[(t, y, x)]\n for y in reversed(range(s))\n for x in reversed(range(s))\n for t in range(2)\n ]\n elif order == \"random\":\n vertices = random.sample(list(self.graph.V.values()), s*s*2)\n\n anyons = [vertex for vertex in vertices if vertex.state]\n\n for vertex in anyons:\n if vertex.cluster is None:\n cluster = self.graph.add_cluster(cID)\n cluster.add_vertex(vertex)\n cluster.rad = [0, 0]\n cluster.med = [vertex.sID[1], vertex.sID[2]]\n self.cluster_new_vertex(cluster, vertex, plot_step)\n cluster_place_bucket(self.graph, cluster, self.vcomb)\n cID += 1\n\n if self.uf_plot is not None and not plot_step:\n self.uf_plot.plot_removed(self.graph, \"Clusters initiated.\")\n elif self.uf_plot is not None:\n self.uf_plot.waitforkeypress(\"Clusters initiated.\")", "def initialize_pos(img: np.ndarray):\n\n h, w = img.shape[0:2]\n\n for cluster in range(numclusters):\n i = np.random.randint(h) # row index\n j = np.random.randint(w) # col index\n current_cluster_centers[cluster, 0, :] = img[i, j, :]\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def _2x3_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(2)\n for j in range(3)]", "def compute_clusters(self, p: float):\n w = self.w\n h = self.h\n self.p = p\n self.sample = self._get_sample(p)\n self.cluster = np.zeros((w, h), dtype=int)\n x = self.sample\n visited = np.full((w, h), False)\n k = 0 # cluster index\n myvertex = 1\n stack = []\n # as long as we havent treated the last myvertex, continue\n while myvertex < w * h + 1:\n # put the next site in myvertex in to the stack if the site is\n # unvisited, otherwise myvertex ++\n iv = (myvertex - 1) % w\n jv = (myvertex - 1) // w\n if not visited[iv, jv] and x[iv, jv] == 1:\n stack.append([iv, jv])\n k += 1 # increment cluster index\n else:\n myvertex += 1\n\n while stack:\n # pop the current myvertex from the stack and set its cluster\n # label to k and mark as visited\n i, j = stack.pop(0)\n self.cluster[i, j] = k\n visited[i, j] = True\n # check all of its six neighbors, if neighbor is unvisited and\n # connected to current site,\n # then set its cluster label to k and marked visited and\n # push this site into stack, otherwise do nothing\n # check the 12clock neighbor\n if j < h-1 and not visited[i, j+1] and x[i, j+1] == 1:\n self.cluster[i, j+1] = k\n visited[i, j+1] = True\n stack.append([i, j+1])\n # check the 2clock neighbor\n if i < w-1 and not visited[i+1, j] and x[i+1, j] == 1:\n self.cluster[i+1, j] = k\n visited[i+1, j] = True\n stack.append([i+1, j])\n # check the 4clock neighbor\n if i < w-1 and j > 0 and not visited[i+1, j-1] \\\n and x[i+1, j-1] == 1:\n self.cluster[i+1, j-1] = k\n visited[i+1, j-1] = True\n stack.append([i+1, j-1])\n # check the 6clock neighbor\n if j > 0 and not visited[i, j-1] and x[i, j-1] == 1:\n self.cluster[i, j-1] = k\n visited[i, j-1] = True\n stack.append([i, j-1])\n # check the 8clock neighbor\n if i > 0 and not visited[i-1, j] 
and x[i-1, j] == 1:\n self.cluster[i-1, j] = k\n visited[i-1, j] = True\n stack.append([i-1, j])\n # check the 10clock neighbor\n if i > 0 and j < h-1 and not visited[i-1, j+1] \\\n and x[i-1, j+1] == 1:\n self.cluster[i-1, j+1] = k\n visited[i-1, j+1] = True\n stack.append([i-1, j+1])", "def init_cluster_centroids(x, number_of_clusters):\n return x[np.random.choice(x.shape[0], number_of_clusters, replace=False), :]", "def start_algorithm(self):\r\n self.kmeans.set_data(self.tweets)\r\n clusters = self.kmeans.start_algorithm()\r\n min_size = len(self.tweets) * 0.005\r\n if min_size < 50:\r\n min_size = 50\r\n max_size = len(self.tweets) * 0.20\r\n\r\n amount = 0\r\n\r\n while amount < len(clusters):\r\n amount = len(clusters)\r\n pool = mp.Pool(self.cores)\r\n new_clusters = pool.starmap(recluster,\r\n zip(clusters, repeat(min_size), repeat(self.guard), repeat(self.function)))\r\n pool.close()\r\n pool.join()\r\n clusters = new_clusters\r\n temp = []\r\n for cluster in clusters:\r\n if isinstance(cluster, Cluster):\r\n temp.append(cluster)\r\n else:\r\n temp += cluster\r\n clusters = temp\r\n return clusters", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])", "def cluster_bal_iter(self):\n # moving\n for j,cluster in enumerate(self.clusters):\n cluster.move()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n # resizing\n for j,cluster in enumerate(self.clusters):\n cluster.resize()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n self.calc()", "def concentric_clusters(N = 1000, r1 = 1, r2 = 5, w1 = 0.8, w2 = 1.0/3, arms = 64):\n \n #Number of samples in each cluster\n N1 = int(np.floor(1.0*N/2))\n N2 = N - N1\n \n phi1 = np.random.rand(N1,1) * 2 * np.pi;\n dist1 = r1 + 1.0*(np.random.randint(0,5,size =(N1,1)))/5*w1*r1\n d1x = dist1* np.cos(phi1) \n d1y = dist1* np.sin(phi1) \n cluster1 = np.concatenate((d1x,d1y,\\\n np.zeros((N1,1))),axis = 1)\n \n perarm = int(np.round(N2/arms))\n N2 = perarm*arms;\n radperarm = (2*np.pi)/arms;\n phi2 = 1.0*(np.arange(1,N2+1,dtype = int) - np.arange(1,N2+1,\\\n dtype=int)%perarm)/perarm*(radperarm)\n dist2 = r2 * (1 - w2/2) + (r2*w2*np.arange(1,N2+1,\\\n dtype=int)%perarm)/perarm\n d2x = dist2*np.cos(phi2)\n d2y = dist2*np.sin(phi2)\n cluster2 = np.concatenate((d2x[:,np.newaxis],d2y[:,np.newaxis],\\\n np.ones((N2,1))),axis = 1) \n \n #Concatenating the final data\n data = np.concatenate((cluster1,cluster2),axis = 0)\n return data", "def __init__(self, vectors):\n \n self.vectors = vectors\n self.centroid = self.calcCentroid()", "def generate_centers(self):\n\t\tcenters = 
[]\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def __post_init__(self):\n all_vecs = {}\n for n2 in self._get_n2():\n all_vecs[n2] = all_vecs.get(n2, 0) + 1\n\n object.__setattr__(self, \"_n2\", np.array(list(all_vecs.keys())).reshape(-1, 1))\n object.__setattr__(\n self, \"_multiplicity\", np.array(list(all_vecs.values())).reshape(-1, 1)\n )\n object.__setattr__(\n self,\n \"_normalization\",\n 2 * np.pi * np.log(self.N)\n if self.spherical\n else 2 * np.pi * np.log(self.N) - 4 * (CATALAN - np.pi / 2 * np.log(2)),\n )", "def construct_bins(self, training_samples, bins_file):\n\n if self.__read_from_bins_file(bins_file):\n return\n n, d = training_samples.shape\n k = self.number_of_bins\n if self.whitening:\n self.training_mean = np.mean(training_samples, axis=0)\n self.training_std = np.std(training_samples, axis=0) + self.ndb_eps\n\n if self.max_dims is None and d > 1000:\n # To ran faster, perform binning on sampled data dimension (i.e. don't use all channels of all pixels)\n self.max_dims = d // 6\n\n whitened_samples = (training_samples - self.training_mean) / self.training_std\n d_used = d if self.max_dims is None else min(d, self.max_dims)\n self.used_d_indices = np.random.choice(d, d_used, replace=False)\n\n clusters = KMeans(n_clusters=k, max_iter=100, n_jobs=-1).fit(whitened_samples[:, self.used_d_indices])\n\n bin_centers = np.zeros([k, d])\n for i in range(k):\n bin_centers[i, :] = np.mean(whitened_samples[clusters.labels_ == i, :], axis=0)\n\n # Organize bins by size\n label_vals, label_counts = np.unique(clusters.labels_, return_counts=True)\n bin_order = np.argsort(-label_counts)\n self.bin_proportions = label_counts[bin_order] / np.sum(label_counts)\n self.bin_centers = bin_centers[bin_order, :]\n self.ref_sample_size = n\n self.__write_to_bins_file(bins_file)", "def __update_clusters(self, medoids):\n\n self.__belong = [0] * len(self.__pointer_data)\n self.__clusters = [[] for _ in range(len(medoids))]\n for index_point in range(len(self.__pointer_data)):\n index_optim = -1\n dist_optim = 0.0\n\n for index in range(len(medoids)):\n dist = euclidean_distance_square(\n self.__pointer_data[index_point],\n self.__pointer_data[medoids[index]],\n )\n\n if (dist < dist_optim) or (index == 0):\n index_optim = index\n dist_optim = dist\n\n self.__clusters[index_optim].append(index_point)\n self.__belong[index_point] = index_optim\n\n # If cluster is not able to capture object it should be removed\n self.__clusters = [\n cluster for cluster in self.__clusters if len(cluster) > 0\n ]", "def compute_clusters(self, p: float):\n w = self.w\n h = self.h\n self.p = p\n self.sample = self._get_sample(p)\n self.cluster = np.zeros((w + 1, h + 1), dtype=int)\n visited = np.full((w + 1, h + 1), False)\n k = 0 # cluster index\n myvertex = 1\n stack = []\n # as long as we havent treated the last myvertex, continue\n while myvertex < (w + 1) * (h + 1):\n # put the next site in myvertex in to the stack if the site 
is\n # unvisited, otherwise myvertex ++\n iv = (myvertex - 1) % (w + 1)\n jv = (myvertex - 1) // (w + 1)\n if not visited[iv, jv]:\n stack.append([iv, jv])\n k += 1 # increment cluster index\n else:\n myvertex += 1\n\n while stack:\n # pop the current myvertex from the stack and set its cluster\n # label to k and mark as visited\n i, j = stack.pop(0)\n self.cluster[i, j] = k\n visited[i, j] = True\n # check all of its 4 neighbors, if neighbor is unvisited and\n # connected to current site,\n # then set its cluster label to k and marked visited and\n # push this site into stack, otherwise do nothing\n # check the left neighbor, first coordinate must >0 to have\n # a left neighbor\n if i > 0 and not visited[i - 1, j] and \\\n self.sample[i - 1, j, 0] == 1:\n self.cluster[i - 1, j] = k\n visited[i - 1, j] = True\n stack.append([i - 1, j])\n # check the right neighbor, first coordinate must be < w\n # to have a right neighbor\n if i < w and not visited[i + 1, j] and \\\n self.sample[i, j, 0] == 1:\n self.cluster[i + 1, j] = k\n visited[i + 1, j] = True\n stack.append([i + 1, j])\n # check the up neighbor, second coordinate must be < h\n # to have such a neighbor\n if j < h and not visited[i, j + 1] and \\\n self.sample[i, j, 1] == 1:\n self.cluster[i, j + 1] = k\n visited[i, j + 1] = True\n stack.append([i, j + 1])\n # check the bottom neighbor, second coordinate must be > 0\n if j > 0 and not visited[i, j - 1] and \\\n self.sample[i, j - 1, 1] == 1:\n self.cluster[i, j - 1] = k\n visited[i, j - 1] = True\n stack.append([i, j - 1])", "def form_clusters(self, labelled_data, unlabelled_centroids):\n # enumerate because centroids are arrays which are unhashable,\n centroids_indices = range(len(unlabelled_centroids))\n # initialize an empty list for each centroid. 
The list will contain\n # all the datapoints that are closer to that centroid than to any other.\n # That list is the cluster of that centroid.\n clusters = {c: [] for c in centroids_indices}\n \n for (label, Xi) in labelled_data:\n # for each datapoint, pick the closest centroid.\n smallest_distance = float(\"inf\")\n for cj_index in centroids_indices:\n cj = unlabelled_centroids[cj_index]\n distance = np.linalg.norm(Xi - cj)\n if distance < smallest_distance:\n closest_centroid_index = cj_index\n smallest_distance = distance\n # allocate that datapoint to the cluster of that centroid.\n clusters[closest_centroid_index].append((label,Xi))\n return list(clusters.values())", "def __update_clusters(self, medoids):\r\n\r\n self.__belong = [0] * len(self.__pointer_data)\r\n self.__clusters = [[] for i in range(len(medoids))]\r\n for index_point in range(len(self.__pointer_data)):\r\n index_optim = -1\r\n dist_optim = 0.0\r\n\r\n for index in range(len(medoids)):\r\n dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])\r\n\r\n if (dist < dist_optim) or (index is 0):\r\n index_optim = index\r\n dist_optim = dist\r\n\r\n self.__clusters[index_optim].append(index_point)\r\n self.__belong[index_point] = index_optim\r\n\r\n # If cluster is not able to capture object it should be removed\r\n self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0]", "def _empty_clusters(clusters):\n for clst in clusters:\n clst.points = []", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def cluster(self, bufr, elevmap, xpos, zpos, w, d, count, options, minscl, maxscl):\r\n #create a cluster of shapes on an elevation map\r\n blist = []\r\n for v in range(count):\r\n x = xpos + random.random() * w - w * 0.5\r\n z = zpos + random.random() * d - d * 0.5\r\n rh = random.random() * (maxscl - minscl) + minscl\r\n rt = random.random() * 360.0\r\n y = elevmap.calcHeight(x, z) + rh * 2\r\n blist.append([bufr, x, y, z, 0.0, rt, 0.0, rh, rh, rh])\r\n\r\n #self.merge(bufr, x, y, z, 0.0, rt, 0.0, rh, rh, rh)\r\n self.merge(blist)", "def __init__(self, nbins):\n self.nbins = nbins\n # Since the kernel used to compute the Parzen histogram covers more\n # than one bin, we need to add extra bins to both sides of the\n # histogram to account for the contributions of the minimum and maximum\n # intensities. Padding is the number of extra bins used at each side\n # of the histogram (a total of [2 * padding] extra bins). 
Since the\n # support of the cubic spline is 5 bins (the center plus 2 bins at each\n # side) we need a padding of 2, in the case of cubic splines.\n self.padding = 2\n self.setup_called = False", "def reset_arrays(self):\n super().reset_arrays()\n self.bins = self.bin_array\n self.occs = np.zeros(len(self.bins),dtype=int) if np.size(self.bins) else []", "def init_centers(self, S, labels):\n unique_labels = np.unique(labels)\n centers = None\n\n for label in unique_labels:\n idx = np.squeeze(labels == label)\n cur_S = S[idx, :]\n cur_center = np.mean(cur_S, axis=0)\n if centers is None:\n centers = cur_center\n else:\n centers = np.vstack((centers, cur_center))\n centers = np.asarray(centers, dtype=floatX)\n self.C.set_value(centers)", "def _relocate_clusters(self, cluster_labels):\n for cluster_label in range(self.k):\n if cluster_labels[cluster_label] is not None:\n # mean of the pixels assigned to cluster\n p_sum, p_count = np.asarray(\n cluster_labels[\n cluster_label\n ]).sum(axis=0), len(cluster_labels[cluster_label])\n self._clusters[cluster_label] = p_sum / p_count", "def _5x5_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(5)\n for j in range(5)]", "def recenter(self, n):\n clustering = self\n for i in xrange(n):\n clustering = Clustering(clustering.recenter_iterator(), threshold=clustering.threshold,\n consensus_threshold=clustering.consensus_threshold)\n return clustering", "def update_clusters(self, data):\n\n self.unassigned_data_updated = [None]\n cluster_list = []\n cluster_indices = []\n bud_list = []\n bud_indices = []\n\n # First of all, seperate buds and clusters\n for cluster_index, cluster in self.clusters.items():\n if cluster.number_of_members < self.minnpix_cluster:\n bud_list.append(cluster)\n bud_indices.append(cluster_index)\n else:\n cluster_list.append(cluster)\n cluster_indices.append(cluster_index)\n\n # Take all of the bud data and put it back into an unassigned list\n unassigned_data_updated = []\n for i in range(len(bud_indices)):\n self.clusters.pop(bud_indices[i])\n idx = np.squeeze(np.where(self.cluster_arr[1,:] == bud_indices[i]))\n self.cluster_arr[1,idx] = -1.0\n unassigned_data_updated.extend(data[:,bud_list[i].cluster_members].T)\n\n # Turn that into an array and transpose\n unassigned_data_updated = np.asarray(unassigned_data_updated)\n unassigned_data_updated = unassigned_data_updated.T\n\n # There shouldn't be any in theory, but just check there are no duplicates\n try:\n unassigned_data_updated = np.unique(unassigned_data_updated, axis=1)\n except ValueError:\n pass\n\n # Sort the unassigned data according to peak intensity and send it back\n if np.size(unassigned_data_updated) != 0.0:\n sortidx = argsort(unassigned_data_updated[2,:], reversed=True)\n self.unassigned_data_updated = unassigned_data_updated[:,sortidx]\n else:\n pass\n\n return cluster_list, cluster_indices", "def _cluster(self):\n self._not_included = self.data\n self.leaves = []\n flag = int(rand() * len(self.data))\n flag = self._generate(flag)\n while len(self._not_included) > 0:\n flag = self._generate(flag)\n if flag == -1:\n break\n pass\n self._remember.append({\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n print(len(self._remember), {\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n return", "def Clusters(self):\n return", "def setup_cluster(num_cpus, outdir, verbose, error_profile):\r\n\r\n server_socket = setup_server()\r\n workers, 
client_socks_and_adrs = setup_workers(\r\n num_cpus, outdir, server_socket,\r\n verbose=verbose,\r\n error_profile=error_profile)\r\n # we don't need the client adresses anywhere, so get rid of them\r\n client_sockets = [sock for sock, addr in client_socks_and_adrs]\r\n\r\n return client_sockets, workers, server_socket", "def _initialise_classifier(self, comparison_vectors):\n\n # Set the start point of the classifier.\n self.kernel.init = numpy.array(\n [\n [0.05] * len(list(comparison_vectors)),\n [0.95] * len(list(comparison_vectors)),\n ]\n )", "def initialize_pp(img: np.ndarray):\n\n h, w, c = img.shape\n pixels = img.copy().reshape(h*w, c)\n\n # Choose one center uniformly at random \n # from among the data points\n r = np.random.randint(h*w)\n current_cluster_centers[0, 0, :] = pixels[r, :]\n\n # remove that point from the data set\n pixels = np.delete(pixels, r, axis=0)\n\n # For each data point x, compute D(x), \n # the distance between x and the nearest center \n # that has already been chosen.\n for k in range(1, numclusters):\n dist_sq = np.zeros(pixels.shape[0])\n for i in range(pixels.shape[0]): # over data points\n dist = []\n for j in range(k): # over current clusters\n # calculate distance to the cluster\n diff = pixels[i, :] - current_cluster_centers[j, 0, :]\n dist.append(np.inner(diff, diff))\n \n # choose the distance closest to the cluster\n dist_sq.itemset(i, min(dist))\n\n probs = dist_sq / dist_sq.sum()\n cumprobs = probs.cumsum()\n r = np.random.uniform()\n for i, prob in enumerate(cumprobs):\n if r <= prob:\n index = i\n break\n \n # add a new cluster\n current_cluster_centers[k, 0, :] = pixels[index, :]\n\n # remove that point from the data set\n pixels = np.delete(pixels, index, axis=0)\n\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def get_clusters(self,points):\n self.points = points\n self.__dabest = [self.__cmeans(points,i) for i in range(self.__start,self.__end)]\n ##self.hull = \n return self.__dabest", "def __post_init__(self):\n all_vecs = {}\n for n2 in self._get_n2():\n all_vecs[n2] = all_vecs.get(n2, 0) + 1\n\n norms = {}\n\n object.__setattr__(self, \"_n2\", np.array(list(all_vecs.keys())).reshape(-1, 1))\n object.__setattr__(\n self, \"_multiplicity\", np.array(list(all_vecs.values())).reshape(-1, 1)\n )\n object.__setattr__(\n self, \"_normalization\", np.pi ** 2 * self.N * norms[self.nstep]\n )\n\n raise NotImplementedError(\"Need to implement dispersion Lüscher counter terms.\")", "def Initialize(self, *args):\n return _Bnd.Bnd_BoundSortBox2d_Initialize(self, *args)", "def splitCluster(self, cluster):\n\t\tmaxValue = self.getMaxValue(self.clusterList[cluster])\n\t\tminValue = self.getMinValue(self.clusterList[cluster])\n\t\tmidValue = round(maxValue - ((maxValue - minValue) / 2))\n\n\t\t# Create a set of centroid\n\t\tfirstCentroid = random.randint(minValue, midValue)\n\t\tsecondCentroid = random.randint(midValue, maxValue)\n\n\t\tcpyCluster = self.clusterList[cluster]\n\t\tnextName = str(len(self.clusterList))\n\t\tself.clusterList[cluster] = []\n\t\tself.clusterList[nextName] = []\n\n\t\tfor value in cpyCluster:\n\t\t\tif abs(value - firstCentroid) < abs(value - secondCentroid):\n\t\t\t\tself.clusterList[cluster].append(value)\n\t\t\telse:\n\t\t\t\tself.clusterList[nextName].append(value)\n\t\t\tpass\n\t\tpass\n\t\tprint(self.clusterList)", "def __init__(self, a, b, c):\n self.normal = np.cross(a - b, c - b)\n self.offset = np.sum(b * self.normal)\n self.xbounds = sorted(list(set([a[0], b[0], c[0]])))\n self.ybounds = 
sorted(list(set([a[1], b[1], c[1]])))\n self.zbounds = sorted(list(set([a[2], b[2], c[2]])))", "def create_cluster_matrices(self, partition_tree):\n \n self.mask_matrix = make_masks(partition_tree)\n self.merge_clusters = -np.ones((2 * partition_tree.shape[0] + 1, 3), dtype=np.int64)\n self.merge_clusters[partition_tree.shape[0] + 1:,:2] = partition_tree[:,:2]\n for i in range(self.merge_clusters.shape[0]):\n if self.merge_clusters[i,0] < 0:\n self.merge_clusters[i,2] = 1\n else:\n self.merge_clusters[i,2] = self.merge_clusters[self.merge_clusters[i,0],2] + self.merge_clusters[self.merge_clusters[i,1],2]", "def build_bonds(self):\n shape_prime = np.array([self.shape[0]-1,self.shape[1]-1,self.shape[2]-1])\n zeros = np.array([0,0,0])\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for b,bond in enumerate(self.cell.bonds):\n newbond = copy.deepcopy(bond)\n newbond.cell1 += [i,j,k]\n newbond.cell2 += [i,j,k]\n #ToDo make a function to shorten those lines\n if np.prod(newbond.cell1 <= shape_prime) and np.prod(newbond.cell2<=shape_prime) and np.prod(zeros <=newbond.cell1) and np.prod(zeros <= newbond.cell2):\n newbond.coordinate1 = self.sites[newbond.cell1[0],newbond.cell1[1],newbond.cell1[2],newbond.site1].coordinate\n newbond.coordinate2 = self.sites[newbond.cell2[0],newbond.cell2[1],newbond.cell2[2],newbond.site2].coordinate\n self.bonds.append(newbond)", "def test_centrally(self):\n import numpy as np\n import histogrammar\n\n h = histogrammar.CentrallyBin([0, 10, 20, 40, 100])\n h.fillnumpy([-5, 5, 5, 50, 10, 100, 1000, 50, 50])\n\n np.testing.assert_array_equal(h.bin_entries(), [1., 3., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(), [float('-inf'), 5., 15., 30., 70., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(), [0., 10., 20., 40., 100.])\n assert h.num_bins() == 5\n assert h.n_bins == 5\n np.testing.assert_almost_equal(h.mpv, 10.)\n\n np.testing.assert_array_equal(h.bin_entries(10, 40), [3., 0., 3.])\n np.testing.assert_array_equal(h.bin_edges(10, 40), [5., 15., 30., 70.])\n np.testing.assert_array_equal(h.bin_centers(10, 40), [10., 20., 40.])\n assert h.num_bins(10, 40) == 3\n\n np.testing.assert_array_equal(h.bin_entries(5, 70), [3., 0., 3.])\n np.testing.assert_array_equal(h.bin_edges(5, 70), [5., 15., 30., 70.])\n np.testing.assert_array_equal(h.bin_centers(5, 70), [10., 20., 40.])\n assert h.num_bins(5, 70) == 3\n\n np.testing.assert_array_equal(h.bin_entries(5, 110), [3., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(5, 110), [5., 15., 30., 70., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(5, 110), [10., 20., 40., 100.])\n assert h.num_bins(5, 110) == 4", "def compute_clusters(self, p: float):\n pass", "def create_from_bounds(self, lbs, ubs):\n self.base_vertices = (np.array([lbs])+np.array([ubs])).T/2\n self.base_vectors = np.diag((np.array(ubs)-np.array(lbs))/2)", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args 
= args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }", "def _assign_vectors_to_nearest_centroid(all_features, centroid):\n #TODO: sprawdz co lepiej dziala\n new_centroid_coor = np.zeros([len(centroid), all_features[0].size])\n #new_centroid_coor = centroid\n new_centroid_counter = np.zeros(len(centroid))\n\n dist = pdist(centroid, all_features)\n #min_dist = dist.min(axis=0)\n min_dist_index = dist.argmin(axis=0)\n\n for x in range(len(min_dist_index)):\n id = min_dist_index[x]\n new_centroid_coor[id] = np.add(new_centroid_coor[id],\n all_features[x])\n new_centroid_counter[id] += 1\n\n new_centroid_coor_out = []\n for i in range(len(new_centroid_coor)):\n if new_centroid_counter[i] == 0:\n new_centroid_coor_out.append(centroid[i])\n else:\n new_centroid_coor_out.append(np.divide(new_centroid_coor[i],new_centroid_counter[i]))\n\n return np.array(new_centroid_coor_out), new_centroid_counter", "def initialize_cells(self):\n for loc in np.ndindex(*self.shape): # TODO: see if nested for loop is faster than this\n c = Cell(loc, self)\n self.cells.append(c)" ]
[ "0.73569727", "0.6890101", "0.66579676", "0.65458935", "0.6461489", "0.6289846", "0.62089014", "0.6200618", "0.61897707", "0.60647523", "0.60470694", "0.603594", "0.6009594", "0.6008557", "0.596944", "0.59262115", "0.5919822", "0.58949685", "0.58171034", "0.58091116", "0.57956856", "0.57412386", "0.57399946", "0.57152337", "0.57129997", "0.57125646", "0.57111555", "0.5706938", "0.57018036", "0.5694504", "0.5685563", "0.5685182", "0.5676545", "0.567245", "0.5656609", "0.56492597", "0.5639644", "0.5630756", "0.5624815", "0.5614464", "0.561426", "0.5604445", "0.5593241", "0.55816567", "0.5578004", "0.5573455", "0.55724454", "0.55723536", "0.55675316", "0.5546682", "0.5534171", "0.5530268", "0.5529417", "0.552135", "0.5520895", "0.5508321", "0.55018425", "0.549971", "0.54959697", "0.5486075", "0.5477888", "0.54739106", "0.54600114", "0.5449321", "0.543876", "0.54232794", "0.542024", "0.5410454", "0.5386908", "0.538011", "0.5371686", "0.5362682", "0.5355624", "0.534314", "0.53416777", "0.53388363", "0.5332541", "0.5330602", "0.5328804", "0.5320688", "0.53144616", "0.5312412", "0.53111213", "0.53096527", "0.5294172", "0.529366", "0.52837265", "0.52818525", "0.5278305", "0.52756804", "0.52708995", "0.52572435", "0.52551544", "0.52523285", "0.5250826", "0.52486694", "0.52357453", "0.52351755", "0.523304", "0.52258366" ]
0.700094
1
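A recurring pattern in the retrieved snippets above is the k-means-style update: assign each point to its nearest centroid, then move every centroid to the mean of its assigned points. Below is a minimal, self-contained sketch of that single step; the function name and the sample data are illustrative assumptions, not taken from any snippet above.

import numpy as np

def kmeans_step(points, centroids):
    # Pairwise distances, shape (n_points, n_centroids).
    dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
    labels = dists.argmin(axis=1)
    new_centroids = centroids.copy()
    for k in range(len(centroids)):
        members = points[labels == k]
        if len(members) > 0:  # an empty cluster keeps its old position
            new_centroids[k] = members.mean(axis=0)
    return labels, new_centroids

rng = np.random.default_rng(0)
points = rng.normal(size=(100, 2))
labels, centroids = kmeans_step(points, points[:3].copy())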
Set up and calculate codebook vectors
def calculate_cb_vecs(self, clusters):\n    if not clusters or not clusters[0]:\n        return None\n    # n is the dimension of the vectors\n    n = len(clusters[0][0])\n    # initialize the K codebook vectors to 0\n    cb_vectors = np.zeros([n * self.K]).reshape(self.K, n)\n    for i in range(self.K):\n        # accumulate in float so the mean is not truncated\n        total = np.zeros([n], dtype=float)\n        for vector in clusters[i]:\n            total += vector\n        # divide the sum of the vectors by the size of the cluster\n        cb_vectors[i] = np.divide(total, len(clusters[i]))\n    return cb_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def setUp(self):\n self.vencode_obj = iext.GetVencodes(inputs=self.inputs,\n files_path=self.files_path,\n cell_type=self.celltype_analyse,\n algorithm=self.algorithm,\n n_regulatory_elements=self.k,\n number_vencodes=4,\n thresholds=self.thresholds, n_samples=10000,\n merge={\"replicate_suffix\": self.replicate_suffix})\n self.vencodes = self.vencode_obj.coordinates", "def setUp(self):\n self.vencode_obj = iext.GetVencodes(inputs=self.inputs,\n files_path=self.files_path,\n cell_type=self.celltype_analyse,\n algorithm=\"sampling\",\n n_regulatory_elements=self.k,\n number_vencodes=4,\n thresholds=self.thresholds, n_samples=1000,\n merge={\"replicate_suffix\": self.replicate_suffix})\n self.vencodes = self.vencode_obj.coordinates", "def create_sample_vectors(cleaned_data_directory, out_vectors_path):\n vectors = []\n\n for filename in os.listdir(cleaned_data_directory):\n if not filename.endswith(\".txt\"):\n continue\n\n path = os.path.join(cleaned_data_directory, filename)\n f = open(path, mode='r', encoding='utf8')\n\n print(\"Processing\", path)\n\n lang = filename[:2]\n lang_number = language_codes.index(lang)\n\n print(f\"\\tLanguage: {lang} ({lang_number})\")\n print(\"\\tReading...\", end=' ')\n\n file_content = f.read()\n content_length = len(file_content)\n\n print(\"done.\")\n print(\"\\tExtracting vectors...\", end=' ')\n\n sample_start_index = 0\n count = 0\n\n while sample_start_index + text_sample_size < content_length:\n sample = get_sample(file_content, sample_start_index, text_sample_size)\n input_vector = build_input_vector(sample)\n vector = input_vector + [lang_number]\n vectors.append(vector)\n sample_start_index += text_sample_size\n count += 1\n\n print(\"done.\")\n print(f\"\\tExtracted {count} vectors.\")\n\n del file_content\n\n print(f\"Total {len(vectors)} vectors.\")\n\n np_vectors = np.array(vectors, dtype=np.uint16)\n np.random.shuffle(np_vectors)\n\n print(f\"Converted to NumPy array, shape: {np_vectors.shape}.\")\n\n np.savez_compressed(out_vectors_path, data=np_vectors)\n\n print(f\"Saved to {out_vectors_path}.\")", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def svm():", "def test_y_vector_init(self):\n # If you change the y-vector ordering, change here too #Y_VECTOR_CHANGESITE\n\n eng_fields = np.zeros(EngineeringState.N_ENGINEERING_FIELDS)\n component_array = eng_fields[EngineeringState._COMPONENT_START_INDEX:EngineeringState._COMPONENT_END_INDEX]\n for comp_i in range(0, N_COMPONENTS):\n component_array[comp_i + N_COMPONENTS * 0] = True # connected\n component_array[comp_i + N_COMPONENTS * 1] = 1 + (0.01 * comp_i) # 
capacity\n component_array[comp_i + N_COMPONENTS * 2] = 222200 + comp_i # temperature\n component_array[comp_i + N_COMPONENTS * 3] = comp_i % 2 # coolant_hab_one\n component_array[comp_i + N_COMPONENTS * 4] = True # coolant_hab_two\n component_array[comp_i + N_COMPONENTS * 5] = False # coolant_ayse\n\n coolant_array = eng_fields[EngineeringState._COOLANT_START_INDEX:EngineeringState._COOLANT_END_INDEX]\n for cool_i in range(0, N_COOLANT_LOOPS):\n coolant_array[cool_i + N_COOLANT_LOOPS * 0] = 555500 + cool_i # coolant_temp\n coolant_array[cool_i + N_COOLANT_LOOPS * 1] = cool_i % 2 # primary_pump_on\n coolant_array[cool_i + N_COOLANT_LOOPS * 2] = True # secondary_pump_on\n\n rad_array = eng_fields[EngineeringState._RADIATOR_START_INDEX:EngineeringState._RADIATOR_END_INDEX]\n for rad_i in range(0, N_RADIATORS):\n rad_array[rad_i + N_RADIATORS * 0] = rad_i % 4 # attached_to_coolant_loop\n rad_array[rad_i + N_RADIATORS * 1] = rad_i % 2 # functioning\n\n y0 = np.concatenate((np.array([\n 0x111, 0x222, # x\n 0x333, 0x444, # y\n 0x555, 0x777, # vx\n 0x888, 0x999, # vy\n 0.01, 0.02, # heading\n 0.03, 0.04, # spin\n 0xEEE, 0xFFF, # fuel\n 5, 6, # throttle\n 1, -1, # only First is landed on Second\n 0, 1, # Second is broken\n common.SRB_EMPTY,\n 1 # time_acc\n ]),\n eng_fields\n ))\n\n ps = PhysicsState(y0, self.proto_state)\n self.assertTrue(np.array_equal(ps.y0(), y0.astype(ps.y0().dtype)))\n self.assertEqual(ps['First'].landed_on, 'Second')\n\n proto_state = ps.as_proto()\n proto_state.timestamp = 50\n self.assertEqual(proto_state.entities[0].x, 0x111)\n self.assertEqual(proto_state.entities[0].y, 0x333)\n self.assertEqual(proto_state.entities[1].x, 0x222)\n self.assertEqual(proto_state.entities[1].y, 0x444)\n self.assertEqual(proto_state.entities[0].vx, 0x555)\n self.assertEqual(proto_state.entities[0].vy, 0x888)\n self.assertEqual(proto_state.entities[1].vx, 0x777)\n self.assertEqual(proto_state.entities[1].vy, 0x999)\n self.assertEqual(proto_state.entities[0].heading, 0.01)\n self.assertEqual(proto_state.entities[1].heading, 0.02)\n self.assertEqual(proto_state.entities[0].spin, 0.03)\n self.assertEqual(proto_state.entities[1].spin, 0.04)\n self.assertEqual(proto_state.entities[0].fuel, 0xEEE)\n self.assertEqual(proto_state.entities[1].fuel, 0xFFF)\n self.assertEqual(proto_state.entities[0].landed_on, 'Second')\n self.assertEqual(proto_state.entities[1].landed_on, '')\n self.assertEqual(proto_state.timestamp, 50)\n self.assertTrue(proto_state.entities[1].broken)\n\n for i, component in enumerate(ps.engineering.components):\n self.assertEqual(component.connected, True, msg=i)\n self.assertEqual(component.capacity, 1 + (0.01 * i), msg=i)\n self.assertEqual(component.temperature, 222200 + i, msg=i)\n self.assertEqual(component.coolant_hab_one, bool(i % 2), msg=i)\n self.assertEqual(component.coolant_hab_two, True, msg=i)\n self.assertEqual(component.coolant_ayse, False, msg=i)\n\n for i, coolant in enumerate(ps.engineering.coolant_loops):\n self.assertEqual(coolant.coolant_temp, 555500 + i, msg=i)\n self.assertEqual(coolant.primary_pump_on, bool(i % 2), msg=i)\n self.assertEqual(coolant.secondary_pump_on, True, msg=i)\n\n for i, radiator in enumerate(ps.engineering.radiators):\n pass\n self.assertEqual(radiator.attached_to_coolant_loop, i % 4, msg=i)\n self.assertEqual(radiator.functioning, bool(i % 2), msg=i)", "def setUp(self):\n add_celltype_file = cv.expression_data2\n self.add_celltype_ctp = \"celltypetarget2\"\n add_celltype_ = [add_celltype_file, self.add_celltype_ctp, {\"files_path\": 
\"test\"}]\n self.vencode_obj = iext.GetVencodes(inputs=self.inputs,\n files_path=self.files_path,\n cell_type=self.add_celltype_ctp,\n algorithm=self.algorithm,\n n_regulatory_elements=self.k,\n number_vencodes=4,\n thresholds=self.thresholds, n_samples=10000,\n merge={\"replicate_suffix\": self.replicate_suffix},\n add_celltype=add_celltype_)\n self.vencodes = self.vencode_obj.coordinates", "def setUp(self):\n self.vencode_obj = iext.GetVencodesFantom(files_path=self.files_path,\n cell_type=self.celltype_analyse,\n algorithm=self.algorithm,\n n_regulatory_elements=self.k,\n number_vencodes=4,\n parsed=self.parsed,\n thresholds=self.thresholds, n_samples=10000,\n data_type=self.data_type, sample_type=self.sample_type)\n self.vencodes = self.vencode_obj.coordinates", "def build(self,documents):\n\t\tself.vectorKeywordIndex = self.getVectorKeywordIndex(documents)\n\n\t\tself.documentVectors = [self.createVector(document) for document in documents]", "def get_vectors(self, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = self.model_dbow.docvecs[prefix]\n return vectors", "def init_LVQ_pvectors(som, taggings, x_train, y_train):\n p_vectors = np.ndarray(shape = (som.x, som.y), dtype = prototype)\n for i in range(som.x):\n for j in range(som.y):\n p_vectors[i][j] = prototype(taggings[i][j], som.weights[(i,j)])\n return p_vectors", "def setUp(self):\n self.vencode_obj = iext.GetVencodesFantom(files_path=self.files_path,\n cell_type=self.celltype_analyse,\n algorithm=\"sampling\",\n n_regulatory_elements=self.k,\n number_vencodes=4,\n parsed=self.parsed,\n thresholds=self.thresholds, n_samples=10000,\n data_type=self.data_type, sample_type=self.sample_type)\n self.vencodes = self.vencode_obj.coordinates", "def setUp(self):\n self.validate_with = outside_data.Bed(source=\"get_val_ven_test.bed\", files_path=self.files_path,\n folder=\"Validation_files\")\n self.vencode_obj = iext.GetVencodes(validate_with=(self.validate_with, (\":\", r\"\\..\", \",\")),\n inputs=self.inputs,\n files_path=self.files_path,\n cell_type=self.celltype_analyse,\n algorithm=self.algorithm,\n n_regulatory_elements=self.k,\n number_vencodes=4,\n thresholds=self.thresholds, n_samples=10000,\n merge={\"replicate_suffix\": self.replicate_suffix})\n self.vencodes = self.vencode_obj.coordinates", "def get_vectors(model, corpus_size, vectors_size, vectors_type):\r\n vectors = np.zeros((corpus_size, vectors_size))\r\n for i in range(0, corpus_size):\r\n prefix = vectors_type + '_' + str(i)\r\n vectors[i] = model.docvecs[prefix]\r\n return vectors", "def get_source_vectors(testsmells):\n\n for testsmell in testsmells:\n df = pd.read_csv('data/' + testsmell + '_data.csv')\n df['Vector'] = ''\n\n repnames = df['App'].unique().tolist()\n for repname in repnames:\n print('Processing project \\'' + repname + '\\' for ' + testsmell + '...')\n currdf = df[df['App'] == repname]\n repo = Repo('repositories/' + repname)\n vectors = []\n \n # Get the vectors for each Java file in the dataframe\n for _, row in tqdm(list(currdf.iterrows())): \n try:\n repo.git.checkout(row['CommitSHA'], force=True)\n file_path = 'repositories/' + repname + '/' + row['RelativeTestFilePath']\n vectors.append(get_vector(file_path))\n except GitCommandError as err:\n print('Failed for ' + row['App'] + ':' + row['CommitSHA'])\n print(err)\n vectors.append('')\n \n df.loc[df['App'] == repname, 'Vector'] = vectors # Set the vectors on the 
dataframe\n \n filename = 'data/' + testsmell + '_vectors.csv'\n df.to_csv(filename, index=False)", "def gen_review_vecs(reviews, model, num_features):\n\n curr_index = 0\n review_feature_vecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\n\n # index2word is a list consisting of all words in the vocabulary\n # Convert list to set for speed\n index2word_set = set(model.wv.index2word)\n for review in reviews:\n\n #if curr_index%1000 == 0.:\n # print \"Vectorizing review %d of %d\" % (curr_index, len(reviews))\n \n review_feature_vecs[curr_index] = review_to_vec(review, model, num_features , index2word_set)\n curr_index += 1\n \n return review_feature_vecs", "def create_vectors(\n dataset_path_train: str, dataset_path_test: str,\n vectors_path_train: str, vectors_path_test: str\n) -> int:\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str,\n \"text_stemmed\": str,\n \"text_lemmatized\": str,\n }\n\n df_train = pd.read_csv(\n f\"/data/{dataset_path_train}\",\n index_col=\"id\",\n dtype={**dtypes, \"target\": int},\n converters={\"tokens\": ast.literal_eval})\n df_train[\"text_preprocessed\"] = df_train[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n df_test = pd.read_csv(\n f\"/data/{dataset_path_test}\",\n index_col=\"id\",\n dtype=dtypes,\n converters={\"tokens\": ast.literal_eval})\n df_test[\"text_preprocessed\"] = df_test[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n vectorizer = sklearn.feature_extraction.text.CountVectorizer()\n vectors_train = vectorizer.fit_transform(df_train[\"text_preprocessed\"])\n vectors_test = vectorizer.transform(df_test[\"text_preprocessed\"])\n\n with open(f\"/data/{vectors_path_train}\", \"wb\") as f:\n pickle.dump(vectors_train, f)\n with open(f\"/data/{vectors_path_test}\", \"wb\") as f:\n pickle.dump(vectors_test, f)\n\n return 0", "def __init__(self, lower, upper):\n\n self.vector = self._initialise(lower, upper)\n self.seeds = 0\n self.year = 0\n self.valid = True", "def load_vectors(args):\n dict_fold = 'train' # which fold of the data will be used to produce results\n if args.task == 'conneau' or 'xling':\n data_dir = os.path.join(args.data_dir, 'MUSE')\n dict_dir = os.path.join(data_dir, 'crosslingual/')\n if args.task == 'xling':\n dict_dir = os.path.join(dict_dir, 'xling-dictionaries/bli_datasets/')\n else:\n dict_dir = os.path.join(dict_dir, 'dictionaries/')\n\n src_path = os.path.join(data_dir, 'wiki.' + args.src_lang + '.vec')\n trg_path = os.path.join(data_dir, 'wiki.' + args.trg_lang + '.vec')\n src_freq_path = None\n trg_freq_path = None\n if dict_fold == 'test':\n postfix = '.5000-6500.txt'\n elif dict_fold == 'train':\n postfix = '.0-5000.txt'\n else:\n raise ValueError('Unrecognized dictionary fold for evaluation')\n elif args.task == 'dinu':\n data_dir = os.path.join(args.data_dir,'dinu')\n dict_dir = os.path.join(data_dir, 'dictionaries/')\n src_path = os.path.join(data_dir, 'embeddings', args.src_lang + '.emb.txt')\n trg_path = os.path.join(data_dir, 'embeddings', args.trg_lang + '.emb.txt')\n src_freq_path = None\n trg_freq_path = None\n postfix = '.{}.txt'.format(dict_fold)\n elif args.task == 'zhang':\n order = [args.src_lang,args.trg_lang]\n if args.src_lang == 'en':\n order = order[::-1]\n data_dir = os.path.join(args.home_dir,'pkg/UBiLexAT/data/','-'.join(order))\n dict_dir = data_dir\n src_path = os.path.join(data_dir, 'word2vec.' + args.src_lang)\n trg_path = os.path.join(data_dir, 'word2vec.' + args.trg_lang)\n src_freq_path = os.path.join(data_dir, 'vocab-freq.' 
+ args.src_lang)\n trg_freq_path = os.path.join(data_dir, 'vocab-freq.' + args.trg_lang)\n postfix = '.train.txt'\n\n srcfile = open(src_path, encoding=args.encoding, errors='surrogateescape')\n trgfile = open(trg_path, encoding=args.encoding, errors='surrogateescape')\n src_words, xs = embeddings.read(srcfile, args.maxs)\n trg_words, xt = embeddings.read(trgfile, args.maxt)\n srcfile.close()\n trgfile.close()\n \n if src_freq_path:\n with open(src_freq_path, encoding=args.encoding, errors='surrogateescape') as f:\n lines = [a.split(' ') for a in f.read().strip().split('\\n')]\n freq_src = {k: int(v) for (k,v) in lines}\n\n with open(trg_freq_path, encoding=args.encoding, errors='surrogateescape') as f:\n lines = [a.split(' ') for a in f.read().strip().split('\\n')]\n freq_trg = {k: int(v) for (k,v) in lines}\n\n # Build word to index map\n src_word2ind = {word: i for i, word in enumerate(src_words)}\n trg_word2ind = {word: i for i, word in enumerate(trg_words)}\n\n if args.task == 'zhang':\n dict_path = os.path.join(dict_dir, 'all.' + '-'.join(order) + '.lex')\n flip = False\n elif args.task == 'dinu' and args.src_lang != 'en':\n # Only has dicts in one direction, flip\n dict_path = os.path.join(dict_dir, args.trg_lang + '-' + args.src_lang + postfix)\n src_to_en = os.path.join(dict_dir, 'en' + '-' + args.src_lang + postfix)\n en_to_trg = os.path.join(dict_dir, args.trg_lang + '-' + 'en' + postfix)\n flip = True\n elif args.task == 'xling':\n dict_path = os.path.join(dict_dir, args.src_lang+'-'+args.trg_lang+'/yacle.test.freq.2k.'+args.src_lang+'-' + args.trg_lang + '.tsv')\n src_to_en = os.path.join(dict_dir, args.src_lang+'-'+'en'+'/yacle.test.freq.2k.'+args.src_lang+'-' + 'en' + '.tsv')\n en_to_trg = os.path.join(dict_dir, 'en'+'-'+args.trg_lang+'/yacle.test.freq.2k.'+'en'+'-' + args.trg_lang + '.tsv')\n\n flip = False\n if not os.path.exists(dict_path):\n dict_path = os.path.join(dict_dir, args.trg_lang+'-'+args.src_lang+'/yacle.test.freq.2k.'+args.src_lang+'-' + args.trg_lang + '.tsv')\n flip = True\n\n else:\n src_to_en = os.path.join(dict_dir, args.src_lang + '-' + 'en' + postfix)\n en_to_trg = os.path.join(dict_dir, 'en' + '-' + args.trg_lang + postfix)\n dict_path = os.path.join(dict_dir, args.src_lang + '-' + args.trg_lang + postfix)\n flip = False\n\n\n if not os.path.exists(dict_path):\n # create new dict\n print('Warning: no dict found, creating dictionary')\n create_dict_for(src_to_en, en_to_trg, dict_path, args)\n\n dictf = open(dict_path, encoding=args.encoding, errors='surrogateescape')\n src2trg = collections.defaultdict(set)\n oov = set()\n vocab = set()\n max_srcind = 0 # These are mostly for debug\n max_trgind = 0\n for line in dictf:\n splitted = line.split()\n if len(splitted) > 2:\n # Only using first translation if many are provided\n src, trg = splitted[:2]\n elif len(splitted) == 2:\n src, trg = splitted\n else:\n # No translation? 
Only happens for Zhang data so far\n continue\n if flip: src, trg = trg, src\n try:\n src_ind = src_word2ind[src]\n trg_ind = trg_word2ind[trg]\n src2trg[src_ind].add(trg_ind)\n vocab.add(src)\n max_srcind = max(max_srcind, src_ind)\n max_trgind = max(max_trgind, trg_ind)\n except KeyError:\n oov.add(src)\n\n return xs, xt, src_words, trg_words, src_word2ind, trg_word2ind, src2trg", "def normal_vectors(ATOM_list, frame, number_of_atoms, number_of_molecules, number_of_vectors, centerlist, referencelist, boxX, boxY, boxZ):\t\n\tNORMAL_VECTORS_out=open(\"normalvectors.xyz\",'a')\n\t\n\tNORMAL_VECTORS_check=open(\"check_normalvectors.xyz\",'a')\n\t\n\tANGLEDIST_VECTORS_out=open(\"angle_dist_vectors.xyz\",'a')\n\t\n\t\n\tVorzugsrichtung_out=open(\"sum_over_all_normalvectors.dat\",'a')\n\t\n\t\n\t\n\tNORMAL_VECTORS_check.write(str( (number_of_vectors*(2 + int(len(referencelist))) )*number_of_molecules )+'\\n')\n\tNORMAL_VECTORS_check.write(str(frame)+'\\n')\n\t\n\tNORMAL_VECTORS_out.write(str( (number_of_vectors*2)*number_of_molecules )+'\\n')\n\tNORMAL_VECTORS_out.write(str(frame)+'\\n')\n\n\tANGLEDIST_VECTORS_out.write(str( (number_of_vectors*2)*number_of_molecules )+'\\n')\n\tANGLEDIST_VECTORS_out.write(str(frame)+'\\n')\n\t\n\tvorzugsvektor = np.asarray([0,0,0])\n\treference=0\n\t\n\t\n\tfor l in range(0,number_of_molecules):\n\t\tfor vec_count in range(0,number_of_vectors):\n\t\t\tr = []\n\t\t\t#get the 'middle' of the DPI molecule\n\t\t\t\n\t\t\n\t\t\t#get geometric center\n\t\t\tcenter=np.asarray([0,0,0])\n\t\t\t#append all vectors defining the center\n\t\t\tfor i in range(len(centerlist[vec_count])):\n\t\t\t\t# x y z\n\t\t\t\tr.append( np.asarray( [ ATOM_list[centerlist[vec_count][i] + reference][1],ATOM_list[centerlist[vec_count][i] + reference][2],ATOM_list[centerlist[vec_count][i] + reference][3] ] ) ) \n\t\t\t\tcenter = center + r[i]\n\t\t\tcenter = center / len(centerlist[vec_count])\n\t\t\n\t\t\t#calculate all reference vectors\n\t\t\tr=[]\n\t\t\tfor i in range(len(referencelist[vec_count])):\n\t\t\t\tr.append(np.asarray( [ ATOM_list[referencelist[vec_count][i] + reference][1],ATOM_list[referencelist[vec_count][i] + reference][2],ATOM_list[referencelist[vec_count][i] + reference][3] ] ) )\n\t\t\n\t\t\t#get the cross products\n\t\t\tnormal_vec= np.asarray([0,0,0])\n\t\t\tfor i in range(len(r)):\n\t\t\t\tif (i<(len(r)-1)):\n\t\t\t\t\t\n\t\t\t\t\t#minimum image convenction\n\t\t\t\t\tif ( abs(r[i][0]-center[0]) < abs(r[i][0]-center[0]+boxX) ) and ( abs(r[i][0]-center[0]) < abs(r[i][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx = r[i][0]-center[0]\n\t\t\t\t\telif (abs(r[i][0]-center[0]+boxX) < abs(r[i][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx = r[i][0]-center[0]+boxX\n\t\t\t\t\telse:\n\t\t\t\t\t\tdx = r[i][0]-center[0]-boxX\n\t\t\t\t\t\n\t\t\t\t\tif ( abs(r[i][1]-center[1]) < abs(r[i][1]-center[1]+boxY) ) and ( abs(r[i][1]-center[1]) < abs(r[i][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy = r[i][1]-center[1]\n\t\t\t\t\telif (abs(r[i][1]-center[1]+boxY) < abs(r[i][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy = r[i][1]-center[1]+boxY\n\t\t\t\t\telse:\n\t\t\t\t\t\tdy = r[i][1]-center[1]-boxY\n\t\t\t\t\t\t\n\t\t\t\t\tif ( abs(r[i][2]-center[2]) < abs(r[i][2]-center[2]+boxZ) ) and ( abs(r[i][2]-center[2]) < abs(r[i][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz = r[i][2]-center[2]\n\t\t\t\t\telif (abs(r[i][2]-center[2]+boxZ) < abs(r[i][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz = r[i][2]-center[2]+boxZ\n\t\t\t\t\telse:\n\t\t\t\t\t\tdz = r[i][2]-center[2]-boxZ\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t#minimum image 
convenction\n\t\t\t\t\tif ( abs(r[i+1][0]-center[0]) < abs(r[i+1][0]-center[0]+boxX) ) and ( abs(r[i+1][0]-center[0]) < abs(r[i+1][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx2 = r[i+1][0]-center[0]\n\t\t\t\t\telif (abs(r[i+1][0]-center[0]+boxX) < abs(r[i+1][0]-center[0]-boxX) ):\n\t\t\t\t\t\tdx2 = r[i+1][0]-center[0]+boxX\n\t\t\t\t\telse:\n\t\t\t\t\t\tdx2 = r[i+1][0]-center[0]-boxX\n\t\t\t\t\t\n\t\t\t\t\tif ( abs(r[i+1][1]-center[1]) < abs(r[i+1][1]-center[1]+boxY) ) and ( abs(r[i+1][1]-center[1]) < abs(r[i+1][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy2 = r[i+1][1]-center[1]\n\t\t\t\t\telif (abs(r[i+1][1]-center[1]+boxY) < abs(r[i+1][1]-center[1]-boxY) ):\n\t\t\t\t\t\tdy2 = r[i+1][1]-center[1]+boxY\n\t\t\t\t\telse:\n\t\t\t\t\t\tdy2 = r[i+1][1]-center[1]-boxY\n\t\t\t\t\t\t\n\t\t\t\t\tif ( abs(r[i+1][2]-center[2]) < abs(r[i+1][2]-center[2]+boxZ) ) and ( abs(r[i+1][2]-center[2]) < abs(r[i+1][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz2 = r[i+1][2]-center[2]\n\t\t\t\t\telif (abs(r[i+1][2]-center[2]+boxZ) < abs(r[i+1][2]-center[2]-boxZ) ):\n\t\t\t\t\t\tdz2 = r[i+1][2]-center[2]+boxZ\n\t\t\t\t\telse:\n\t\t\t\t\t\tdz2 = r[i+1][2]-center[2]-boxZ\n\t\t\t\t\t\n\t\t\t\t\tvec1=np.asarray([dx,dy,dz])\n\t\t\t\t\tvec2=np.asarray([dx2,dy2,dz2])\n\t\t\t\t\t#dy = r[i][1]-center[1]\n\t\t\t\t\t#dz = r[i][2]-center[2]\n\t\t\t\t\t#normal_vec = normal_vec + np.cross(r[i]-center, r[i+1]-center)\n\t\t\t\t\tnormal_vec = normal_vec + np.cross(vec1, vec2)/np.linalg.norm( np.cross(vec1, vec2) )\n\t\t\t\telif ( i==len(r) ):\n\t\t\t\t\t\n\t\t\t\t\t#minimum image convenction\n\t\t\t\t\tdx = min( abs(r[i][0]-center[0]), abs(r[i][0]-center[0]+boxX), abs(r[i][0]-center[0]-boxX) )\n\t\t\t\t\tdy = min( abs(r[i][1]-center[1]), abs(r[i][1]-center[1]+boxX), abs(r[i][1]-center[1]-boxX) )\n\t\t\t\t\tdz = min( abs(r[i][2]-center[2]), abs(r[i][2]-center[2]+boxX), abs(r[i][2]-center[2]-boxX) )\n\t\t\t\t\t\n\t\t\t\t\tdx2 = min( abs(r[0][0]-center[0]), abs(r[0][0]-center[0]+boxX), abs(r[0][0]-center[0]-boxX) )\n\t\t\t\t\tdy2 = min( abs(r[0][1]-center[1]), abs(r[0][1]-center[1]+boxX), abs(r[0][1]-center[1]-boxX) )\n\t\t\t\t\tdz2 = min( abs(r[0][2]-center[2]), abs(r[0][2]-center[2]+boxX), abs(r[0][2]-center[2]-boxX) )\n\t\t\t\t\t\n\t\t\t\t\tvec1=np.asarray([dx,dy,dz])\n\t\t\t\t\tvec2=np.asarray([dx2,dy2,dz2])\n\t\t\t\t\t#dy = r[i][1]-center[1]\n\t\t\t\t\t#dz = r[i][2]-center[2]\n\t\t\t\t\t#normal_vec = normal_vec + np.cross(r[i]-center, r[i+1]-center)\n\t\t\t\t\tnormal_vec = normal_vec + np.cross(vec1, vec2)/np.linalg.norm(np.cross(vec1, vec2))\n\t\t\t\t\t#normal_vec = normal_vec + np.cross(r[i]-center, r[0]-center) \n\t\t\n\t\t\t#normalize\n\t\t\tnormal_vec = normal_vec/np.linalg.norm(normal_vec)\n\t\t\n\t\t\t#\t#direction vectors\n\t\t\t#\tdir_vec1 = np.asarray([r[6][0],r[6][1],r[6][2]])\n\t\t\t#\tdir_vec2 = np.asarray([r[7][0],r[7][1],r[7][2]])\n\t\t\t#\tdir_vec3 = np.asarray([r[8][0],r[8][1],r[8][2]])\n\t\t\t#\tdir_vec4 = np.asarray([r[9][0],r[9][1],r[9][2]])\n\t\t\t#\tdir_vec = (dir_vec1 + dir_vec2 + dir_vec3 + dir_vec4)/4.0\n\t\t\t#\tdir_vec = mittelpunkt_dpi - dir_vec\t\t\t\n\n\t\t\t#change direction according to bending\n\t\t\t#\tif normal_vec[0]!=0 and dir_vec[0]!=0:\n\t\t\t#\t\tif normal_vec[0]/abs(normal_vec[0])!=dir_vec[0]/abs(dir_vec[0]):\n\t\t\t#\t\t\tnormal_vec[0] = -normal_vec[0]\n\t\t\t#\tif normal_vec[1]!=0 and dir_vec[1]!=0:\n\t\t#\t\t\tif normal_vec[1]/abs(normal_vec[1])!=dir_vec[1]/abs(dir_vec[1]):\n\t\t#\t\t\t\tnormal_vec[1] = -normal_vec[1]\n\t\t#\t\tif normal_vec[2]!=0 and dir_vec[2]!=0:\n\t\t#\t\t\tif 
normal_vec[2]/abs(normal_vec[2])!=dir_vec[2]/abs(dir_vec[2]):\n\t\t#\t\t\t\tnormal_vec[2] = -normal_vec[2]\n\n\t\t\t\n\t\t\t#check for a vorzugsvektor\n\t\t\tvorzugsvektor = vorzugsvektor + normal_vec\n\t\t\tNORMAL_VECTORS_out.write('C' +'\t'+ str((center)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_out.write('O' +'\t'+ str((center+normal_vec)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tANGLEDIST_VECTORS_out.write('C' +'\t'+ str((center)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tANGLEDIST_VECTORS_out.write('H' +'\t'+ str((normal_vec)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_check.write('C' +'\t'+ str((center)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_check.write('O' +'\t'+ str((center+normal_vec)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tNORMAL_VECTORS_check.write('H' +'\t'+ str((normal_vec)).replace('[', '').replace(']','')+ '\\n')\n\t\t\tfor i in range(len(referencelist[vec_count])):\n\t\t\t\tNORMAL_VECTORS_check.write('N' +'\t'+ str(ATOM_list[referencelist[vec_count][i] + reference][1])+'\t'+ str(ATOM_list[referencelist[vec_count][i] + reference][2])+'\t'+ str(ATOM_list[referencelist[vec_count][i] + reference][3])+ '\\n')\n\n\t\t#always skip 1 entire molecule forward\n\t\treference=reference + number_of_atoms/number_of_molecules\n\t\t\n\t#calculate a vorzugsvektor as sum over DIPBI normal vectors divided by number of DPBI vectors\n\tvorzugsvektor = vorzugsvektor/(number_of_vectors*frames*number_of_molecules)\n\tVorzugsrichtung_out.write(str(frame) + ' ' + str((vorzugsvektor)).replace('[', '').replace(']','')+'\\n')\n\tNORMAL_VECTORS_out.close()\n\tNORMAL_VECTORS_check.close()\n\treturn", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword 
= sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def get_vectors(model, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = model.docvecs[prefix]\n return vectors", "def test_vector_class():\n points = 10\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, points)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 
'premium': premium}\n\n sigma = np.ones(points) * .13\n bsm = BSmodel(sigma, data)\n\n print(bsm.premium())\n\n weights = np.ones(points) * .63\n means = np.vstack([np.ones(points) * -.01, np.ones(points) * .09])\n stds = np.vstack([np.ones(points) * .16, np.ones(points) * .05])\n param = np.vstack([weights, means, stds])\n mbs = MBSmodel(param, data)\n\n print(mbs.premium())\n\n param_a, param_p = np.ones(points) * 4.5, np.ones(points) * 2\n param_c = -.05 * np.ones(points)\n gb2 = GB2model([param_a, param_p, param_c], data)\n\n print(gb2.premium())", "def __init__(self):\n # Create an 8-byte initialization vector", "def __init__(self, data, codebook, init_learning_rate=0.1, labels=None):\n self.data = data\n assert len(data) > 0\n self.data_vector_size = len(data[0])\n self.codebook = codebook\n self.init_learning_rate = init_learning_rate\n self.labels = labels\n self.ready_for_prediction = False", "def main(data, setup):\n # input check \n varnames = ('vm_raw', 'vm_raw_theo')\n for varname in varnames:\n if varname not in data.keys():\n raise LookupError('data must contain variable %s.' %s)\n\n # display info message\n chrono = setup['chrono']\n chrono.issue('target velocity: correct for sensor motion...')\n\n # retrieve varialbes\n vnys = data['nqv']\n v_sensor_r = data['v_sensor_r']\n\n # ========== main =================================== #\n for key_raw in ('vm_raw', 'vm_raw_theo'):\n key_c = key_raw.replace('raw', 'raw_c')\n\n # sum\n vm_raw = data[key_raw]\n v_sum = (vm_raw + np.expand_dims(v_sensor_r, 1))\n\n # mod\n data[key_c] = symmod(v_sum, vnys)\n # ==================================================== #\n\n return data", "def vectors():\n r = db.execute(\"select word, year, c from counts where conf=? order by word, year\", (conf,))\n vects = defaultdict(dict)\n for w,y,c in r:\n l = vects[w]\n l[y] = float(c) \n\n\n ret = []\n for w in vects:\n d = vects[w]\n\n # if word is super uncommon, skip it\n if (max(d.values()) <= 3):\n continue\n if (max([v / (1.+year2c.get(y,0)) for y, v in d.items()]) < .1): \n continue\n\n # some years may not have the word\n counts = dict2arr(d, xrange(minyear, maxyear+1), 1.0)\n\n \n # naive window averaging smoothing over the trend curve\n smooth = []\n for i in xrange(len(counts)):\n smooth.append(np.mean(counts[max(0,i-2):i+2]))\n if max(smooth) > 2:\n ret.append([w] + smooth)\n return np.array(ret)", "def __post_init__(self):\n all_vecs = {}\n for n2 in self._get_n2():\n all_vecs[n2] = all_vecs.get(n2, 0) + 1\n\n norms = {}\n\n object.__setattr__(self, \"_n2\", np.array(list(all_vecs.keys())).reshape(-1, 1))\n object.__setattr__(\n self, \"_multiplicity\", np.array(list(all_vecs.values())).reshape(-1, 1)\n )\n object.__setattr__(\n self, \"_normalization\", np.pi ** 2 * self.N * norms[self.nstep]\n )\n\n raise NotImplementedError(\"Need to implement dispersion Lüscher counter terms.\")", "def setUp(self):\n self.s = Solution4()\n self.pre_ = [1,2,3,4,5,6,7]\n self.in_ = [3,2,4,1,6,5,7]", "def __init__(self, vectors):\n \n self.vectors = vectors\n self.centroid = self.calcCentroid()", "def main():\n\n # Get the arguments\n args = docopt(\"\"\"Make count-based vector space from corpus.\n\n Usage:\n count.py [-l] <windowSize> <corpDir> <outPath> <lowerBound> <upperBound>\n \n Arguments:\n \n <corpDir> = path to corpus directory with zipped files, each sentence in form 'year\\tword1 word2 word3...'\n <outPath> = output path for vectors\n <windowSize> = the linear distance of context words to consider in each direction\n <lowerBound> = lower 
bound for time period\n <upperBound> = upper bound for time period\n\n Options:\n -l, --len normalize final vectors to unit length\n\n """)\n \n is_len = args['--len']\n corpDir = args['<corpDir>']\n outPath = args['<outPath>']\n windowSize = int(args['<windowSize>']) \n lowerBound = int(args['<lowerBound>'])\n upperBound = int(args['<upperBound>'])\n \n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n logging.info(__file__.upper())\n start_time = time.time()\n\n # Build vocabulary\n logging.info("Building vocabulary")\n sentences = PathLineSentences_mod(corpDir, lowerBound=lowerBound, upperBound=upperBound)\n vocabulary = list(set([word for sentence in sentences for word in sentence if len(sentence)>1])) # Skip one-word sentences to avoid zero-vectors\n w2i = {w: i for i, w in enumerate(vocabulary)}\n \n # Initialize co-occurrence matrix as dictionary\n cooc_mat = defaultdict(lambda: 0)\n\n # Get counts from corpus\n sentences = PathLineSentences_mod(corpDir, lowerBound=lowerBound, upperBound=upperBound)\n logging.info("Counting context words")\n for sentence in sentences:\n for i, word in enumerate(sentence):\n lowerWindowSize = max(i-windowSize, 0)\n upperWindowSize = min(i+windowSize, len(sentence))\n window = sentence[lowerWindowSize:i] + sentence[i+1:upperWindowSize+1]\n if len(window)==0: # Skip one-word sentences\n continue\n windex = w2i[word]\n for contextWord in window:\n cooc_mat[(windex,w2i[contextWord])] += 1\n\n \n # Convert dictionary to sparse matrix\n logging.info("Converting dictionary to matrix")\n cooc_mat_sparse = dok_matrix((len(vocabulary),len(vocabulary)), dtype=float)\n try:\n cooc_mat_sparse.update(cooc_mat)\n except NotImplementedError:\n cooc_mat_sparse._update(cooc_mat)\n \n if is_len:\n # L2-normalize vectors\n l2norm1 = linalg.norm(cooc_mat_sparse, axis=1, ord=2)\n l2norm1[l2norm1==0.0] = 1.0 # Convert 0 values to 1\n cooc_mat_sparse /= l2norm1.reshape(len(l2norm1),1)\n\n # Make space\n vocabulary = [v.encode('utf-8') for v in vocabulary]\n countSpace = Space(SparseMatrix(cooc_mat_sparse), vocabulary, vocabulary)\n \n # Save the Space object in pickle format\n save_pkl_files(countSpace, outPath, save_in_one_file=False) \n \n logging.info("Corpus has size %d" % sentences.corpusSize)\n logging.info("--- %s seconds ---" % (time.time() - start_time))

def _compute_util_data(self):\n\n print("Computing PCA of document vectors.")\n self.pca = PCA(n_components = 3)\n\n print("Computing document clusters in PCA basis.")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)

def get_word_vector():\n\n pattern = r"[0-9\s+\.\!\/_,$%^*()?;;:-【】+\"\']+|[+——!,;:。?、~@#¥%……&*()]+"\n s1 = input("Sentence 1:").strip()\n s2 = input("Sentence 2:").strip()\n s1 = re.sub(pattern, " ", s1)\n s2 = re.sub(pattern, " ", s2)\n cut1 = jieba.cut(s1)\n cut2 = jieba.cut(s2)\n\n list_word1 = (' '.join(cut1)).split()\n list_word2 = (' '.join(cut2)).split()\n print(list_word1)\n print(list_word2)\n\n key_word = list(set(list_word1 + list_word2)) # take the union of the two word lists\n print(key_word)\n\n word_vector1 = np.zeros(len(key_word)) # zero-filled array of the given shape and type to store the vector\n word_vector2 = np.zeros(len(key_word))\n\n for i in range(len(key_word)): # determine the value of each vector position in turn\n for j in range(len(list_word1)): # count how often each entry of key_word occurs in the sentence\n if key_word[i] == list_word1[j]:\n word_vector1[i] += 1\n for k in range(len(list_word2)):\n if key_word[i] == list_word2[k]:\n word_vector2[i] += 1\n\n print(word_vector1) # print the vectors\n print(word_vector2)\n return word_vector1, word_vector2

def test_suite():\r\n test(add_vectors([1, 1], [1, 1]) == [2, 2])\r\n test(add_vectors([1, 2], [1, 4]) == [2, 6])\r\n test(add_vectors([1, 2, 1], [1, 4, 3]) == [2, 6, 4])\r\n test(scalar_mult(5, [1, 2]) == [5, 10])\r\n test(scalar_mult(3, [1, 0, -1]) == [3, 0, -3])\r\n test(scalar_mult(7, [3, 0, 5, 11, 2]) == [21, 0, 35, 77, 14])\r\n test(dot_product([1, 1], [1, 1]) == 2)\r\n test(dot_product([1, 2], [1, 4]) == 9)\r\n test(dot_product([1, 2, 1], [1, 4, 3]) == 12)\r\n test(cross_product([2,3,4], [5,6,7]) == [-3, 6, -3])

def unit_vectors(self):\n # return {'comp1': CartesianRepresentation(...),\n # 'comp2': CartesianRepresentation(...),\n # 'comp3': CartesianRepresentation(...)}\n raise Exception("Not yet implemented")

def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = ""\r\n\tfw = open("feature_vector.txt", "w+", encoding = "utf-8")\r\n\twith open("D:\\Python_Prac\\wordstag\\modules\\HI_EN_TRAIN.txt", "r", encoding = "utf-8") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == "\n":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split("\t")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+" "\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+" "\r\n\t\t\tfeature_vector += str(is_english(context[0]))+" "\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+" "\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+" "\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+" "\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+" "\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split("\t")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split("\t")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+" "\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+" "\r\n\t\t\tfeature_vector += "\n"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint("Proceeding..."+str(index+1)+" of 16683")\r\n\r\n\tfw.close()

def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = "Test for the Iris dataset (easy, classical)"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = "Test for the Breast Cancer dataset (easy, classical)"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = "Test for the Digits dataset (hard, classical)"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = "Test for the Wine dataset (medium, classical)"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = "Test for Gaussian data (medium, classical)"\n for _ in range(1):\n print("\\n")\n print("New iteration")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print("\\n")\n\n # short DNA strings\n pres = "Test for short DNA sequences (hard, classical)"\n test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)

def initializeVectorField(self, ctrs, vecs, label):\n self.ctrs = ctrs\n self.vecs = vecs\n self.Npts = len(vecs)\n self.label = label\n return

def infer_vectors(self, reports, labels):\n logger.info('Inferring vectors from Doc2Vec model')\n tagged_docs = self.tag_dataset(reports, labels)\n vecs = [self.model.infer_vector(tag.words) for tag in tagged_docs]\n vecs = np.array(vecs)\n return vecs

def bow_vecs(docs):\n return CECTORIZER.transform(docs).toarray()

def create_vectorized_features(data_dir, feature_version=2):\n extractor = PEFeatureExtractor(feature_version)\n\n print("Vectorizing training set")\n X_path = os.path.join(data_dir, "X_train.dat")\n y_path = os.path.join(data_dir, "y_train.dat")\n raw_feature_paths = [os.path.join(data_dir, "train_features_{}.jsonl".format(i)) for i in range(6)]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)\n\n print("Vectorizing test set")\n X_path = os.path.join(data_dir, "X_test.dat")\n y_path = os.path.join(data_dir, "y_test.dat")\n raw_feature_paths = [os.path.join(data_dir, "test_features.jsonl")]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)

def getVectors(self):\n vectors = dict()\n i = 0\n N = len(self.db.invertedIndex)\n for w, (idf, docs) in self.db.invertedIndex.items():\n for doc, tf in docs.items():\n try:\n vectors[doc][i] = tf * idf\n except KeyError as k:\n vectors[doc] = {i: tf * idf}\n i += 1\n i = 0;\n return vectors

def vectors(self, adr = 0x10000):\n\n\t\tself.__vector(adr - 2, "RST")\n\t\tself.__vector(adr - 4, "NMI")\n\t\tself.__vector(adr - 6, "SWI")\n\t\tself.__vector(adr - 8, "IRQ")\n\t\tx = self.p.t.add(adr - 8, adr, "tbl")\n\t\tx.blockcmt += "\\n-\\nMC6800 Vector Table\\n\\n"

def train_lvq(numcbv, run_len_mult = 40, fpath = "datasets\\lvq"):\n # Number of iterations recommended by Kohonen is 40 times the number of codebook vectors\n runlen = run_len_mult * numcbv\n \n #run length for 'sammon'. Doesn't affect learning. May not be necessary.\n #runlen2 = 100\n \n #codebook size 40 will create files "lvq/c40e.cod", "lvq/c40o.sam" etc.\n cb = "lvq\\c" + str(numcbv)\n train = fpath + "_train.txt"\n test = fpath + "_test.txt"\n\n # Little lambdas just to help with readability below.\n cmd = lambda X: "binaries_windows\\"+X+".exe"\n din = lambda X: " -din " + str(X)\n cout = lambda X: " -cout " + str(X) \n cin = lambda X: " -cin " + str(X)\n rlen = lambda X: " -rlen " + str(X)\n noc = lambda X: " -noc " + str(X)\n cfout = lambda X: " -cfout " + str(X) \n \n # Initialize LVQ with even codebooks per class\n check_call(cmd("eveninit") + din(train) + cout(cb + "e.cod") + noc(numcbv) )\n \n # Balance codebooks.
Optional.\n check_call(cmd(\"balance\") + din(train) + cin(cb + \"e.cod\") + cout(cb + \"b.cod\") )\n \n #Codebook Training\n check_call(cmd(\"olvq1\") + din(train) + cin(cb + \"b.cod\") + cout(cb + \"o.cod\") + rlen(runlen) )\n \n # Compute accuracy for training and testing set.\n check_call(cmd(\"accuracy\") + din(train) + cin(cb + \"o.cod\") + cfout(cb + \"_train.cfo\") )\n check_call(cmd(\"accuracy\") + din(test) + cin(cb + \"o.cod\") + cfout(cb + \"_test.cfo\") )\n \n #Optional. Slow.\n #call(cmd(\"sammon\") + cin(cb + \"o.cod\") + cout(cb + \"o.sam\") + rlen(runlen2) )", "def __init__( self ):\n self.NQ = 16\n self.Nbranches = 3\n self.NatomsUC = 1\n self.dim = 3\n self.QVectors = np.zeros( ( self.NQ , 3 ) )\n self.MakeQVectors()\n self.EigenVectors = np.zeros( [ self.NQ , \n self.Nbranches ,\n self.NatomsUC , \n self.dim ] )\n self.MakeEigenVectors()", "def calculate_vectors(self, spectrum_list: List[Spectrum]) -> np.ndarray:\n n_rows = len(spectrum_list)\n reference_vectors = np.empty(\n (n_rows, self.output_vector_dim), dtype=\"float\")\n binned_spectrums = self.model.spectrum_binner.transform(spectrum_list, progress_bar=self.progress_bar)\n for index_reference, reference in enumerate(\n tqdm(binned_spectrums,\n desc='Calculating vectors of reference spectrums',\n disable=(not self.progress_bar))):\n reference_vectors[index_reference, 0:self.output_vector_dim] = \\\n self.model.base.predict(self._create_input_vector(reference), verbose=0)\n return reference_vectors", "def generate_voc(self):\n\n observations = [\"walk\", \"shop\", \"clean\", \"tennis\", \"read\"]\n states = [\"sunny\", \"rainy\", \"snowy\"]\n\n # Sort them alphabetically, just to be on the safe side\n observations.sort()\n states.sort()\n\n return (observations, states)", "def boxVectors(self, stuff):\n try:\n # We may be changing the box, so delete the cached box lengths to\n # make sure they are recomputed if desired\n del self._boxLengths\n except AttributeError:\n pass\n self.box_vectors = stuff", "def setUp(self):\n self.iv1 = Interval(1, 10)\n self.iv2 = Interval(5, 15)\n self.iv1_r = Interval(10, 1)\n self.iv2_r = Interval(15, 5)\n self.iv3 = Interval(3, 8)\n self.iv4 = Interval(11, 20)", "def prepare(self):\n ls=len(self.v)\n self.S=numpy.zeros(ls)\n self.A=numpy.zeros((ls,ls))\n\n for k,v in self.e.items():\n b,e=k\n bi,ei=self.rv[b],self.rv[e]\n self.A[bi,bi]-=v\n self.A[bi,ei]+=v", "def create_vector_datapackage(pk_type, path, file_flag, out_path):\n process_source(pk_type, path, file_flag, out_path)", "def __init__(self,vector):\n self._vector = vector", "def feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg):\n # Doc2Vec requires LabeledSentence objects as input.\n # Turn the datasets from lists of words to lists of LabeledSentence objects.\n # YOUR CODE HERE\n labeled_train_pos = []\n labeled_train_neg = []\n labeled_test_pos = []\n labeled_test_neg = []\n i = 0\n for line in train_pos:\n labeled_train_pos.append(LabeledSentence(line, ['TRAIN_POS_%i' % i]))\n i += 1\n i = 0\n for line in train_neg:\n labeled_train_neg.append(LabeledSentence(line, ['TRAIN_NEG_%i' % i]))\n i += 1\n i = 0\n for line in test_pos:\n labeled_test_pos.append(LabeledSentence(line, ['TEST_POS_%i' % i]))\n i += 1\n i = 0\n for line in test_neg:\n labeled_test_neg.append(LabeledSentence(line, ['TEST_NEG_%i' % i]))\n i += 1\n\n # Initialize model\n model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=4)\n sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + 
labeled_test_neg\n model.build_vocab(sentences)\n\n # Train the model\n # This may take a bit to run \n for i in range(5):\n print "Training iteration %d" % (i)\n random.shuffle(sentences)\n model.train(sentences)\n\n # Use the docvecs function to extract the feature vectors for the training and test data\n # YOUR CODE HERE\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n for j in range(len(train_pos)):\n train_pos_vec.append(model.docvecs['TRAIN_POS_%i' % j])\n for j in range(len(train_neg)):\n train_neg_vec.append(model.docvecs['TRAIN_NEG_%i' % j])\n for j in range(len(test_pos)):\n test_pos_vec.append(model.docvecs['TEST_POS_%i' % j])\n for j in range(len(test_neg)):\n test_neg_vec.append(model.docvecs['TEST_NEG_%i' % j])\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec

def mk_bitvecs(self):\n self.bitvec = ''.join([f'{b:#010b}'[2:] for b in self.code ][::-1])\n self.bitvec_data = ''.join([f'{b:#010b}'[2:] for b in self.input][::-1])\n\n # Pad with some zeros to catch the last instructions.\n self.bitvec = '0'*64 + self.bitvec

def opcion1_automatica(v):\n v_range = len(v)\n\n for i in range(v_range):\n tit = ('Harry Potter', 'Percy Jackson', 'El Principito', 'Cien años de soledad',\n 'El señor de los anillos', 'Un mundo feliz', 'Orgullo y prejuicio',\n 'Crimen y castigo', 'Lolita', 'Ulises', 'El gran Gatsby', 'Mil soles espléndidos',\n 'Alicia en el país de las maravillas', 'Rebelión en la granja', 'Los pilares de la tierra',\n 'Guerra y paz', 'Memorias de una geisha', 'Frankenstein', 'Los viajes de Gulliver', 'La ladrona de libros')\n\n gen = ('Self-help', 'Art', 'Fiction', 'Computing', 'Economics',\n 'School', 'Society', 'Gastronomy', 'Children', 'Other')\n\n lang_list = ('Spanish', 'English', 'French', 'Italian', 'Other')\n titulo = random.choice(tit)\n genero = random.choice(gen)\n isbn = auto_gen_isbn()\n idioma = random.choice(lang_list)\n precio = round(random.uniform(0, 2000), 2)\n v[i] = Libro(isbn, titulo, genero, idioma, precio)\n\n print()\n print('\t\tVECTOR LOADED')\n print()

def _vectorize_data(self, docs: []):\n print('Vectorizing data...')\n tfidf = TfidfVectorizer()\n encoded_data = tfidf.fit_transform(docs)\n return encoded_data

def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print("Value of the vocabulary")\n self.vocabulary_size = len(self.vocabulary)

def __init__(self, data, m=100, eta=0.1, seq_length=25, sigma= 0.01):\n\n self.m, self.eta, self.seq_length = m, eta, seq_length\n self.vocab_len = data['vocab_len']\n self.ind_to_char = data['ind_to_char']\n self.char_to_ind = data['char_to_ind']\n self.book_data = data['book_data']\n\n self.b = np.zeros((m, 1))\n self.c = np.zeros((self.vocab_len, 1))\n\n self.U = np.random.normal(0, sigma, size=(m, self.vocab_len))\n self.W = np.random.normal(0, sigma, size=(m, m))\n self.V = np.random.normal(0, sigma, size=(self.vocab_len, m))

def generateAllRegionVectors():\n\tregionVectors = []\n\tfor i in range(NUM_REGION_VECTORS):\n\t\tregionVectors.append('{0:04x}'.format(i))\n\treturn regionVectors

def __init__(self, name, doc_vec):\n self.name = "" # it's added below\n self.vector_cnt = 0\n self.centroid_vector = {}\n self.length = 0.00\n self.add_vector(name,
1, doc_vec)", "def CreateBiPennate2():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n Vectors1 = LongaxisOrtho(Vectors1)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/Project_Gastro/workflows/Cesim/musc_mod_v2/OutputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((np.shape(Vectors1)[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = -30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = 30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(211,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,1)\r\n\r\n ax2 = fig.add_subplot(212,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,1)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres.dat\",Vectors2,header = header,comments='')", "def vectorize_doc_list(docList):\n vecList = bc.encode(docList)\n return vecList", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def Subtask4_pre_train_5():\n with open(PATH + 'pre_train_4_Subtask4.txt', encoding='utf-8') as fi:\n evi = eval(fi.read())\n\n train_data = np.load(PATH + 'pre_train_2_Subtask4.npy', allow_pickle=True).item()\n model = word2vec.KeyedVectors.load_word2vec_format(PATH + \"data/GoogleNews-vectors-negative300.bin\", binary=True)\n\n with open(PATH + 'pre_train_3_Subtask4.txt', encoding='utf-8') as f:\n document = eval(f.read())\n\n with open(PATH + 'traindata_Subtask4.txt', 'w') as fp:\n for data in train_data.items():\n claim = data[0]\n claim = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", claim)\n claim 
= claim.split(' ')\n claim = list(filter(lambda x: x in model.vocab, claim))\n Vi = []\n for i in range(len(claim)):\n Vi.append(model[claim[i]])\n\n V = np.zeros(len(Vi[0]))\n for i in range(len(claim)):\n for j in range(len(Vi[0])):\n V[j] = V[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V[i] * V[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V[i] = V[i] / rms\n V = V.astype(str).tolist()\n\n for doc in data[1]:\n lines = document[doc].split('\\n')\n for k in range(len(lines)):\n label = [data[0], doc, k]\n line = document[doc].split('\\n')[k]\n if line != str(k) + '\\t':\n line = line.replace(str(k) + '\\t', '')\n line = line.split('\\t')[0]\n line = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", line)\n line = line.split(' ')\n line = list(filter(lambda x: x in model.vocab, line))\n if len(line) != 0:\n Vi = []\n for i in range(len(line)):\n Vi.append(model[line[i]])\n\n V1 = np.zeros(len(Vi[0]))\n for i in range(len(line)):\n for j in range(len(Vi[0])):\n V1[j] = V1[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V1[i] * V1[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V1[i] = V1[i] / rms\n V1 = V1.astype(str).tolist()\n\n if label in evi:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 1' + '\\n')\n else:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 0' + '\\n')", "def update_doc_count_vector_on_disk(self, wipe_and_start_from_zero=False):\n logging.info('Starting method that builds term Document Count vector')\n\n int_vids_processed = 0\n int_vids_added_to_doc_count_vector = 0\n int_vids_not_touched = 0\n\n percent_increments = 10\n doc_count_file_exists = os.path.exists(self.fullpath_doc_count_vector_as_csv)\n\n # if a fresh start was requested, we wipe the SimpleDS clean\n # from the tags that mark a video as already included in the current\n # doc-count vector, and we delete the existing file.\n if wipe_and_start_from_zero:\n self.transcripts_ds.tag_remove_all_rows(self.__str_tag_vid_data_in_doc_count)\n percent_increments = 1\n if doc_count_file_exists:\n os.remove(self.fullpath_doc_count_vector_as_csv)\n doc_count_file_exists = False\n\n df_doc_count = pd.DataFrame()\n # check to see if the DC (document count) vector CSV file already exists.\n # If it does, we load a dataframe object with it, and\n # if it doesn't, we create a dataframe.\n if doc_count_file_exists:\n logging.debug('Document Count vector csv file exists. Loading dataframe from disk.')\n df_doc_count = pd.read_csv(self.fullpath_doc_count_vector_as_csv,\n sep=self.__separator)\n df_doc_count.set_index(self.__column_name_terms, drop=True, inplace=True)\n else:\n logging.debug('Document Count vector csv file does not exist. 
Will create it as part'\n ' of the execution of the method.')\n # If we are not loading the dataframe from disk (because it does not yet exist)\n # we need to initialize this dataframe a bit more formally than usual,\n # because the loop below adds two columns together after concatenating the current\n # iteration of the loop's dataframe, with the ongoing one.\n # If we don't initialize the dataframe here with its proper columns, the sum\n # of the two columns in the first iteration of the loop fails, because in the\n # first iteration, the cumulative df declared below would not have a column to add\n # with if it were just declared as an empty dataframe.\n df_doc_count = pd.DataFrame(\n columns=[self.__column_name_terms, self.__column_name_count])\n df_doc_count.set_index(self.__column_name_terms, drop=True, inplace=True)\n\n # now loop through the transcripts SimpleDS, and use the term count\n # dataframe of each video that has one to create a global document count vector.\n # by vector I mean a one-dimensional dataframe, where the index is\n # populated with all of the terms, and the data-column is the number of documents\n # (transcripts) where the term is present.\n counter = 0\n max_vids_to_process = self.num_vids_to_use\n percent_trkr = PercentTracker(max_vids_to_process,\n int_output_every_x_percent=percent_increments, log_level='info')\n # we are going to add a cheeky term to the dataframe of each\n # video. This will allow us to basically store, as a row, in the overall document-\n # count dataframe the size of the document universe that has been\n # examined to construct the current document-count vector.\n # we do this by temporarily adding the same string (that will never be found naturally\n # in a transcript) to every video term-count dataframe. To do this, we use the\n # small, cheeky dataframe below.\n dct_for_cheeky_df = {self.__column_name_terms: [self.__str_cheeky_document_counter],\n self.__column_name_count: [1]}\n df_cheeky = pd.DataFrame(dct_for_cheeky_df)\n df_cheeky.set_index(self.__column_name_terms, drop=True, inplace=True)\n df_cheeky.index.name = self.__column_name_terms\n for vid_id in self.transcripts_ds:\n vid_data_already_in_existing_vector = \\n self.transcripts_ds.tag_check(vid_id, self.__str_tag_vid_data_in_doc_count)\n if vid_data_already_in_existing_vector:\n int_vids_not_touched += 1\n else:\n # we are here if the video data being examined has not yet contributed to\n # the existing doc-count vector.\n execution_should_continue = self.var_mgr.var_retrieve(my_globals.str_execution_may_go_on)\n # if the instance of this class was only asked to process a certain number\n # of videos, we don't loop through the whole SimpleDS.\n # similarly, if an external event has asked for execution to stop using the\n # variable manager\n if (counter >= max_vids_to_process) or (not execution_should_continue):\n break\n logging.debug('Building document COUNT vector. Processing vid # ' + str(counter))\n\n df_vid_tc = self.__get_vid_term_count(vid_id)\n # check that the term COUNT df isn't empty\n if len(df_vid_tc) > 0:\n # this is where we use the cheeky dataframe described above to add to each\n # video's term-count a specific string. This means that the overall\n # document-count vector will always have a row that tracks how many documents\n # have been used to construct the vector.\n df_vid_tc = pd.concat([df_vid_tc, df_cheeky])\n # now create a dataframe for this video that replaces all count values\n # with True. For document count, we don't care about the actual value\n # of the term-frequency, just whether it appears or not.\n # Because all terms of a document obviously appear in that document,\n # all rows will be marked as True.\n df_vid_tc = df_vid_tc > 0\n # now we convert the Trues into 1s so we can do math with them later\n # this will fail if there are any nans, but we know there are not because\n # every term exists in its own document\n df_vid_tc[self.__column_name_count] = \\n df_vid_tc[self.__column_name_count].astype(int)\n # now we concatenate the cumulative single-column dataframe that is tracking\n # the document COUNT, with the one built above.\n df_doc_count = pd.concat([df_doc_count, df_vid_tc], join='outer', axis=1, sort=False)\n # now we have two columns, instead of just one. In both columns there are nans\n # because some terms existed in one df, and not in the other, and vice versa\n # (and for the terms that exist in both dfs, we have 1s in both.) This\n # allows us to add the columns together below, which results in an increment of\n # +1 in the cumulative df of all the terms (those and only those) that exist in\n # the current video being processed by the loop.\n # in the line below we replace the nans that got added at the concatenations with zeros\n df_doc_count.fillna(0, inplace=True)\n # now, as explained, we add the two columns together\n df_doc_count = df_doc_count.iloc[:, 0] + df_doc_count.iloc[:, 1]\n self.transcripts_ds.tag_add(vid_id, self.__str_tag_vid_data_in_doc_count)\n int_vids_added_to_doc_count_vector += 1\n\n int_vids_processed += 1\n counter += 1\n percent_trkr.update_progress(counter, show_time_remaining_estimate=True)\n\n # we save the transcripts simpleDS to disk, because tags may have been added\n self.transcripts_ds.save2disk()\n # then save the document-count vector to disk\n df_doc_count.index.name = self.__column_name_terms\n save_index = True\n logging.debug('Saving dataframe to CSV with __separator -> ' +\n str(self.__separator) + ' and saving index = ' + str(save_index))\n df_doc_count.to_csv(self.fullpath_doc_count_vector_as_csv, sep=self.__separator,\n index=save_index, header=True)\n\n logging.info('---------- SUMMARY of updating the document count vector ----------')\n logging.info('Videos processed: ' + str(int_vids_processed))\n logging.info('Videos added to the document-count vector: ' + str(int_vids_added_to_doc_count_vector))\n logging.info('Videos not touched: ' + str(int_vids_not_touched))

def build_model(self, documents):\n self.vectorizer = TfidfVectorizer(\n stop_words='english', lowercase=True).fit(documents)\n self.vectors = self.vectorizer.transform(documents)

def CreateBiPennate1():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData("C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles","new_centroids_file.dat")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((Vectors1.shape[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] <
0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = 30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = -30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres30.dat\",Vectors2,header = header,comments='')", "def init_from_lists(self,terms,vectors):\n self.terms = terms\n self.vectors = vectors\n self.real_vectors = [RealVectorFactory.generate_vector(args) for args in vectors]\n self.dict = dict(zip(self.terms, self.real_vectors))", "def pp_vectors(xm_train, xm_dev, xm_test):\n\n scaler = StandardScaler().fit(xm_train)\n\n train = scaler.transform(xm_train).T\n\n dev = scaler.transform(xm_dev).T\n\n test = scaler.transform(xm_test).T\n\n return train, dev, test", "def preexe():\n from collections import OrderedDict as odict\n term_index = odict()\n term_vec = pickle.loads(open('term_vec.pkl', 'rb').read())\n with open('./dumps.txt', 'r') as f:\n datasets = []\n for fi, line in enumerate(f):\n if fi > 50000: break\n if fi%500 == 0:\n print(\"now iter {}\".format(fi))\n terms = line.strip().split()\n for slide in range(0, len(terms) - 4, 1 ):\n ans = terms[slide+4] \n buff = []\n try:\n [buff.append(term_vec[term]) for term in terms[slide: slide+4]]\n except KeyError as e:\n continue\n datasets.append( (buff, ans, terms[slide: slide+5]) )\n if term_index.get(ans) is None:\n term_index[ans] = len(term_index)\n open('datasets.pkl', 'wb').write(pickle.dumps(datasets))\n open('term_index.pkl', 'wb').write(pickle.dumps(term_index))", "def vectorize_documents(documents, model):\n document_vectors = []\n count = 0\n for document in documents:\n count += 1\n sentence_vectors = [vectorize_sentence(sentence, model) for sentence in document]\n document_vector = get_aggregate_vector(sentence_vectors)\n document_vectors.append(document_vector)\n return document_vectors", "def CreateVector(self) -> BaseVector:", "def stimulus_vectors(vocab, beta, prefix='V'):\n v = vocab.parse('V0').v\n yield v\n for i in itertools.count(1):\n v = np.sqrt(1. 
- beta**2) * v + beta * vocab.parse('V' + str(i)).v\n v /= np.linalg.norm(v)\n yield v", "def initialize_libraries(experiment, ln):\n # Move into the folder to do the intial calculations in\n folder = \"initial_library\" + str(ln)\n os.chdir(folder) \n # Create a time stamp for beginning the calculations\n experiment[\"Summary\"] = \"Library \" + str(ln) + \" Initialization\\n\"\n experiment[\"Summary\"] += \"Started\" + SHARING.time_stamp()\n # Find the proper number of coordinates to consider\n N = len(experiment[\"Movements\"][ln])/2\n # Go through each antigen\n for mol in experiment[0]:\n # Apply the proper rotation\n for cn in range(N):\n # Create a generic vector of zeros of the appropriate length\n vector = [0.0] * N\n # Place a value of 1.0 in the correct location in the vector\n vector[cn] = 1.0\n # Find the angle to rotate the antigens by\n angle = experiment[\"Movements\"][ln][N+cn]\n # Rotate each of the antigens by the appropriate angle\n rmatrix = MOLECULES.calculate_rmatrix(angle, vector)\n MOLECULES.rotate(mol, rmatrix)\n # Translate each antigen by the appropriate amount\n MOLECULES.move(mol, experiment[\"Movements\"][ln][:N], '+')\n # Update the reference folder with these updated coordinates\n SHARING.output_Current(experiment, \"./Current/\") \n # Load the canonical structures\n canonicals = IPRO_FUNCTIONS.load_canonicals(experiment)\n cdrs = list(canonicals.keys())\n cdrs.sort()\n # Load the clashes\n clashes = IPRO_FUNCTIONS.load_clashes(experiment, cdrs) \n # Load the C++ scores\n raw_scores = IPRO_FUNCTIONS.load_scores(experiment[\"Folder\"])\n # Look for alternate solutions using integer cuts\n goOn = True\n # Store the solutions in a list\n solutions = [experiment[\"Scores\"][ln-1]]\n # Keep searching for alternate solutions until the quality of the result is\n # worse\n while goOn:\n # Resolve the MILP using integer cuts\n if useCPLEX:\n #solution = CPLEX.optcdr_canonicals(canonicals, clashes, \\\n # raw_scores[ln], solutions)\n pass\n else:\n solution = GAMS.optcdr_canonicals(canonicals, clashes, \\\n raw_scores[ln], solutions)\n # If the solution found has an equal objective value to the first, store\n # it and re-run the MILP\n if solution[\"Score\"] == experiment[\"Scores\"][ln-1][1][\"Score\"]:\n solutions.append([experiment[\"Scores\"][ln-1][0], solution])\n # Otherwise, break out of the loop and analyze the results\n else:\n goOn = False\n # Update the library based on the most members for the cluster\n best = 0\n # Skip this if there is only one solution after applying the integer cuts\n if len(solutions) > 1:\n # Load the clusters\n cdrs = list(canonicals.keys())\n cdrs.sort()\n clusters = load_clusters(experiment, cdrs)\n # Initialize the variables to store the solution with the most cluster\n # members\n best = None\n amount = 0\n # Go through the solutions\n for i, solution in enumerate(solutions):\n # Store the total number of members throughout the CDRs\n total = 0\n # Go through the CDRs\n for j, cdr in enumerate(cdrs):\n # Extract the number of members from the \"clusters\" dictionary \n members = clusters[cdr][solution[1][j+1]][\"Members\"]\n # 30 is the number where the permitted amino acids change from\n # \"of the same type\" to \"only those observed\" at each position\n if members > 30:\n members = 30\n # Add the number of members to the total for this solution\n total += members\n # If applicable, update the \"best\" solution found and its\n # corresponding total number of members\n if total > amount:\n best = i\n amount = total\n # Update 
the library based on the most structures\n experiment[\"Scores\"][ln-1] = solutions[best]\n # If the set of canonical structures has changed, update the referenced\n # values\n if best != 0:\n SHARING.output_scores(experiment, experiment[\"Folder\"] + \"Current/\", ln)\n # Copy the necessary files\n SHARING.copy_standard_files(experiment, solv = True) \n # Generate the antibody structures\n build_antibodies(experiment, canonicals, ln) \n # Go back to the home directory\n os.chdir(\"../\")\n # Try to create a new folder to handle the IPRO affinity maturation\n folder = \"library\" + str(ln)\n try:\n os.mkdir(folder)\n # If the folder already exists, delete it and make a new one. This is the\n # proper procedure since the library should only be there if the\n # initialization has already finished\n except OSError:\n os.system(\"rm -rf \" + folder)\n os.mkdir(folder)\n # Create a new Experiment class object to handle the IPRO affinity maturation\n make_IPRO_experiment(experiment, folder)\n # Delete the initialization folder\n os.system(\"rm -rf initial_\" + folder) \n # Update the summary file\n # Create a summary file\n experiment[\"Summary\"] += \"Ended\" + SHARING.time_stamp()\n name = SHARING.summary_name(SHARING.get_current())\n f = open(name, \"a\")\n f.write(experiment[\"Summary\"])\n f.close()", "def init_vars():\n\tda_vinci.base.usepackage(\"pgfkeys\")\n\tda_vinci.base.add_preamble(setup_script)", "def main():\n \n fname = sys.argv[1]\n fin = open(fname)\n a123 = []\n batms = []\n##### Read in old basis and vectors\n for line in fin:\n if line[0] == \"#\": continue\n \n line = line.split()\n line = [ float(x.strip()) for x in line[:3] ]\n \n if len(a123) == 3: batms.append(line); continue\n a123.append(line)\n \n fname = sys.argv[2]\n fin = open(fname)\n b123 = []\n for line in fin:\n if line[0] == \"#\": continue\n \n line = line.split()\n line = [ float(x.strip()) for x in line[:3] ]\n \n b123.append(line)\n if len(b123) == 3: break\n \n print \"... lattice vectors \\n old new \"\n for i in range(3):\n print (\" %1.4f | %1.4f | %1.4f %1.4f | %1.4f | %1.4f \" % \n (a123[0][i], a123[1][i], a123[2][i], b123[0][i], b123[1][i], b123[2][i]) )\n \n print \"... basis atoms = \"\n for i in range(len(batms)):\n print \" %1.4f %1.4f %1.4f\" % (batms[i][0], batms[i][1], batms[i][2])\n \n\n##### Read in new basis that you want to switch to\n##### Take any point q_A = (q1,q2,q3) then q_E = q1*a1_E + q2*a2_E + q3*a3_E = (x, y ,z)\n##### Hence, we can say that q_B = (p1,p2,p3) = q1*a1_B + q2*a2_B + q3*a3_B\n\n##### Writing in matrix form we can say that [ a1_E | a2_E | a3_E ]*q_A = q_E\n##### Apply the same logic to vector ai we see [b1_E | b2_E | b3_E ]*ai_B = ai_E\n##### Hence, --> ////ai_B = cbm*ae_E|\\\\\\\\\n \n a123 = [np.array(x) for x in a123] #old basis\n b123 = [np.array(x) for x in b123] #new basis\n \n B = np.transpose(b123)\n \n invB = np.linalg.inv(B)\n a123_B = [np.dot(x,invB) for x in a123]\n A_B = np.transpose(a123_B) #representation of old vectors in new space (colum wise)\n print \" ... 
representation of old vectors in the new basis = \"\n for i in range(3):\n print \" %1.7f | %1.7f | %1.7f\" % (A_B[0][i], A_B[1][i], A_B[2][i])\n \n##### Build 5 unit cells all around\n comb = [] #array containing unit cell coordinates\n for i1 in range(-2,2):\n for i2 in range(-2,2):\n for i3 in range(-2,2):\n comb.append([i1,i2,i3])\n \n nuc = len(comb)\n b2atms = [] #new basis atoms \n b2map = [] #new basis map\n\n for uc in comb:\n for i in range(len(batms)):\n tmp = [ uc[0]+batms[i][0], uc[1]+batms[i][1], uc[2]+batms[i][2] ] # add all basis atoms in each unit cell\n prcs = 4 # significat figures for rounding\n tmp = np.array(tmp)\n tmp = np.dot(A_B,tmp) # matrix multiplication\n tmp = np.round(tmp,prcs) \n eps = 0 #needed for round off error\n if -eps<=tmp[0]<1+eps and -eps<=tmp[1]<1+eps and -eps<=tmp[2]<1+eps: # if in first unit cell\n b2atms.append(tmp.tolist())\n b2map.append( [uc[0],uc[1],uc[2],i] ) \n \n print \"--> New basis has \" + str(len(b2atms)) + \" atoms in fractional coordinates:\"\n for i in range(len(b2atms)):\n print ( \" %1.4f %1.4f %1.4f <-- %1.0f %1.0f %1.0f|%1.0f\" % \n (b2atms[i][0], b2atms[i][1], b2atms[i][2], b2map[i][0], b2map[i][1], b2map[i][2], b2map[i][3]) )", "def set_bravais_vectors(p_state, ta=[1.0, 0.0, 0.0], tb=[0.0, 1.0, 0.0], tc=[0.0, 0.0, 1.0], idx_image=-1, idx_chain=-1):\n vec3 = ctypes.c_float * 3\n _Set_Bravais_Vectors(ctypes.c_void_p(p_state), vec3(ta), vec3(tb), vec3(tc))", "def define_vector_functions(self):\n\n # Exit if functions have already been defined.\n # A function decorator might work better here...\n if hasattr(self, 'velocity'):\n return None\n\n unsteady = self.config['formulation']['time']['unsteady']\n lagrangian = self.config['formulation']['domain'] == 'lagrangian'\n lin_elastic = self.config['material']['const_eqn'] == 'lin_elastic'\n elastic = self.config['material']['type'] == 'elastic'\n\n init = self.config['formulation']['initial_condition']\n\n # Trial and test functions\n self.test_vector = dlf.TestFunction(self.vectorSpace)\n self.trial_vector = dlf.TrialFunction(self.vectorSpace)\n\n if elastic and unsteady:\n if init['displacement'] is not None:\n disp = init['displacement']\n self.displacement = dlf.project(disp, self.vectorSpace)\n self.displacement0 = self.displacement.copy(deepcopy=True)\n else:\n self.displacement = dlf.Function(self.vectorSpace)\n self.displacement0 = dlf.Function(self.vectorSpace)\n self.displacement.rename(\"u\", \"displacement\")\n self.displacement0.rename(\"u0\", \"displacement\")\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n self.velocity0.rename(\"v0\", \"velocity\")\n elif unsteady: # Unsteady viscous material.\n self.displacement = 0\n self.displacement0 = 0\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n self.velocity0.rename(\"v0\", \"velocity\")\n\n # self.velocity = dlf.Function(self.vectorSpace, name=\"v\")\n # self.velocity0 = dlf.Function(self.vectorSpace, name=\"v0\")\n elif elastic: # Steady elastic material.\n if init['displacement'] is 
not None:\n disp = init['displacement']\n self.displacement = dlf.project(disp, self.vectorSpace)\n # self.displacement0 = self.displacement.copy(deepcopy=True)\n else:\n self.displacement = dlf.Function(self.vectorSpace)\n # self.displacement0 = dlf.Function(self.vectorSpace)\n self.displacement.rename(\"u\", \"displacement\")\n # self.displacement0.rename(\"u0\", \"displacement\")\n\n # self.displacement = dlf.Function(self.vectorSpace, name=\"u\")\n self.displacement0 = 0\n self.velocity = 0\n self.velocity0 = 0\n else: # Steady viscous material\n self.displacement = 0\n self.displacement0 = 0\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n # self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n # self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n # self.velocity0.rename(\"v0\", \"velocity\")\n\n # self.velocity = dlf.Function(self.vectorSpace, name=\"v\")\n self.velocity0 = 0\n\n # # Apply initial conditions if provided\n # initial_condition = self.config['formulation']['initial_condition']\n # if initial_condition['displacement'] is not None:\n # init_disp = initial_condition['displacement']\n # self.apply_initial_conditions(init_disp,\n # self.displacement,\n # self.displacement0)\n # if initial_condition['velocity'] is not None:\n # init_vel = initial_condition['velocity']\n # self.apply_initial_conditions(init_vel,\n # self.velocity,\n # self.velocity0)\n\n return None", "def test_qsvm_setup_data(self):\n ref_kernel_testing = np. array([[0.1443953, 0.18170069, 0.47479649, 0.14691763],\n [0.33041779, 0.37663733, 0.02115561, 0.16106199]])\n\n ref_support_vectors = np.array([[2.95309709, 2.51327412], [3.14159265, 4.08407045],\n [4.08407045, 2.26194671], [4.46106157, 2.38761042]])\n\n backend = BasicAer.get_backend('statevector_simulator')\n num_qubits = 2\n feature_map = SecondOrderExpansion(feature_dimension=num_qubits,\n depth=2,\n entangler_map=[[0, 1]])\n try:\n svm = QSVM(feature_map)\n\n svm.setup_training_data(self.training_data)\n svm.setup_test_data(self.testing_data)\n quantum_instance = QuantumInstance(backend, seed_transpiler=self.random_seed,\n seed_simulator=self.random_seed)\n result = svm.run(quantum_instance)\n\n np.testing.assert_array_almost_equal(\n result['kernel_matrix_testing'], ref_kernel_testing, decimal=4)\n\n self.assertEqual(len(result['svm']['support_vectors']), 4)\n np.testing.assert_array_almost_equal(\n result['svm']['support_vectors'], ref_support_vectors, decimal=4)\n\n self.assertEqual(result['testing_accuracy'], 0.5)\n except NameError as ex:\n self.skipTest(str(ex))", "def allocateVectors(self,X,Y,x,y):\n self.y=Y.init(y)\n self.dy=Y.init(y)\n self.g_x=Y.init(y)\n self.gpxdxn_p_gx=Y.init(y)\n self.gpxdxt=Y.init(y)\n self.dx_n=X.init(x)\n self.dx_ncp=X.init(x)\n self.dx_t=X.init(x)\n self.dx_t_uncorrected=X.init(x)\n self.dx_tcp_uncorrected=X.init(x)\n self.H_dxn=X.init(x)\n self.W_gradpHdxn=X.init(x)\n self.H_dxtuncorrected=X.init(x)", "def prepare_training_data(collection_of_positive_segments, collection_of_negative_segments):\n\n\n\tprint \"Preparing training data...\"\n\n\ttraining_vectors = []\n\ttraining_labels = []\n\n\n\tfor segment in collection_of_positive_segments.list_of_segments:\n\t\tvector = get_segment_feature_vector(segment)\n\n\t\ttraining_labels.append(\"Correct\")\n\t\ttraining_vectors.append(vector)\n\t\t\n\t\t\n\tfor segment in 
collection_of_negative_segments.list_of_segments:\n\t\tvector = get_segment_feature_vector(segment)\n\t\ttraining_labels.append(\"Incorrect\")\n\t\ttraining_vectors.append(vector)\n\t\t\n\t\n\ttraining_vectors = normalize_train_data(training_vectors)\n\t\n\t\t\n\t#data = VectorDataSet(training_vectors,L=training_labels)", "def vectorcorpus(model, wcl):\r\n corpus = np.array([model.word_vec(word) for word, _ in wcl])\r\n print('Created corpus with {} elements'.format(len(corpus)))\r\n return corpus", "def set_vectors(self, vecs):\n self.vecs = vecs[:]", "def vectorize_doc(document):\n # return document vector for tokenized input doc\n return bc.encode([document])[0]", "def _initialize(self, pos_samples: np.ndarray, neg_samples: np.ndarray) -> 'LaSVM':\n\n self._remove_all_support_vectors()\n\n pos_samples = pos_samples.copy()\n neg_samples = neg_samples.copy()\n\n if self.gamma == 'scale':\n self.gamma_ = self._scaled_gamma(np.vstack([pos_samples, neg_samples]))\n\n self.support_vectors = np.empty(shape=(0, pos_samples.shape[1]))\n\n self._add_support_vectors(pos_samples, y=np.ones(pos_samples.shape[0]))\n self._add_support_vectors(neg_samples, y=- np.ones(neg_samples.shape[0]))\n\n i, j = self._find_maximum_gradient_pair()\n self.intercept = (self.gradient[i] + self.gradient[j]) / 2\n self.delta = self.gradient[i] - self.gradient[j]\n\n return self", "def _vector_mapping(self) -> dict:\n words = set()\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n with open(doc_path, 'r') as f:\n text_words = f.readline().split()\n words = words.union(set(text_words))\n words = list(words)\n words.sort()\n\n return dict(zip(words, range(len(words))))", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def _initialize_corpus(self):\n vocab = self.vocab # vocab is the word vector\n theta = self.theta # theta is the model parameter\n corpus = self.corpus\n\n for line in corpus:\n for word in line:\n if word not in vocab:\n vocab[word] = init_vector(self.n)\n theta[word] = init_vector(self.n)\n\n if self.verbose:\n print(f\"{len(vocab)} words have been loaded\")", "def main():\n \"\"\"Calculates BM25 and VSM score\"\"\"\n\n queries, docs, term_freq_corpus = io() \n DocQ = DocumentQuery(docs, queries[0].split(\" \"), [], {}, {}, set(), term_freq_corpus)\n #print(queries[0].split(\" \"))\n DocQ.init_doc_query(queries[0].split(\" \"), docs)\n DocQ.init_term_freq()\n for i, d in enumerate(docs):\n bm25 = DocQ.bm25_score(queries[0].split(\" \"), d)\n print(docs[i], bm25)\n\n vsm.document_filenames = {i:d for i, d in enumerate(docs)}\n vsm.N = len(docs)\n vsm.query = queries[0]\n vsm.initialize_terms_and_postings()\n vsm.initialize_document_frequencies()\n vsm.initialize_lengths()\n vsm.do_search()", "def vcode0 (vect):\n return prepKey(AA(fixedPrec(PRECISION))(vect))", "def get_new_sysvec_by_system(filenames_type, icd9_systems, cui_to_icd9_dicts, results):\n \n filename_to_embedding_matrix, idx_to_cui, cui_to_idx, cui_to_icd9_types = generate_overlapping_sets_cui(filenames_type, True, cui_to_icd9_dicts)\n \n # Obtain dictionary between cuis and the ICD9 disease systems they're part of/related to\n cui_to_systems = get_cui_to_systems(cui_to_icd9_types, icd9_systems)\n\n # 
Get list of ICD9 system names in this analysis\n icd9_systems_names = []\n \n for row in icd9_systems: icd9_systems_names.append(row[0]) \n n_of_systems = len(icd9_systems_names)\n \n filename_index = 0\n for filename, embedding_type, _ in filenames_type:\n# # Matrix to convert cui to positions in the relevant filename\n embedding_matrix = filename_to_embedding_matrix[filename]\n \n # Make System Vectors\n systems_sysvec = {}\n systems_n = {}\n systems_correct = {}\n for system in icd9_systems_names:\n systems_sysvec[system] = []\n systems_n[system] = 0.0001\n systems_correct[system] = 0\n\n for cui in cui_to_idx.keys():\n if cui_to_icd9_types[cui]['icd9_type'] == 'diag':\n cui_vec = embedding_matrix[cui_to_idx[cui],:]\n cui_vec = normalize(cui_vec.reshape(1, -1))[0]\n for system in cui_to_systems[cui]:\n systems_sysvec[system].append(cui_vec)\n # Below for generating random set for negative control\n ##rand_system = random.choice(icd9_systems_names)\n ##systems_sysvec[rand_system].append(cui_vec)\n \n for system in icd9_systems_names:\n systems_sysvec[system] = np.array(systems_sysvec[system])\n systems_sysvec[system] = np.mean(systems_sysvec[system], axis=0)\n \n # Calculate accuracies using System Vectors. \n for cui in cui_to_idx.keys():\n if cui_to_icd9_types[cui]['icd9_type'] == 'drug':\n cui_vec = embedding_matrix[cui_to_idx[cui],:]\n #cui_vec = cui_vec/np.linalg.norm(cui_vec) #Normalize\n cui_vec = normalize(cui_vec.reshape(1, -1))[0]\n true_systems = cui_to_systems[cui]\n \n cos_sims = np.zeros(n_of_systems)\n \n # Generate list of cos similarities with the system vectors\n for i in range(n_of_systems):\n system = systems_sysvec.keys()[i]\n system_vec = systems_sysvec[system]\n cos_sim = cosine_similarity([cui_vec], [system_vec])[0,0]\n cos_sims[i] = cos_sim\n \n n = len(true_systems) # Number of systems this cui treats or prevents or 1 if diagnosis\n pred_systems = [icd9_systems_names[i] for i in np.argsort(cos_sims)[n:]]\n \n for system in true_systems:\n systems_n[system] += 1\n if system in pred_systems: systems_correct[system] += 1\n #rand_system = random.choice(icd9_systems_names)\n #if rand_system in pred_systems: systems_correct[system] += 1\n \n system_index = 0\n for system in icd9_systems_names:\n results[system_index + 1][0] = re.sub(\",\", \" \", system)\n results[system_index + 1][filename_index + 1] = '%2.2f' %(100*systems_correct[system]/systems_n[system]) ##, np.std(np.array(systems_dcg[system])))\n results[system_index + 1][-1] = str(int(systems_n[system])) # Number of examples used for this calculation. 
Will be re-written by each file but that's okay as always same\n system_index += 1\n filename_index += 1\n \n return results", "def _finalize(self):\n if self.vcount > 1:\n # skewness = g1 = sqrt(n) M3/(M2^(3/2)) # zero \n # kurtosis = g2 = n M4/M2^2 - 3 # zero for normal\n # sk = (M3/nf)/(sigma**3)\n # ku = (M4/nf)/sigma**4 - 3\n n = self.vcount\n nf = float(n)\n mu2 = self.vm2/nf\n self.vvar = self.vm2/(nf-1)\n try:\n self.vskewness = self.vm3/nf/(mu2**1.5)\n self.vkurtosis = self.vm4/nf/(mu2**2)\n except:\n self.vskewness = 0\n self.vkurtosis = 0\n elif self.vcount == 1:\n self.vvar = 0\n self.vskewness = 0\n self.vkurtosis = 0\n self.dirty = False", "def build_data_vectors(annotations, tweets, Tfidf_vect, adr_lexicon_dict, should_balance_set=True):\n\n def vectorize_word(word):\n \"\"\"gives vectorized value from TfidfVectorizer for the given word\n If the word is not part of vocabulary, 0 will be returned\n\n # Arguments\n word - word to vectorize\n\n # Returns\n vectorized value\n \"\"\"\n if word in Tfidf_vect.vocabulary_:\n i = Tfidf_vect.vocabulary_[word]\n return Tfidf_vect.idf_[i]\n else:\n return 0\n\n def clean_text(text):\n \"\"\"Cleans the text\n This code snippet is taken from https://towardsdatascience.com/multi-label-text-classification-with-scikit-learn-30714b7819c5\n Author: Susan Li\n\n # Arguments\n text - text to clean\n\n # Returns\n cleaned text\n \"\"\"\n text = text.lower()\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\'scuse\", \" excuse \", text)\n text = re.sub('\\W', ' ', text)\n text = re.sub('\\s+', ' ', text)\n text = text.strip(' ')\n return text\n\n X = []\n Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n for i, (k, v) in enumerate(annotations.items()):\n tweet_text = clean_text(tweets[k])\n tokens = word_tokenize(tweet_text)\n\n for annotation_index, annotation in enumerate(v):\n prev_token_adr = False\n\n annotated_text = clean_text(annotation['annotatedText'])\n annotated_text_tokens = word_tokenize(annotated_text)\n\n for index, focus_word in enumerate(tokens):\n focus_vector = []\n\n # for Context feature, get index for 3 surrounding words on each side of focus word\n if program_args.context_feature:\n focus_vector.append(vectorize_word(tokens[index-3]) if (index-3 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-2]) if (index-2 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-1]) if (index-1 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index]))\n focus_vector.append(vectorize_word(tokens[index+1]) if (index+1 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+2]) if (index+2 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+3]) if (index+3 < len(tokens)) else 0)\n\n if program_args.adrlexicon_feature:\n if focus_word in adr_lexicon_dict:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n if program_args.prev_adrlexicon_feature:\n if prev_token_adr:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n # assign class label\n if annotation['semanticType'] == 'ADR' and focus_word in annotated_text_tokens:\n Y.append(ADR_MENTION_CLASS_LABEL)\n 
X.append(focus_vector)\n adr_labels_size += 1\n prev_token_adr = True\n else:\n Y.append(NON_ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n nonadr_labels_size += 1\n prev_token_adr = False\n\n print(\" Dataset size: {}\".format(len(X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n if should_balance_set:\n X, Y = balance_set(X, Y, adr_labels_size, nonadr_labels_size)\n\n X = scipy.sparse.csr_matrix(X)\n return X, Y", "def initialize(self, **kwargs):\n\t\tself.code = kwargs.get('code', False) #check if valid\n\t\tif not self.code:\n\t\t\tself.code = np.random.choice(\n\t\t\t\tself.Ncolors, size=self.codelength, \n\t\t\t\treplace=True, p=self.prior) + 1\n\t\tself.code = np.array(self.code)\n\t\tif self.logging:\n\t\t\tprint \"true code: %s\" % str(self.code)\n\t\t\tprint \"+---------+---------+\"\n\t\tself.step = 0\n\t\tself.combinations = np.zeros((0, self.codelength))\n\t\tself.feedbacks = []\n\t\tself.codepool = self.get_feasible_set()\n\t\tself.end = False\n\t\tself.currentFS = [self.codepool, self.fs_probability(self.codepool), self.step]", "def prepare(self):\r\n if self.varSegment.get() == \"binary\":\r\n self.calculate(\"\")\r\n else:\r\n files = selectFile(multiple=True)\r\n for file in files:\r\n self.calculate(file)", "def _initialize(self):\n self.VT = len(self.corpus.topicDictionary)\n self.VO = len(self.corpus.opinionDictionary)\n self.DT = len(self.corpus)\n self.DO = np.array([len(p.opinionCorpus)\n for p in self.corpus.perspectives], dtype=np.int)\n self.maxDocLengthT = max([p.topicCorpus.maxDocLength\n for p in self.corpus.perspectives])\n self.maxDocLengthO = np.array([p.opinionCorpus.maxDocLength\n for p in self.corpus.perspectives],\n dtype=np.int)\n\n # topics\n self.z = np.zeros((self.DT, self.maxDocLengthT), dtype=np.int)\n self.ndk = np.zeros((self.DT, self.nTopics), dtype=np.int)\n self.nkw = np.zeros((self.nTopics, self.VT), dtype=np.int)\n self.nk = np.zeros(self.nTopics, dtype=np.int)\n self.ntd = np.zeros(self.DT, dtype=np.float)\n\n # opinions\n self.x = np.array([np.zeros((self.DO[i], self.maxDocLengthO[i]),\n dtype=np.int)\n for i, p in enumerate(self.corpus.perspectives)])\n self.nrs = np.zeros((self.nPerspectives, self.nTopics, self.VO),\n dtype=np.int)\n self.ns = np.zeros((self.nPerspectives, self.nTopics), dtype=np.int)\n\n # loop over the words in the corpus\n for d, persp, d_p, doc in self.corpus:\n for w_id, i in self.corpus.words_in_document(doc, 'topic'):\n topic = np.random.randint(0, self.nTopics)\n self.z[d, i] = topic\n self.ndk[d, topic] += 1\n self.nkw[topic, w_id] += 1\n self.nk[topic] += 1\n self.ntd[d] += 1\n\n for w_id, i in self.corpus.words_in_document(doc, 'opinion'):\n opinion = np.random.randint(0, self.nTopics)\n self.x[persp][d_p, i] = opinion\n self.nrs[persp, opinion, w_id] += 1\n self.ns[persp, opinion] += 1\n logger.debug('Finished initialization.')", "def get_vector(self): \n #print(self.state)\n '''\n print(\"\"\"\n Price {}\n Last Price {}\n Last Period Transaction {}\n Last Transaction {}\n Las Value {}\n Last day {}\n Last hour {}\n Last minute {}\n --------------\n Balance {}\n Bag {}\n \"\"\".format(\n self.state['price'],\n self.states[-1]['price'],\n self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'], \n self.balance, \n self.bag, \n )) \n ''' \n self.state_vector = np.array([\n 
self.state['price'],\n self.states[-1]['price'],\n self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'],\n ])\n\n return self.state_vector", "def get_vectors_for_all_docs(docs, vocab):\n docs_vectors = [get_feature_vector(doc, vocab) for doc in docs]\n return np.array(docs_vectors)", "def getVector(self, p):\n vector = {}\n i = 0\n tr = ParseDumpWiki.normName(p)\n if(self.db.isInPage(tr)):\n for w, (idf, docs) in self.db.invertedIndex.items():\n if (p in docs):\n vector[i] = idf * docs[p]\n i += 1\n else:\n freqDist = self.db.transformDocument(wikipedia.page(p).content)\n indexesWords = list(self.db.invertedIndex.keys())\n commonWords = set(indexesWords).intersection(freqDist.keys())\n for w in commonWords:\n idf, docs = self.db.invertedIndex[w]\n vector[indexesWords.index(w)] = idf * freqDist[w]\n return vector" ]
[ "0.6225616", "0.6097219", "0.5992996", "0.5844046", "0.58303875", "0.5777067", "0.5741174", "0.5690402", "0.56594807", "0.56027544", "0.560168", "0.55921", "0.5578438", "0.5565369", "0.55545706", "0.55346024", "0.5503242", "0.5489077", "0.54638463", "0.54570204", "0.5454407", "0.5453997", "0.5444039", "0.54086316", "0.53995943", "0.53967667", "0.5378895", "0.53442574", "0.53350616", "0.5334197", "0.53214824", "0.53035086", "0.53018147", "0.5299823", "0.5291115", "0.5288734", "0.528493", "0.52749217", "0.5273057", "0.5256605", "0.52459335", "0.5242306", "0.5237069", "0.52226025", "0.5213471", "0.52117", "0.52070075", "0.51992947", "0.51909304", "0.5180264", "0.51756316", "0.5174803", "0.517077", "0.5156306", "0.5151502", "0.5139905", "0.5138387", "0.513791", "0.5115823", "0.5113026", "0.51119083", "0.510906", "0.50995713", "0.5083712", "0.5076063", "0.50681794", "0.504375", "0.5042744", "0.5036199", "0.503535", "0.50288886", "0.50284207", "0.5026862", "0.50201815", "0.50034124", "0.49929926", "0.4990735", "0.49892104", "0.49812174", "0.4980004", "0.4976232", "0.49754766", "0.49736053", "0.49721792", "0.49683443", "0.49675655", "0.4965635", "0.4957823", "0.4956618", "0.49566066", "0.4951531", "0.49514708", "0.49304518", "0.49304283", "0.49293905", "0.4923494", "0.49232167", "0.49150544", "0.4915051", "0.49148616" ]
0.56531245
9
Runs the Kmeans algorithm.
def fix(self, pixel_data):
    # :param:`m` is the size of :param:`pixel_data`
    m = len(pixel_data)
    # tempDist stores the distance between training points and codebook vectors
    tempDist = np.zeros([self.K]).reshape(self.K, 1)
    # tempCluster stores the previous cluster composition
    tempCluster = defaultdict(list)
    # mat will contain the cluster numbers to reassign each vector
    mat = np.zeros([m]).reshape(m, 1)
    tempMat = np.ones([m]).reshape(m, 1)
    j = 0
    # initialise clusters
    clusters = self.alternating_bins_initialisation(pixel_data)

    # the algorithm runs until the cluster sets no longer change
    while not np.array_equal(tempMat, mat):
        tempMat = copy.deepcopy(mat)
        # calculate codebook vectors for each cluster
        cb_vectors = self.calculate_cb_vecs(clusters)
        # preserve cluster information
        tempCluster = copy.deepcopy(clusters)
        for key in clusters:  # for each cluster
            for index in range(len(clusters[key])):  # for the length of the cluster
                vector = clusters[key][index]
                for i in range(self.K):
                    # save distances to each codebook vector
                    tempDist[i] = np.c_[np.linalg.norm(vector - cb_vectors[i])]
                # mat[j][0] stores the index of the closest codebook vector for the
                # vector in the jth position in the cluster dictionary
                mat[j][0] = np.c_[np.argmin(tempDist)]
                j += 1
        # reset cluster information
        clusters.clear()
        # reassign training points to clusters according to distance from codebook vectors
        # Note: new clusters are allocated in order of membership occurrence
        while j >= 1:
            for k in tempCluster:
                for idx in range(len(tempCluster[k])):
                    clusters[mat[m - j][0]].append(tempCluster[k][idx])
                    j -= 1

    # update the codebook vectors at the end of the loop
    cb_vectors = self.calculate_cb_vecs(clusters)
    self.cb_vectors = copy.deepcopy(cb_vectors)
    return cb_vectors
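For orientation: the method above is a class excerpt and relies on helpers not shown in this entry (self.K, alternating_bins_initialisation, calculate_cb_vecs), plus `import copy`, `import numpy as np`, and `from collections import defaultdict`. Below is a minimal, self-contained sketch of the same k-means loop in plain NumPy; every name in it is an illustrative assumption, not taken from the dataset:

import numpy as np

def kmeans_sketch(points, k, n_iters=100, seed=0):
    # points: (n, d) array of training vectors; returns (centroids, labels)
    rng = np.random.default_rng(seed)
    centroids = points[rng.choice(len(points), size=k, replace=False)].astype(float)
    labels = None
    for _ in range(n_iters):
        # assignment step: index of the nearest centroid for every point
        dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
        new_labels = dists.argmin(axis=1)
        if labels is not None and np.array_equal(new_labels, labels):
            break  # assignments stopped changing, so the loop has converged
        labels = new_labels
        # update step: move each centroid to the mean of its assigned points
        for c in range(k):
            members = points[labels == c]
            if len(members) > 0:
                centroids[c] = members.mean(axis=0)
    return centroids, labels

A call such as kmeans_sketch(np.random.rand(200, 3), k=4) returns the final codebook (centroids) and a per-point cluster index, mirroring what fix() stores in self.cb_vectors.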
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_algorithm(self):\r\n vectors = self.vectorize_data()\r\n kmeans = KMeans(init='k-means++', n_clusters=self.cluster_amount, n_init=10)\r\n kmeans.fit(vectors)\r\n return self.cluster_tweet(kmeans.labels_)", "def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data.get_dummy()\n clustering = Kmeans(X, K=5, display=False)\n clustering.run()\n print(f\"Number of iterations: {clustering.num_iterations}\\n\")\n\n \"\"\" Test example of clustering_kmeans with unknown number of clusters K \"\"\"\n clustering = Kmeans(X,)\n clustering.silhouette_find_k()\n print(f\"Number of centroids found: {clustering.num_K}\")", "def train(self):\n\n print \"==> Running Kmeans on data set of shape: {}\".format(self.data.shape)\n km = KMeans(n_clusters = self.n_clusters)\n km.fit(self.data.values)\n self.labels = km.labels_\n self.inertia = km.inertia_", "def run_k_means(self):\r\n centroids = self.centroids\r\n\r\n for i in range(self.max_iters):\r\n self.closestcentroids()\r\n self.newcentroids()\r\n\r\n J = 0\r\n X = self.x\r\n m = len(X)\r\n idx = self.index\r\n K = self.K\r\n dim = X.shape[1]\r\n\r\n for num in range(K):\r\n # find the index of all entries where idx==n\r\n indexentries = np.nonzero(idx == num)[0]\r\n # the values in X that have the index in indesxentries\r\n values = X[indexentries]\r\n # using one of the K centroids to do the calculation. K<=2 doesn't\r\n # work here for some reason.\r\n centroid = centroids[num, 0]\r\n J += np.sum((values - centroid) ** 2)\r\n\r\n return [centroids.reshape((1, K, dim)), [X[idx == k].size for k in range(K)], J / m]", "def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans", "def run_kmeans(self, maxiter=200, tol=1e-4, verbose=True):\n iterations = _algorithms.kmeans(self._data, self.means,\n self.kmeans_rs, tol, maxiter)\n\n if verbose:\n if iterations < maxiter:\n print(\"K-means converged after {0} iterations.\"\n .format(iterations))\n else:\n print(\"K-means *didn't* converge after {0} iterations.\"\n .format(iterations))", "def main():\n\n dist = \"Euclidean\"\n path = \"\"\n k_v = 2\n error = []\n k_vals = []\n\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--path\":\n path = sys.argv[i+1]\n if sys.argv[i] == \"--k\":\n k_v = int(sys.argv[i+1])\n if sys.argv[i] == \"[--distance Manhattan]\":\n dist = \"Manhattan\"\n if sys.argv[i] == \"[--distance Minkowski]\":\n dist = \"Minkowski\"\n\n\n training_data = create_data(path)\n\n for k in range(2,10):\n k_vals.append(k)\n if k>2:\n for i in range(len(training_data)):\n training_data[i].remove(training_data[i][-1])\n trained_data, centroids = get_clusters(training_data, k, dist)\n error.append(rms(trained_data, dist))\n plot_error(k_vals, error)\n\n for i in range(len(training_data)):\n training_data[i].remove(training_data[i][-1])\n\n trained_data, centroids = get_clusters(training_data, k_v, dist)\n\n test_clusters(trained_data, centroids)", "def train_KMean(data: np.array, labels: np.array, n_clusters: int)->None:\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n\n # Scale the data so that Euclidian distance makes 
sense\n means = np.mean(data, axis = 0)\n stddevs = np.std(data, axis = 0, ddof = 1)\n\n #print(means)\n #print(stddevs)\n\n data_scaled = np.zeros((n_examples, n_features))\n\n for i in range(n_features):\n data_scaled[:, i] = (data[:,i] - means[i]) / stddevs[i]\n\n study_correlation(data_scaled)\n\n # Initialize the centroids\n idx = np.random.randint(n_examples, size = n_clusters)\n centroids = data_scaled[idx, :]\n\n counter = 0\n\n while True:\n\n distances = np.array([[np.sqrt(np.sum(np.square(example-centroid))) for centroid in centroids] for example in data_scaled])\n centroid_idx = np.argmin(distances, axis = 1)\n old_centroids = centroids\n centroids = update_centroids(data_scaled, centroid_idx, n_examples)\n #displacement = get_displacement(old_centroids, centroids)\n displacement = np.linalg.norm(np.array([old - new for old, new in zip(old_centroids, centroids)]))\n\n #assert np.linalg.norm(np.array([old - new for old, new in zip([1, 2, 3, 4], [5, 6, 7, 8])])) == 8\n\n if counter == 0:\n# print(\"Initial displacement = {}\".format(displacement))\n initial_displacement = displacement\n\n counter += 1\n\n if displacement < (initial_displacement / 10000): break\n\n #print(\"Total number of loops before ending : {}\".format(counter))\n converted_predictions = convert_predictions(centroid_idx)\n accuracy = np.mean([p == l for p, l in zip(converted_predictions, labels)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def K_Means(self, n_clusters: int=150):\n start_time = datetime.datetime.now()\n self.func_log(\"\\n\\tIn K-Measn()\")\n \n kmeans = KMeans(n_clusters = n_clusters)\n kmeans.fit(self.descriptor_list)\n self.visual_words = kmeans.cluster_centers_ \n \n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))", "def start_algorithm(self):\r\n self.kmeans.set_data(self.tweets)\r\n clusters = self.kmeans.start_algorithm()\r\n min_size = len(self.tweets) * 0.005\r\n if min_size < 50:\r\n min_size = 50\r\n max_size = len(self.tweets) * 0.20\r\n\r\n amount = 0\r\n\r\n while amount < len(clusters):\r\n amount = len(clusters)\r\n pool = mp.Pool(self.cores)\r\n new_clusters = pool.starmap(recluster,\r\n zip(clusters, repeat(min_size), repeat(self.guard), repeat(self.function)))\r\n pool.close()\r\n pool.join()\r\n clusters = new_clusters\r\n temp = []\r\n for cluster in clusters:\r\n if isinstance(cluster, Cluster):\r\n temp.append(cluster)\r\n else:\r\n temp += cluster\r\n clusters = temp\r\n return clusters", "def run(self, points, K):\n # Get size\n D, N = points.shape\n\n # DxK array initialiezd with random points\n centroids = points[:, np.random.permutation(N)[:K]]\n\n # Assigments 1xN array\n labels = np.zeros(N)\n\n for it in np.arange(self.niter):\n # 1. Compute distance to all cluster\n #v1 dirty\n distances = np.zeros([K, N])\n for n in np.arange(N):\n for k in np.arange(K):\n distances[k, n] = np.sqrt( (points[:, n] - centroids[:, k])**2 ).sum()\n #distances = np.sqrt(((points - centroids[:, np.newaxis, 0])**2)).sum(axis=0) \n\n # 2. Update assigments\n # v1 dirty\n for n in np.arange(N):\n kmin = 0\n for k in np.arange(1, K):\n if distances[k, n] <= distances[kmin, n]:\n kmin = k\n labels[n] = kmin\n # v2 quicker\n #labels = np.argmin(distances, axis=1)\n\n # 3. 
Update mean\n for k in np.arange(K):\n centroids[:, k] = np.mean(points[:, labels == k], axis=1)\n #np.array([points[closest==k].mean(axis=0) for k in range(centroids.shape[0])])\n\n return centroids, labels", "def run_kmeans(x, nmb_clusters, verbose=False):\n n_data, d = x.shape\n\n # faiss implementation of k-means\n clus = faiss.Clustering(d, nmb_clusters)\n clus.niter = 10\n clus.max_points_per_centroid = 10000000\n res = faiss.StandardGpuResources()\n flat_config = faiss.GpuIndexFlatConfig()\n flat_config.useFloat16 = False\n flat_config.device = 0\n index = faiss.GpuIndexFlatL2(res, d, flat_config)\n\n # perform the training\n clus.train(x, index)\n _, I = index.search(x, 1)\n \n stats = clus.iteration_stats\n losses = np.array([\n stats.at(i).obj for i in range(stats.size())\n ])\n if verbose:\n print('k-means loss evolution: {0}'.format(losses))\n\n return [int(n[0]) for n in I], losses[-1]", "def ex7():\n\n \"\"\"\n ================= Part 1: Find Closest Centroids ====================\n To help you implement K-Means, we have divided the learning algorithm\n into two functions -- find_closest_centroids and computeCentroids. In this\n part, you shoudl complete the code in the find_closest_centroids function.\n \"\"\"\n print('Finding closest centroids.\\n\\n')\n\n # Load an example dataset that we will be using\n with open('ex7/data/ex7data2.pkl', 'rb') as fin:\n X = pickle.load(fin)\n\n # Select an initial set of centroids\n K = 3 # 3 Centroids\n initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])\n\n # Find the closest centroids for the examples using the\n # initial_centroids\n idx = find_closest_centroids(X, initial_centroids)\n\n print('Closest centroids for the first 3 examples: \\n')\n print(idx[0:3])\n print('\\n(the closest centroids should be 0, 2, 1 respectively)\\n')\n\n \"\"\"\n ===================== Part 2: Compute Means =========================\n After implementing the closest centroids function, you should now\n complete the computeCentroids function.\n \n \"\"\"\n print('\\nComputing centroids means.\\n\\n')\n\n # Compute means based on the closest centroids found in the previous part.\n centroids = compute_centroids(X, idx, K)\n\n print('Centroids computed after initial finding of closest centroids: \\n')\n print(centroids)\n print('\\n(the centroids should be\\n')\n print(' [ 2.428301 3.157924 ]\\n')\n print(' [ 5.813503 2.633656 ]\\n')\n print(' [ 7.119387 3.616684 ]\\n)\\n')\n\n \"\"\"\n =================== Part 3: K-Means Clustering ======================\n After you have completed the two functions computeCentroids and\n find_closest_centroids, you have all the necessary pieces to run the\n kMeans algorithm. In this part, you will run the K-Means algorithm on\n the example dataset we have provided.\n \"\"\"\n print('\\nRunning K-Means clustering on example dataset.\\n\\n')\n\n # Load an example dataset\n with open('ex7/data/ex7data2.pkl', 'rb') as fin:\n X = pickle.load(fin)\n\n # Settings for running K-Means\n K = 3\n max_iters = 10\n\n \"\"\"\n For consistency, here we set centroids to specific values\n but in practice you want to generate them automatically, such as by\n settings them to be random examples (as can be seen in\n kmeans_init_centroids).\n \"\"\"\n initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])\n\n # Run K-Means algorithm. 
The 'true' at the end tells our function to plot\n # the progress of K-Means\n centroids, idx = run_kmeans(X, initial_centroids, max_iters, True)\n print('\\nK-Means Done.\\n\\n')\n\n \"\"\"\n ============= Part 4: K-Means Clustering on Pixels ===============\n In this exercise, you will use K-Means to compress an image. To do this,\n you will first run K-Means on the colors of the pixels in the image and\n then you will map each pixel on to it's closest centroid.\n \n You should now complete the code in kmeans_init_centroids.py\n \"\"\"\n\n print('\\nRunning K-Means clustering on pixels from an image.\\n\\n')\n\n # Load an image of a bird\n A = plt.imread('ex7/data/bird_small.png')\n # A = A / 255; # Divide by 255 so that all values are in the range 0 - 1\n\n # Size of the image\n img_size = A.shape\n\n # Reshape the image into an Nx3 matrix where N = number of pixels.\n # Each row will contain the Red, Green and Blue pixel values\n # This gives us our dataset matrix X that we will use K-Means on.\n X = np.reshape(A, (img_size[0] * img_size[1], 3))\n\n # Run your K-Means algorithm on this data\n # You should try different values of K and max_iters here\n K = 16\n max_iters = 10\n\n # When using K-Means, it is important the initialize the centroids\n # randomly.\n # You should complete the code in kmeans_init_centroids.py before proceeding\n initial_centroids = kmeans_init_centroids(X, K)\n\n # Run K-Means\n [centroids, idx] = run_kmeans(X, initial_centroids, max_iters)\n\n \"\"\"\n ================= Part 5: Image Compression ======================\n In this part of the exercise, you will use the clusters of K-Means to\n compress an image. To do this, we first find the closest clusters for\n each example. After that, we \n \"\"\"\n print('\\nApplying K-Means to compress an image.\\n\\n')\n\n # Find closest cluster members\n idx = find_closest_centroids(X, centroids)\n\n # Essentially, now we have represented the image X as in terms of the\n # indices in idx.\n\n # We can now recover the image from the indices (idx) by mapping each pixel\n # (specified by it's index in idx) to the centroid value\n X_recovered = centroids[idx, :]\n\n # Reshape the recovered image into proper dimensions\n X_recovered = np.reshape(X_recovered, (img_size[0], img_size[1], 3))\n\n # Display the original image\n plt.close()\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.imshow(A)\n ax1.set_title('Original')\n\n # Display compressed image side by side\n ax2.imshow(X_recovered)\n ax2.set_title('Compressed, with {:d} colors.'.format(K))\n plt.show()", "def computeKMeans(self, points, k):\n centroids = self.init_centroids(points,k)\n \n for i in range(5):\n closest = self.closestCentroids(points,centroids)\n centroids = self.updateCentroids(points, closest ,centroids)\n\n return centroids", "def main(seed, numpoints, dimensions, num_centres, fragments, mode, iterations,\n epsilon, arity, use_storage):\n start_time = time.time()\n\n # Generate the data\n fragment_list = []\n # Prevent infinite loops in case of not-so-smart users\n points_per_fragment = max(1, numpoints // fragments)\n\n for l in range(0, numpoints, points_per_fragment):\n # Note that the seed is different for each fragment.\n # This is done to avoid having repeated data.\n r = min(numpoints, l + points_per_fragment)\n\n fragment_list.append(\n generate_fragment(r - l, dimensions, mode, seed + l, use_storage)\n )\n\n compss_barrier()\n print(\"Generation/Load done\")\n initialization_time = time.time()\n print(\"Starting kmeans\")\n\n # Run kmeans\n 
centres = kmeans_frag(fragments=fragment_list,\n dimensions=dimensions,\n num_centres=num_centres,\n iterations=iterations,\n seed=seed,\n epsilon=epsilon,\n arity=arity)\n compss_barrier()\n print(\"Ending kmeans\")\n kmeans_time = time.time()\n\n # Run again kmeans (system cache will be filled)\n print(\"Second kmeans\")\n centres = kmeans_frag(fragments=fragment_list,\n dimensions=dimensions,\n num_centres=num_centres,\n iterations=iterations,\n seed=seed,\n epsilon=epsilon,\n arity=arity)\n compss_barrier()\n print(\"Ending second kmeans\")\n kmeans_2nd = time.time()\n\n print(\"-----------------------------------------\")\n print(\"-------------- RESULTS ------------------\")\n print(\"-----------------------------------------\")\n print(\"Initialization time: %f\" % (initialization_time - start_time))\n print(\"Kmeans time: %f\" % (kmeans_time - initialization_time))\n print(\"Kmeans 2nd round time: %f\" % (kmeans_2nd - kmeans_time))\n print(\"Total time: %f\" % (kmeans_2nd - start_time))\n print(\"-----------------------------------------\")\n centres = compss_wait_on(centres)\n print(\"CENTRES:\")\n print(centres)\n print(\"-----------------------------------------\")", "def k_means(prev_args, data_set_obj):\n parser = argparse.ArgumentParser(description='kmeans')\n parser.add_argument('--clusters', required=True,\n help='The number of clusters to use for kmeans.', type=int)\n parser.add_argument('--iterations', default=300,\n help='The maximum number of iterations for the algorithm.', type=int)\n parser.add_argument('--metric', default='euclidean',\n help='The distance metric to use.')\n args, unknown = parser.parse_known_args()\n kmeans = KMeans(prev_args.rank, args.clusters, args.iterations, args.metric)\n kmeans.fit_predict(data_set_obj.gallery_idx, data_set_obj)\n return kmeans.ranked_acc", "def __kmeans(self, points):\n # Prepare initial centers using K-Means++ method.\n initial_centers = kmeans_plusplus_initializer(points, 10).initialize()\n # Create instance of K-Means algorithm with prepared centers.\n self.__kmeans_instance = kmeans(sample, initial_centers)\n # Run cluster analysis and obtain results.\n kmeans_instance.process()\n kclusters = kmeans_instance.get_clusters()\n kcenters = kmeans_instance.get_centers()\n return kclusters, kcenters", "def run_various_Ks(x, K):\n m = len(x) # length of data points\n min_list = [] # list that will contain minimum costs\n Ks = [i for i in range(1,K+1)] # values of K's\n\n for i in range(1, K+1):\n # runs algorithm with different values of K\n kmeans = KMeans(n_clusters=i, random_state=0).fit(x)\n minval = kmeans.inertia_\n print(minval)\n min_list.append(minval) # appends minimum cost \n\n # Plotting J vs. K to choose best value of K\n plt.plot(Ks, min_list)\n plt.plot(Ks, min_list, '-o')\n plt.xlabel('K (# of clusters)')\n plt.ylabel('Cost function J')\n plt.title('J vs. 
K plot')\n plt.show()", "def KMeansCluster(matrix):\n\n # Possibly need to scale the data first\n data = scale(matrix)\n\n # Approximate the number of clusters using c = root(n/2)\n # num_clusters = int(sqrt(len(matrix) / 2))\n num_clusters = 5\n number_init = 10 # Default\n number_iter = 300\n num_cpus = 2\n\n print \"===================\"\n print \"Training KMeans with (num_clusters, num_init, num_iters, num_cpus)\"\n print num_clusters, number_init, number_iter, num_cpus\n\n # estimator = KMeans(init='k-means++', n_clusters = num_clusters, n_init = number_init)\n # estimator.fit(data)\n # clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, \n # init='k-means++', n_jobs = num_cpus)\n clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, n_jobs = num_cpus)\n\n\n return clusters", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def kmeans(data, initial=None, K=2, distfn_method='L2', centroidfn_method='mean',\n VERBOSE=True):\n def assignment(data, assigns, means, distfn):\n \"\"\" For each observation A in DATA, assign A to the closest\n mean in MEANS, by mutating ASSIGNS.\n \"\"\"\n for i in xrange(data.shape[0]):\n bestidx, mindist = None, None\n for idx, mean in enumerate(means):\n dist = distfn(data[i,:], mean)\n if bestidx == None or dist < mindist:\n bestidx = idx\n mindist = dist\n assigns[i] = bestidx\n return assigns\n def update_means(data, assigns, means, distfn, centfn):\n \"\"\" For the clustering specified by ASSGNS, compute new means\n by mutating MEANS.\n \"\"\"\n for i in xrange(len(means)):\n rows = data[np.where(assigns == i)]\n means[i] = centfn(rows)\n return means\n if distfn_method == 'L2':\n distfn = lambda a,b: np.linalg.norm(a-b)\n elif distfn_method == 'vardiff':\n distfn = vardiff\n else:\n distfn = lambda a,b: np.linalg.norm(a-b)\n if centroidfn_method == 'mean':\n centroidfn = np.mean\n elif centroidfn_method == 'median':\n centroidfn = np.median\n else:\n centroidfn = np.mean\n\n if initial == None:\n initial_idxs = []\n _len = range(len(data))\n for _ in xrange(K):\n _i = random.choice(_len)\n while _i in initial_idxs:\n _i = random.choice(_len)\n initial_idxs.append(_i)\n initial = data[initial_idxs]\n if VERBOSE:\n print \"...initial means:\", initial\n means = initial\n assigns = np.zeros(data.shape[0])\n done = False\n iters = 0\n while not done:\n if VERBOSE:\n print \"...kmeans iteration\", iters\n # 1.) Assignment of data to current means\n prev_assigns = assigns.copy()\n assigns = assignment(data, assigns, means, distfn)\n # 2.) Halt if assignments don't change\n if np.all(np.equal(prev_assigns, assigns)):\n done = True\n else:\n # 3.) 
Re-compute means from new clusters\n means = update_means(data, assigns, means, distfn, centroidfn)\n iters += 1\n return assigns", "def k_means(data, k = 2, centroids = None, max_iters = 100) :\r\n if centroids == None :\r\n centroids = get_random_centroids(data, k)\r\n \r\n elif len(centroids) != k :\r\n AssertionError(\"Número de centroides no equivale a k\")\r\n \r\n for i in range(max_iters) :\r\n old_centroids = centroids\r\n \r\n assigned_centroids = assign_centroids(data, centroids)\r\n \r\n # Sum the data by cluster\r\n centroids = [[0]*data.shape[1]] * k\r\n values_in_centroid = [0] * k\r\n\r\n for i in range(k) :\r\n assigned_centroids_aux = np.column_stack([assigned_centroids]*data.shape[1])==i\r\n centroids[i] = np.sum(np.multiply(data, assigned_centroids_aux), axis = 0)\r\n values_in_centroid[i] = np.sum(assigned_centroids == i)\r\n\r\n # Mean\r\n for i in range(k) :\r\n if values_in_centroid[i] > 0 :\r\n centroids[i] = centroids[i]/values_in_centroid[i]\r\n\r\n centroids = np.stack(centroids, axis=0)\r\n\r\n error = sum([np.linalg.norm(centroids[i] - old_centroids[i]) for i in range(k)])\r\n if(error < 1e-5) : break\r\n \r\n \r\n return centroids", "def train_kmeans(encodings, k):\n kmean = KMeans(n_clusters=k)\n\n model = kmean.fit(encodings)\n\n return model", "def kmeans_007():\n n_centroids = 5000\n s = 50\n crop = 200\n # Originally, 1600 centroids for 400,000 patches, or 250 patches per centroid\n # 800000 / 5000 = will give us 160 patches per centroid\n n_patches = 800000\n rf_size = 20\n # 31 x 31 = 961 patches per image, which is 10x more patches than the original settings\n # If we set stride 2, then it's 16 x 16 patches = 256, only twice as many patches\n stride = 2\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n images = train_x_crop_scale.transform()\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n patches = patch_extractor.transform(images)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_007'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_007.npy', stride_size=stride, memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, parallel_estimator=True)\n\n \"\"\"\n wrapper.fit(train_x, train_y)\n\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n test_images = test_x_crop_scale.transform()\n test_x = kmeans_generator.transform(test_images, save_to_file='data/data_test_kmeans_features_007.npy'.format(n_centroids), memmap=True)\n res = wrapper.predict(test_x)\n sub = classes.Submission(res)\n sub.to_file('sub_kmeans_006.csv')\n \"\"\"", "def kMeans(d, k):\n #First get the random centroids from the data\n newCentroids = getRandomCentroids(d, k)\n #newCentroids = [[-2.0, 1.0], [-2.0, -2.0], [2.0, 2.0], [0.0, 0.0]]\n\n #Get the clusters from these random centroids\n clusters = initiateCentroid(d, newCentroids, k)\n oldCentroids = []\n\n counter = 0\n #While the old centroids are not equal to the new ones\n while oldCentroids 
!= newCentroids:\n #old is equal to new\n oldCentroids = newCentroids\n #Calculate the new centroids\n k, newCentroids = calcCentroids(d, clusters)\n #Calculate the new clusters\n clusters = initiateCentroid(d, newCentroids, k)\n #Count how many iterations\n counter += 1\n\n return counter, clusters", "def kmeans_002():\n train_mmap_path = 'data/train_cropped_150_scale_15.memmap'\n test_mmap_path = 'data/test_cropped_150_scale_15.memmap'\n\n if not os.path.exists('data/train_cropped_150.memmap'):\n classes.crop_to_memmap(150, training=True)\n if not os.path.exists('data/test_cropped_150.memmap'):\n classes.crop_to_memmap(150, training=False)\n\n if not os.path.exists(train_mmap_path):\n logger.info(\"Prepping training images\")\n pre_scale = np.memmap('data/train_cropped_150.memmap', mode='r', shape=(N_TRAIN, 150, 150, 3))\n trainX = classes.rescale_memmap(15, pre_scale, train_mmap_path)\n del pre_scale\n else:\n trainX = np.memmap(train_mmap_path, mode='r', shape=(N_TRAIN, 15, 15, 3))\n\n if not os.path.exists(test_mmap_path):\n logger.info(\"Prepping testing images\")\n pre_scale = np.memmap('data/test_cropped_150.memmap', mode='r', shape=(N_TEST, 150, 150, 3))\n testX = classes.rescale_memmap(15, pre_scale, test_mmap_path)\n del pre_scale\n else:\n testX = np.memmap(test_mmap_path, mode='r', shape=(N_TEST, 15, 15, 3))\n\n\n n_jobs = multiprocessing.cpu_count()\n\n if not os.path.exists('data/mdl_kmeans_002_centroids.npy'):\n logger.info(\"Pretraining KMeans feature encoder\")\n km = models.KMeansFeatures.KMeansFeatures(rf_size=5, num_centroids=1600, num_patches=400000)\n km.fit(trainX)\n km.save_to_file('mdl_kmeans_002')\n else:\n logger.info(\"Loading KMeans feature encoder from file\")\n km = models.KMeansFeatures.KMeansFeatures.load_from_file('mdl_kmeans_002', rf_size=5)\n\n # Takes waaaay too long to finish. At least an hour per tree. 
Clearly too\n # many dimensions\n\n # Instead ran with ridge rf manually\n mdl = models.RandomForest.KMeansRandomForest(km, trainX, testX, n_jobs=n_jobs, cv_sample=0.5)\n # mdl.run('cv')\n mdl.run('train')\n res = mdl.run('predict')\n np.save('submissions/sub_kmeans_rf_002.npy', res)\n output = classes.Submission(res)\n output.to_file('sub_kmeans_rf_002.csv')", "def specKmeans(self, n_clusters, spectralptsfile):\n self.classifier = \"Spectral-KMeans\"\n self.inptsfile = spectralptsfile \n points = self.loadPoints()\n points = points[self.validhit_bool, :]\n # points = self.randomForestTransform(points, 5, 10)\n\n print \"Running KMeans clustering on spectral data only ...\"\n points = StandardScaler(copy=False).fit_transform(points)\n mbk = MiniBatchKMeans(n_clusters=n_clusters)\n mbk.fit(points)\n self.labels[self.validhit_bool] = mbk.labels_", "def kmeans_005():\n n_patches_vals = [500000, 600000, 700000]\n include_test_images = [False, True]\n\n scores = []\n for n_patches in n_patches_vals:\n for incl in include_test_images:\n s = 15\n crop = 150\n n_centroids = 1600\n rf_size = 5\n logger.info(\"Training with n_patches {}, with test images {}\".format(n_patches, incl))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n result_path='data/data_test_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_005_patches_{}_test{}'.format(n_patches, incl),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n if incl:\n test_images = test_x_crop_scale.transform()\n images = np.vstack([images, test_images])\n logger.info(\"Extracting patches from images ndarray shape: {}\".format(images.shape))\n\n patches = patch_extractor.transform(images)\n logger.info(\"Patches ndarray shape: {}\".format(patches.shape))\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n # Reload the original images\n images = train_x_crop_scale.transform()\n logger.info(\"Generating features on images ndarray shape: {}\".format(images.shape))\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_005_patches_{}_test_{}.npy'.format(n_patches, incl), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n\n score = (n_patches, incl, wrapper.cv_scores)\n logger.info(\"Score: {}\".format(score))\n scores.append(score)\n\n del wrapper\n gc.collect()", "def kmeans(img, k):\n # Randomly pick k pixels as initial cluster \"means\"\n # Random indices are picked without replacement; to avoid duplicate means\n n = len(img) \n rand_ind = np.random.choice(n, size=k, replace=False) \n means = img[rand_ind, :].astype(np.float32) \n\n print \"Using Kmeans..\"\n return kmeans_driver(img, means)", "def kmeans(self,mydata, k=None, centroids=None, steps=200):\n\t\tif centroids is not None and k is not None:\n\t\t\tassert(k == len(centroids))\n\t\telif 
centroids is not None:\n\t\t\tk = len(centroids)\n\t\telif k is not None:\n\t\t\t# Forgy initialization method: choose k data points randomly.\n\t\t\tcentroids = mydata[np.random.choice(np.arange(len(mydata)), k, False)]\n\t\telse:\n\t\t\traise RuntimeError(\"Need a value for k or centroids.\")\n\n\t\tfor _ in range(max(steps, 1)):\n\t\t\t# Squared distances between each point and each centroid.\n\t\t\tsqdists = scipy.spatial.distance.cdist(centroids, mydata, 'sqeuclidean')\n\n\t\t\t# Index of the closest centroid to each data point.\n\t\t\tclusters = np.argmin(sqdists, axis=0)\n\n\t\t\tnew_centroids = self.cluster_centroids(mydata, clusters, k)\n\t\t\tif np.array_equal(new_centroids, centroids):\n\t\t\t\tbreak\n\n\t\t\tcentroids = new_centroids\n\n\t\treturn clusters", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def kmeans(boxes, k, dist=np.median,seed=1):\n rows = boxes.shape[0]\n distances = np.empty((rows, k)) ## N row x N cluster\n last_clusters = np.zeros((rows,))\n np.random.seed(seed)\n # initialize the cluster centers to be k items\n clusters = boxes[np.random.choice(rows, k, replace=False)]\n aveIOU=0.0\n while True:\n # 为每个点指定聚类的类别(如果这个点距离某类别最近,那么就指定它是这个类别)\n for icluster in range(k):\n distances[:,icluster] = 1 - iou(clusters[icluster], boxes)\n nearest_clusters = np.argmin(distances, axis=1)\n\n for i in range(rows ):\n aveIOU=aveIOU+1-distances[i,nearest_clusters[i]]\n aveIOU=aveIOU/rows\n\n\t# 如果聚类簇的中心位置基本不变了,那么迭代终止。\n if (last_clusters == nearest_clusters).all():\n break\n # 
重新计算每个聚类簇的平均中心位置,并它作为聚类中心点\n for cluster in range(k):\n clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0)\n last_clusters = nearest_clusters\n\n return clusters,nearest_clusters,distances,aveIOU", "def k_means_iter(X, K, n_iter):\n cost=[]\n centroids_dict={}\n for i in range (n_iter):\n groups, K_clusters=k_means(X, K)\n cost.append(compute_cost(X, groups, K_clusters))\n centroids_dict['groups'+str(i)]=groups\n centroids_dict['K_clusters'+str(i)]=K_clusters\n opt_cost_index=cost.index(min(cost))\n cluster_groups=centroids_dict['groups'+str(opt_cost_index)]\n cluster_centroids=centroids_dict['K_clusters'+str(opt_cost_index)]\n return cluster_groups,cluster_centroids", "def kmeans_006():\n n_centroids_vals = [1000, 2000, 2500, 3000]\n scores = []\n\n for n_centroids in n_centroids_vals:\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n logger.info(\"Training with n_centroids {}\".format(n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n result_path='data/data_test_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_006_centroids_{}'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_006_centroids_{}.npy'.format(n_centroids), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n\n score = (n_centroids, wrapper.cv_scores)\n logger.info(\"Scores: {}\".format(score))\n scores.append(score)\n\n del wrapper\n gc.collect()", "def kmeans(self,initial_centers=None,tolerance=0):\r\n\t\tcenters = initial_centers \\\r\n\t\t\t\tif initial_centers is not None else self.centers()\r\n\t\t\r\n\t\titerations = 0\r\n\t\twhile True:\r\n\t\t\titerations = iterations + 1\r\n\t\t\tclusters = dict([(i,[]) for i in xrange(self.k)])\r\n\t\t\tfor a in self.data:\r\n\t\t\t\tcc = self.closest_center(centers, a)\r\n\t\t\t\tcenter = cc[0]\r\n\t\t\t\tlabel = center[0]\r\n\t\t\t\tclusters[label].append(a)\r\n\r\n\t\t\tfor label, data_points in clusters.iteritems():\r\n\t\t\t\tif not data_points:\r\n\t\t\t\t\tcenters = self.centers()\r\n\t\t\t\t\tbreak # restart at random\r\n\r\n\t\t\t\tcenter = centers[label]\r\n\t\t\t\told_center_x, old_center_y = center[1], center[2]\r\n\t\t\t\tnew_center_x, new_center_y = self.mean(data_points)\r\n\r\n\t\t\t\tif abs(new_center_x - old_center_x) <= tolerance \\\r\n\t\t\t\t\t\tand abs(new_center_y - old_center_y) <= tolerance:\r\n\t\t\t\t\t# converged\r\n\t\t\t\t\treturn {\r\n\t\t\t\t\t\t\t'iterations' : iterations,\r\n\t\t\t\t\t\t\t'clusters' : clusters, \\\r\n\t\t\t\t\t\t\t'centers' : centers }\r\n\r\n\t\t\t\t# update center to mean\r\n\t\t\t\tcenter[1] = 
new_center_x\r\n\t\t\t\tcenter[2] = new_center_y", "def kmeans(matrix, k) :\n clusters = [0 for i in range(k)]\n lastcluster=[0 for i in range(k)]\n min_=0\n max_=matrix.__len__()\n print \"len\",max_\n cluster = [0 for i in range(k)]\n for i in range(k) :\n\n cluster[i]=int(random.random() * (max_ - min_) + min_)\n clusters[i]=matrix[cluster[i]]\n lastcluster[i]=matrix[cluster[i]]\n #print cluster[i],clusters[i]\n\n lastmatchs = [ [] for i in range(k)]\n\n \"\"\" initial the round is 100\"\"\"\n rounds = 100\n while rounds > 0 :\n matchs = [ [] for i in range(k)]\n print 'round \\t',rounds\n for i in range(len(matrix)) :\n bestmatch_cluster = None\n\n min_distance = 100000\n for j in range(k) :\n dis = pearson_distance(clusters[j], matrix[i])\n if dis < min_distance :\n min_distance = dis\n bestmatch_cluster = j\n matchs[bestmatch_cluster].append(i)\n\n print_matchs(matchs)\n #print_matchs(lastmatchs)\n\n\n\n if matchs == lastmatchs : break\n #if cluster== lastcluster :break\n lastmatchs = [[ item for item in matchs[i] ] for i in range(k)]\n\n #move the centroids to the average of their members\n for j in range(k) :\n avg = [0.0 for i in range(len(matrix[0])) ]\n for m in matchs[j] :\n vec = matrix[m]\n for i in range(len(matrix[0])) :\n avg[i] += vec[i]\n avg = [ item / len(matrix[0]) for item in avg]\n clusters[j] = avg\n lastcluster=clusters\n\n\n rounds -= 1\n print \"rounds:\",100-rounds\n print \"result:\"\n for i in matchs:\n print i", "def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )", "def KMeans(dataTable, k, epsilon=0.00001, srcDims = 1000000000000000, iters=20, normData = False):\n #load up the configuration\n kmOptions = KMeansConfig(dataTable,k,epsilon,srcDims)\n \n \n #load and format the table for use.\n data = loadMatrix(dataTable)[:,:kmOptions['sourceDims']]\n \n #check if we should normalise the data (this is really quick and dirty, replace it with something better)\n if normData:\n dmax = amax(data)\n dmin = amin(data)\n data = (data-dmin)/(dmax-dmin+0.00000001)\n \n \n #make our starting point solutions from the dataset\n solutions = [array(random.sample(data,k)) for i in xrange(iters)]\n \n #chunk solutions if necessary\n for i in xrange(len(solutions)):\n sol = []\n while len(solutions[i]) > 
kmOptions['chunkSize']:\n sol.append(solutions[i][:kmOptions['chunkSize']])\n solutions[i] = solutions[i][kmOptions['chunkSize']:]\n sol.append(solutions[i])\n solutions[i] = sol\n \n #create our chunked problem data\n dataChunks = []\n while len(data) > kmOptions['chunkSize']:\n dataChunks.append(data[:kmOptions['chunkSize']])\n data = data[kmOptions['chunkSize']:]\n dataChunks.append(data)\n kNorm = (len(dataChunks)-1)+len(dataChunks[-1])/float(len(dataChunks[0]))\n \n #create the CUDA kernels\n program = SourceModule(open(KernelLocation+\"KMEANS_LABEL.nvcc\").read())\n prg = program.get_function(\"KMEANS_LABEL\")\n program = SourceModule(open(KernelLocation+\"KMEANS_UPDATE.nvcc\").read())\n prg2 = program.get_function(\"KMEANS_UPDATE\")\n t0 = time.time()\n \n #store the resultant performance of each solution here\n results = []\n finalSols = []\n \n #make GPU allocations and support variables\n total = 0.\n dists = [numpy.zeros(kmOptions['chunkSize']).astype(numpy.float32)+10000000000000000. for i in xrange(len(dataChunks))] #this is used as an intermediate step\n labels = [numpy.zeros(kmOptions['chunkSize']).astype(numpy.uint32) for i in xrange(len(dataChunks))] #this is used as an intermediate step\n data_gpu = drv.mem_alloc(dataChunks[0].nbytes)\n k_gpu = drv.mem_alloc(solutions[0][0].nbytes)\n labels_gpu = drv.mem_alloc(labels[0].nbytes)\n dists_gpu = drv.mem_alloc(dists[0].nbytes)\n \n #calculate KMeans\n for sol in solutions:\n t0 = time.time()\n for i in xrange(10000):\n #Step 1: find all the closest labels\n for i in xrange(len(sol)):\n #copy in blank distances, labels, and the label coordinates\n drv.memcpy_htod(k_gpu, sol[i])\n for j in xrange(len(dataChunks)):\n drv.memcpy_htod(data_gpu, dataChunks[j])\n drv.memcpy_htod(labels_gpu, labels[j])\n drv.memcpy_htod(dists_gpu, dists[j])\n prg(k_gpu,\n data_gpu,\n kmOptions[\"dimensions\"],\n labels_gpu,\n dists_gpu,\n kmOptions['k'],\n kmOptions['dataSize'],\n kmOptions['chunkSize'],\n numpy.int64(i*kmOptions['chunkSize']), #k offset\n numpy.int64(j*kmOptions['chunkSize']), #data offset\n kmOptions['maxThreads'],\n block=kmOptions['block'],\n grid=kmOptions['grid'])\n drv.memcpy_dtoh(labels[i], labels_gpu)\n #Step 2: find the new averages\n old_sol = [s.copy() for s in sol]\n for i in xrange(len(sol)):\n #load up a blank set of k matrices\n drv.memcpy_htod(k_gpu, sol[i]*0.)\n for j in xrange(len(dataChunks)):\n drv.memcpy_htod(data_gpu, dataChunks[j])\n drv.memcpy_htod(labels_gpu, labels[j])\n prg2(k_gpu,\n data_gpu,\n kmOptions[\"dimensions\"],\n labels_gpu,\n kmOptions['k'],\n kmOptions['dataSize'],\n kmOptions['chunkSize'],\n numpy.int64(i*kmOptions['chunkSize']), #label offset\n numpy.int64(j*kmOptions['chunkSize']), #data offset\n kmOptions['maxThreads'],\n block=kmOptions['block'],\n grid=kmOptions['grid'])\n drv.memcpy_dtoh(sol[i], k_gpu)\n sol[i] /= kNorm #final normalisation\n #Step 3: check that the update distance is larger than epsilon\n total = 0.\n for j in xrange(len(sol)):\n tmp = sol[j]-old_sol[j]\n tmp = tmp*tmp\n total += sum([sum(t**0.5) for t in tmp])\n if total/kmOptions['dataSize'] < kmOptions['eps']:\n break\n print \"solution done in \",time.time()-t0\n results.append((total,len(results)))\n finalSols.append(numpy.concatenate(sol)[:kmOptions['dataSize']])\n results.sort()\n return finalSols[results[0][1]]", "def kmeans_001(fit_centroids=False):\n trainX = np.memmap('data/train_cropped_150.memmap', mode='r', shape=(N_TRAIN, 150, 150, 3))\n # Not used yet\n testX = np.memmap('data/test_cropped_150.memmap', 
mode='r', shape=(N_TEST, 150, 150, 3))\n\n if fit_centroids:\n km = models.KMeansFeatures.KMeansFeatures(rf_size=6, num_centroids=1600, num_patches=400000)\n km.fit(trainX)\n\n km.save_to_file('mdl_kmeans_ridge_rf_001')\n # t0 = time.time()\n # pickle.dump(km, open('data/kmeans_centroids.pkl', mode='wb'))\n # print 'Pickling the KMeansFeatures object took {0} seconds'.format(time.time() - t0)\n else:\n km = models.KMeansFeatures.KMeansFeatures.load_from_file('mdl_kmeans_ridge_rf_001')\n # km = pickle.load(open('data/kmeans_centroids.pkl'))\n\n n = 10000\n\n train_x = km.transform(trainX[0:n, :])\n train_y = classes.train_solutions.data[0:n, :]\n # train_x = km.transform(trainX)\n # train_y = classes.train_solutions.data\n\n logger.info(\"Train x shape: {}\".format(train_x.shape))\n logger.info(\"Train y shape: {}\".format(train_y.shape))\n\n kf = KFold(n, n_folds=2, shuffle=True)\n\n for train, test in kf:\n # clf = models.Ridge.RidgeRFEstimator()\n # clf.rf_rgn = RandomForestRegressor(n_estimators=250, n_jobs=4, verbose=3)\n clf = RandomForestRegressor(n_estimators=20, n_jobs=4, verbose=3, random_state=0, oob_score=True)\n clf.fit(train_x[train], train_y[train])\n res = clf.predict(train_x[test])\n classes.rmse(train_y[test], res)", "def kmeans(img):\n max_iter = 10\n max_change_rate = 0.02\n dist = sys.float_info.max\n\n clustermask = np.zeros((h1, w1, 1), np.uint8)\n result = np.zeros((h1, w1, 3), np.uint8)\n\n # initializes each pixel to a cluster\n # iterate for a given number of iterations or if rate of change is\n # very small\n initialize(img)\n i = 0\n while i < max_iter and dist > max_change_rate:\n assign_to_current_mean(img, result, clustermask)\n clustermask = update_mean(img, clustermask)\n i += 1\n refill_real(img, result, clustermask, cluster_colors)\n return result", "def kmeans_004():\n crops = [200] # Should probably also add 250\n scales = [30, 50] # Scaling is probably the most important part here\n\n scores = []\n for s in scales:\n crop = 200\n n_centroids = 1600\n n_patches = 400000\n # rf_size = int(round(s * .2))\n rf_size = 10\n logger.info(\"Training with crop {}, scale {}, patch size {}, patches {}, centroids {}\".format(crop, s, rf_size, n_patches, n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n # spherical generator\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_004_scale_{}_rf_{}'.format(s, rf_size),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n logger.info(\"Images ndarray shape: {}\".format(images.shape))\n patches = patch_extractor.transform(images)\n logger.info(\"Patches ndarray shape: {}\".format(patches.shape))\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_004_scale_{}_rf_{}.npy'.format(s, rf_size), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n logger.info(\"Train X ndarray shape: {}\".format(train_x.shape))\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n 
scores.append((s, wrapper.cv_scores))\n del wrapper\n gc.collect()", "def k_means_multiple(self, K):\r\n self.K = K\r\n table = []\r\n\r\n for numberoftimes in range(self.tries):\r\n self.randomcentroids()\r\n try:\r\n atry = self.run_k_means()\r\n table.append(atry)\r\n except ValueError:\r\n pass\r\n\r\n c = ['centroid position', 'how many for each', 'J']\r\n\r\n self.table = pd.DataFrame(table, columns=c).sort_index(by=['J']).head()", "def run_evaluation(self, n_runs=1, n_points=1000, n_iterations=1, min_n_components=2, max_n_components=25,\n\t\t\t\t\t n_splits=3, save_data=False, file_label='',n_microstates=None, all_methods=True,\n\t\t\t\t\t assign_transition_points=True):\n\n\t\tif self.presampled_data is not None:\n\t\t\tsampled_data = self.presampled_data[0]\n\t\t\ttrue_clustering = self.presampled_data[1]\n\t\t\tn_runs = sampled_data.shape[0]\n\n\t\tself.cluster_score_ami_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_fm_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_vm_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tdata = self.toy_model_.sample(3)\n\n\t\t# Create free energy estimators\n\t\tgmm_FE = GMM_FE.FreeEnergyClustering(data, min_n_components=min_n_components, max_n_components=max_n_components,\n\t\t\t\t\t\t\t\t\t x_lims=self.x_lims_, n_grids=self.n_grids_, stack_landscapes=False,\n\t\t\t\t\t\t\t\t\t n_splits=n_splits, n_iterations=n_iterations,convergence_tol=self.convergence_tol_,\n\t\t\t\t\t\t\t\t\t verbose=self.verbose_)\n\n\t\tkm = kmc.KMeansCluster(min_n_components, max_n_components)\n\t\taw = awc.AWCluster(min_n_components, max_n_components)\n\t\tspectral = sc.SpectralCluster(min_n_components, max_n_components)\n\n\t\tall_data = []\n\t\tfor i_run in range(n_runs):\n\t\t\tprint(\"Run: \"+str(i_run+1)+'/'+str(n_runs))\n\n\t\t\tif self.presampled_data is None:\n\t\t\t\t# Sample data\n\t\t\t\tdata = self.toy_model_.sample(n_points)\n\t\t\telse:\n\t\t\t\tdata = sampled_data[i_run]\n\t\t\t\n\t\t\tall_data.append(data)\n\n\t\t\tprint('Shape data: ' + str(data.shape))\n\n\t\t\t# Set data in model and estimate GMM density\n\t\t\tgmm_FE.data_ = data\n\t\t\tcoords, est_FE_landsc, FE_points = gmm_FE.landscape()\n\n\t\t\t# Get true cluster labels\n\t\t\tif self.presampled_data is None:\n\t\t\t\tif hasattr(self.toy_model_, \"assign_cluster_labels\"):\n\t\t\t\t\tself.true_labels_ = self.toy_model_.assign_cluster_labels(data)\n\t\t\t\telse:\n\t\t\t\t\tprint('Setting true labels.')\n\t\t\t\t\tself.true_labels_, _ = self.true_FE_.cluster(data, np.zeros(data.shape[0]))\n\t\t\telse:\n\t\t\t\tself.true_labels_ = true_clustering[i_run]\n\t\t\t\n\t\t\t# Cluster data with different methods\n\t\t\tif n_microstates is None:\n\t\t\t\tself.FE_min_labels, _ = 
gmm_FE.cluster(data, FE_points, assign_transition_points=assign_transition_points)\n\t\t\telse:\n\t\t\t\tkmea = KMeans(n_clusters=n_microstates).fit(data[::2])\n\t\t\t\tmicrostate_centers = kmea.cluster_centers_\n\t\t\t\tself.FE_min_labels, _ = gmm_FE.cluster(microstate_centers, FE_points, data, assign_transition_points=assign_transition_points, unravel_grid=False)\n\n\t\t\tif all_methods:\n\t\t\t\tself.km_labels = km.cluster(data)\n\t\t\t\tself.aw_labels = aw.cluster(data)\n\t\t\t\tself.spectral_labels = spectral.cluster(data)\n\n\t\t\t# Score clustering using different scoring metrics\n\t\t\t# V-measure score\n\t\t\tself.cluster_score_vm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'vm')\n\t\t\tprint(self.cluster_score_vm_GMM_FE_min_[i_run])\n\t\t\tif all_methods:\n\t\t\t\tself.cluster_score_vm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'vm')\n\t\t\t\tself.cluster_score_vm_kmeans_[i_run] = self._score_clustering(self.km_labels,'vm')\n\t\t\t\tself.cluster_score_vm_AW_[i_run] = self._score_clustering(self.aw_labels,'vm')\n\t\t\t\tself.cluster_score_vm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'vm')\n\n\t\t\t\t# Adjusted MI\n\t\t\t\tself.cluster_score_ami_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'ami')\n\t\t\t\tself.cluster_score_ami_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'ami')\n\t\t\t\tself.cluster_score_ami_kmeans_[i_run] = self._score_clustering(self.km_labels,'ami')\n\t\t\t\tself.cluster_score_ami_AW_[i_run] = self._score_clustering(self.aw_labels,'ami')\n\t\t\t\tself.cluster_score_ami_spectral_[i_run] = self._score_clustering(self.spectral_labels,'ami')\n\n\t\t\t\t# Fowlkes Mallows\n\t\t\t\tself.cluster_score_fm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'fm')\n\t\t\t\tself.cluster_score_fm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'fm')\n\t\t\t\tself.cluster_score_fm_kmeans_[i_run] = self._score_clustering(self.km_labels,'fm')\n\t\t\t\tself.cluster_score_fm_AW_[i_run] = self._score_clustering(self.aw_labels,'fm')\n\t\t\t\tself.cluster_score_fm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'fm')\n\t\t\n\t\tif save_data:\n\t\t\tif self.presampled_data is None:\n\t\t\t\tnp.save('data_out/sampled_data_'+self.toy_model_.name+file_label+'.npy',all_data)\n\n\t\t\tif False:\n\t\t\t\tnp.save('data_out/cluster_score_fm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_fm_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_kmeans_' + self.toy_model_.name +file_label +'.npy', self.cluster_score_fm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_spectral_)\n\n\t\t\t\tnp.save('data_out/cluster_score_ami_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_ami_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_kmeans_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_AW_' + self.toy_model_.name + file_label+'.npy', 
self.cluster_score_ami_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_spectral_)\n\n\t\t\tnp.save('data_out/cluster_score_vm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_vm_GMM_FE_min_)\n\t\t\tif all_methods:\n\t\t\t\tnp.save('data_out/cluster_score_vm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_kmeans_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_spectral_)\n\t\treturn", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def kmeans_cluster(X_train_input, n_clusters=100):\r\n from sklearn.cluster import MiniBatchKMeans\r\n image_descriptors = []\r\n [image_descriptors.extend(ORB_feature_extractor(img)) for img in X_train_input]\r\n image_descriptors = np.array(image_descriptors) \r\n \r\n kmeans_model = MiniBatchKMeans(n_clusters=n_clusters, init_size=5*n_clusters,\r\n random_state=34, batch_size=128).fit(image_descriptors)\r\n \r\n return kmeans_model", "def kluster(self):\n # specify path to KlustaKwik exe\n kk_path = r\"/media/robin/data/Dropbox/Programming/klustakwik/KlustaKwik\"\n if not os.path.exists(kk_path):\n print(kk_path)\n raise IOError()\n kk_proc = Popen(\n kk_path\n + \" \"\n + self.filename\n + \" \"\n + str(self.tet_num)\n + \" -UseDistributional \"\n + str(self.distribution)\n + \" -MinClusters 5\"\n \" -MaxPossibleClusters 31\"\n \" -MaskStarts 30\"\n \" -FullStepEvery 1\"\n \" -SplitEvery 40\"\n \" 
-UseMaskedInitialConditions 1\"\n \" -AssignToFirstClosestMask 1\"\n \" -DropLastNFeatures 1\"\n \" -RandomSeed 123\"\n \" -PriorPoint 1\"\n \" -MaxIter 10000\"\n \" -PenaltyK 1\"\n \" -PenaltyKLogN 0\"\n \" -Log 0\"\n \" -DistThresh 9.6\"\n \" -UseFeatures \" + \"\".join(map(str, self.feature_mask)),\n shell=True,\n stdout=PIPE,\n )\n # Print the output of the KlustaKwik algo\n for line in kk_proc.stdout:\n print(line.replace(\"\\n\", \"\"))\n\n \"\"\"\n\t\tnow read in the .clu.n file that has been created as a result of this\n\t\tprocess and create the Tint-friendly cut file\n\t\t\"\"\"\n clu_filename = self.filename + \".clu.\" + str(self.tet_num)\n clu_data = np.loadtxt(clu_filename)\n n_clusters = clu_data[0]\n clu_data = clu_data[1:] - 1 # -1 so cluster 0 is junk\n n_chan = 4\n n_spikes = int(clu_data.shape[0])\n cut_filename = self.filename.split(\".\")[0] + \"_\" + str(self.tet_num) + \".cut\"\n with open(cut_filename, \"w\") as f:\n f.write(\n \"n_clusters: {nClusters}\\n\".format(nClusters=n_clusters.astype(int))\n )\n f.write(\"n_channels: {nChan}\\n\".format(nChan=n_chan))\n f.write(\"n_params: {nParam}\\n\".format(nParam=2))\n f.write(\"times_used_in_Vt: {Vt} {Vt} {Vt} {Vt}\\n\".format(Vt=0))\n for i in range(0, n_clusters.astype(int)):\n f.write(\n \" cluster: {i} center:{zeros}\\n\".format(\n i=i, zeros=\" 0 0 0 0 0 0 0 0\"\n )\n )\n f.write(\n \" min:{zeros}\\n\".format(\n i=i, zeros=\" 0 0 0 0 0 0 0 0\"\n )\n )\n f.write(\n \" max:{zeros}\\n\".format(\n i=i, zeros=\" 0 0 0 0 0 0 0 0\"\n )\n )\n f.write(\n \"Exact_cut_for: {fname} spikes: {nSpikes}\\n\".format(\n fname=os.path.basename(self.filename), nSpikes=str(n_spikes)\n )\n )\n for spk in clu_data:\n f.write(\"{spk} \".format(spk=spk.astype(int)))", "def evaluation_k_means(X_selected, n_clusters, y, n_jobs = 1):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=n_jobs)\n \n k_means.fit(X_selected)\n y_predict = k_means.labels_\n \n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict)\n \n # calculate ACC\n y_permuted_predict = best_map(y, y_predict)\n acc = accuracy_score(y, y_permuted_predict)\n \n return nmi, acc", "def main():\n nlp = spacy.load(\"en_core_web_sm\")\n\n notebook_dir = os.getcwd()\n situ_df = pd.read_csv('data/interim/calltaker_situation.csv', \n keep_default_na = False, \n converters = {'sop': eval})\n doc_term_bow, corpus, dictionary = get_dct_dtmatrix(nlp, situ_df['sop'])\n tfidf_situ = TfidfModel(doc_term_bow)\n tfidf_mtx = bow2tfidf(doc_term_bow, tfidf_situ)\n km_190 = KMeans(n_clusters = 190, random_state = 2020).fit(tfidf_mtx)\n\n situ_topics_kmeans_tfidf = situ_df.copy()\n situ_topics_kmeans_tfidf['cluster'] = km_190.labels_\n situ_topics_kmeans_tfidf = situ_topics_kmeans_tfidf.sort_values(by = ['cluster', 'type', 'juri'], ignore_index = True)\n situ_topics_kmeans_tfidf['situ_lst'] = situ_topics_kmeans_tfidf['situation'].apply(lambda x: [x])\n situ_topics_kmeans_tfidf.to_csv('data/interim/situ_topics_kmeans_tfidf.csv', index = False)", "def _kmean(self, data, k):\n #np.seterr(divide='ignore', invalid='ignore')\n # 1 step: choose random points as initial centroids\n X_centroid = np.random.randint(low = np.min(data[0,:]), high=np.max(data[0,:]), size=k)\n Y_centroid = np.random.randint(low = np.min(data[:,1]), high=np.max(data[:,1]), size=k)\n centroids = np.array([X_centroid, Y_centroid]).T\n #\n while True:\n # calculate distance\n distance = 
np.array([np.linalg.norm(data-centroids[i,:], axis=1) for i in range(k)])\n # assign each point to closest centroid\n labels = np.argmin(distance, axis=0)\n # copy the centroid coordinates\n old_centroids = copy.deepcopy(centroids)\n # update the centroid coordinates\n centroids = np.array([np.nanmean(data[np.where(labels==i)[0],:], axis=0) \n if np.any(labels==i) else old_centroids[i,:] for i in range(k) ])\n # verify if centroids changed\n if np.allclose(centroids, old_centroids):\n break\n \n return labels, centroids",
 "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install the scikit-learn package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater than the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters",
 "def kmean(X,initial_centroids,max_iters):\n m = np.size(X,0)\n K = np.size(initial_centroids,0)\n centroids = initial_centroids\n idx = np.zeros((m,1))\n for i in range(1,max_iters):\n idx = nearest_cluster(X,centroids)\n centroids = update_centroids(X,idx,K)\n return centroids,idx",
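The kmean driver above delegates to two helpers, nearest_cluster and update_centroids, that are not shown. One plausible implementation, assuming Euclidean distance; these definitions are a sketch of the missing code, not the original:

import numpy as np
from scipy.spatial.distance import cdist

def nearest_cluster(X, centroids):
    # Index of the closest centroid for every row of X.
    return cdist(X, centroids).argmin(axis=1)

def update_centroids(X, idx, K):
    # Each centroid moves to the mean of its assigned points; an
    # empty cluster is reseeded with a random data point.
    return np.array([X[idx == k].mean(axis=0) if np.any(idx == k)
                     else X[np.random.randint(len(X))] for k in range(K)])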
"def kmeans_clustering(all_features, vocab_size, epsilon, max_iter):\n\n # Your code here. You should also change the return value.\n\n def _initiate_random_centroids(all_features, vocab_size):\n \"\"\"\n Initialize random centroids in the range of the input\n\n :param all_features:\n :param vocab_size:\n :return:\n \"\"\"\n centroids = []\n # 1) Generate points for initial centroids\n\n min_feat = np.ones(all_features[0].size)*np.inf\n max_feat = np.zeros(all_features[0].size)\n\n for a in all_features:\n for p in range(len(a)):\n if a[p] < min_feat[p]:\n min_feat[p] = a[p]\n else:\n if a[p] > max_feat[p]:\n max_feat[p] = a[p]\n\n\n for _ in range(vocab_size):\n random_vector = np.multiply(np.random.rand(1, all_features[0].size),\n max_feat-min_feat) + min_feat\n centroids.append(random_vector.flatten())\n\n return np.array(centroids)\n\n def _assign_vectors_to_nearest_centroid(all_features, centroid):\n \"\"\"\n Assign vectors to nearest centroids\n\n :param all_features:\n :param centroid:\n :return:\n \"\"\"\n #TODO: check which of these works better\n new_centroid_coor = np.zeros([len(centroid), all_features[0].size])\n #new_centroid_coor = centroid\n new_centroid_counter = np.zeros(len(centroid))\n\n dist = cdist(centroid, all_features)\n #min_dist = dist.min(axis=0)\n min_dist_index = dist.argmin(axis=0)\n\n for x in range(len(min_dist_index)):\n id = min_dist_index[x]\n new_centroid_coor[id] = np.add(new_centroid_coor[id],\n all_features[x])\n new_centroid_counter[id] += 1\n\n new_centroid_coor_out = []\n for i in range(len(new_centroid_coor)):\n if new_centroid_counter[i] == 0:\n new_centroid_coor_out.append(centroid[i])\n else:\n new_centroid_coor_out.append(np.divide(new_centroid_coor[i],new_centroid_counter[i]))\n\n return np.array(new_centroid_coor_out), new_centroid_counter\n\n\n def _check_convergence_condition(old_centroids, new_centroids, epsilon):\n \"\"\"\n Check the convergence condition\n\n :param old_centroids:\n :param new_centroids:\n :param epsilon: if every centroid is moved by dist < epsilon KMeans terminates\n :return:\n \"\"\"\n for i in range(len(old_centroids)):\n dist = euclidean(old_centroids[i], new_centroids[i])\n if dist > epsilon:\n return False\n\n return True\n\n def delete_small_clusters(new_centroids, centroid_counter, threshold):\n \"\"\"\n Potential extension of the algorithm -> if there are no points in a cluster, delete that cluster\n\n :param new_centroids:\n :param centroid_counter:\n :param threshold:\n :return:\n \"\"\"\n\n out_centroids = []\n for n in range(len(new_centroids)):\n if centroid_counter[n] > threshold:\n out_centroids.append(new_centroids[n])\n out_centroids = np.array(out_centroids)\n return out_centroids\n\n #MAIN\n old_centroids = _initiate_random_centroids(all_features, vocab_size)\n\n for _ in range(max_iter):\n new_centroids, centroid_counter = _assign_vectors_to_nearest_centroid(all_features, old_centroids)\n converged = _check_convergence_condition(new_centroids, old_centroids, epsilon)\n\n if converged:\n # return centroids if the algorithm has converged\n # return delete_small_clusters(new_centroids, centroid_counter, 0)\n return new_centroids\n old_centroids = new_centroids\n\n # return centroids if max_iter was reached\n # return delete_small_clusters(new_centroids, centroid_counter, 0)\n return new_centroids",
 "def k_means (X, K):\n K_clusters = initialize_centroids(X, K)\n m = X.shape[0]\n dif = 1\n while (dif > 10**(-7)): # we stop when the centroids almost don't move\n groups = np.empty(m)\n K_clusters_old = K_clusters\n #cluster assignment step\n for i in range(m):\n groups[i] = 
np.argmin(compute_distance(X[i,:],K_clusters))\n #centroids update step\n for k in range(K):\n K_clusters[k,:] = np.mean(X[groups==k,:],axis=0)\n dif = np.linalg.norm(K_clusters-K_clusters_old, 2) / (np.linalg.norm(K_clusters, 2) + np.linalg.norm(K_clusters_old, 2))\n return groups.astype(int), K_clusters", "def UI_KMeans_Orch(\n\t\t\t\t train_data,\n\t\t\t\t orig_data,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_analysis = False,\n\t\t\t\t silhouette_cluster_range = range(0,0),\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = True,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\t#Make directory on the users desktop\n\tsegmentation_folder_name = \"Customer-Segmentation-Test\" + str(dt.datetime.now().strftime(\"_%Y-%m-%d_%H.%M.%S\"))\n\tos.makedirs(str(Path.home()) + \"\\\\Desktop\\\\\" + segmentation_folder_name)\n\n\t#Make the log\n\tlog = Log(\"Master-Log\", \"Preprocess-Log\", \"SegMethod-Log\", directory = str(Path.home()) + \"\\\\Desktop\\\\\" + segmentation_folder_name)\n\t\n\tprint(\"\\nData\\n\")\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\torig_data = orig_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data)\n\n\tprint(\"\\nPCA\\n\")\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\tprint(\"\\nElbow Chart Analysis\\n\")\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\tif silhouette_analysis:\n\t\tprint(\"\\nSilhouette Analysis\\n\")\n\t\t#Conduct Silhouette analysis\n\t\ttest.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\tprint(\"\\nLog Saving\\n\")\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)\n\tlog.saveMasterLog()", "def main():\n\n# The following codes loads the data set into a 2D np array called data\n\twith open('complete_data.csv') as features_file:\n\t\tcsv_reader = csv.DictReader(features_file, delimiter = ',')\n\t\tdata = []\n\t\tcounter = 0\n\t\tfor row in csv_reader:\n\t\t\tprint(\"csv_reader row:\", row)\n\t\t\t# if(counter == 20):\n\t\t\t# \tbreak\n\t\t\tcounter+=1\n\t\t\tcleaned_row = []\n\t\t\tcleaned_row.append(row['track'])\n\t\t\tcleaned_row.append(row['loudness'])\n\t\t\tcleaned_row.append(row['score'])\n\t\t\tdata.append(np.array(cleaned_row))\n\t\tdata = random.sample(list(data), 30)\n\t\tdata = np.array(data)\n\n\n\tX = []\n\tY = []\n\tcounter = 0\n\tfor row in data:\n\t\t# if(counter == 10):\n\t\t# \tbreak\n\t\t# counter+=1\n\t\tY.append(row[0])\n\t\tl = [float(i) for i in row[1:]]\n\t\tX.append(l)\n\tX = np.array(X)\n\tY = np.array(Y)\n\n\tcentroid_indices2,centroids2 = sk_learn_cluster(X,Y,3)\n\n\tplot_word_clusters(data, centroids2, centroid_indices2 )", "def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], 
line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers", "def train(self, data):\n\t\tepsilon = self.epsilon\n\t\ttempDist = 1.0\n\t\tk = self.k\n\t\tcenters = data.rdd.takeSample(False, k, 1)\n\t\ti = 0 \n\t\twhile tempDist > epsilon or self.maxNoOfIteration > i:\n\t\t\ti+=1\t\t\t\n\t\t\tclosest = data.map(lambda p: (closestCluster(p, centers), (np.array(p), 1)))\n \t\t\tpointStats = closest.reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n \t\tnewPoints = pointStats.map(lambda x: (x[0], x[1][0] / float(x[1][1]))).collect()\n \t\ttempDist = sum(np.sum((centers[index] - p) ** 2) for (index, p) in newPoints)\n \t\tfor (ind, p) in newPoints:\n\t\t\t\tcenters[ind] = p\n\t\tself.centers = centers\n\t\treturn self.centers", "def elbow_kmeans_ch(self, corpus):\n print('Iterating kmeans over range of topics...')\n km = KMeans(init='k-means++')\n visualizer = KElbowVisualizer(km,k=range(self.start, self.stop, self.step),\n metric='calinski_harabaz', timings=False)\n visualizer.fit(corpus.vectors)\n visualizer.poof(outpath= self.folder + 'elbow_c_h.png')\n print('Saved elbow curve.')\n return", "def ComputeKmeans(x, y, label, total_data, num_centroids):\n matrix = np.column_stack((x, y))\n data = np.zeros((total_data, 3))\n data[:, :2] = matrix\n s = False\n while (not s):\n try:\n c_test = num_centroids # number of centroids guessed\n centroidx = []\n centroidy = []\n flag = False\n centroidupx = np.zeros(c_test)\n centroidupy = np.zeros(c_test)\n comp = np.zeros(int(c_data))\n for i in xrange(c_test):\n if ((comp.sum() == c_data) and (i < c_test)):\n comp = np.zeros(int(c_data))\n while (not flag):\n j = np.random.randint(0, int(c_data))\n if comp[j] == 0:\n comp[j] = 1\n flag = True\n centroidx = np.append(centroidx, np.random.normal(np.random.randint(10, 60), np.random.randint(5, 10), 1))\n centroidy = np.append(centroidy, np.random.normal(np.random.randint(10, 60), np.random.randint(5, 10), 1))\n flag = 
False\n centroids = np.column_stack((centroidx, centroidy))\n new_label_new = np.zeros(matrix[:, 0].size)\n new_label_old = np.ones(matrix[:, 0].size)\n dist = np.zeros(centroids[:, 0].size)\n iter = 0\n # looping until no data point were reassingned\n while (iter < 50):\n a = np.zeros((c_test, 1))\n for i in xrange(matrix[:, 0].size):\n for j in xrange(centroids[:, 0].size):\n dist[j] = np.sqrt(sum((matrix[i, :] - centroids[j, :]) ** 2))\n new_label_new[i], = np.where(dist == dist.min())\n if (np.array_equal(new_label_new, new_label_old)):\n break;\n s = True\n else:\n new_label_old = np.copy(new_label_new)\n unique = np.unique(new_label_new)\n data[:, 2] = new_label_new\n for i in xrange(new_label_new.size):\n for j in xrange(unique.size):\n if (new_label_old[i] == unique[j]):\n a[j] = a[j] + 1\n\n centroidup = npi.GroupBy(data[:, 2]).sum(data)[1]\n centroidup = centroidup[:, :2]\n centroids = centroidup / a\n centroidupx = centroids[:, 0]\n centroidupy = centroids[:, 1]\n s = True\n\n plt.scatter(x, y, c=new_label_new, cmap='rainbow')\n plt.scatter(centroidx, centroidy, c='green', marker='8')\n plt.scatter(centroidupx, centroidupy, c='black')\n plt.grid(True)\n plt.show()\n iter = iter + 1\n\n except Exception as e:\n print (str(e))\n return matrix, centroids, data", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def kmean(encoder,tsne,true_data,true_label):\n enc_output = encoder.predict(true_data)\n kmean = KMeansClustering()\n kmean.fit(enc_output)\n pred = kmean.predict(enc_output)\n accuracy(true_label,pred)\n confusion_matrix(true_label,pred, save_name = \"confusion_matrix_kmean.png\")\n tsne.tsne_plot(true_data,pred,save_data_dir =\"kmean\",save_name=\"kmean\")", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def kmeans(X, n_clust):\n\n X = scale(X)\n estimator = KMeans(init = 'k-means++', n_clusters = n_clust, n_init = 10, verbose = 2)\n \n estimator.fit(X)\n labels = estimator.predict(X)\n return labels", "def k_means(data, k, max_iterations=100):\n if len(data) < k:\n raise ValueError('must have more data points than centers, got: {}'.format(len(data)))\n\n dim = 1 if len(np.shape(data)) == 1 else np.shape(data)[-1]\n means = np.empty((k, dim))\n\n # initialize means to random points in dataset\n taken = set()\n for kidx in range(k):\n rand_idx = np.random.randint(len(data))\n while rand_idx in taken:\n rand_idx = np.random.randint(len(data))\n taken.add(rand_idx)\n 
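Several snippets above pick the number of clusters empirically (elbow charts, silhouette analysis). A compact sketch of silhouette-based selection with scikit-learn; the helper name and candidate range are arbitrary choices, not taken from the snippets:

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def pick_k(X, k_range=range(2, 11), seed=0):
    # Fit one KMeans per candidate k and keep the k whose labelling
    # has the highest mean silhouette coefficient.
    scores = {k: silhouette_score(X, KMeans(n_clusters=k, n_init=10,
                                            random_state=seed).fit_predict(X))
              for k in k_range}
    return max(scores, key=scores.get), scores

Silhouette rewards tight, well-separated clusters, so it tends to give a sharper optimum than eyeballing an elbow curve, at the cost of one extra O(n^2) scoring pass per candidate k.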
means[kidx, :] = data[rand_idx, :]\n\n prev_assignments = []\n assignments = []\n for idx in range(max_iterations):\n\n # check for convergence\n if prev_assignments != [] and prev_assignments == assignments:\n break\n\n # reset assignments\n prev_assignments = assignments\n assignments = []\n\n # assign to means\n for sample in data:\n min_k_idx = 0\n min_k_dist = utils.euclidean_dist(means[0], sample)\n for kidx in range(1, k):\n k_dist = utils.euclidean_dist(means[kidx], sample)\n if k_dist < min_k_dist:\n min_k_dist = k_dist\n min_k_idx = kidx\n assignments.append(min_k_idx)\n\n # show intermediate cluster assignments\n # utils.plot_1d_data_assigments(data, means, assignments)\n\n # recompute means\n means = np.zeros((k, dim))\n mean_counts = collections.defaultdict(int)\n for sample, kidx in zip(data, assignments):\n means[kidx, :] += sample\n mean_counts[kidx] += 1\n \n for kidx, count in mean_counts.iteritems():\n means[kidx] /= float(count)\n \n return means, assignments", "def _kmedoids_run(X, n_clusters, dist_func, max_iter=1000, verbose=True):\n # Get initial centers\n n_samples, n_features = len(X), 2\n init_ids = _get_init_centers(n_clusters, X)\n if verbose:\n print('Initial centers are: {}'.format(init_ids))\n centers = init_ids\n members, costs, tot_cost, dist_mat = _get_cost(X, init_ids, dist_func)\n cc, swapped = 0, True\n while True:\n swapped = False\n for i in range(n_samples):\n if i not in centers:\n for j in range(len(centers)):\n centers_ = deepcopy(centers)\n centers_[j] = i\n members_, costs_, tot_cost_, dist_mat_ = _get_cost(X, centers_, dist_func)\n if tot_cost_ < tot_cost:\n members, costs, tot_cost, dist_mat = members_, costs_, tot_cost_, dist_mat_\n centers = centers_\n swapped = True\n if verbose:\n print('Change centers to {}'.format(centers))\n if cc > max_iter:\n if verbose:\n print('End Searching by reaching maximum iteration')\n break\n if not swapped:\n if verbose:\n print('End Searching by no swaps')\n break\n cc += 1\n return centers, members, costs, tot_cost, dist_mat", "def k_clusters(old_ops, max_outputs, mut):\n \n # DM construction\n matrix = starting_centroids(old_ops, max_outputs, mut)\n\n\n # Clustering\n seed = []\n for i in matrix.OPs:\n seed.append(i)\n centroids = cluster(old_ops, seed, mut)\n disto = distortion(centroids, old_ops, mut)\n\n return centroids, disto", "def get_cluster_centers(args=None, autoencoder=None, cluster_number=2, dataloader_list=None,\n file_path=None, save_name=None, device='cpu'):\n\n if file_path: # Load centers from file and return them on device\n print(\"Loading pretrained KMeans centroids\")\n centers = np.loadtxt(file_path)\n cluster_centers = torch.tensor(\n centers, dtype=torch.float, requires_grad=True).to(device)\n else: # Train Kmeans and generate centers\n # https://github.com/vlukiyanov/pt-dec/blob/11b30553858c1c146a5ee0b696c768ab5244f0ff/ptdec/model.py#L74-L92\n print(\"Training KMeans for centroids\")\n kmeans = KMeans(n_clusters=cluster_number,\n n_init=args.cluster_n_init, random_state=args.seed, max_iter=args.cluster_max_step)\n autoencoder.eval()\n features = []\n actual = []\n\n # merge dataloaders\n concat_dataset = torch.utils.data.ConcatDataset([x.dataset for x in dataloader_list])\n\n dataloader = torch.utils.data.DataLoader(\n dataset=concat_dataset,\n batch_size=args.encoder_bs\n )\n\n # form initial cluster centres\n data_iterator = tqdm(dataloader,\n leave=True,\n unit=\"batch\",\n disable=False,\n )\n print(\"Generating features for kmeans\")\n\n with torch.no_grad():\n # Loop 
through data and generate features from the encoder. \n for index, batch in enumerate(data_iterator):\n if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:\n # if we have a prediction label, separate it to actual\n batch, value = batch\n actual.append(value)\n # Assuming we use the encoder from module.py\n if args.encoder_type == 'vae':\n feature = autoencoder(batch.to(device))\n elif args.encoder_type == 'resnet50':\n feature = list()\n z = autoencoder(batch.to(device)) # [:,:args.dfc_hidden_dim]\n\n feature.append(z)\n\n features.append(feature[0].detach().cpu())\n print(\"Training samples:\", len(features))\n\n actual = torch.cat(actual).long() # Save labels as long in torch tensor.\n samples = torch.cat(features)\n print(f\"Data shape {samples.shape}\")\n print(f\"Labels shape {actual.shape}\")\n print(\"Training...\")\n predicted = kmeans.fit_predict(samples.numpy(), actual) # predict centers from features.\n _, accuracy = cluster_accuracy(predicted, actual.cpu().numpy()) # Compute accuracy of predictions\n cluster_centers = kmeans.cluster_centers_ # define centers\n\n if save_name: # If param. save_name then save the centers.\n filepath = args.log_dir + save_name + \".txt\"\n if not os.path.exists(args.log_dir):\n os.mkdir(args.log_dir)\n print(\"Saving clusters to:\", filepath)\n np.savetxt(filepath, cluster_centers)\n if not (wandb.run is None): # check if wandb is running\n wandb.run.summary[f\"{save_name}_accuracy\"] = accuracy\n\n cluster_centers = torch.tensor( # Convert centers to tensor and send to device.\n cluster_centers, dtype=torch.float, requires_grad=True\n ).to(device)\n print(f\"Training KMeans completed, accuracy: {accuracy:.2f}\")\n return cluster_centers", "def kmeans(X, k, iterations=1000):\n\n # Initialize the cluster centroids (C <- centroid \"means\")\n C = initialize(X, k)\n\n if C is None:\n return None, None\n if not isinstance(iterations, int) or iterations <= 0:\n return None, None\n\n # n: number of dada points\n # d: dimension of each data point\n n, d = X.shape\n\n # # Initialize the cost/distortion function;\n # # defined as J = sum/n(sum/k(r(ij)*||x(i) - c(j)||**2))\n # J = np.inf\n\n # Iterate over iterations\n for iteration in range(iterations):\n # print(\"iteration:\", iteration)\n\n # Maintain a deep copy of C\n # C_prev = np.array([x for x in C])\n # Another alternative (removes for loop):\n C_prev = np.copy(C)\n\n # OPTION 1: FOR LOOPS\n\n # Initialize the array of pairwise data point-centroid\n # distances with zeros\n # dist = np.zeros((n, k))\n\n # for i in range(n):\n # for j in range(k):\n # dist[i, j] = np.linalg.norm(X[i, ...] 
- C[j, ...])\n # Note: squared distances can alternatively be inferred\n # directtly from the inner product of (X - C) with itself\n # dist[i, j] = np.inner(X[i,:]-C[j,:], X[i,:]-C[j,:])\n # print(\"dist:\", dist)\n # Squared distances from \"dist\":\n # print(\"dist ** 2:\", dist ** 2)\n\n # OPTION 2: VECTORIZATION\n\n # Convert X into an array suitable for vectorization\n Xv = np.repeat(X, k, axis=0)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n Xv = Xv.reshape(n, k, d)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n\n # Convert C into an array suitable for vectorization\n Cv = np.tile(C, (n, 1))\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n Cv = Cv.reshape(n, k, d)\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n\n # Compute the \"dist\" matrix of euclidean distances between\n # data points and centroids; shape (n, k)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n\n # Assign each point of the dataset to a centroid:\n # Evaluate argmin(dist**2) for comparison with k\n # r(ij) = 1 if argmin(dist**2) == j\n # -> point i assigned to centroid k\n # otherwise r(ij) = 0 -> ignore point i wrt centroid k\n clss = np.argmin(dist ** 2, axis=1)\n # print(\"centroid indices:\", clss)\n # print(\"clss.shape:\", clss.shape)\n # Note: here, clss is a 1D array of the unique centroid index\n # to which each point in the dataset as been assigned (closest to);\n # the indices array is used in place of r(ij) in J evaluations\n\n # OPTION 1: EXIT CONDITION BASED ON J_prev == J\n\n # # Make a copy of the previous J value & reinitialize J\n # J_prev = J\n # # J = 0\n\n # # Update J (summing over the n data points),\n # # based on the (shortest) distances inferred from \"indices\"\n # # From \"for\" loop:\n # # for i in range(n):\n # # J += (dist[i, clss[i]] ** 2)\n # # From vectorization:\n # J = np.sum(dist[..., clss] ** 2)\n # # Normalize J to the number of data points to\n # # reduce the computational cost (optional)\n # J /= n\n # # print(\"J:\", J)\n\n # if J == J_prev:\n # # print(\"last iteration:\", iteration)\n # return C, clss\n\n # Move the cluster centroids to the center (mean) of\n # the refined cluster by updating C (centroid coordinates)\n for j in range(k):\n # Infer the array of data point indices that correspond\n # to each assigned cluster centroid\n indices = np.where(clss == j)[0]\n # print(\"indices:\", indices)\n if len(indices) == 0:\n C[j] = initialize(X, 1)\n else:\n C[j] = np.mean(X[indices], axis=0)\n\n # OPTION 2: EXIT CONDITION BASED ON C == C_prev\n\n if (C == C_prev).all():\n # print(\"last iteration:\", iteration)\n return C, clss\n\n # Update clss before returning C, clss\n Cv = np.tile(C, (n, 1))\n Cv = Cv.reshape(n, k, d)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n clss = np.argmin(dist ** 2, axis=1)\n\n return C, clss", "def image_kmeans(path):\n imgs_list = os.listdir(path)\n imgs_shape_list = []\n for i in range(len(imgs_list)):\n img = cv2.imread(os.path.join(imgs_path, imgs_list[i]))\n h, w, c = img.shape\n rescale_fac = max(h, w) / 1000\n if rescale_fac > 1.0:\n h = int(h / rescale_fac)\n w = int(w / rescale_fac)\n imgs_shape_list.append([h, w])\n imgs_shape_array = np.array(imgs_shape_list)\n\n cluster = KMeans(n_clusters=10)\n model = cluster.fit(imgs_shape_array)\n labels = model.labels_\n cluster_centers = model.cluster_centers_\n print(labels)\n print(cluster_centers)\n center_list = []\n for i in range(len(cluster_centers)):\n center_list.append([int(cluster_centers[i, 0]), int(cluster_centers[i, 1])])\n 
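The OPTION 2 vectorization above stages np.repeat/np.tile copies before taking norms. Plain broadcasting yields the same (n, k) distance matrix without the explicit staging; a minimal sketch, assuming X is (n, d) and C is (k, d):

import numpy as np

def assign_labels(X, C):
    # (n, 1, d) - (1, k, d) broadcasts to (n, k, d); the norm over
    # the last axis yields the (n, k) point-to-centroid distances.
    dist = np.linalg.norm(X[:, None, :] - C[None, :, :], axis=2)
    return dist.argmin(axis=1)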
print(center_list)\n center_list_sort = sorted(center_list, key=(lambda x:x[0]))\n print(center_list_sort)\n class_count = []\n for i in range(len(cluster_centers)):\n class_count.append(0)\n color_use = ['r', 'g', 'b', 'm', 'c', 'y', 'r', 'g', 'b', 'm']\n color_list = []\n for i in range(len(labels)):\n class_count[labels[i]] = class_count[labels[i]] +1\n color_list.append(color_use[labels[i]])\n print(class_count)\n class_count_sort = []\n for i in range(len(cluster_centers)):\n class_count_sort.append(class_count[center_list.index(center_list_sort[i])])\n print(class_count_sort)\n\n # draw image\n plt.scatter(imgs_shape_array[:,1], imgs_shape_array[:, 0], c=color_list, marker='o')\n for i in range(len(cluster_centers)):\n plt.scatter(cluster_centers[i, 1], cluster_centers[i, 0], c='black', marker='*')\n plt.annotate(class_count[i], xy = (cluster_centers[i, 1], cluster_centers[i, 0]), \n xytext = (cluster_centers[i, 1] + 0.1, cluster_centers[i, 0] + 0.1))\n plt.savefig('kmeans_10.png')", "def k_means_clustering(rows, distance=pearson_distance, k=4):\n # Determine the min and max values for each point\n ranges = [(min(row[i] for row in rows), max([row[i] for row in rows])) for i in range(len(rows[0]))]\n\n # Create k RANDOMLY placed centroids\n clusters = [[random() * (ranges[i][1] - ranges[i][0]) + ranges[i][0] for i in range(len(rows[0]))] for j in\n range(k)]\n distances_from_centroids = {}\n last_matches = None\n best_matches = None\n for t in range(100):\n print ('Iteration {}'.format(t))\n best_matches = [[] for i in range(k)]\n\n # Find the centroid that is the closest for each row\n for j in range(len(rows)):\n row = rows[j]\n best_match = 0\n for i in range(k):\n d = distance(clusters[i], row)\n if d < distance(clusters[best_match], row):\n best_match = i\n best_matches[best_match].append(j)\n\n # if the results are the same as last time, then this is complete\n if best_matches == last_matches:\n break\n last_matches = best_matches\n\n # Move the centroids to the average of their members\n for i in range(k):\n avgs = [0.0] * len(rows[0])\n if len(best_matches[i]) > 0:\n for row_id in best_matches[i]:\n for m in range(len(rows[row_id])):\n avgs[m] += rows[row_id][m]\n for j in range(len(avgs)):\n avgs[j] /= len(best_matches[i])\n clusters[i] = avgs\n\n # Chapter 3 Exercise 5: Return along with the cluster results the total distance between all items\n # and their respective centroids\n for i in range(k):\n for j in range(len(best_matches[i])):\n distances_from_centroids[best_matches[i][j]] = distance(clusters[i],rows[best_matches[i][j]])\n return best_matches, distances_from_centroids", "def kmeans(k, descriptor_list):\r\n kmeans = KMeans(n_clusters = k, n_init=10, verbose = 1) \r\n kmeans.fit(descriptor_list)\r\n visual_words = kmeans.cluster_centers_ \r\n return visual_words", "def run_kmeans(find_closest_centroids_func, compute_centroids_func, x_array, centroids, max_iters=10, plot_progress=False):\n num_centroids_K, ncols = centroids.shape\n num_examples, num_features = x_array.shape\n idx = None\n idx_history = numpy.zeros((max_iters, num_examples))\n centroid_history = numpy.zeros((max_iters, num_centroids_K, ncols))\n\n for i in range(max_iters):\n idx = find_closest_centroids_func(x_array, centroids)\n idx_history[i, :] = idx\n centroid_history[i, :, :] = centroids\n centroids = compute_centroids_func(x_array, idx, num_centroids_K)\n\n if plot_progress is True:\n fig = plt.figure(dpi=120)\n anim = FuncAnimation(fig, kmeans_pca_funcs.plot_progress_kmeans, 
frames=max_iters,\n interval=500, repeat_delay=2, fargs=(x_array, centroid_history, idx_history))\n return centroids, idx, anim\n else:\n return centroids, idx", "def Demo_K_Means_Orch(log,\n\t\t\t\t train_data,\n\t\t\t\t class_label,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_cluster_range,\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = False,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\t\t\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data, \n\t\t\t\t\t\t\t\t col_names = train_col_names)\n\ttest.Preprocess.set_class_label(class_label)\n\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\n\tif viz:\n\t\t#Create cluster plot visualization if requested\n\t\tcluster_plot = cluster_viz(test.train_data, test.class_label, x_feature_index = x_feature_index, y_feature_index = y_feature_index)\n\t\t\n\t\t#Show the plot at runtime if requested\n\t\tif show:\n\t\t\tcluster_plot.show()\n\n\t\t#Save the image\n\t\ttest.Log.saveImage(cluster_plot, \"cluster_plot\", test.viz_folder_name)\n\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\t#Conduct Silhouette analysis\n\t#test.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)", "def cluster(self,\n clustering=None,\n algorithm='klustakwik',\n spike_ids=None,\n **kwargs):\n if clustering is None:\n clustering = 'main'\n\n kk2_dir = op.join(self.settings.exp_settings_dir, 'klustakwik2')\n _ensure_dir_exists(kk2_dir)\n\n # Take KK2's default parameters.\n from klustakwik2.default_parameters import default_parameters\n params = default_parameters.copy()\n # Update the PRM ones, by filtering them.\n params.update({k: v for k, v in self.model.metadata.items()\n if k in default_parameters})\n # Update the ones passed to the function.\n params.update(kwargs)\n\n # Original spike_clusters array.\n if self.model.spike_clusters is None:\n n_spikes = (len(spike_ids) if spike_ids is not None\n else self.model.n_spikes)\n spike_clusters_orig = np.zeros(n_spikes, dtype=np.int32)\n else:\n spike_clusters_orig = self.model.spike_clusters.copy()\n\n # HACK: there needs to be one clustering.\n if 'empty' not in self.model.clusterings:\n self.model.add_clustering('empty', spike_clusters_orig)\n\n # Instantiate the KlustaKwik instance.\n kk = KlustaKwik(**params)\n\n # Save the current clustering in the Kwik file.\n @kk.connect\n def on_iter(sc):\n # Update the original spike clusters.\n spike_clusters = spike_clusters_orig.copy()\n spike_clusters[spike_ids] = sc\n # Save to a text file.\n path = op.join(kk2_dir, 'spike_clusters.txt')\n # Backup.\n if op.exists(path):\n shutil.copy(path, path + '~')\n np.savetxt(path, spike_clusters, fmt='%d')\n\n info(\"Running {}...\".format(algorithm))\n # Run KK.\n sc 
= kk.cluster(model=self.model, spike_ids=spike_ids)\n info(\"The automatic clustering process has finished.\")\n\n # Save the results in the Kwik file.\n spike_clusters = spike_clusters_orig.copy()\n spike_clusters[spike_ids] = sc\n\n # Add a new clustering and switch to it.\n if clustering in self.model.clusterings:\n self.change_clustering('empty')\n self.model.delete_clustering(clustering)\n self.model.add_clustering(clustering, spike_clusters)\n\n # Copy the main clustering to original (only if this is the very\n # first run of the clustering algorithm).\n if clustering == 'main':\n self.model.copy_clustering('main', 'original')\n self.change_clustering(clustering)\n\n # Set the new clustering metadata.\n params = kk.params\n params['version'] = kk.version\n metadata = {'{}_{}'.format(algorithm, name): value\n for name, value in params.items()}\n self.model.clustering_metadata.update(metadata)\n self.save()\n info(\"The clustering has been saved in the \"\n \"`{}` clustering in the `.kwik` file.\".format(clustering))\n self.model.delete_clustering('empty')\n return sc", "def ssKmeans(self, n_clusters, spectralptsfile, mscfile, use_scales=None):\n self.classifier = \"Spectral-Spatial-KMeans\"\n self.inptsfile = spectralptsfile\n self.mscfile = mscfile\n\n points = self.loadPoints()\n\n print \"Running KMeans clustering on both spectral and spatial data ...\"\n\n mscfobj = dpu.openMSC(mscfile)\n mscheader = mscfobj.header\n\n nscales = len(mscheader[1])\n if use_scales is None:\n use_scales = np.arange(nscales)\n else:\n if np.any(use_scales >= nscales):\n raise RuntimeError(\"Indices to scales out of bound, {0:d} scales in input MSC\\n\".format(nscales))\n if np.any(use_scales < 0):\n raise RuntimeError(\"Indices to scales out of bound, negative indices found\")\n \n # Process the points in batches gradually\n npts = mscheader[0]\n niter = int(npts/self.mbk.pf_npts) + 1\n\n rusage_denom = 1024.\n\n pca_flag = True\n \n # Train the standard scaler to scale the input data\n # incrementally\n print\n print \"\\tTraining preprocessing scaler for spectral and MSC spatial data ...\"\n mscfobj.next_pt_idx = 0\n scaler = StandardScaler()\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n \n scaler.partial_fit(np.concatenate((points[mscdata[mscbool, -1].astype(int)-1, :], mscdata[mscbool, 0:-1]), axis=1))\n \n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n sys.stdout.write(\"{0:d} / {1:d}: {2:.2f}\\n\".format(i, niter, mem))\n\n if pca_flag:\n # Transform the data with PCA\n print \"\\tPCA of spectral and MSC spatial data ...\"\n mscfobj.next_pt_idx = 0\n ipca = IncrementalPCA(n_components=len(use_scales)+points.shape[1])\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n ipca.partial_fit(scaler.transform(np.concatenate((points[mscdata[mscbool, -1].astype(int)-1, :], mscdata[mscbool, 0:-1]), axis=1)))\n sys.stdout.write(\"{0:d} / {1:d} \\n\".format(i, niter))\n\n print ipca.explained_variance_ratio_\n print np.cumsum(ipca.explained_variance_ratio_)\n print ipca.var_\n print ipca.components_\n\n 
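The ssKmeans method here streams the MSC file in chunks, fitting a StandardScaler, an IncrementalPCA, and a MiniBatchKMeans with repeated partial_fit passes. The same out-of-core pattern in isolation, assuming batches is a callable that re-yields the data in chunks on each call; all names are illustrative:

from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import IncrementalPCA
from sklearn.preprocessing import StandardScaler

def streaming_kmeans(batches, n_clusters, n_components):
    scaler = StandardScaler()
    ipca = IncrementalPCA(n_components=n_components)
    mbk = MiniBatchKMeans(n_clusters=n_clusters)
    for X in batches():                      # pass 1: fit the scaler
        scaler.partial_fit(X)
    for X in batches():                      # pass 2: fit the projection
        ipca.partial_fit(scaler.transform(X))
    for X in batches():                      # pass 3: fit the clusterer
        mbk.partial_fit(ipca.transform(scaler.transform(X)))
    return scaler, ipca, mbk

Each estimator must be fully fitted before the next pass can transform with it, which is why the data is read three times rather than once.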
import pdb; pdb.set_trace()\n \n # Train the mini-batch KMeans\n print\n print \"\\tTraining the mini-batch KMeans cluster ...\"\n mscfobj.next_pt_idx = 0\n mbk = MiniBatchKMeans(n_clusters=n_clusters)\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n if pca_flag:\n mbk.partial_fit(ipca.transform(scaler.transform(np.concatenate((points[mscdata[mscbool, -1].astype(int)-1, :], mscdata[mscbool, 0:-1]), axis=1))))\n else:\n mbk.partial_fit(scaler.transform(np.concatenate((points[mscdata[mscbool, -1].astype(int)-1, :], mscdata[mscbool, 0:-1]), axis=1)))\n \n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n sys.stdout.write(\"{0:d} / {1:d}: {2:.2f}\\n\".format(i, niter, mem))\n \n # Predict the label of points after feeding all points to\n # mini-batch KMeans\n print\n print \"\\tPredicting mini-batch KMeans clustering labels ...\"\n # Rewind the MSC file object to read points from the\n # beginning.\n mscfobj.next_pt_idx = 0\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n if pca_flag:\n self.labels[mscdata[mscbool, -1].astype(int)-1] = mbk.predict(ipca.transform(scaler.transform(np.concatenate((points[mscdata[mscbool, -1].astype(int)-1, :], mscdata[mscbool, 0:-1]), axis=1))))\n else:\n self.labels[mscdata[mscbool, -1].astype(int)-1] = mbk.predict(scaler.transform(np.concatenate((points[mscdata[mscbool, -1].astype(int)-1, :], mscdata[mscbool, 0:-1]), axis=1)))\n \n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n sys.stdout.write(\"{0:d} / {1:d}: {2:.2f}\\n\".format(i, niter, mem))\n\n mscfobj.close()", "def eval_kd_cluster(kmers):\n shuffle(kmers)\n search_set, kmers = kmers[:1000], kmers[1000:]\n for H in [0.15, 0.125, 0.1, 0.075, 0.05]:\n kdrft_cover = KDRFTCover(H)\n\n start = clock()\n for kmer in kmers:\n kdrft_cover.add(kmer)\n kdrft_cover.greedy_clusters()\n build_time = clock() - start\n\n stats = kdrft_cover.stats()\n stats['build_time'] = build_time\n stats['H'] = H\n\n start = clock()\n for kmer in search_set:\n kdrft_cover.search(kmer, 2)\n stats['search_time'] = (clock() - start)\n yield stats", "def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])", "def Client_K_Means_Orch(log,\n\t\t\t\t train_data,\n\t\t\t\t orig_data,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_analysis = False,\n\t\t\t\t silhouette_cluster_range = range(0,0),\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = False,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\n\t\n\tprint(\"\\nData\\n\")\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation 
test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\torig_data = orig_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data)\n\n\tprint(\"\\nPCA\\n\")\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\tprint(\"\\nElbow Chart Analysis\\n\")\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\tif silhouette_analysis:\n\t\tprint(\"\\nSilhouette Analysis\\n\")\n\t\t#Conduct Silhouette analysis\n\t\ttest.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\tprint(\"\\nLog Saving\\n\")\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)", "def kmeans_driver(img, means):\n # Calculations for means must be in floats\n if means.dtype != 'float':\n means = means.astype(np.float32)\n\n # For each cluster, compare Euclidean distance from pixel\n k = len(means)\n old_cluster_num = np.random.randint(k, size=len(img))\n similarity = 0\n loop_counter = 0\n\n while similarity < 1.0:\n # Calculate the relative distances between each pixel and each mean\n pseudo_dist = get_sq_distance(img, means, True)\n \n # Find index of the smallest distance\n cluster_num = np.argmin(pseudo_dist, axis=1)\n\n # Update cluster means\n #TODO: worry about potential divide by zero in np.mean\n means = np.zeros((k, 3), dtype=np.float32)\n for ii in xrange(k):\n cluster_members = img[cluster_num==ii, :]\n means[ii,:] = np.mean(cluster_members, axis=0)\n\n # Monitor convergence; see if the clustering has changed\n similarity = np.sum(old_cluster_num == cluster_num) / float(len(cluster_num))\n old_cluster_num = cluster_num\n loop_counter += 1\n \n print(\"Number of loops needed until convergence: {0}\".format(loop_counter))\n means = (np.round(means)).astype(np.uint8)\n return cluster_num, means", "def _run_sc(self,toRun):\n\n for params in toRun:\n k,sigma,dpath,dtype = params\n sc = SpectralCluster(self.distancePath,dtype=dtype)\n sc.run(k,sk=None,sigma=sigma,verbose=True)\n clusterSizes = self.get_cluster_sizes(sc)\n self.writer1.writerow([k,sigma] + [round(sc.avgSilValue,4)])\n self.writer2.writerow([k,sigma] + clusterSizes)", "def cluster(k, matrix):\n set_printoptions(threshold=10)\n nvectors = array([array(s) for s in matrix])\n # dummy initial means - usually they will be as far from each other as possible.\n # In this case though, they are just the coordinates for the first k sentences.\n initial_means = [nvectors[_] for _ in range(k)]\n clusterer = KMeansClusterer(k, euclidean_distance, initial_means=initial_means)\n clusters = clusterer.cluster(nvectors, True)\n return clusters", "def kmeans_cluster(\n cn,\n min_k=2,\n max_k=100,\n ):\n\n X = cn.T.values\n ks = range(min_k, max_k + 1)\n\n logging.info(f'trying with max k={max_k}')\n\n kmeans = []\n bics = []\n for k in ks:\n logging.info(f'trying with k={k}')\n model = sklearn.cluster.KMeans(n_clusters=k, init=\"k-means++\").fit(X)\n bic = compute_bic(model, X)\n kmeans.append(model)\n bics.append(bic)\n\n opt_k = np.array(bics).argmax()\n logging.info(f'selected k={opt_k}')\n\n model = kmeans[opt_k]\n\n embedding = 
umap.UMAP(\n n_neighbors=15,\n min_dist=0.1,\n n_components=2,\n random_state=42,\n metric='euclidean',\n ).fit_transform(cn.fillna(0).values.T)\n\n clusters = pd.DataFrame({\n 'cell_id': cn.columns, 'cluster_id': model.labels_,\n 'umap1': embedding[:, 0], 'umap2': embedding[:, 1]\n })\n\n return clusters", "def kmeans_fast(features, k, num_iters=100):\n N = len(features)\n print(N)\n # Randomly initalize cluster centers\n centers, assignments = np.array([features[i] for i in sorted(random.sample(range(0, len(features)), K))]), {}\n _, dim = centers.shape\n\n tile_f = np.tile(features, (k, 1))\n for n in range(num_iters):\n tile_c = np.repeat(centers, N, axis=0)\n tile_sub = np.subtract(tile_f, tile_c)\n dist = np.linalg.norm(tile_sub, axis=1).reshape(k, N)\n assignments_new = np.argmin(dist, axis=0)\n if np.array_equal(assignments, assignments_new):\n break\n else:\n assignments = assignments_new\n\n old_centers, new_stats = centers[:], []\n for j in range(0, k):\n new_stats.append([0, []])\n for i in range(len(features)):\n c_ind = int(assignments[i])\n num, sum_vec = new_stats[c_ind]\n sum_vec_new = np.add(sum_vec, features[i]) if len(sum_vec) > 0 else features[i]\n new_stats[c_ind] = [num + 1, sum_vec_new]\n\n new_centers = np.zeros((k, dim))\n for j in range(0, k):\n num, new_center = new_stats[j]\n new_center = np.divide(new_center, num)\n new_centers[j] = np.array(new_center)\n centers = new_centers\n\n return centers, assignments, 0", "def kmeans(points,n_clusters):\n # create kmeans object\n kmeans = KMeans(n_clusters=n_clusters)\n # fit kmeans object to data\n kmeans.fit(points)\n # print location of clusters learned by kmeans object\n print(kmeans.cluster_centers_)\n # save new clusters for chart\n y_km = kmeans.fit_predict(points)\n\n print('Clusters partition: ', Counter(y_km))\n \n return y_km, kmeans", "def k_means(data, K):\n\n # randomly choose k centroids from the data points\n centroids = data[np.random.choice(len(data), size=K, replace=False)]\n\n # assign each data point to closest centroid\n distances = euclidean_distances(data, centroids)\n labels = np.array([np.argmin(i) for i in distances])\n\n # track the largest centroid movements\n deltas = []\n\n for i in range(MAX_ITERATIONS):\n # keep track of the largest centroid movement for this iteration\n max_delta_mu = 0\n\n for k in range(K):\n # get all data points with label of this centroid k\n cluster_points = data[labels == k]\n if len(cluster_points) == 0:\n continue\n\n # get mean r, g, and b values of all points in this cluster\n # e.g. mu = [112.5, 95.6, 204.2]\n mu = cluster_points.mean(axis=0)\n\n # get the max difference in an r,g, or b value\n # abs(centroids[k] - mu) will return diff in RGB values\n # e.g. 
abs(centroids[k] - mu) = [15.2, 25.4, 4.7]\n max_delta_mu = max(max_delta_mu, abs(centroids[k] - mu).max())\n\n # update the kth centroid to the new mean value\n centroids[k] = mu\n\n deltas.append(max_delta_mu)\n\n # assign each data point to closest centroid\n distances = euclidean_distances(data, centroids)\n labels = np.array([np.argmin(i) for i in distances])\n\n # stop the iterations early if the largest change in an r, g, or b value is < MIN_DELTA_MU\n if max_delta_mu < MIN_DELTA_MU:\n print(\n f\"reached delta_mu {max_delta_mu:.2f} < {MIN_DELTA_MU} in {i} iterations for K={K}\")\n break\n\n return centroids, labels, deltas", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def _kshape(x, k, n_init=1, max_iter=100, n_jobs = 1, random_state=None,normalize=True ):\r\n #print \"n jobs run in parallel: \" + str(cpu_count() ) \r\n random_state = check_random_state(random_state)\r\n best_tot_dist,best_centroids,best_idx = None,None,None\r\n \r\n if n_jobs ==1:\r\n\r\n for i_init in range(n_init): \r\n # n_init is the number of random starting points\r\n # pdb.set_trace()\r\n \r\n idx, centroids,tot_dist = _kshape_single(x, k, max_iter=max_iter, random_state= random_state,normalize=normalize) \r\n if best_tot_dist is None or tot_dist < best_tot_dist:\r\n best_idx = idx.copy()\r\n best_centroids = centroids.copy()\r\n best_tot_dist = tot_dist\r\n else: # n_jobs not =1 # if -1, all CPUs are used\r\n # parallelisation of kshape runs\r\n seeds = random_state.randint(np.iinfo(np.int32).max,size=n_init)\r\n results = Parallel(n_jobs=n_jobs, verbose=0)(\r\n delayed(_kshape_single)(x,k,max_iter=max_iter, random_state=seed, normalize=normalize)\r\n for seed in seeds )\r\n # Get results with the lowest distances\r\n idx, centroids,tot_dist, iterations = zip(*results)\r\n best = np.argmin(tot_dist) \r\n best_idx = idx[best]\r\n best_centroids = centroids[best]\r\n best_tot_dist = tot_dist[best]\r\n sys.stdout.write(\"Done: k=\"+str(k)+\"\\n\")\r\n return {'centroids':best_centroids, 'labels':best_idx, 'distance':best_tot_dist,'centroids_all':centroids,'labels_all':idx,'distance_all':tot_dist,'iterations':iterations}", "def trainSOM_getK(model):\n model.detect_serialized_datasets()\n model.detect_prepared_datasets()\n model.train_SOM()\n model.detect_som_products()\n model.generate_k()\n cluster_num = model.get_k()\n return cluster_num", "def cluster_centroids(self,mydata, clusters, k=None):\n\t\tif k is None:\n\t\t\tk = np.max(clusters) + 1\n\t\tresult = np.empty(shape=(k,) + mydata.shape[1:])\n\t\tfor i in range(k):\n\t\t\tnp.mean(mydata[clusters == i], axis=0, out=result[i])\n\t\treturn result", "def run(self, eatery_id):\n self.start = time.time()\n 
do_cluster_ins = DoClusters(eatery_id=eatery_id)\n do_cluster_ins.run()\n return", "def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))", "def test_determine_k(self):\n test_dir_name = os.path.dirname(__file__)\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"four_clusters.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\"x\", \"y\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 4)\n\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"iris.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\n \"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\",\n \"Petal.Width\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 2)", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return 
result", "def kmeans_clustering(proj_df, k):\r\n k_means= k_means = KMeans(random_state=25, n_clusters=k)\r\n k_means.fit(proj_df)\r\n labels= k_means.predict(proj_df)\r\n \r\n return labels", "def __init__(self,\n num_clusters,\n model_dir=None,\n initial_clusters=RANDOM_INIT,\n distance_metric=SQUARED_EUCLIDEAN_DISTANCE,\n random_seed=0,\n use_mini_batch=True,\n mini_batch_steps_per_iteration=1,\n kmeans_plus_plus_num_retries=2,\n relative_tolerance=None,\n config=None):\n params = {}\n params['num_clusters'] = num_clusters\n params['training_initial_clusters'] = initial_clusters\n params['distance_metric'] = distance_metric\n params['random_seed'] = random_seed\n params['use_mini_batch'] = use_mini_batch\n params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration\n params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries\n params['relative_tolerance'] = relative_tolerance\n super(KMeansClustering, self).__init__(\n model_fn=_kmeans_clustering_model_fn,\n params=params,\n model_dir=model_dir,\n config=config)", "def k_means_step(X, k, means):\n dists = np.array([np.sum((X - mean) * (X - mean), axis=1) for mean in means]) # k*m\n clusters = np.argmin(dists, axis=0)\n new_means = np.array([np.mean(X[clusters == i, :], axis=0) for i in range(k)])\n return new_means, clusters", "def kmeans_pp(img, k):\n n = len(img)\n means = np.zeros((k, 3), dtype=np.float32)\n rand_ind = np.random.randint(n)\n means[0,:]= img[rand_ind,:].astype(np.float32)\n\n # Pick means based on a probability distribution\n dist_mat = np.inf * np.ones((n, k))\n pseudo_dist = get_sq_distance(img, means[0,:])\n dist_mat[:,0] = np.ravel(np.abs(pseudo_dist))\n\n for ii in xrange(1, k):\n # Calculate probability\n min_dist = np.min(dist_mat[:,0:ii], axis=1)\n prob = np.power(min_dist, 2)\n prob = prob/sum(prob)\n\n # Sample next mean with probability, 'prob'\n new_ind = np.random.choice(n, p=prob)\n means[ii,:] = img[new_ind,:] # new mean\n\n # Update distance matrix with new mean\n pseudo_dist = get_sq_distance(img, means[ii,:])\n dist_mat[:,ii] = np.ravel(np.abs(pseudo_dist))\n\n print \"Using Kmeans++..\"\n return kmeans_driver(img, means)", "def spaKmeans(self, n_clusters, spectralptsfile, mscfile, use_scales=None):\n self.classifier = \"Spatial-KMeans\"\n self.inptsfile = spectralptsfile\n self.mscfile = mscfile\n\n self.loadPoints()\n\n print \"Running KMeans clustering on spatial data only ...\"\n\n mscfobj = dpu.openMSC(mscfile)\n mscheader = mscfobj.header\n\n nscales = len(mscheader[1])\n if use_scales is None:\n use_scales = np.arange(nscales)\n else:\n if np.any(use_scales >= nscales):\n raise RuntimeError(\"Indices to scales out of bound, {0:d} scales in input MSC\\n\".format(nscales))\n if np.any(use_scales < 0):\n raise RuntimeError(\"Indices to scales out of bound, negative indices found\")\n \n # Process the points in batches\n npts = mscheader[0]\n niter = int(npts/self.mbk.pf_npts) + 1\n\n rusage_denom = 1024.\n \n pca_flag = True\n \n if pca_flag:\n # Transform the data with PCA\n print \"\\tPCA of MSC spatial data ...\"\n ipca = IncrementalPCA(n_components=len(use_scales))\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n ipca.partial_fit(mscdata[mscbool, 0:-1])\n sys.stdout.write(\"{0:d} / {1:d} \\n\".format(i, niter))\n \n print 
np.cumsum(ipca.explained_variance_ratio_)\n \n # Train the standard scaler to scale the input data\n # incrementally\n print\n print \"\\tTraining preprocessing scaler for MSC spatial data ...\"\n mscfobj.next_pt_idx = 0\n scaler = StandardScaler()\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n if pca_flag:\n scaler.partial_fit(ipca.transform(mscdata[mscbool, 0:-1]))\n else:\n scaler.partial_fit(mscdata[mscbool, 0:-1])\n \n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n sys.stdout.write(\"{0:d} / {1:d}: {2:.2f}\\n\".format(i, niter, mem))\n\n # Train the mini-batch KMeans\n print\n print \"\\tTraining the mini-batch KMeans cluster ...\"\n mscfobj.next_pt_idx = 0\n mbk = MiniBatchKMeans(n_clusters=n_clusters)\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n if pca_flag:\n mbk.partial_fit(scaler.transform(ipca.transform(mscdata[mscbool, 0:-1])))\n else:\n mbk.partial_fit(scaler.transform(mscdata[mscbool, 0:-1]))\n \n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n sys.stdout.write(\"{0:d} / {1:d}: {2:.2f}\\n\".format(i, niter, mem))\n \n # Predict the label of points after feeding all points to\n # mini-batch KMeans\n print\n print \"\\tPredicting mini-batch KMeans clustering labels ...\"\n # Rewind the MSC file object to read points from the\n # beginning.\n mscfobj.next_pt_idx = 0\n for i in xrange(niter):\n mscdata = mscfobj.read(npts=self.mbk.pf_npts, use_scales=use_scales)\n mscbool = self.validhit_bool[mscdata[:, -1].astype(int)-1]\n if np.sum(mscbool) == 0:\n if self.verbose:\n # debug\n print \"\\t\\tno valid points, {0:d} / {1:d}\".format(i, niter)\n continue\n if pca_flag:\n self.labels[mscdata[mscbool, -1].astype(int)-1] = mbk.predict(scaler.transform(ipca.transform(mscdata[mscbool, 0:-1])))\n else:\n self.labels[mscdata[mscbool, -1].astype(int)-1] = mbk.predict(scaler.transform(mscdata[mscbool, 0:-1]))\n \n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n sys.stdout.write(\"{0:d} / {1:d}: {2:.2f}\\n\".format(i, niter, mem))\n\n mscfobj.close()", "def kmeans(features, projection, ite = 20, k = 4, threshold = 1e-4):\n from scipy.cluster.vq import kmeans, vq\n import datetime\n\n from measures import spatial_coherence \n \n centroids, distance = kmeans(features, k, iter=ite, thresh=threshold)\n #code, _ = vq(features, centroids)\n \n run_ = datetime.datetime.now().strftime(\"%y_%m_%d_%H_%M\")\n \n params = \"projection_size=%d, k=%d\" %(len(projection), k)\n #clusters = clusters_from_code(code, k, projection)\n \n clustering_id = \"(%s)_(%s)_(%s)_(%s)\" %(\"exhaustive_kmeans\", params, run_, projection)\n #print clustering_id\n km_clt = KMClustering(algorithm =\"exhaustive_kmeans\", parameters = params, run = run_,\n clustering_id = clustering_id, clusters = [], ccontains_noise = False, cclustering_on_dimension = True)\n\n \n #measures = {'spatial_coherence': spatial_coherence(km_clt, len(features))[0], 'distortion': distance}\n #km_clt.update_measures(measures)\n \n return km_clt", "def score_one_clustering(X, truelabels, 
num_components, num_iterations):\n #scipy's builtin K-means is very slow, use mpi-version instead.\n #from scipy.cluster.vq import kmeans,vq\n #clst,dist = kmeans(X, num_components, NUM_ITERATIONS)\n #labels,dist = vq(X, clst)\n clst,dist,labels = mpi_kmeans(X, num_components, 200, num_iterations)\n print truelabels\n print labels-1\n return condentropy(truelabels,labels-1)", "def initialize(data, k, num_runs):\n # run k-means a few times and take best\n means = []\n assignments = []\n best_dist = sys.maxint\n\n for _ in range(num_runs):\n cur_means, cur_assignments = k_means(data, k)\n dist = utils.compute_total_dist(data, cur_means, cur_assignments)\n\n if dist < best_dist:\n best_dist = dist\n means = cur_means\n assignments = cur_assignments\n\n return means, assignments, best_dist" ]
[ "0.78048205", "0.77288556", "0.7313263", "0.7294566", "0.7138447", "0.7079959", "0.70205575", "0.7020232", "0.6925354", "0.69126034", "0.6898227", "0.6897526", "0.68272984", "0.6826028", "0.6779355", "0.67769915", "0.6771654", "0.6768111", "0.6721485", "0.6702656", "0.66518116", "0.66266733", "0.66117156", "0.66113406", "0.6607637", "0.6584395", "0.6583659", "0.65632164", "0.6541713", "0.6528758", "0.64812964", "0.6459086", "0.64495516", "0.64393395", "0.6436575", "0.6427666", "0.642581", "0.6413772", "0.64011836", "0.6396314", "0.6391112", "0.6390856", "0.63859993", "0.63843834", "0.63807994", "0.6346915", "0.6325907", "0.63254154", "0.6309874", "0.63009834", "0.6300324", "0.6294327", "0.6286965", "0.62760776", "0.6268822", "0.6263065", "0.62515604", "0.6245266", "0.6243024", "0.6242297", "0.62355644", "0.62355274", "0.6229936", "0.62086624", "0.61990386", "0.61948645", "0.61888325", "0.6185861", "0.6184443", "0.61684376", "0.6155734", "0.6147474", "0.6139267", "0.6130817", "0.6093573", "0.60931855", "0.60823756", "0.607093", "0.6060973", "0.6054654", "0.60541123", "0.6053624", "0.60457677", "0.60453355", "0.60432154", "0.60394865", "0.60320526", "0.60290605", "0.6028785", "0.6019611", "0.60183847", "0.6011288", "0.6006808", "0.6003387", "0.59917", "0.5978339", "0.5974991", "0.59625804", "0.59524596", "0.5946009", "0.5940671" ]
0.0
-1
Get the codebook vectors.
def get_cb_vectors(self): return self.cb_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vectors(self):\n return self.vecs[:]", "def get_vectors(self, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = self.model_dbow.docvecs[prefix]\n return vectors", "def get_vectors(model, corpus_size, vectors_size, vectors_type):\r\n vectors = np.zeros((corpus_size, vectors_size))\r\n for i in range(0, corpus_size):\r\n prefix = vectors_type + '_' + str(i)\r\n vectors[i] = model.docvecs[prefix]\r\n return vectors", "def get_vectors(model, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = model.docvecs[prefix]\n return vectors", "def boxVectors(self):\n return self.box_vectors", "def getVectors(self):\n vectors = dict()\n i = 0\n N = len(self.db.invertedIndex)\n for w, (idf, docs) in self.db.invertedIndex.items():\n for doc, tf in docs.items():\n try:\n vectors[doc][i] = tf * idf\n except KeyError as k:\n vectors[doc] = {i: tf * idf}\n i += 1\n i = 0;\n return vectors", "def bow_vecs(docs):\n return CECTORIZER.transform(docs).toarray()", "def get_vectors_for_all_docs(docs, vocab):\n docs_vectors = [get_feature_vector(doc, vocab) for doc in docs]\n return np.array(docs_vectors)", "def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']", "def vectors():\n r = db.execute(\"select word, year, c from counts where conf=? order by word, year\", (conf,))\n vects = defaultdict(dict)\n for w,y,c in r:\n l = vects[w]\n l[y] = float(c) \n\n\n ret = []\n for w in vects:\n d = vects[w]\n\n # if word is super uncommon, skip it\n if (max(d.values()) <= 3):\n continue\n if (max([v / (1.+year2c.get(y,0)) for y, v in d.items()]) < .1): \n continue\n\n # some years may not have the word\n counts = dict2arr(d, xrange(minyear, maxyear+1), 1.0)\n\n \n # naive window averaging smoothing over the trend curve\n smooth = []\n for i in xrange(len(counts)):\n smooth.append(np.mean(counts[max(0,i-2):i+2]))\n if max(smooth) > 2:\n ret.append([w] + smooth)\n return np.array(ret)", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def basis_vectors(self):\n return self._basis_vectors", "def infer_vectors(self, reports, labels):\n logger.info('Inferring vectors from Doc2Vec model')\n tagged_docs = self.tag_dataset(reports, labels)\n vecs = [self.model.infer_vector(tag.words) for tag in tagged_docs]\n vecs = np.array(vecs)\n return vecs", "def vocabulary(self):\n return [recid for recid in self._model.vocab]", "def calculate_cb_vecs(self, clusters):\n if not clusters or not clusters[0]:\n return 
None\n\n # :param:`n` is the dimension of the vectors\n n = len(clusters[0][0])\n # Initialize the codebook vectors to 0\n cb_vectors = np.zeros([n * self.K]).reshape(self.K, n)\n for i in range(self.K):\n sum = np.zeros([n], dtype=np.uint).reshape(1, n)\n for vector in clusters[i]:\n sum += vector\n # divide the sum of the vectors by the size of the cluster\n cb_vectors[i] = np.divide(sum, len(clusters[i]))\n return cb_vectors", "def load_vector_dictionary():\n return read_word2vecs_from_file(VECTOR_FILE)", "def generate_voc(self):\n\n observations = [\"walk\", \"shop\", \"clean\", \"tennis\", \"read\"]\n states = [\"sunny\", \"rainy\", \"snowy\"]\n\n # Sort them alphabetically, just to be on the safe side\n observations.sort()\n states.sort()\n\n return (observations, states)", "def codelists():\n return CodelistSet()", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass", "def getVectors(self):\n l = len(self.points)\n return [Vector.createFromTwoPoints(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def getVector(self, p):\n vector = {}\n i = 0\n tr = ParseDumpWiki.normName(p)\n if(self.db.isInPage(tr)):\n for w, (idf, docs) in self.db.invertedIndex.items():\n if (p in docs):\n vector[i] = idf * docs[p]\n i += 1\n else:\n freqDist = self.db.transformDocument(wikipedia.page(p).content)\n indexesWords = list(self.db.invertedIndex.keys())\n commonWords = set(indexesWords).intersection(freqDist.keys())\n for w in commonWords:\n idf, docs = self.db.invertedIndex[w]\n vector[indexesWords.index(w)] = idf * freqDist[w]\n return vector", "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def get_bravais_vectors(p_state, idx_image=-1, idx_chain=-1):\n _a = (3*ctypes.c_float)()\n _b = (3*ctypes.c_float)()\n _c = (3*ctypes.c_float)()\n _Get_Bravais_Vectors(ctypes.c_void_p(p_state), _a, _b, _c,\n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [a for a in _a], [b for b in _b], [c for c in _c]", "def stateVector(self):\n simulator=Aer.get_backend('statevector_simulator')\n result=execute(self.circuit,backend=simulator).result()\n statevector=result.get_statevector(decimals=4) #\"decimals=4\" doesn't work in version 0.20.0 \n return statevector.tolist()", "def load_vectors(fname):\r\n # taken from: https://fasttext.cc/docs/en/english-vectors.html\r\n vectors_data = vocab.Vectors(name=fname)\r\n\r\n return vectors_data", "def getVectors(self,graph):\n return [Vector.createFromTwoTuples(graph[i],graph[i+1]) for i in range(len(graph)-1)]", "def get_vector(self): \n #print(self.state)\n '''\n print(\"\"\"\n Price {}\n Last Price {}\n Last Period Transaction {}\n Last Transaction {}\n Las Value {}\n Last day {}\n Last hour {}\n Last minute {}\n --------------\n Balance {}\n Bag {}\n \"\"\".format(\n self.state['price'],\n self.states[-1]['price'],\n self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'], \n self.balance, \n self.bag, \n )) \n ''' \n self.state_vector = np.array([\n self.state['price'],\n self.states[-1]['price'],\n 
self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'],\n ])\n\n return self.state_vector", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def get_box_vectors(file):\n box_vectors = [None,None,None]\n with open(file,\"rt\") as fin:\n for line in fin:\n if line[0:6] == \"CRYST1\":\n x_length = float(line[9:14])\n y_length = float(line[18:23])\n z_length = float(line[27:33])\n box_vectors = [x_length,y_length,z_length]\n return(box_vectors)\n return(box_vectors)", "def getStateVocabs (self):\n\t\tstate_vocabs = []\n\t\tcurrent_cntry = None\n\t\tcurrent_vocab = None\n\n\t\tfor termData in self.stateVocabs.getDataList():\n\t\t\n\t\t\t# termData[0] is state code, which as form \"%s %s % (country_code, state_code)\n\t\t\tcountry_code = termData[0].split(\"-\")[0]\n\t\t\t\n\t\t\tif not self.countryCodeMap.has_key (country_code):\n\t\t\t\traise KeyError, \"no country code map entry for '%s'\" % counry_code\n\t\t\t\t\n\t\t\tcountry = self.countryCodeMap[country_code]\n\t\t\t\t\n\t\t\tif country_code != current_cntry:\n\t\t\t\ttypeName = self.makeTypeName (country.name)\n\t\t\t\tcurrent_cntry = country_code\n\t\t\t\tcurrent_vocab = self.createEnumerationType (typeName)\n\t\t\t\tstate_vocabs.append (current_vocab)\n\n\t\t\tcurrent_vocab.addValue (termData)\n\t\treturn state_vocabs", "def init_LVQ_pvectors(som, taggings, x_train, y_train):\n p_vectors = np.ndarray(shape = (som.x, som.y), dtype = prototype)\n for i in range(som.x):\n for j in range(som.y):\n p_vectors[i][j] = prototype(taggings[i][j], som.weights[(i,j)])\n return p_vectors", "def gen_review_vecs(reviews, model, num_features):\n\n curr_index = 0\n review_feature_vecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\n\n # index2word is a list consisting of all words in the vocabulary\n # Convert list to set for speed\n index2word_set = set(model.wv.index2word)\n for review in reviews:\n\n #if curr_index%1000 == 0.:\n # print \"Vectorizing review %d of %d\" % (curr_index, len(reviews))\n \n review_feature_vecs[curr_index] = review_to_vec(review, model, num_features , index2word_set)\n curr_index += 1\n \n return review_feature_vecs", "def vector(self):\n return self.__vector", "def unit_vectors(self):\n # return {'comp1': CartesianRepresentation(...),\n # 'comp2': CartesianRepresentation(...),\n # 'comp3': CartesianRepresentation(...)}\n raise Exception(\"Not yet implemented\")", "def vector(self):\n return self._representation_vector", "def vector(self) -> np.ndarray:\n link_vectors = [link.vector for link in self.links]\n v = np.array(link_vectors).ravel()\n return v", "def Cvec(self):\n return vec(self.xc, self.yc)", "def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)", "def print_vectors(self):\n print(\"Vectors:\")\n for name, vector in self.get_vectors():\n self.print_vector(name, vector.items)", "def getCVTerms(self):\n return _libsbml.SBase_getCVTerms(self)", "def getVectorSemanticSBERT(text):\n url = cfg.sbert_vectoriser\n res = 
requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']", "def return_vec(self) :\r\n y_vec = np.concatenate((self.x_vec,self.v_vec))\r\n return y_vec", "def getVectors(self, graph):\n return [Vector.createFromTwoTuples(graph[i], graph[i + 1]) for i in range(len(graph) - 1)]", "def landmarks_as_vectors(landmarks):\n mat = []\n for lm in landmarks:\n mat.append(lm.as_vector())\n return np.array(mat)", "def vocabulary(self):\n return self._vocabulary", "def get_periodic_box_vectors(self):\n return self._periodic_box_vectors", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, 
max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def get_word_vectors(self, docs):\n return self.tfidf.transform(docs)", "def getOutputVector(self, data = None):\n\t\treturn self.loader.getOutputVector(data)", "def veg_list(self):\r\n # Convert veg_codes table to NumPy Array\r\n veg_table = arcpy.da.TableToNumPyArray(self.vegcode_table, (vegcodeCol), skip_nulls=True)\r\n # pull values out of veg_code column into a list\r\n _veg_list = [str(v) for v in veg_table[vegcodeCol].tolist()] # to deal with unicode issues\r\n del veg_table\r\n return _veg_list", "def vectors(self, adr = 0x10000):\n\n\t\tself.__vector(adr - 2, \"RST\")\n\t\tself.__vector(adr - 4, \"NMI\")\n\t\tself.__vector(adr - 6, \"SWI\")\n\t\tself.__vector(adr - 8, \"IRQ\")\n\t\tx = self.p.t.add(adr - 8, adr, \"tbl\")\n\t\tx.blockcmt += \"\\n-\\nMC6800 Vector Table\\n\\n\"", "def take_vec(self):\n vec = aux.vec(self.numbers)\n\n return vec", "def get_word_vector(doc_id, corpus):\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n word_vec = np.zeros(len(inv_index))\n for count_vec, word in enumerate(inv_index):\n word_vec[count_vec] = inv_index[word].get(doc_id, {'frequency': 0})['frequency']\n return word_vec", "def getVectorSemanticSBERTArray(text):\n repArray = []\n for element in text:\n repArray.append(getVectorSemanticSBERT(element))\n\n return repArray", "def efficient_integer_vectors(self):\n return self._efficient_integer_vectors", "def get_vectors(mols):\n vect_types = VectTypes()\n for mol in mols:\n if \".\" in mol.smiles:\n logger.debug(\"SKIPPING - FRAGMENT: %s\", mol.smiles)\n continue\n vectors = get_3d_vects_for_mol(mol.sdf_info)\n for 
vect_type in vectors:\n vect_choice = vect_types.translate_vect_types(vect_type)\n for vector in vectors[vect_type]:\n spl_vect = vector.split(\"__\")\n smiles = spl_vect[0]\n if len(spl_vect) > 1:\n vect_ind = int(spl_vect[1])\n else:\n vect_ind = 0\n new_vect = Vector.objects.get_or_create(\n smiles=smiles, cmpd_id=mol.cmpd_id, type=vect_choice\n )[0]\n create_vect_3d(mol, new_vect, vect_ind, vectors[vect_type][vector])", "def list():\n\n return cache.codeTableList()", "def vectorcorpus(model, wcl):\r\n corpus = np.array([model.word_vec(word) for word, _ in wcl])\r\n print('Created corpus with {} elements'.format(len(corpus)))\r\n return corpus", "def generateAllRegionVectors():\n\tregionVectors = []\n\tfor i in range(NUM_REGION_VECTORS):\n\t\tregionVectors.append('{0:04x}'.format(i))\n\treturn regionVectors", "def _seq2vec(seq):\n vec = np.zeros(len(seq), dtype=int)\n for aai, aa in enumerate(seq):\n vec[aai] = AA2CODE[aa]\n return vec", "def get_vect(word, model, method):\n if method == \"model\":\n try:\n return model.wv[word]\n except KeyError:\n return None\n else:\n try:\n return model[word]\n except KeyError:\n return None", "def vectorize_doc_list(docList):\n vecList = bc.encode(docList)\n return vecList", "def yvec(self):\n return self._yvec", "def __call__(self):\n return self._representation_vector", "def stimulus_vectors(vocab, beta, prefix='V'):\n v = vocab.parse('V0').v\n yield v\n for i in itertools.count(1):\n v = np.sqrt(1. - beta**2) * v + beta * vocab.parse('V' + str(i)).v\n v /= np.linalg.norm(v)\n yield v", "def get_vocab(self):\n vocab, char_vocab = set(), set()\n\n for document in self.docs:\n vocab.update(document.tokens)\n char_vocab.update([char\n for word in document.tokens\n for char in word])\n\n return vocab, char_vocab", "def bow_vec(doc):\n return CECTORIZER.transform([doc]).toarray()", "def create_sample_vectors(cleaned_data_directory, out_vectors_path):\n vectors = []\n\n for filename in os.listdir(cleaned_data_directory):\n if not filename.endswith(\".txt\"):\n continue\n\n path = os.path.join(cleaned_data_directory, filename)\n f = open(path, mode='r', encoding='utf8')\n\n print(\"Processing\", path)\n\n lang = filename[:2]\n lang_number = language_codes.index(lang)\n\n print(f\"\\tLanguage: {lang} ({lang_number})\")\n print(\"\\tReading...\", end=' ')\n\n file_content = f.read()\n content_length = len(file_content)\n\n print(\"done.\")\n print(\"\\tExtracting vectors...\", end=' ')\n\n sample_start_index = 0\n count = 0\n\n while sample_start_index + text_sample_size < content_length:\n sample = get_sample(file_content, sample_start_index, text_sample_size)\n input_vector = build_input_vector(sample)\n vector = input_vector + [lang_number]\n vectors.append(vector)\n sample_start_index += text_sample_size\n count += 1\n\n print(\"done.\")\n print(f\"\\tExtracted {count} vectors.\")\n\n del file_content\n\n print(f\"Total {len(vectors)} vectors.\")\n\n np_vectors = np.array(vectors, dtype=np.uint16)\n np.random.shuffle(np_vectors)\n\n print(f\"Converted to NumPy array, shape: {np_vectors.shape}.\")\n\n np.savez_compressed(out_vectors_path, data=np_vectors)\n\n print(f\"Saved to {out_vectors_path}.\")", "def to_countvectors(self):\n if hasattr(self, \"ifp\"):\n df = self.to_dataframe()\n return to_countvectors(df)\n raise AttributeError(\"Please use the `run` method before\")", "def V_vect(self, points):\n return self.A_conf*norm(points)*self.isOutside(points)", "def get_vector(self) -> Optional[List[_Score]]:\n\n if len(self._vector) is 0:\n return 
None\n else:\n return self._vector", "def load_vectors_novocab(path: str) -> (Optional[str], dict):\n print(f\"Started loading vectors from {path} @ {datetime.now()}\")\n words = dict()\n try:\n with open(file=path, mode=\"r\", encoding=\"utf-8\") as source_file:\n # Get the first line. Check if there's only 2 space-separated strings (hints a dimension)\n dimensions = str(next(source_file))\n if len(dimensions.split(\" \")) == 2:\n # We have a dimensions line. Keep it in the variable, continue with the next lines\n pass\n else:\n # We do not have a dimensions line\n line = dimensions.split(' ', 1)\n key = line[0]\n words[key] = np.fromstring(line[1], dtype=\"float32\", sep=' ')\n dimensions = None\n for line in source_file:\n line = line.split(' ', 1)\n key = line[0]\n words[key] = np.fromstring(line[1], dtype=\"float32\", sep=' ')\n except OSError:\n print(\"Unable to read word vectors, aborting.\")\n return {}\n print(f\"Finished loading a total of {len(words)} vectors @ {datetime.now()}\")\n return dimensions, normalise(words)", "def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)", "def vec(self):\r\n\r\n xv = np.arange(self.dx / 2, self.lx, self.dx)\r\n yv = np.arange(-self.ly / 2 + self.dy / 2, self.ly / 2, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n if self.ox != 0:\r\n xv = np.arange(self.ox, self.lx + self.ox, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n return xv, yv, zv", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def get_doc_vector(doc_id, model_id):\n queue = get_vec_queue(app.config)\n data = queue.get_by_id((doc_id, model_id))\n if data is not None:\n return jsonify(doc=data.tolist())\n return jsonify(err=f\"{doc_id} not found\"), 404", "def string_vector(self):\n pass", "def build(self,documents):\n\t\tself.vectorKeywordIndex = self.getVectorKeywordIndex(documents)\n\n\t\tself.documentVectors = [self.createVector(document) for document in documents]", "def vectorize_documents(documents, model):\n document_vectors = []\n count = 0\n for document in documents:\n count += 1\n sentence_vectors = [vectorize_sentence(sentence, model) for sentence in document]\n document_vector = get_aggregate_vector(sentence_vectors)\n document_vectors.append(document_vector)\n return document_vectors", "def get_codebook(self):\n return self.codebook", "def _vectorize_data(self, docs: []):\n print('Vectorizing data...')\n tfidf = TfidfVectorizer()\n encoded_data = tfidf.fit_transform(docs)\n return encoded_data", "def __getitem__(self, name):\n return np.array(self._vectors[name])", "def reconstruct_vectors(self, vectors):\n return self.instance_vectors(self.project_vectors(vectors))", "def get_vocab(self):\n\n\t\tself.parse_transcript() 
\n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def get_vector_representation(self):\r\n vectorizer = CountVectorizer(lowercase=False,\r\n tokenizer=lambda x: x, # Tokenization should already be done by preprocessor\r\n stop_words=None,\r\n min_df=5,\r\n max_features=None, ## use all features\r\n ngram_range=(1, 1), ## This uses only unigram counts\r\n binary=False) ## This sets the beatures to be frequency counts\r\n pipeline = Pipeline([('vec', vectorizer), ('tfidf', TfidfTransformer())])\r\n\r\n X = pipeline.fit_transform(self.df['tokens'])\r\n Y = self.df['label'].values\r\n return ((X[:self.train_index], Y[:self.train_index]),\r\n (X[self.train_index:self.valid_index], Y[self.train_index:self.valid_index]),\r\n (X[self.valid_index:], Y[self.valid_index:]))", "def show_vdcs(self):\n for v in self.vdcs:\n print v", "def load_vecs(fin):\n h5f = tables.open_file(fin)\n h5vecs= h5f.root.vecs\n\n vecs=np.zeros(shape=h5vecs.shape,dtype=h5vecs.dtype)\n vecs[:]=h5vecs[:]\n h5f.close()\n return vecs", "def get_source_vectors(testsmells):\n\n for testsmell in testsmells:\n df = pd.read_csv('data/' + testsmell + '_data.csv')\n df['Vector'] = ''\n\n repnames = df['App'].unique().tolist()\n for repname in repnames:\n print('Processing project \\'' + repname + '\\' for ' + testsmell + '...')\n currdf = df[df['App'] == repname]\n repo = Repo('repositories/' + repname)\n vectors = []\n \n # Get the vectors for each Java file in the dataframe\n for _, row in tqdm(list(currdf.iterrows())): \n try:\n repo.git.checkout(row['CommitSHA'], force=True)\n file_path = 'repositories/' + repname + '/' + row['RelativeTestFilePath']\n vectors.append(get_vector(file_path))\n except GitCommandError as err:\n print('Failed for ' + row['App'] + ':' + row['CommitSHA'])\n print(err)\n vectors.append('')\n \n df.loc[df['App'] == repname, 'Vector'] = vectors # Set the vectors on the dataframe\n \n filename = 'data/' + testsmell + '_vectors.csv'\n df.to_csv(filename, index=False)", "def __call__(self):\n return self._vector", "def get_vector(word, model):\n return model.wv[word]", "def svcs_list(self) -> List[str]:\n return self._svcs_list", "def vocabulary(self) -> np.ndarray:\n return np.array(\n list(set(word for text in self.preprocess_corpus for word in text))\n )", "def vectorizer_features(self) -> list:\n if self._vectorizer:\n return self._vectorizer.get_feature_names()\n self.logger.warning('Uninitialized vector. 
Please call count_vectorizer first.')", "def sentences2vec(self, sentences, unseen=None):\r\n keys = self.keys\r\n # print(sentences)\r\n if unseen:\r\n unseen_vec = self.model.wv.word_vec(unseen)\r\n\r\n # if unseen:\r\n # vec.append([self.model.wv.word_vec(y) if y in set(sentences) & keys\r\n # else unseen_vec for y in sentences])\r\n # else:\r\n # vec.append([self.model.wv.word_vec(y) for y in sentences\r\n # if y in set(sentences) & keys])\r\n vec = np.array([0 for _ in range(300)])\r\n for y in sentences:\r\n if len(vec) == 0:\r\n vec = np.array(self.model.wv.word_vec(y))\r\n elif y in self.keys:\r\n vec = vec + np.array(self.model.wv.word_vec(y))\r\n # print(len(vec))\r\n return vec", "def load_vectors(path, to_train=False):\n model = Word2Vec.load(path)\n\n if to_train:\n return model\n\n # In case it doesn't need to be trained, delete train code to free up ram\n word_vectors = model.wv\n\n context_vectors = dict()\n if hasattr(model, \"syn1\"):\n # For hierarchical softmax\n context_vectors = model.syn1\n elif hasattr(model, \"syn1neg\"):\n # For negative sampling\n context_vectors = model.syn1neg\n\n del model # Save memory\n return VectorCollection(word_vectors, context_vectors)", "def get(self):\n return GenericGet().get_catalogs()", "def getPrincipalVectors(A): #\n VT=np.linalg.eig(np.matmul(A.T,A))\n sort = sorted(zip(VT[0],VT[1].T.tolist()),reverse=True)\n values,vectors = zip(*sort)\n return vectors,values", "def writingstyle_vector(comment):\n words = nltk.word_tokenize(comment)\n x1 = get_word_count(words)\n x2 = get_URL_count(comment)\n x3 = get_second_person_pronouns(words)\n x4 = get_question_exclamation_marks(comment)\n x5 = get_capital_words(words)\n return [x1, x2, x3, x4, x5]", "def calculate_vectors(self, spectrum_list: List[Spectrum]) -> np.ndarray:\n n_rows = len(spectrum_list)\n reference_vectors = np.empty(\n (n_rows, self.output_vector_dim), dtype=\"float\")\n binned_spectrums = self.model.spectrum_binner.transform(spectrum_list, progress_bar=self.progress_bar)\n for index_reference, reference in enumerate(\n tqdm(binned_spectrums,\n desc='Calculating vectors of reference spectrums',\n disable=(not self.progress_bar))):\n reference_vectors[index_reference, 0:self.output_vector_dim] = \\\n self.model.base.predict(self._create_input_vector(reference), verbose=0)\n return reference_vectors" ]
[ "0.6734293", "0.6306046", "0.6234584", "0.6101457", "0.6060832", "0.5943614", "0.59248465", "0.58844465", "0.58299667", "0.5818226", "0.5769358", "0.5711672", "0.5681364", "0.5677561", "0.5665824", "0.5648895", "0.5648038", "0.5617257", "0.5584856", "0.5566153", "0.55533874", "0.5549351", "0.55450785", "0.55408937", "0.5519725", "0.5513539", "0.5478773", "0.5468112", "0.54615676", "0.54583496", "0.5452427", "0.5451798", "0.5434233", "0.54270893", "0.54263294", "0.539407", "0.5382778", "0.53707063", "0.5360884", "0.53555906", "0.5348344", "0.5346587", "0.5335465", "0.53335965", "0.5333347", "0.5328098", "0.5318851", "0.5293418", "0.5288024", "0.5286855", "0.52518386", "0.5222179", "0.5213532", "0.5205181", "0.5182974", "0.5179402", "0.5173164", "0.51677644", "0.51654917", "0.51623297", "0.5155941", "0.51534885", "0.51497823", "0.51435363", "0.51390326", "0.5138973", "0.51383466", "0.5127259", "0.51131606", "0.51064354", "0.5102574", "0.50814384", "0.5075693", "0.50718886", "0.5063794", "0.5063229", "0.50628024", "0.5056477", "0.5049998", "0.50479114", "0.50435084", "0.5037508", "0.5037011", "0.5036213", "0.5032911", "0.5032408", "0.5024255", "0.50221014", "0.501569", "0.5001341", "0.499649", "0.4996095", "0.49907365", "0.49886072", "0.49872676", "0.49766478", "0.49750692", "0.49716914", "0.4961625", "0.49605945" ]
0.6465732
1
Extracts features from the final codebook vectors using the L2 norm. The data is passed in as an argument and the function produces len(data) feature vectors such that f(x_i) = [a_1 ... a_K] and a_j = || x_i - c_j ||, where c_j is the j-th codebook vector.
def extract_features(self, data): # TODO: Should feature extraction be done on the testing data? In the lecture notes # TODO: it is not done with the training data, but with the test data. # TODO: Maybe we should use the validate data when we do cross-validation. features = np.zeros([len(data)*self.K]).reshape(len(data), self.K) for i in range(len(data)): for j in range(self.K): features[i][j] = np.linalg.norm(data[i] - self.cb_vectors[j]) return features
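For reference, the same feature map f(x_i) = [||x_i - c_1||, ..., ||x_i - c_K||] can be computed without the double loop. The sketch below is an editorial addition rather than part of the dataset row; it assumes data is an (N, n) array and cb_vectors a (K, n) array, matching the shapes used in the method above.

    # Minimal vectorized sketch of the L2 feature extraction (assumed shapes:
    # data is (N, n), cb_vectors is (K, n)); equivalent to the nested loops above.
    import numpy as np
    from scipy.spatial.distance import cdist

    def extract_features_vectorized(data, cb_vectors):
        # Entry [i, j] holds ||x_i - c_j||_2, so each row is one feature vector.
        return cdist(np.asarray(data), np.asarray(cb_vectors), metric="euclidean")

Using cdist keeps the O(N*K) distance computation in compiled code, which matters once the dataset or the codebook grows.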
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_liwc_features(train_data, test_data):\n print(\"getting liwc features\")\n train_liwc_matrix = []\n test_liwc_matrix = []\n for phrase in train_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if key in liwc_scores.keys():\n # print(key)\n # print(liwc_scores[key])\n feature_vector.append(liwc_scores[key])\n else:\n feature_vector.append(0)\n # print(feature_vector)\n train_liwc_matrix.append(feature_vector)\n for phrase in test_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if key in liwc_scores.keys():\n # print(key)\n # print(liwc_scores[key])\n feature_vector.append(liwc_scores[key])\n else:\n feature_vector.append(0)\n test_liwc_matrix.append(feature_vector)\n # print(train_liwc_matrix)\n return sparse.csr_matrix(train_liwc_matrix), sparse.csr_matrix(test_liwc_matrix)", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. 
Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def generateFeatures(self, data):\n pass", "def fvector(data, method ):\n\n fv = 0\n if method['type'] == 'lbp':\n \n\n lbpkern = lbpsimple.generateKernel2()\n \n imlbp = lbpsimple.lbp2oneslice(data, lbpkern)\n\n fv,bins = lbpsimple.features(imlbp)\n\n #pdb.set_trace();\n elif method['type'] == 'hist':\n \n fv, bins = numpy.histogram( data,range(-200,2000,20))\n fv = fv[10:15]\n #fv, bins = numpy.histogram( data)\n pass\n\n else:\n raise Exception('Unknow method for feature vector: %s' %(method))\n\n return fv", "def create_vectorized_features(data_dir, feature_version=2):\n extractor = PEFeatureExtractor(feature_version)\n\n print(\"Vectorizing training set\")\n X_path = os.path.join(data_dir, \"X_train.dat\")\n y_path = os.path.join(data_dir, \"y_train.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"train_features_{}.jsonl\".format(i)) for i in range(6)]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)\n\n print(\"Vectorizing test set\")\n X_path = os.path.join(data_dir, \"X_test.dat\")\n y_path = os.path.join(data_dir, \"y_test.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"test_features.jsonl\")]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)", "def bag_of_words_vectorizer(datafile, k_features):\n data = []\n labels = []\n\n for jsoned_entity in open(\"data.json\", errors=\"ignore\").readlines():\n entity = json.loads(jsoned_entity)\n if entity[\"lang\"] == \"en\":\n data.append(entity[\"text\"])\n labels.append(entity[\"label\"])\n\n vectorizer = TfidfVectorizer(stop_words=get_stop_words(\"english\"))\n data = vectorizer.fit_transform(data)\n data = SelectKBest(chi2, k=k_features).fit_transform(data, labels)\n\n for vector_label_batch in batch(zip(data, labels), config.BATCH_SIZE):\n vectors = []\n labels = []\n for vec_label in vector_label_batch:\n vectors.append(vec_label[0].toarray())\n labels.append(vec_label[1])\n\n X = np.vstack(vectors)\n Y = np_utils.to_categorical(labels, 2)\n yield X, Y", "def _featurize(self, predictions: SequenceSample) -> List[np.ndarray]:\n feature_vectors: List[np.ndarray] = []\n source = predictions.origin_words\n\n char_nn_scores = self.char_nn_lm_score(predictions.paths)\n word_nn_scores = self.word_nn_lm_score(predictions.paths)\n\n for i, (score, hypothesis) in enumerate(zip(predictions.scores, predictions.paths)):\n obss = list(zip(hypothesis, source))\n length = len(source)\n feature_vector = np.array([\n 1.,\n length,\n self.language_model.score(hypothesis) / length,\n char_nn_scores[i],\n word_nn_scores[i],\n score / length,\n sum(w in self.language_model for w in hypothesis) / length,\n sum(h[:self.prefix_size] == s[:self.prefix_size] for h, s in obss) / length,\n sum(h[-self.suffix_size:] == s[-self.prefix_size:] for h, s in obss) / length,\n self.language_model.score(hypothesis) * score / length,\n np.mean([editdistance.eval(h, s) for h, s in obss]),\n np.mean([float(obs in self.train_set_uniq) for obs in obss]),\n np.mean([self.train_counter.get(obs, self.discount) for obs in obss]),\n ])\n feature_vectors.append(feature_vector)\n return feature_vectors", "def 
vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n ###TODO\n \n features = []\n feature_freq = {}\n vocabulary = {}\n \n # 2 case : for vocab\n # case 1: \n if (vocab == None):\n \n for doc in tokens_list: \n #print('doc#=%d tokens=%s'%(i,doc)) \n data = featurize(doc,feature_fns)\n #print('data=',data)\n \n for feature in data: \n if feature[1] > 0 : \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n if feature[0] not in vocabulary.keys() :\n vocabulary.setdefault(feature[0], None) \n \n features.append(data)\n \n # sort vocab according to features (alphabetical order)\n vacab_list = sorted(feature_freq.keys(), key =lambda x: x,reverse=False)\n \n for colIndex,term in enumerate(vacab_list) :\n #print('colIndex = %d, term = %s'%(colIndex,term))\n vocabulary[term] = colIndex\n\n else: # case 2 \n \n # vocab already present\n #print('Vocab already present')\n vocabulary = vocab.copy() \n \n \n for doc in tokens_list: \n data = featurize(doc,feature_fns) \n \n test_data = [] \n for feature in data: \n # only take feature present in vocab \n if feature[0] in vocabulary.keys():\n #print('feature = ',feature) \n if feature[1] > 0 : \n test_data.append(feature) \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n #print('test_data = ',len(test_data)) \n features.append(test_data)\n #test_data.clear()\n #print('features = ',features)\n \n \n # build a csr_matrix \n row = []\n col = []\n data = [] \n \n for docID,feat_list in enumerate(features) :\n for term in feat_list:\n if (feature_freq[term[0]] >= min_freq): # (zero values are not stored)\n \n row.append(docID)\n col.append(vocabulary[term[0]])\n data.append(term[1])\n \n #print('row =',row)\n #print('col =',col)\n #print('data=',data)\n \n X = csr_matrix((data, (row, col)), shape=(len(features), len(vocabulary)), dtype=np.int64)\n \n #print('X ->')\n #print(X.toarray())\n #print(' size of X = ',X.get_shape())\n \n return(X, vocabulary)", "def extract_features(data_dir,mode='train'):\n files = get_files(data_dir)\n t0 = time.time()\n features = list()\n labels = list()\n for f in files:\n freq = get_frequencies(f)\n if mode=='train':\n sents = corpus_reader(f)\n labels.extend(d2l(sents,f,freq))\n elif mode=='decode':\n sents = corpus_reader(f,tag='pos')\n else:\n print('Invalid mode!')\n break\n features.extend(d2f(sents,f,freq)) \n dt = time.time() - t0\n print('Total feature extraction time: %d seconds' % dt)\n return features,labels", "def _get_word2vec_features(x, word2vec, all_words_per_tweet, max_tweet_len):\n\n features = np.zeros((len(x), max_tweet_len, word2vec.vector_size))\n\n for i, tweet_words in enumerate(all_words_per_tweet):\n tweet_repr = np.array(\n [word2vec.wv[r] if r in word2vec.wv.vocab else np.zeros(word2vec.vector_size) for r in tweet_words])\n features[i][:len(tweet_repr), :word2vec.vector_size] = tweet_repr\n\n return features", "def images_to_feature_vectors(images, bbox_size=None, train=False):\n # If no bounding box size is supplied then compute a suitable\n # bounding box by examining sizes of the supplied images.\n if bbox_size is None:\n bbox_size = get_bounding_box_size(images)\n\n bbox_h, bbox_w = bbox_size\n nfeatures = bbox_h * bbox_w\n fvectors = np.empty((len(images), nfeatures))\n\n for i, image in enumerate(images):\n padded_image = np.ones(bbox_size) * 255\n h, w = image.shape\n h = min(h, bbox_h)\n w = min(w, bbox_w)\n\n \"\"\"Here 
I've centred the characters, as I believe the covariance\n matrices will more easily pick up distinct features of characters when\n they are centrally aligned (instead of an L being in the same position\n as the right hand side of an M, it'd be in the middle, where there'd be\n a clearer distinction as the middle of an M doesn't usually extend a\n full character height, whereas an L will).\n \"\"\"\n h_start = round((bbox_h/2)-(h/2))\n w_start = round((bbox_w/2)-(w/2))\n padded_image[h_start:h_start+h, w_start:w_start+w] = image[0:h, 0:w]\n\n #----------Denoising\n #Simple thresholding\n threshold = lambda image: np.where(image > 127, 255, 0)\n\n #By histogram analysis, I'm fairly certain x is 90 for page 2. \n #Using this denoising improves page 2 significantly, but only that page.\n threshold2 = lambda image: np.where(image > 255-90, 255, image)\n\n #This method \"stretches\" all the values away from 128, which I thought\n # may be a marginally better approach than hard thresholding as it'd\n # preserve some of the \"confidence\" inherently expressed in the greyness\n # of each pixel.\n def stretch(image, factor=5):\n image = np.round((image-128)*factor + 128)\n image = np.where(image > 255, 255, image)\n image = np.where(image < 0, 0, image)\n return image\n\n #I tried median sizes 2, 3, & 4. I found size 3 works best.\n median = lambda image: scipy.ndimage.median_filter(image, size=3)\n\n #I found that if the median kernel is shaped vertically, it performs\n # better. I suspect this is due to the fact that a lot of characters are\n # composed of vertical lines.\n median2 = lambda image: scipy.ndimage.median_filter(image, size=(3,2))\n\n #I decided to try using a diamond shaped vertical footprint to squeeze\n # some extra % out, as the font doesn't tend to have square corners.\n # This brought a minor improvement over a simple kernel of size (3,2).\n padded_image = scipy.ndimage.median_filter(padded_image, \n footprint=np.array([[0,1,0],[1,1,1],[1,1,1],[0,1,0]]))\n\n #Reshaping to a column vector.\n fvectors[i, :] = padded_image.reshape(1, nfeatures)\n\n return fvectors", "def _vectorize_data(self, docs: []):\n print('Vectorizing data...')\n tfidf = TfidfVectorizer()\n encoded_data = tfidf.fit_transform(docs)\n return encoded_data", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words", "def get_all_features(train_data, test_data):\n #train_wc_matrix, test_wc_matrix = get_word_count_features(train_data, test_data)\n train_idf_matrix, test_idf_matrix = get_idf_features(train_data, test_data)\n train_ngram_matrix, test_ngram_matrix = get_ngram_features(train_data, test_data)\n # train_liwc_matrix, test_liwc_matrix = get_liwc_features(train_data, test_data)\n return sparse.hstack([train_idf_matrix, train_ngram_matrix]), \\\n sparse.hstack([test_idf_matrix, test_ngram_matrix])", "def create_feature_vector(features, length):\n START_IDX = 0\n END_IDX = 1\n\n output_vector = np.zeros(length)\n\n # negative strand\n for loc
in features[-1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 1 \n\n # positive strand\n for loc in features[1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 2\n\n return output_vector", "def useTfidfVectorizer(self, data):\n if self.results:\n print()\n print(\"Extracting features from the training dataset using a sparse vectorizer\", end=\" - \")\n t0 = time()\n \n vectorizer = TfidfVectorizer(max_features=10000, stop_words='english',norm='l2',use_idf=True, sublinear_tf=False,encoding='utf-8')\n matrix = vectorizer.fit_transform(data)\n \n if self.results:\n print(\"done in %0.3fs\" % (time() - t0))\n print(\"n_samples: %0.3d, n_features: %d\" % matrix.shape)\n print()\n \n feature_names = vectorizer.get_feature_names()\n return matrix, feature_names", "def get_train_data(self, train_data):\n X = []\n Y = []\n\n # word 2 indices and tag 2 indices\n w2i = {} # word to index\n c2i = {} # char to index\n tag2idx = {} # tag2idx\n\n w2i[\"_UNK\"] = 0 # unk word / OOV\n c2i[\"_UNK\"] = 0 # unk char\n c2i[\"<w>\"] = 1 # word start\n c2i[\"</w>\"] = 2 # word end index\n \n \n num_sentences=0\n num_tokens=0\n for instance_idx, (words, tags) in enumerate(read_conll_file(train_data)):\n instance_word_indices = [] #sequence of word indices\n instance_char_indices = [] #sequence of char indices\n instance_tags_indices = [] #sequence of tag indices\n\n for i, (word, tag) in enumerate(zip(words, tags)):\n\n # map words and tags to indices\n if word not in w2i:\n w2i[word] = len(w2i)\n instance_word_indices.append(w2i[word])\n\n if self.c_in_dim > 0:\n chars_of_word = [c2i[\"<w>\"]]\n for char in word:\n if char not in c2i:\n c2i[char] = len(c2i)\n chars_of_word.append(c2i[char])\n chars_of_word.append(c2i[\"</w>\"])\n instance_char_indices.append(chars_of_word)\n\n if tag not in tag2idx:\n tag2idx[tag]=len(tag2idx)\n\n instance_tags_indices.append(tag2idx.get(tag))\n\n num_tokens+=1\n\n num_sentences+=1\n\n X.append((instance_word_indices, instance_char_indices)) # list of word indices, for every word list of char indices\n Y.append(instance_tags_indices)\n\n\n print(\"%s sentences %s tokens\" % (num_sentences, num_tokens), file=sys.stderr)\n print(\"%s w features, %s c features \" % (len(w2i),len(c2i)), file=sys.stderr)\n if self.c_in_dim == 0:\n print(\"char features disabled\", file=sys.stderr)\n\n assert(len(X)==len(Y))\n\n # store mappings of words and tags to indices\n self.set_indices(w2i, c2i, tag2idx)\n\n return X, Y", "def generate_feature_vector(self, test_document, n):\n m = len(self.bag_of_features)\n feature_vector = np.zeros(m)\n for feature, col in self.bag_of_features.items():\n if feature in test_document.tfs['all'].keys():\n tf = test_document.tfs['all'][feature]\n df = self.df_term[feature]\n tf_idf = calculate_tf_idf(tf=tf, df=df, doc_num=n)\n feature_vector[col] = tf_idf\n\n np.linalg.norm(feature_vector, axis=0)\n test_document.feature_vector = feature_vector\n return feature_vector", "def feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg):\n # English stopwords from nltk\n stopwords = set(nltk.corpus.stopwords.words('english'))\n \n # Determine a list of words that will be used as features. 
\n # This list should have the following properties:\n # (1) Contains no stop words\n # (2) Is in at least 1% of the positive texts or 1% of the negative texts\n # (3) Is in at least twice as many positive texts as negative texts, or vice-versa.\n # YOUR CODE HERE\n\n pos_unique_words = []\n neg_unique_words = []\n intermediate_vec = []\n feature_vec = []\n\n for line in train_pos:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n pos_unique_words.append(word)\n\n for line in train_neg:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n neg_unique_words.append(word)\n\n\n pos_word_dict = collections.Counter(pos_unique_words)\n neg_word_dict = collections.Counter(neg_unique_words)\n\n unique_words = list(set(pos_word_dict.keys()).intersection(set(neg_word_dict.keys())))\n\n for word in unique_words:\n if(pos_word_dict[word] >= 0.01*len(train_pos) or neg_word_dict[word] >= 0.01*len(train_neg)):\n intermediate_vec.append(word)\n\n for word in intermediate_vec:\n if (int(pos_word_dict[word]) >= 2*int(neg_word_dict[word]) or neg_word_dict[word] >= 2*pos_word_dict[word]):\n feature_vec.append(word)\n\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n # Using the above words as features, construct binary vectors for each text in the training and test set.\n # These should be python lists containing 0 and 1 integers.\n # YOUR CODE HERE\n for line in train_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_pos_vec.append(lst)\n\n for line in train_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_neg_vec.append(lst)\n\n for line in test_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_pos_vec.append(lst)\n\n for line in test_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_neg_vec.append(lst)\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def get_idf_features(train_data, test_data):\n tfidf = TfidfVectorizer(tokenizer = tokenize, ngram_range = (1, 2))\n tfidf.fit(train_data)\n return tfidf.transform(train_data), tfidf.transform(test_data)", "def get_word2vec_features(x_train, x_test):\n\n all_words_per_tweet_train = [nltk.word_tokenize(sent) for sent in x_train[\"text\"]]\n all_words_per_tweet_test = [nltk.word_tokenize(sent) for sent in x_test[\"text\"]]\n\n word2vec = Word2Vec(all_words_per_tweet_train, min_count=5)\n word2vec.train(all_words_per_tweet_train, total_examples=word2vec.corpus_count, epochs=15)\n\n max_tweet_len = np.max(\n [np.max([len(t) for t in all_words_per_tweet_train]), np.max([len(t) for t in all_words_per_tweet_test])])\n\n features_train = _get_word2vec_features(x_train, word2vec, all_words_per_tweet_train, max_tweet_len)\n features_test = _get_word2vec_features(x_test, word2vec, all_words_per_tweet_test, max_tweet_len)\n\n return features_train, features_test", "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def vectorize_data(self, data, idf=False):\r\n\r\n # collect only the cleaned text of the tweet\r\n text = []\r\n for tweet in data:\r\n if not
tweet.get_processed_text():\r\n tweet.set_processed_text(self.clean_tweet(tweet))\r\n text.append(tweet.get_processed_text())\r\n\r\n # vectorize tweets\r\n\r\n if idf:\r\n vectorizer = TfidfVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n else:\r\n vectorizer = CountVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n\r\n # vectorizer = TFVectorizing()\r\n vectors = vectorizer.fit_transform(text)\r\n return vectors", "def extract_features(docs_train, docs_test, perform_dimensionality_reduction):\n word_ngram_range = (1, 4)\n char_ngram_range = (2, 5)\n\n '''\n Build an n grams vectorizer with word_n_gram_range and char_n_gram_range\n '''\n\n ngrams_vectorizer = create_n_grams_vectorizer(\n word_ngram_range, char_ngram_range)\n\n # use the n_gram vectorizer to form the train and test dataset\n # it will take a lot of time... i think\n X_train = ngrams_vectorizer.fit_transform(docs_train)\n X_test = ngrams_vectorizer.transform(docs_test)\n print(\"Performed fitting of data\")\n\n ############ dimensionality reduction ################\n\n if(perform_dimensionality_reduction == True):\n X_train, X_test = perform_dimensionality_reduction(X_train, X_test)\n\n # print(docs_train[0])\n return X_train, X_test", "def vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n# counter = defaultdict(int)\n# data, row, col, result = [], [], [], []\n\n# for tokens in tokens_list:\n# feats = featurize(tokens, feature_fns)\n# result.append(feats)\n# for feat in feats:\n# counter[feat[0]] += 1\n\n# if vocab == None:\n# vocab = defaultdict(int)\n# index = 0\n# for val in sorted(counter.items()):\n# if (val[1] >= min_freq):\n# vocab[val[0]] = index\n# index += 1\n\n# for index, tokens in enumerate(tokens_list):\n# for res in sorted(result[index]):\n# if (res[0] in vocab.keys()):\n# data.append(res[1])\n# col.append(vocab[res[0]])\n# row.append(index)\n\n# return csr_matrix((data, (row, col)), dtype=np.int64), vocab\n \n if vocab == None:\n d_vocab = defaultdict(list)\n doc_map = defaultdict(dict)\n for doc_no in range(len(tokens_list)):\n feats = featurize(tokens_list[doc_no], feature_fns)\n feat_dic = dict(feats)\n doc_map[doc_no] = feat_dic\n for feat in feat_dic:\n d_vocab[feat].append(doc_no)\n\n index = 0\n new_vocab = {}\n for key in sorted(d_vocab):\n if len(d_vocab[key]) >= min_freq:\n new_vocab[key] = index\n index += 1\n\n row = []\n column = []\n data = []\n for key in sorted(new_vocab.keys()):\n for doc_no in sorted(d_vocab[key]):\n if key in doc_map[doc_no]:\n row.append(doc_no)\n column.append(new_vocab[key])\n data.append(doc_map[doc_no][key])\n\n return csr_matrix((data, (row, column)), shape=(len(tokens_list), len(new_vocab)),dtype=np.int64), new_vocab\n \n\n elif vocab != None:\n row = []\n column = []\n data = []\n for doc_no in range(len(tokens_list)):\n feat_dic = dict(featurize(tokens_list[doc_no],feature_fns))\n for feat in feat_dic:\n if feat in vocab:\n row.append(doc_no)\n column.append(vocab[feat])\n data.append(feat_dic[feat])\n\n return csr_matrix((data,(row,column)), shape=(len(tokens_list),len(vocab)),dtype=np.int64),vocab", "def get_features(docs, max_length):\n docs = list(docs)\n Xs = numpy.zeros((len(docs), max_length), dtype='int32')\n for i, doc in enumerate(docs):\n j = 0\n for token in doc:\n vector_id = token.vocab.vectors.find(key=token.orth)\n if vector_id >= 0:\n Xs[i, j] = vector_id\n else:\n Xs[i, j] = 0\n j += 1\n if j >= max_length:\n break\n return Xs", "def gen_review_vecs(reviews, model, 
num_features):\n\n curr_index = 0\n review_feature_vecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\n\n # index2word is a list consisting of all words in the vocabulary\n # Convert list to set for speed\n index2word_set = set(model.wv.index2word)\n for review in reviews:\n\n #if curr_index%1000 == 0.:\n # print \"Vectorizing review %d of %d\" % (curr_index, len(reviews))\n \n review_feature_vecs[curr_index] = review_to_vec(review, model, num_features , index2word_set)\n curr_index += 1\n \n return review_feature_vecs", "def get_ngram_features(train_data, test_data):\n print(\"getting ngram features\")\n ngram_vectorizer = CountVectorizer(ngram_range = (1, 2))\n ngram_vectorizer = ngram_vectorizer.fit(train_data)\n return ngram_vectorizer.transform(train_data), ngram_vectorizer.transform(test_data)", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. 
question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' 
digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature", "def __call__(self, features):\n norm = []\n for data in features:\n if all(x == 0 for x in data):\n norm.append(data)\n else:\n scale = sum(x*x for x in data) ** 0.5\n normalized_data = [x / scale for x in data]\n norm.append(normalized_data)\n \n return norm", "def get_features(data, start_num=0, end_num=None, scale=False):\n features = list(data.columns)[start_num:end_num]\n feature_n = len(features)\n\n if scale:\n features = list(map(lambda x: \"scale({})\".format(x), features))\n features = \" + \".join(features)\n\n else:\n features = \" + \".join(features)\n\n return feature_n, features", "def extract_feature_vectors(model, data_loader, parameters, features_file_path):\n feature_vectors, label_vectors = [], []\n\n # Set model to evaluation mode\n model.eval()\n\n # Show progress bar while iterating over mini-batches\n with tqdm(total=len(data_loader)) as progress_bar:\n for i, (X_batch, Y_batch) in enumerate(data_loader):\n\n # Dimensions of the input Tensor\n batch_size, channels, height, width = X_batch.size()\n\n # If GPU available, enable CUDA on data\n if parameters.cuda:\n X_batch = X_batch.cuda()\n Y_batch = Y_batch.cuda()\n\n # Wrap the input tensor in a Torch Variable\n X_batch_variable = Variable(X_batch, volatile=True)\n\n # Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features\n Y_predicted, features = model(X_batch_variable)\n\n # Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to\n # CPU, and convert it to a NumPy array\n features_numpy = features.data.cpu().numpy()\n\n # Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array\n Y_numpy = Y_batch.cpu().numpy()\n\n # For each example in the batch, record its features and labels\n for j in range(batch_size):\n feature_vectors.append(features_numpy[j,:])\n label_vectors.append(Y_numpy[j,:])\n\n progress_bar.update()\n\n utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors)", "def historyFeatures(data,k=1,stepsize=1):\n rows, cols = data.shape\n hcols = k*cols\n\n hist = np.zeros((rows,hcols))\n \n # Create the array of feature vectors\n # Default to the initial value of hist if there would be a range error\n for i in range(rows):\n for j in range(k):\n if (i-(j*stepsize)) >= 0:\n for c in range(cols):\n hist[i,((j*cols)+c)] = data[i-(j*stepsize),c]\n else:\n continue\n return hist", "def predict(data, word_form, cues, semvecs, n_fbsfs, fbsfs_map, n_events, n_vec_dims):\n s = get_s_matrix(data, word_form, semvecs, n_events, n_vec_dims)\n fc = get_fc_matrix(data, cues, n_events, n_fbsfs, fbsfs_map)\n fc_inv = get_fc_inv(fc)\n w = get_w_matrix(fc_inv, s)\n return fc @ w", "def get_features_from_pca(feat_num, feature):\n\n if feature == 'HoG':\n vocab = np.load('vocab_hog.npy')\n elif feature == 'SIFT':\n vocab = np.load('vocab_sift.npy')\n\n # Your code here. 
You should also change the return value.\n\n def _get_PCA_vectors(feat_num, vocab):\n\n mean = vocab.mean(axis=0, keepdims=True)\n vocab_normalized = vocab - np.multiply(np.ones([vocab.shape[0], mean.shape[0]]),\n mean)\n #TEST: mean unit test\n #mean = vocab_normalized.mean(axis=0, keepdims=True)\n\n cov_matrix = np.cov(np.transpose(vocab_normalized))\n sigma, V = np.linalg.eig(cov_matrix)\n order_sigma = np.argsort(sigma)\n\n PCA_vectors = []\n i = 1\n for f in range(len(order_sigma)):\n eigen_vector = V[:, order_sigma[i]]\n if all(True for _ in np.isreal(eigen_vector)):\n PCA_vectors.append(np.real(eigen_vector))\n i += 1\n if len(PCA_vectors) == feat_num:\n break\n\n return np.array(PCA_vectors)\n\n #MAIN\n PCA_vectors = _get_PCA_vectors(feat_num, vocab)\n\n d = np.dot(vocab, np.transpose(PCA_vectors))\n\n return np.dot(vocab, np.transpose(PCA_vectors))\n #return np.zeros((vocab.shape[0],2))", "def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec", "def compute_embeddings(encoder, data_batches):\n\n vectors = []\n for batch in iter(data_batches):\n X, Y = batch\n X_embedded = encoder(X)\n for vec in np.array(X_embedded):\n vectors.append(vec)\n vectors = np.array(vectors)\n\n return vectors", "def generateFeatures(src_image, label, knn=None):\n \n # computes the features\n f_vec = extractFeatures(src_image, args.features)\n \n # quantize, if codebook is present\n if not (knn == None):\n # implementation using opencv\n f_vec1 = getHistogramOfVisualWords(f_vec, knn)\n #print f_vec1[0]\n \n# # alternative implementation using scipy, results in the same numbers\n# codebook = loadCodebook()\n# codes,dist = vq.vq(f_vec, codebook)\n# f_vec2, bin_edges = histogram(codes,\n# bins=range(codebook.shape[0]+1),\n# normed=True)\n# print f_vec2[0]\n \n f_vec = f_vec1\n else:\n # flatten the array\n f_vec = np.reshape(f_vec, (1,f_vec.size))\n \n # prepend the label\n f_vec = np.insert(f_vec, 0, label)\n \n return f_vec", "def feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg):\n # Doc2Vec requires LabeledSentence objects as input.\n # Turn the datasets from lists of words to lists of LabeledSentence objects.\n # YOUR CODE HERE\n labeled_train_pos = []\n labeled_train_neg = []\n labeled_test_pos = []\n labeled_test_neg = []\n i = 0\n for line in train_pos:\n labeled_train_pos.append(LabeledSentence(line, ['TRAIN_POS_%i' % i]))\n i += 1\n i = 0\n for line in train_neg:\n labeled_train_neg.append(LabeledSentence(line, ['TRAIN_NEG_%i' % i]))\n i += 1\n i = 0\n for line in test_pos:\n labeled_test_pos.append(LabeledSentence(line, ['TEST_POS_%i' % i]))\n i += 1\n i = 0\n for line in test_neg:\n labeled_test_neg.append(LabeledSentence(line, ['TEST_NEG_%i' % i]))\n i += 1\n\n # Initialize model\n model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=4)\n sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + labeled_test_neg\n model.build_vocab(sentences)\n\n # Train the model\n # This may take a bit to run \n for i in range(5):\n print \"Training iteration %d\" % (i)\n random.shuffle(sentences)\n model.train(sentences)\n\n # Use the docvecs function to extract the feature vectors for the training and test data\n # YOUR CODE HERE\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n for j in range(len(train_pos)):\n 
train_pos_vec.append(model.docvecs['TRAIN_POS_%i' % j])\n for j in range(len(train_neg)):\n train_neg_vec.append(model.docvecs['TRAIN_NEG_%i' % j])\n for j in range(len(test_pos)):\n test_pos_vec.append(model.docvecs['TEST_POS_%i' % j])\n for j in range(len(test_neg)):\n test_neg_vec.append(model.docvecs['TEST_NEG_%i' % j])\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def vectorize_text(corpus):\n bag_of_words_model = CountVectorizer()\n\n # performs the above described three tasks on the given data corpus.\n dense_vec_matrix = bag_of_words_model.fit_transform(corpus).todense()\n bag_of_word_df = pd.DataFrame(dense_vec_matrix)\n bag_of_word_df.columns = sorted(bag_of_words_model.vocabulary_)\n return bag_of_word_df", "def compute_sklearn_features():\n text_dir = 'text_model'\n emb_dir = 'embedding_weights'\n filename = 'glove.6B.50d.txt'\n emb_name = 'glove'\n emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']\n post_size = 200\n df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)\n\n X = np.stack(df_all['text_list'])\n y = df_all['search_query'].values\n\n id_to_word = {i: k for k, i in word_to_id.iteritems()}\n config = {'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'batch_size': 128,\n 'vocab_size': len(word_to_id),\n 'embedding_dim': embedding.shape[1],\n 'post_size': post_size,\n 'fc1_size': 16,\n 'nb_emotions': len(emotions),\n 'dropout': 1.0, # Proba to keep neurons\n 'max_grad_norm': 5.0, # Maximum norm of gradient\n 'init_scale': 0.1, # Weights initialization scale\n 'initial_lr': 1e-3,\n 'lr_decay': 0.5,\n 'max_epoch_no_decay': 2, # Number of epochs without decaying learning rate\n 'nb_epochs': 10} # Maximum number of epochs\n \n tf.reset_default_graph()\n with tf.Session() as sess:\n print('Computing sklearn features:')\n init_scale = config['init_scale']\n initializer = tf.random_uniform_initializer(-init_scale, init_scale) \n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n config['nb_epochs'] = 1\n m_train = WordModel(config)\n sess.run(tf.global_variables_initializer())\n sess.run(m_train.embedding_init, feed_dict={m_train.embedding_placeholder: embedding})\n\n batch_size = m_train.config['batch_size']\n initial_lr = m_train.config['initial_lr']\n \n nb_batches = X.shape[0] / 
batch_size\n dropout_param = 1.0\n ops = m_train.h1\n \n sess.run(tf.assign(m_train.learning_rate, initial_lr))\n\n X, y = _shuffling(X, y)\n X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))\n y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))\n h1_list = []\n for i in range(nb_batches):\n curr_input = X_reshaped[i, :, :]\n curr_target = y_reshaped[i, :]\n h1_features = sess.run(ops, feed_dict={m_train.input_data: curr_input, \n m_train.target: curr_target,\n m_train.keep_prob: dropout_param})\n h1_list.append(h1_features)\n\n X_sklearn = np.vstack(h1_list)\n y_sklearn = y_reshaped.reshape((-1))\n print('Finished')\n return X_sklearn, y_sklearn", "def prepare_length_features(text_counts, custom_vec, length_processed_flora_data_frame):\n vocab = custom_vec.get_feature_names() # https://stackoverflow.com/questions/39121104/how-to-add-another-feature\n # -length-of-text-to-current-bag-of-words-classificati\n\n length_model_data_frame = pd.DataFrame(text_counts.toarray(), columns=vocab)\n length_model_data_frame = pd.concat(\n [length_model_data_frame, length_processed_flora_data_frame['length'].reset_index(drop=True)], axis=1)\n\n length_model_data_frame_values = length_model_data_frame.values.astype(np.float64)\n length_model_sparse = sparse.csr_matrix(length_model_data_frame_values)\n\n assert length_model_sparse.shape > text_counts.shape, 'Length model should have one more column of data than BOW ' \\\n 'model '\n return length_model_sparse", "def test_scale_features_L2_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.0304526, 0.409996], [-0.999536, 0.816936], [-0.000485946, 0.40561]])\n\n # perform L2 normalization and check answer\n cdata.scale_features('L2 norm')\n self.assertTrue(allclose(cdata.data, answer))", "def generate_new_features(data):\n utils.save_log('{0} :: {1}'.format(\n generate_new_features.__module__,\n generate_new_features.__name__))\n\n data = create_feature_is_credit_debit(data)\n data = create_feature_value_category(data)\n data = create_features_from_transaction_timestamp(data)\n data = create_feature_based_on_spent_by_timestamp(data)\n list_of_categories = config.feature_categorical_to_check_spent_value\n data = create_features_avg_ratio_value_by_categories(data,\n list_of_categories)\n return data", "def newsgroup_featurize(data_list):\n # TODO: Implement featurization of input.\n all_text = data_list[\"train\"][\"input\"] + data_list[\"test\"][\"input\"] + data_list[\"dev\"][\"input\"]\n word_dict = word_count(all_text)\n bow_noun_features = bow_noun(word_dict) # 11,925 features\n train_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"train\"][\"input\"]])\n dev_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"dev\"][\"input\"]])\n test_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"test\"][\"input\"]])\n return train_input, dev_input, test_input", "def build_data_vectors(annotations, tweets, Tfidf_vect, adr_lexicon_dict, should_balance_set=True):\n\n def vectorize_word(word):\n \"\"\"gives vectorized value from TfidfVectorizer for the given word\n If the word is not part of vocabulary, 0 will be returned\n\n # Arguments\n word - word to vectorize\n\n # Returns\n vectorized value\n \"\"\"\n if word in 
Tfidf_vect.vocabulary_:\n i = Tfidf_vect.vocabulary_[word]\n return Tfidf_vect.idf_[i]\n else:\n return 0\n\n def clean_text(text):\n \"\"\"Cleans the text\n This code snippet is taken from https://towardsdatascience.com/multi-label-text-classification-with-scikit-learn-30714b7819c5\n Author: Susan Li\n\n # Arguments\n text - text to clean\n\n # Returns\n cleaned text\n \"\"\"\n text = text.lower()\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\'scuse\", \" excuse \", text)\n text = re.sub('\\W', ' ', text)\n text = re.sub('\\s+', ' ', text)\n text = text.strip(' ')\n return text\n\n X = []\n Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n for i, (k, v) in enumerate(annotations.items()):\n tweet_text = clean_text(tweets[k])\n tokens = word_tokenize(tweet_text)\n\n for annotation_index, annotation in enumerate(v):\n prev_token_adr = False\n\n annotated_text = clean_text(annotation['annotatedText'])\n annotated_text_tokens = word_tokenize(annotated_text)\n\n for index, focus_word in enumerate(tokens):\n focus_vector = []\n\n # for Context feature, get index for 3 surrounding words on each side of focus word\n if program_args.context_feature:\n focus_vector.append(vectorize_word(tokens[index-3]) if (index-3 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-2]) if (index-2 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-1]) if (index-1 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index]))\n focus_vector.append(vectorize_word(tokens[index+1]) if (index+1 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+2]) if (index+2 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+3]) if (index+3 < len(tokens)) else 0)\n\n if program_args.adrlexicon_feature:\n if focus_word in adr_lexicon_dict:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n if program_args.prev_adrlexicon_feature:\n if prev_token_adr:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n # assign class label\n if annotation['semanticType'] == 'ADR' and focus_word in annotated_text_tokens:\n Y.append(ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n adr_labels_size += 1\n prev_token_adr = True\n else:\n Y.append(NON_ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n nonadr_labels_size += 1\n prev_token_adr = False\n\n print(\" Dataset size: {}\".format(len(X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n if should_balance_set:\n X, Y = balance_set(X, Y, adr_labels_size, nonadr_labels_size)\n\n X = scipy.sparse.csr_matrix(X)\n return X, Y", "def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)", "def cluster_features(self):\n logger.info('Creating term-document matrix...')\n self._create_tdm()\n init_centroids = self.centroids_from_categories()\n\n # Cluster the features using specific centroids.\n logger.info('Clustering features...')\n self.kmeans = KMeans(init=init_centroids, n_init=1, max_iter=1, 
n_clusters=len(self.feature_categories))\n self.clusters = self.kmeans.fit_predict(self.tdm)\n\n # The feature vector maps key features (categories) to other features that occur in the same cluster.\n logger.info('Converting clusters to feature vectors...')\n feature_vectors = self.clusters_to_feature_vectors(category_features=list(self.feature_amenity_map.keys()))\n\n return feature_vectors", "def compute_features(names):\n Alphabet = ['a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j','k', 'l', 'm', 'n', 'o',\n 'p', 'q', 'r', 's', 't','u', 'v', 'w', 'x', 'y' , 'z']\n \n N = len(names)\n Feature_matrix = np.zeros((N, 260))\n for row in range(0, N):\n firstLast = names[row].split()\n first = firstLast[0] #First Name\n last = firstLast[1] #Last Name\n if(len(first) < 5):\n firstRange = len(first)\n else:\n firstRange = 5\n if(len(last) < 5):\n lastRange = len(last)\n else:\n lastRange = 5\n for index in range(0,firstRange): #iterate though first 5 letters of First name\n offset = 26 * index\n featureIndex = offset + Alphabet.index(first[index])\n Feature_matrix[row,featureIndex] = 1\n index = 4 #advance index in case length was less than 5 \n for Lastindex in range(0,lastRange): #iterate though first 5 letters of Last name\n index += 1\n offset = 26 * index\n featureIndex = offset + Alphabet.index(last[Lastindex])\n Feature_matrix[row,featureIndex] = 1\n return Feature_matrix", "def fit_features(data, max_features):\n ndata = []\n for rec in data:\n rec = list(rec)\n if len(rec) > max_features:\n rec = rec[:max_features]\n elif len(rec) < max_features:\n rec = rec + (max_features - len(rec)) * [0.0]\n ndata.append(rec)\n return np.array(ndata)", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def 
preprocess(self, data_f):\n \n return self.vec.transform(data_f.review)", "def build_sf(dta, clf, trained_tweets):\n X = [] # samples\n y = [] # features\n for tweet in trained_tweets:\n vector = w2v_vector(dta, tweet['text'])\n if vector is not None: # ValueError: setting an array element with a sequence :')\n X.append(vector)\n y.append(tweet['label'])\n return X, y", "def featurize(self, prev_tag, prev_word, token, next_word):\n features = np.array([])\n # one of our features will be the previous tag\n if None or prev_tag == 'O':\n features = np.append(features, [0])\n else:\n features = np.append(features, [1])\n # another set of features will be our word shape\n # condensed word shape form with the truncation thing\n\n # another feature will be our pos tags\n # use nltk pos tagging\n\n # another feature will be our word embeddings\n features = np.append(features, self.model[token])\n # and the final feature will be bias\n features = np.append(features, [1])\n return features", "def read_vectorized_features(data_dir, subset=None, feature_version=2):\n if subset is not None and subset not in [\"train\", \"test\"]:\n return None\n\n extractor = PEFeatureExtractor(feature_version)\n ndim = extractor.dim\n X_train = None\n y_train = None\n X_test = None\n y_test = None\n\n if subset is None or subset == \"train\":\n X_train_path = os.path.join(data_dir, \"X_train.dat\")\n y_train_path = os.path.join(data_dir, \"y_train.dat\")\n y_train = np.memmap(y_train_path, dtype=np.float32, mode=\"r\")\n N = y_train.shape[0]\n X_train = np.memmap(X_train_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"train\":\n return X_train, y_train\n\n if subset is None or subset == \"test\":\n X_test_path = os.path.join(data_dir, \"X_test.dat\")\n y_test_path = os.path.join(data_dir, \"y_test.dat\")\n y_test = np.memmap(y_test_path, dtype=np.float32, mode=\"r\")\n N = y_test.shape[0]\n X_test = np.memmap(X_test_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"test\":\n return X_test, y_test\n\n return X_train, y_train, X_test, y_test", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def vectorize_collection(feature_space, collection_path):\n sentences = load_collection_sentences(collection_path, __fape_files_to_load)\n # concatenate all the string lists\n sentences = reduce(lambda x,y: x[0]+y[0], sentences)\n return zip(sentences, map(vectorize, [feature_space]*len(sentences),\\\n sentences))", "def classify(self, data ):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def embed_vectors_2d(self,lag,embed,predict,percent=0.1):\n\n rsize = self.X.shape[0]\n csize = self.X.shape[1]\n\n r_lag,c_lag = lag\n rem,cem = embed\n\n\n # determine how many iterations we will have and\n # the empty feature and target matrices\n\n c_iter = csize - c_lag*(cem-1)\n r_iter = rsize - predict - r_lag*(rem-1)\n\n #randomly pick spots to be embedded\n ix = np.random.rand(r_iter,c_iter)<=percent\n r_inds,c_inds = np.where(ix)\n\n targets = np.zeros((len(r_inds),predict))\n features = np.zeros((len(r_inds),rem*cem))\n\n\n print(\"targets before loop:\", 
targets.shape)\n\n for ii in range(features.shape[0]):\n\n rs = r_inds[ii]\n cs = c_inds[ii]\n\n\n r_end_val = rs+r_lag*(rem-1)+1\n c_end_val = cs+c_lag*(cem-1)+1\n\n part = self.X[rs : r_end_val, cs : c_end_val ]\n\n features[ii,:] = part[::r_lag,::c_lag].ravel()\n targets[ii,:] = self.X[r_end_val:r_end_val+predict,cs + int(c_lag*(cem-1)/2)]\n\n\n return features,targets", "def compute_features(face_points,expectedNumberOfPoints=68):\n\tassert (len(face_points) >= expectedNumberOfPoints), \"len(face_points) must be at least \" + str(expectedNumberOfPoints)\n\t\n\tface_points = np.array(face_points)\n\tpoints_of_ancor = np.array([face_points[36],# Right eye right corne\n\t\t\t\t\tface_points[39],#Right eye left corne\n\t\t\t\t\tface_points[42],# left eye right corne\n\t\t\t\t\tface_points[45],# left eye left corne\n\t\t\t\t\tface_points[27],# Nose top\n\t\t\t\t\tface_points[33],# Nose tip\n\t\t\t\t\tface_points[48],# Mouth right corne\n\t\t\t\t\tface_points[57],# Mouth botton tip\n\t\t\t\t\tface_points[54],# Mouth left corne\n\t\t\t\t\tface_points[0],# face up right corne\n\t\t\t\t\tface_points[8],# face botton corne\n\t\t\t\t\tface_points[16]], dtype=\"double\")\n\tpoints_of_ancor_vectors = np.zeros((12,12,3))\n\tfor i in range(12):\n\t\tfor j in range (12):\n\t\t\tvectors_parts = [0.,0.,0.]\n\t\t\tif i == j:\n\t\t\t\tpoints_of_ancor_vectors[i][j][:] = vectors_parts\n\t\t\t\tcontinue\n\t\t\tdistance = [points_of_ancor[i][0] - points_of_ancor[j][0], points_of_ancor[i][1] - points_of_ancor[j][1]]\n\t\t\tnorm = math.sqrt(distance[0] ** 2 + distance[1] ** 2)#size of vec\n\t\t\tvectors_parts[0] = norm\n\t\t\tvectors_parts[1] = distance[0] / norm\n\t\t\tvectors_parts[2] = distance[1] / norm\n\t\t\tpoints_of_ancor_vectors[i][j][:] = vectors_parts\n\t\t\t\t\n\tprint( \"Good run\")\n\treturn np.array(points_of_ancor_vectors).reshape(1, -1)", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def predict(self, data, max_iteration=20, tol=1e-16):\n doc_topic_matrix = np.zeros([len(data), self.K], dtype=np.float)\n word_index_list = []\n for word in data:\n word_index_list.append(self.word2id[word])\n for i in range(max_iteration + 1):\n doc_topic_matrix_new = self.topic_word_matrix[:, word_index_list].T\n doc_topic_matrix_new = doc_topic_matrix_new.astype(np.float)\n doc_topic_matrix_new *= (doc_topic_matrix_new.sum(axis=0) - doc_topic_matrix + self.alpha)\n doc_topic_matrix_new /= doc_topic_matrix_new.sum(axis=1)[:, np.newaxis]\n delta_naive = np.abs(doc_topic_matrix_new - doc_topic_matrix).sum()\n doc_topic_matrix = doc_topic_matrix_new\n if delta_naive < tol:\n break\n theta_doc = doc_topic_matrix.sum(axis=0) / doc_topic_matrix.sum()\n return theta_doc", "def processdata(path = os.getcwd(), mode = 'train'):\n # ind_vector: raw counts of ngrams occurring in each industry.\n # 
example: ('consultant', 'consultant'): 112, ('business', 'analyst'): 106, ('operations', 'manager'): 98, ('network', 'network'): 97, ('director', 'of'): 93, ('account', 'director'): 86, ('co', 'ordinator'): 82, ('product', 'product'): 79, ('it', 'it'): 77, ('programme', 'manager'): 77\n ind_vectors = pickle_load('ind_vectors.data')\n i_features = pickle_load('i_features.data')\n if mode == 'train':\n if not (ind_vectors and i_features): # False if the files weren't there.\n ind_vectors, i_features = gather_and_save_vectors(path)\n else: \n ind_vectors, i_features = gather_and_save_vectors(path,ind_vectors,i_features)\n elif mode != 'test':\n print('Usage: mode parameter should be either \"train\" or \"test\".')\n return None\n return ind_vectors, i_features", "def fvsLexical(data):\n fvs_lexical = np.zeros((len(data), 3))\n fvs_punct = np.zeros((len(data), 3))\n labels_lexical = [''] * len(data)\n labels_punct = [''] * len(data)\n for e, (id, text, author) in enumerate(data):\n # print id, text\n tokens = nltk.word_tokenize(text.lower())\n words = word_tokenizer.tokenize(text.lower())\n sentences = sentence_tokenizer.tokenize(text)\n vocab = set(words)\n words_per_sentence = np.array([len(word_tokenizer.tokenize(s)) for s in sentences])\n\n # update fvs_lexical and labels_lexical\n # average number of words per sentence\n fvs_lexical[e, 0] = words_per_sentence.mean()\n # sentence length variation\n fvs_lexical[e, 1] = words_per_sentence.std()\n # lexical diversity\n fvs_lexical[e, 2] = len(vocab) / float(len(words))\n # put author label\n labels_lexical[e] = author\n\n # update fvs_punct and labels_punct\n # commas per sentence\n fvs_punct[e, 0] = tokens.count(',') / float(len(sentences))\n # semicolons per sentence\n fvs_punct[e, 1] = tokens.count(';') / float(len(sentences))\n # colons per sentence\n fvs_punct[e, 2] = tokens.count(':') / float(len(sentences))\n # put author label\n labels_punct[e] = author\n\n return (fvs_lexical, labels_lexical), (fvs_punct, labels_punct)", "def k_fold_FVmoVMF(data, wv_model, n_comp=15, k=10, reg=1):\n \n ## Prepare the corpus.\n tokenized_data_text = [data[k][0] for k in range(len(data))] # data\n \n # Initialize a moVMF with K components.\n vmf_neu = VonMisesFisherMixture(n_clusters=n_comp, posterior_type='soft', n_init=4, n_jobs=-2,\n init='k-means++')\n\n # Fit the word embedding data with the GMM model.\n vmf_neu.fit(normalize(wv_model.vectors))\n \n ## Create train/test sets.\n data_tags = [data[k][1] for k in range(len(data))] # tags\n comb_data = list(zip(tokenized_data_text, data_tags))\n random.shuffle(comb_data)\n folds = chunks(comb_data, k)\n \n k_fold_acc = []\n \n for fold in folds:\n # Training data\n X_train = [fold[0][k][0] for k in range(len(fold[0]))] # text \n y_train = [fold[0][k][1] for k in range(len(fold[0]))] # labels\n \n # Test data\n X_test = [fold[1][k][0] for k in range(len(fold[1]))] # text \n y_test = [fold[1][k][1] for k in range(len(fold[1]))] # labels\n \n # Get sentence embedding by using the FVs.\n X_train_FV = [FV_moVMF(BoWE_doc(wv_model, X_train[k]), vmf_neu) for k in range(len(X_train))]\n X_test_FV = [FV_moVMF(BoWE_doc(wv_model, X_test[k]), vmf_neu) for k in range(len(X_test))]\n \n ## Logistic regression classifier.\n\n # Use the elements in train_vecs as feature vectors.\n logreg = linear_model.LogisticRegression(C=reg, n_jobs=1, solver='liblinear', multi_class='ovr')\n logreg = logreg.fit(X_train_FV, y_train)\n\n ## Evaluation.\n acc = evaluate_prediction(logreg, X_test_FV, y_test)\n k_fold_acc.append(acc)\n 
\n return k_fold_acc", "def k_fold_BoW(data, vectorizer, features, k=10, reg=1):\n \n shuffle(data) # random shuffle data before making folds\n \n folds = chunks(data, k)\n k_fold_acc = []\n \n for fold in folds:\n # CountVectorizer: convert a collection of text documents \n # to a matrix of token counts.\n count_vectorizer = vectorizer(tokenizer=identity_tokenizer, lowercase=False,\n max_features=features) \n \n # Matrix of shape len(subj_train) x #words.\n train_data = (fold[0][k][0] for k in range(len(fold[0]))) # text for the training data\n train_features = count_vectorizer.fit_transform(train_data)\n\n ### Logistic regression classifier.\n logreg = linear_model.LogisticRegression(C=reg, n_jobs=1, solver='liblinear', multi_class='ovr')\n train_tag = [fold[0][k][1] for k in range(len(fold[0]))] # labels for the training data\n logreg = logreg.fit(train_features, train_tag)\n \n test_data = fold[1] # Both text and labels\n acc = evaluate_prediction_BoW(count_vectorizer, logreg, test_data)\n k_fold_acc.append(acc)\n \n return k_fold_acc", "def preprocess_features(npdata, pca=128):\n _, ndim = npdata.shape\n npdata = npdata.astype('float32')\n\n # Using PCA didn't help in our case.\n \n # Apply PCA-whitening with Faiss\n #mat = faiss.PCAMatrix (ndim, pca, eigen_power=-0.9)\n #mat.train(npdata)\n #assert mat.is_trained\n #npdata = mat.apply_py(npdata)\n\n\n # L2 normalization\n row_sums = np.linalg.norm(npdata, axis=1)\n npdata = npdata / row_sums[:, np.newaxis]\n\n return npdata", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum + self.bias[l]\n guesses.append(vectors.argMax())\n return guesses", "def get_vectors_for_all_docs(docs, vocab):\n docs_vectors = [get_feature_vector(doc, vocab) for doc in docs]\n return np.array(docs_vectors)", "def ex_2_c(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train SVMs with RBF kernels for different values of the gamma\n ## and plot the variation of the test and training scores with gamma using 'plot_score_vs_gamma' function.\n ## Plot the decision boundary and support vectors for the best value of gamma\n ## using 'plot_svm_decision_boundary' function\n ###########\n gammas = np.arange(0.01, 2, 0.02)\n\n test_scores = np.array([])\n train_scores = np.array([])\n best_svm = None\n best_test_score = 0\n\n for gamma in gammas:\n clf = svm.SVC(kernel='rbf', gamma=gamma)\n clf.fit(x_train, y_train)\n\n test_score = clf.score(x_test, y_test)\n\n if test_score > best_test_score:\n best_test_score = test_score\n best_svm = clf\n\n test_scores = np.append(test_scores, test_score)\n train_scores = np.append(train_scores, clf.score(x_train, y_train))\n\n plot_score_vs_gamma(train_scores, test_scores, gammas)\n\n plot_svm_decision_boundary(best_svm, x_train, y_train, x_test, y_test)", "def calculate_cb_vecs(self, clusters):\n if not clusters or not clusters[0]:\n return None\n\n # :param:`n` is the dimension of the vectors\n n = len(clusters[0][0])\n # Initialize the codebook vectors to 0\n cb_vectors = np.zeros([n * self.K]).reshape(self.K, n)\n for i in range(self.K):\n sum = np.zeros([n], dtype=np.uint).reshape(1, n)\n for vector in clusters[i]:\n sum += vector\n # divide the sum of the vectors by the size of the cluster\n cb_vectors[i] = np.divide(sum, len(clusters[i]))\n return cb_vectors", "def extract_features(img, clf, windows, y_start_stop, xy_window, stride):\n\n transformers = {k: v for k, v in
clf.named_steps['features'].transformer_list}\n\n chist_transformer = transformers['chist']\n # remove the first two steps since they are not needed\n chist_transformer = Pipeline(chist_transformer.steps[2:])\n\n sb_transformer = transformers['sb']\n # remove the first two steps since they are not needed\n sb_transformer = Pipeline(sb_transformer.steps[2:])\n\n img_scaled_sb = convert_cspace(img, transformers['sb'].named_steps['sb_csc'].cspace)\n samples_sb = cut_out_windows(img_scaled_sb, windows)\n\n img_scaled_chist = convert_cspace(img, transformers['chist'].named_steps['chist_csc'].cspace)\n samples_chist = cut_out_windows(img_scaled_chist, windows)\n\n img_scaled_hog = convert_cspace(img, transformers['hog'].named_steps['hog_csc'].cspace)\n search_area_hog = img_scaled_hog[y_start_stop[0]:y_start_stop[1], :, :]\n\n hog_vectors = get_hog_vector(search_area_hog, transformers['hog'], xy_window[0], stride)\n sb_vectors = sb_transformer.transform(samples_sb)\n chist_vectors = chist_transformer.transform(samples_chist)\n\n return np.concatenate((hog_vectors, chist_vectors, sb_vectors), axis=1)", "def get_features(words, vectors):\n result = [vectors.loc[word].values for word in words if word in df_keys.values.reshape(-1)]\n if result:\n return np.stack(result)\n return None", "def compute_features(self, X):\n F = self.feature_extractor(X)\n if self.with_dropout:\n F = self.dropout(F)\n F = F[:, None].expand(-1, self.n_primitives, -1)\n F = torch.cat([\n F,\n self.primitive_embedding[None].expand_as(F)\n ], dim=-1)\n\n B = F.shape[0]\n M = self.n_primitives\n D = 2*self.feature_extractor.feature_size\n\n assert F.shape == (B, M, D)\n return F", "def extractFeatures(image, feature_list):\n # for multiple features or color features\n #feat_vec = np.array([])\n \n # sift has 128D\n feat_vec = np.empty((0,128))\n n_channels = (image.shape[2] if len(image.shape)==3 else 1)\n \n #img_f32 = image.astype(np.float32)\n\n for feature in feature_list:\n if (feature.strip().lower() == 'dsift'):\n print \"computing dsift (dense rootSift) features\"\n dense = cv2.FeatureDetector_create(\"Dense\")\n sift = cv2.SIFT()\n if n_channels == 1:\n kp = dense.detect(image[:,:])\n # compute kp descriptors\n _,des = sift.compute(image[:,:],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n \n feat_vec = np.vstack((feat_vec, des))\n else:\n for channel in xrange(n_channels):\n kp = dense.detect(image[:,:,channel])\n _,des = sift.compute(image[:,:,channel],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n\n feat_vec = np.vstack((feat_vec, des))\n \n# if (feature.strip().lower() == 'color'):\n# print \"computing color features\"\n# # scale from 0-255 between 0 and 1\n# if args.scale == 1:\n# img_f32 /= 255.\n# \n# f_tmp = img_f32.flatten()\n# feat_vec = np.append(feat_vec, f_tmp)\n else:\n raise Exception(\"Method '%s' is not implemented!\"%(feature)) \n \n return feat_vec", "def get_features(data, col_list, y_name):\n \n # keep track of numpy values\n feature_matrix = data[col_list + [y_name]].dropna().values\n return feature_matrix[:, :-1], feature_matrix[:, -1]", "def __init__(self, data, m=100, eta=0.1, seq_length=25, sigma= 0.01):\n\n self.m, self.eta, self.seq_length = m, eta, seq_length\n self.vocab_len = data['vocab_len']\n self.ind_to_char = data['ind_to_char']\n self.char_to_ind = data['char_to_ind']\n self.book_data = data['book_data']\n\n self.b = np.zeros((m, 1))\n self.c = 
np.zeros((self.vocab_len, 1))\n\n self.U = np.random.normal(0, sigma, size=(m, self.vocab_len))\n self.W = np.random.normal(0, sigma, size=(m, m))\n self.V = np.random.normal(0, sigma, size=(self.vocab_len, m))", "def get_embeddings(self, data):\n raise NotImplementedError()", "def FeatureExtraction(ppg, accx, accy, accz):\n\n fs = 125\n n = len(ppg) * 4\n # applying fast Fourier transform\n freqs = np.fft.rfftfreq(n, 1/fs)\n fft = np.abs(np.fft.rfft(ppg,n))\n fft[freqs <= 40/60.0] = 0.0\n fft[freqs >= 240/60.0] = 0.0\n \n ## calculating L2 norm\n acc_mag = np.sqrt(accx**2 + accy**2 + accz**2)\n acc_fft = np.abs(np.fft.rfft(acc_mag, n))\n acc_fft[freqs <= 40/60.0] = 0.0\n acc_fft[freqs >= 240/60.0] = 0.0\n \n ppg_feature = freqs[np.argmax(fft)]\n acc_feature = freqs[np.argmax(acc_fft)]\n \n return (np.array([ppg_feature, acc_feature]), ppg, accx, accy, accz)", "def features(x):\n # We need to contract last axis of x with first of W - do this with\n # tensordot. The result has shape:\n # (?, ?, num_random_features)\n return jnp.sqrt(2 / num_random_features) * jnp.cos(\n jnp.sqrt(2 / gamma) * jnp.tensordot(x, w, axes=1) + b)", "def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec", "def feature_engineer_dataset(data, look_back = 1):\n dataX = []\n dataY = []\n est_range = len(data)-look_back-1\n for i in range(est_range):\n a = data[i:(i+look_back), 0]\n dataX.append(a)\n dataY.append(data[i + look_back, 0])\n\n return numpy.array(dataX), numpy.array(dataY)", "def process(self, data):\n allocating = (self._output is None)\n ind = 0\n for i, (name, feature) in enumerate(self.features):\n if allocating:\n x = feature.compute(data)\n self.feature_indices[name] = (ind, ind+x.size)\n ind += x.size\n\n if self._output is None:\n self._output = x\n else:\n self._output = np.hstack([self._output, x])\n else:\n self._output[self.feature_indices[name][0]:\n self.feature_indices[name][1]] = \\\n feature.compute(data)\n\n return self._output", "def get_embeddings(self, in_data):\n context, da = in_data\n if self.fixed_divide:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=True)\n else:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=False)\n\n # Shubhangi: what this step essentially does is it replaces the context words by their token, with UNK as default.\n # again , we don't need this since our context data is essentially vectors therefore commenting this out\n # similary we don't need context embedding , that's exactly what context is already .\n\n # context_emb = []\n context_emb = [float(parameter[0]) for parameter in context]\n\n # for tok in context[-max_context_len:]:\n # context_emb.append(self.dict_token.get(tok, self.UNK_TOKEN))\n\n # Shubhangi: padding is needed because each context sentence could be of different length ,\n # we don't need to include context in padding as we're going to have a fixed size\n # (max_context_len - len(context)) = 0\n\n\n # padding = [self.UNK_TOKEN] * (max_context_len - len(context))\n\n # Shubhangi: padding might be harmless for now therefore not removing ,\n # essentially what this is doing is concatenating the arrays and sending\n if self.use_div_token:\n return context_emb + 
[self.DIV_TOKEN] + da_emb\n # return padding + context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + da_emb\n return context_emb + da_emb", "def nmfFeatures\\\n(\n # Data:\n docs,\n trainDocs=None,\n # Parameters:\n min_df=1,\n max_df=1.0,\n max_features=None,\n sublinear_tf=True,\n stop_words=None,\n useTrainDocs=False,\n n_components=100,\n init='nndsvd',\n l1_ratio=0,\n alpha=0.1,\n lowercase=True,\n # Others:\n max_iter=200,\n random_state=1,\n # Misc:\n logger=None, verbose=True,\n):\n if useTrainDocs:\n assert trainDocs is not None\n assert len(trainDocs) > 0\n # if isinstance(trainDocs[0], list):\n # trainDocs = flattenLists(trainDocs)\n assert len(docs) > 0\n # if isinstance(docs[0], list):\n # docs = flattenLists(docs)\n tfidf_vectorizer = TfidfVectorizer\\\n (\n \tlowercase=lowercase,\n min_df=min_df,\n max_df=max_df,\n max_features=max_features,\n stop_words=stop_words,\n sublinear_tf=sublinear_tf,\n tokenizer=None if isinstance(docs[0], str) else lambda x: x,\n preprocessor=None if isinstance(docs[0], str) else lambda x: x,\n )\n if useTrainDocs:\n docs = docs + trainDocs \n tfidf = tfidf_vectorizer.fit_transform(docs)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf = NMF\\\n (\n n_components=n_components,\n random_state=random_state,\n alpha=alpha,\n l1_ratio=l1_ratio,\n init=init,\n max_iter=max_iter,\n ).fit(tfidf)\n vectors = nmf.transform(tfidf)\n if useTrainDocs:\n vectors = list(vectors)\n vectors = vectors[:-len(trainDocs)]\n vectors = np.array(vectors)\n return np.array(vectors)", "def load_data_and_embedding():\n\n # Load data\n df_data = pd.read_csv('../new_data/train_ids_and_labels_1400.txt',nrows=10000)\n y = df_data['class'] - 1 # class (0 ~ 18)\n X = df_data.drop(['class'], axis=1).values\n\n # Transform to binary class matrix\n y = to_categorical(y.values)\n\n # Randomly shuffle data\n np.random.seed(10)\n\n shuffle_indices = np.random.permutation(range(len(y)))\n X_shuffled = X[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split to train/test set\n # TODO: This is very crude, should use cross validation\n val_sample_index = -1 * int(0.2 * len(y))\n X_train, X_val = X_shuffled[:val_sample_index], X_shuffled[val_sample_index:]\n y_train, y_val = y_shuffled[:val_sample_index], y_shuffled[val_sample_index:]\n\n del df_data, X, y, X_shuffled, y_shuffled\n\n embedding_matrix = np.load(\"../embedding/word-embedding-200d-mc5.npy\")\n\n return X_train, y_train, X_val, y_val,embedding_matrix", "def _feature_vec(xs, y):\n\tf = _create_feature_vec()\n\n\t# Iterate over rows in x, values of y, and update f.\n\tcount = y.shape[0]\n\tfor idx in range(count):\n\t\tword = xs[idx, :]\n\t\ttag = y[idx]\n\n\t\t# Defense!\n\t\tassert len(word) + 1 == len(f)\n\n\t\t# Iterate over feature values in word, increment the vector\n\t\tfor fidx, fvalue in enumerate(word):\n\t\t\tf[fidx][tag, fvalue] += 1\n\n\t\t# Update ngram matrix at the end of fvec. 
Must update edge potential\n\t\t# for previous AND next tag.\n\t\tif idx != 0:\n\t\t\tprev_tag = y[idx-1]\n\t\t\tf[-1][prev_tag, tag] += 1\n\t\tif idx != count - 1:\n\t\t\tnext_tag = y[idx+1]\n\t\t\tf[-1][tag, next_tag] += 1\n\n\treturn f", "def k_fold_FVGMM(data, wv_model, n_comp=15, k=10, reg=1):\n \n ## Prepare the corpus.\n tokenized_data_text = [data[k][0] for k in range(len(data))] # data\n \n # Initialize a GMM with K components.\n gmm_neu = mixture.GaussianMixture(n_components=n_comp, covariance_type='diag', \n max_iter=300, n_init=10, reg_covar=1e-05)\n \n # Fit the word embedding data with the GMM model.\n gmm_neu.fit(wv_model.vectors)\n \n ## Create train/test sets.\n data_tags = [data[k][1] for k in range(len(data))] # tags\n comb_data = list(zip(tokenized_data_text, data_tags))\n random.shuffle(comb_data)\n folds = chunks(comb_data, k)\n \n k_fold_acc = []\n \n for fold in folds:\n # Training data\n X_train = [fold[0][k][0] for k in range(len(fold[0]))] # text \n y_train = [fold[0][k][1] for k in range(len(fold[0]))] # labels\n \n # Test data\n X_test = [fold[1][k][0] for k in range(len(fold[1]))] # text \n y_test = [fold[1][k][1] for k in range(len(fold[1]))] # labels\n \n # Get sentence embedding by using the FVs.\n X_train_FV = [FV_GMM(BoWE_doc(wv_model, X_train[k]), gmm_neu) for k in range(len(X_train))]\n X_test_FV = [FV_GMM(BoWE_doc(wv_model, X_test[k]), gmm_neu) for k in range(len(X_test))]\n \n ## Logistic regression classifier.\n\n # Use the elements in train_vecs as feature vectors.\n logreg = linear_model.LogisticRegression(C=reg, n_jobs=1, solver='liblinear', multi_class='ovr')\n logreg = logreg.fit(X_train_FV, y_train)\n\n ## Evaluation.\n acc = evaluate_prediction(logreg, X_test_FV, y_test)\n k_fold_acc.append(acc)\n \n return k_fold_acc", "def classify(self, data):\n\n \"*** YOUR CODE HERE ***\"\n # should compute (validationData[i] - trainingData[j])^2\n result = np.zeros(data.shape[0])\n for i in range(data.shape[0]):\n distances = np.linalg.norm(self.trainingData - data[i], axis=1)\n nearest = np.argsort(distances)[:self.num_neighbors]\n nearest_tags = [self.trainingLabels[j] for j in nearest]\n result[i] = max(nearest_tags, key=lambda x: nearest_tags.count(x))\n return result", "def log2FC_data(data):\n log2FC_df = pd.DataFrame()\n for i in range(0,len(data.columns),10):\n i = i\n data_subset = data[data.columns[i:i+10]]\n log_data = data_subset.apply(np.log2)\n \n new_df = pd.DataFrame()\n for j in range(len(log_data.columns)):\n tmp_col = log_data.iloc[:, j].name\n tmp_df = log_data.iloc[:,0] - log_data.iloc[:,j]\n new_df[tmp_col] = tmp_df\n \n log2FC_df = log2FC_df.append(new_df.T)\n log2FC_df = log2FC_df.T\n return log2FC_df", "def update_features_dims(data):\n utils.save_log('{0} :: {1}'.format(\n update_features_dims.__module__,\n update_features_dims.__name__))\n\n return [data[features_list].columns.get_loc(i)\n for i in categorical_features_list]", "def computeFeatures(self, absList):\n raise NotImplementedError(\"Need to implement computeFeatures()\")", "def __init__(self,training_data,default_kernel=\"rbf\"):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n 
self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVC(class_weight='auto',cache_size=DEFAULT_CACHE_SIZE, kernel=default_kernel)\n self.classifier.fit(X, Y)", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. \n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def featureNormalize(X):\n X_norm, mu, sigma = X,0,0\n # ====================== YOUR CODE HERE ======================\n # Instructions: First, for each feature dimension, compute the mean\n # of the feature and subtract it from the dataset,\n # storing the mean value in mu. Next, compute the\n # standard deviation of each feature and divide\n # each feature by it's standard deviation, storing\n # the standard deviation in sigma.\n #\n # Note that X is a matrix where each column is a\n # feature and each row is an example. You need\n # to perform the normalization separately for\n # each feature.\n #\n # Hint: You might find the 'mean' and 'std' functions useful.\n #\n \n # get the number of features in X and norm 1 col at a time \n \n for i in range(X.shape[1]):\n mu_i = np.mean(X[:,i]) #calculate mean for each col\n sigma_i = np.std(X[:,i]) #calculate sigma for each col\n X_norm[:,i] = ((X_norm[:,i] - mu_i) / sigma_i) #norm data in col\n \n # want to make an array of all values of mu and sigma\n if i == 0: \n mu = mu_i\n sigma = sigma_i\n else:\n mu = np.append(mu,mu_i)\n sigma = np.append(sigma,sigma_i)\n # ============================================================\n \n return X_norm, mu, sigma" ]
[ "0.60243773", "0.59016556", "0.58867896", "0.5756956", "0.573142", "0.56395006", "0.56027967", "0.55864096", "0.55687845", "0.5554088", "0.55537987", "0.54962337", "0.54528916", "0.54197", "0.53998107", "0.5392125", "0.53568494", "0.5354972", "0.5350437", "0.5302239", "0.52826774", "0.5275317", "0.5274099", "0.52733266", "0.52679604", "0.5242423", "0.5238932", "0.5227998", "0.5224041", "0.52234316", "0.5223229", "0.5198568", "0.5196713", "0.51960284", "0.5188575", "0.51734185", "0.5142483", "0.5129644", "0.5117284", "0.51024705", "0.5099493", "0.50890076", "0.5086764", "0.50734454", "0.5064952", "0.5055581", "0.5055528", "0.5040671", "0.5037045", "0.5036678", "0.50316036", "0.50246096", "0.50209427", "0.5019669", "0.501469", "0.5014333", "0.5009046", "0.5008423", "0.5008059", "0.5007693", "0.5004186", "0.49925387", "0.4973113", "0.49726173", "0.49719754", "0.49706256", "0.49698517", "0.49663296", "0.4965093", "0.4955096", "0.4946713", "0.49367803", "0.4936006", "0.49271443", "0.49249157", "0.49214846", "0.490919", "0.490762", "0.49057215", "0.49034426", "0.4903363", "0.49017802", "0.49017403", "0.4898746", "0.48975533", "0.4894797", "0.48935103", "0.48922974", "0.4892079", "0.4891195", "0.48897797", "0.48859006", "0.48749855", "0.48636842", "0.48499963", "0.48453817", "0.4843854", "0.4836608", "0.48359197", "0.48291752" ]
0.6925255
0
Make the first block in a blockchain.
def make_genesis_block():
    block = Block(index=0,
                  timestamp=datetime.now(),
                  data="Genesis Block",
                  previous_hash="0")
    return block
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createFirstBlock(self):\n firstBlock = Block(0, self.__currentTransactionsList, 0, '00')\n self.__chain.append(firstBlock)", "def create_genesis_block(self):\n index = 0\n transactions = []\n timestamp = 0.0\n previous_hash = \"0\"*64\n block = Block(index=index, transactions=transactions, timestamp=timestamp,previous_hash=previous_hash)\n block.hash = block.compute_hash()\n self.chain.append(block)", "def create_genesis_block(self):\r\n genesis_block = Block(0, [], time.time(), \"0\")\r\n genesis_block.hash = genesis_block.compute_hash()\r\n self.chain.append(genesis_block)", "def create_genesis_block(self):\n genesis_block = Block(0, [], time.time(), \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)", "def create_genesis_block(self):\n genesis_block = Block(0, [], 0, \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)", "def create_genesis_block(self):\n genesis_block = Block(0, [], 0, \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)", "def genesisBlock():\n return Block(0, \"0\", 1465154705, \"my genesis block!!\",\n \"816534932c2b7154836da6afc367695e6337db8a921823784c14378abed4f7d7\");", "def genesis_block(self):\n block = Block(target=self.target, transactions=[])\n self.current_transactions.append(block)", "def newBlock(preBlock, remitter, number, payee):\r\n index = preBlock.index + 1\r\n timestamp = int(round(time.time() * 1000))\r\n data = (remitter, number, payee).__str__()\r\n previousHash = preBlock.hash\r\n nounce = 0\r\n return Blockchain(index, data, timestamp, nounce, previousHash)", "def mine_block(): \n # Fetch the current last block of blockchain\n last_block = blockchain[-1]\n # Hash th elast block (=> to be able to compare it to stored hash value)\n hashed_block = hash_block(last_block)\n proof = proof_of_work()\n # Miners should be rewarded, so here is reward\n # reward_transaction = {\n # 'sender': 'MINING',\n # 'recipient': owner,\n # 'amount': MINING_REWARD\n # }\n reward_transaction = OrderedDict([('sender', 'MINING'), ('recipient', owner), ('amount', MINING_REWARD)])\n copied_transactions = open_transactions[:]\n copied_transactions.append(reward_transaction)\n\n block = {\n 'previous_hash': hashed_block,\n 'index': len(blockchain),\n 'transactions': copied_transactions,\n 'proof': proof\n }\n blockchain.append(block)\n return True", "def mine():\n last_block = blockchain.get_last_block\n print(last_block)\n last_proof = last_block['proof']\n proof = blockchain.proof_of_work(last_proof)\n\n blockchain.add_transaction(sender=0, recipient=node_identifier, amount=1)\n block = blockchain.add_block(proof)\n block['message'] = 'New block added'\n\n return jsonify(block), 200", "def mine(self):\n last_block = self.chain[-1]\n\n nonce = self.proof_of_work()\n previous_hash = self.hash(last_block)\n self.create_block(nonce, previous_hash)", "def createGenesisBlock(self):\n return Block(\"Genesis Block\")", "def genesis():\n #return Block(\n #GENESIS_DATA['timestamp'],\n #GENESIS_DATA['last_hash'],\n #GENESIS_DATA['hash'],\n #GENESIS_DATA['data']\n #)\n return Block(**GENESIS_DATA)", "def inner_start_mining(self):\n print(\"Mining a new block\")\n blockchain = self.get_blockchain()\n self.request_transactions(blockchain)\n last_block_hash = blockchain.last_block().header\n complete_hash, nonce = self.proof_of_work(last_block_hash)\n new_block = self.create_block(complete_hash, nonce)\n self.send_block(new_block)\n 
self.reset_transaction()", "def create_genesis(self):\n return Block(0, 0, b'0', b'0', b'')", "def test_single_chain(self):\n self.assertEqual(len(self.genesis_blocks), 1)\n manager = self.create_peer('testnet', tx_storage=self.tx_storage)\n\n # The initial score is the sum of the genesis\n score = self.genesis_blocks[0].weight\n for tx in self.genesis_txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 100 blocks in a row with no transaction but the genesis\n blocks = add_new_blocks(manager, 100, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata(force_reload=True)\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 30, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 50 more blocks in a row with no transactions between them\n blocks = add_new_blocks(manager, 50)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n # Mine 15 more blocks with 10 transactions between each block\n for _ in range(15):\n txs = add_new_transactions(manager, 10, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n blocks = add_new_blocks(manager, 1)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n self.assertConsensusValid(manager)", "def _generate_genesis() -> None:\n logging.debug(\"Generating the genesis block\")\n new_recv_block(Block.genesis())", "def mine_block(last_block, data):\n timestamp = time.time_ns()\n last_hash = last_block.hash\n difficulty = Block.adjust_difficulty(last_block, timestamp)\n nonce = 0\n hash = crypto_hash(timestamp, last_hash, data, difficulty, nonce)\n\n while hex_to_binary(hash)[0:difficulty] != '0'* difficulty:\n nonce += 1\n timestamp = time.time_ns()\n difficulty = Block.adjust_difficulty(last_block, timestamp)\n hash = crypto_hash(timestamp, last_hash, data, difficulty, nonce)\n\n return Block(timestamp, last_hash, hash, data, difficulty, nonce)", "def mine_block(self):\n if self.public_key == None:\n return None\n last_block = self.__chain[-1]\n hashed_block = hash_block(last_block)\n proof = self.proof_of_work()\n reward_transaction = Transaction(\n 'MINING', self.public_key, '', MINING_REWARD)\n\n copied_transactions = self.__open_transactions[:]\n for tx in copied_transactions:\n if not Wallet.verify_transaction(tx):\n return None\n copied_transactions.append(reward_transaction)\n\n copied_chipsactions = self.__open_chipsactions[:]\n for tx in copied_chipsactions:\n if not Wallet.verify_chipsaction(tx):\n return None\n\n copied_messsactions = self.__open_messsactions[:]\n for tx in copied_messsactions:\n if not Wallet.verify_messsaction(tx):\n return None\n\n block = Block(len(self.__chain), hashed_block,\n copied_transactions, copied_chipsactions, copied_messsactions, proof)\n self.__chain.append(block)\n self.__open_transactions = []\n self.__open_chipsactions = []\n self.__open_messsactions = []\n self.save_data()\n for node in 
self.__peer_nodes:\n url = 'http://{}/broadcast-block'.format(node)\n converted_block = block.__dict__.copy()\n converted_block['transactions'] = [\n tx.__dict__ for tx in converted_block['transactions']]\n converted_block['chipsactions'] = [\n tx.__dict__ for tx in converted_block['chipsactions']]\n converted_block['messsactions'] = [\n tx.__dict__ for tx in converted_block['messsactions']] \n try:\n response = requests.post(url, json={'block': converted_block})\n if response.status_code == 400 or response.status_code == 500:\n print('Block declined, needs resolving')\n if response.status_code == 409:\n self.resolve_conflicts = True\n except requests.exceptions.ConnectionError:\n continue\n return block", "def init_with_genesis_block(self, block):\n\n genesis_tx = block.transactions # Genesis block contains single seed transaction from God\n\n if not signature.verify(genesis_tx.from_pk, genesis_tx.to_string_for_hashing(), genesis_tx.signature):\n print(\"Genesis transaction signature is NOT valid.\")\n return\n\n self.blocks.append(block)\n return self", "def get_first_block(blockchain):\n response = requests.get('https://api.blockcypher.com/v1/%s/main' % blockchain)\n if response.status_code == 200:\n return int(json.loads(response.content.decode('latin1'))['height'])\n elif response.status_code == 429:\n print('Too many requests')\n return -1", "def create_block(self, prev_hash=None):\n # Update blockchain and balance state (thread safe)\n if prev_hash is not None and prev_hash not in self._blockchain.hash_block_map.keys():\n print(prev_hash, self._blockchain.hash_block_map)\n prev_blk = None if prev_hash is None else \\\n self._blockchain.hash_block_map[prev_hash]\n last_blk = self._update(prev_blk)\n pending_tx = self._get_tx_pool()\n gathered_tx = self._gather_transactions(pending_tx)\n block = self._mine_new_block(last_blk.header, gathered_tx)\n if block is not None:\n blk_json = block.to_json()\n # Add block to blockchain (thread safe)\n self.add_block(blk_json)\n print(f\"{self.__class__.__name__} {self.name} created a block.\")\n # Broadcast block and the header.\n self._broadcast_block(block)\n # Remove gathered transactions from pool and them to added pile\n with self.added_tx_lock:\n self._added_transactions |= set(gathered_tx)\n self._update()\n return block", "def mine_block(self):\n if self.hosting_node == None:\n return None\n # Fetch the currently last block of the blockchain\n last_block = self.__chain[-1]\n print(last_block)\n # Hash the last block (to be able to compare it to the stored hash value)\n hashed_block = hash_block(last_block)\n proof = self.proof_of_work()\n # Miners should be rewarded, so let's create a reward transaction\n reward_transaction = Transfer(self.hosting_node, \"MINING\", MINING_REWARD)\n # Copy transaction instead of manipulating the original open_transactions list\n # This ensures that if for some reason the mining should fail, we don't have the reward transaction stored in the open transactions\n copied_transactions = self.__open_transfers[:]\n for tx in copied_transactions:\n if not Wallet.verify_transfer(tx):\n return None\n copied_transactions.append(reward_transaction)\n block = Block(len(self.__chain), hashed_block, copied_transactions, proof)\n self.__chain.append(block)\n self.__open_transfers = []\n self.save_data()\n return block", "def createNewBlock(self, nonce, previousBlockHash, hash):\n newBlock = Block(len(self.chain), self.pendingTransactions, nonce, hash, previousBlockHash)\n self.pendingTransactions = []\n 
self.chain.append(newBlock)\n return newBlock", "def mine_block(self):\n\n last_block = self.__chain[-1]\n hashed_block = hash_util.hash_block(last_block)\n\n proof = self.proof_of_work()\n\n # we are using OrderedDict to get an ordered dictionary so that the hash doesn't change due to the order changing\n reward_transaction = Transaction('MINING', self.hosting_node, MINING_REWARD)\n copied_transactions = self.__open_transactions[:] # copies open_transactions by value (: signifies range, if nothing is\n # specified, then the whole list is copied\n copied_transactions.append(reward_transaction) # reward for miners\n\n block = Block(len(self.__chain), hashed_block, copied_transactions, proof)\n self.__chain.append(block)\n self.__open_transactions = []\n return True", "def test_mine_simple_transaction_block(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n blockchain.create_transaction('sender', 'recipient', 1)\n blockchain.create_transaction('sender2', 'recipient2', 1.5)\n self.assertEqual(len(blockchain.pending_transactions), 2)\n\n block = blockchain.mine(miner_address)\n\n # First we look that a new block could be mined\n self.assertIsNotNone(block)\n\n # Let's see if the block was added to the chain\n self.assertEqual(blockchain.last_block.hash, block.hash)\n\n # We need to check that the transaction list is empty\n self.assertEqual(0, len(blockchain.pending_transactions))\n\n # We need to check that the block contains all of the transactions\n self.assertEqual(3, len(block.transactions))\n\n reward_transaction = block.transactions[-1]\n\n # We make sure the reward function has no sender, and gives away exactly 1 coin\n self.assertEqual('0', reward_transaction.sender)\n self.assertEqual(miner_address, reward_transaction.recipient)\n self.assertEqual(1, reward_transaction.amount)", "def mine(self):\n print(\"Mining\")\n\n prev_hash = self.r.get(PREV_HASH_KEY)\n if prev_hash:\n prev_hash = prev_hash.decode('utf-8')\n\n block = Block(prev_hash)\n\n\n # wait to fill the block with transactions\n while not block.full():\n # in between mining\n if self.stop_mining():\n print(\"Someone mined the coins\")\n l = len(block.transactions)\n left = TRANSACTIONS_IN_BLOCK - l\n for _ in range(left):\n self.r.blpop(TRANSACTION_QUEUE_KEY)\n return None\n\n print(\"Searching for transactions to fill the block\")\n # blocking pop from transaction key\n transaction = Transaction.from_redis(self.r, json.loads(self.r.blpop(TRANSACTION_QUEUE_KEY)[1].decode('utf-8')))\n print(\"found a transaction, adding it to block\")\n block.add_transaction(transaction)\n\n # create a new transaction that creates a lazycoin and gives it to the user\n print(\"Block is full, now add a create transaction\")\n print(\"Prev hash = \", prev_hash)\n create = Transaction(\n prev_hash=prev_hash,\n transaction_type='CREATE',\n sender=self.user.pub,\n receiver=self.user.pub,\n )\n\n # sign this transaction and add the signature to the transaction\n print(\"signing transaction\")\n msg, sign = self.user.sign(create)\n create.add_signature(sign)\n\n print(\"adding transaction\")\n block.add_transaction(create)\n\n print(\"finding nonce\")\n nonce = self.solve_puzzle(block)\n\n block.add_nonce(nonce)\n print(\"block done\")\n\n if self.stop_mining():\n print(\"stopping mining\")\n return None\n\n return block", "def mine(self):\n new_block = Block(self.block['timestamp'], self.block['car'],\n self.block['id'])\n # link the block to the previous block\n new_block.previous_hash = self._get_previous_hash()\n while 
True:\n # get a hash\n new_hash = new_block.get_hash()\n # check hash rules, in our case check if the hash starts with\n # self.difficulty number of zeroes\n if new_hash[0] != self.difficulty * \"0\":\n if self.new_block[\"block\"] is None:\n # the hash hasn't been found yet by any other process,\n # therefore increase the nonce and continue\n # miners will use a different mining mechanism in order\n # to increase the probability of finding a hash by\n # a different miner\n new_block.increment_nonce(self.id + 1)\n continue\n break\n break\n\n # NOTE: May happen that two processes find the hash at the same time,\n # because there is not a big difficulty, however, it's not a problem,\n # for sake of the demo it's fine\n\n if self.new_block[\"block\"] is None:\n # this process has found the hash first\n print(self.id, \" - the winner hash\", new_hash)\n new_block.hash = new_hash\n self.new_block[\"block\"] = new_block\n print(self.id, \" - mined the block\")\n else:\n # validate the block found by other process (miner)\n if self.new_block[\"validated\"] is not False:\n print(self.id, \" - validating\")\n # check block's validity\n valid = False\n if self.new_block[\"block\"].is_block_valid():\n # check blockchain's validity when we apply the newly\n # mined block\n if self.is_blockchain_valid(self.new_block[\"block\"]):\n valid = True\n self.new_block[\"validated\"] = valid\n else:\n # NOTE: this demo doesn't take into account the number of\n # miners who approved the block, the block will be rejected\n # if any of them rejected it\n # but usually just more than 50% of miners must approve\n print(self.id, \" - the block has been rejected by other miner\")", "def create_block(self, previous_hash):\r\n if len(self.transaction_pool) < 1:\r\n return None, None\r\n\r\n # Create A Temporary Block\r\n block = {'index': None, # before mining set index to None\r\n 'timestamp': None, # before mining set timestamp to None\r\n 'nonce': 0, # before mining set nonce to 0\r\n 'transactions': self.transaction_pool, # Fill in all the transactions\r\n 'previous_hash': previous_hash, # Set the previous hash\r\n 'current_hash': ''} # Current hash is yet to be calculated\r\n\r\n # Empty Transaction Pool\r\n self.transaction_pool = [] # Once transactions have been placed in a block\r\n # they can be removed from the pool\r\n\r\n # Calculate Proof Of Work (Nonce)\r\n block['nonce'], block['current_hash'] = self.proof_of_work(block, previous_hash) # Validate the block by calculating the nonce\r\n block['index'] = len(self.chain) + 1 # Set the block index\r\n block['timestamp'] = str(datetime.datetime.now()) # Set the timestamp to the time when the block was validated\r\n\r\n # Add Block To DistrictNode's Own Chain\r\n self.chain.append(block) # Append the block to the list of blocks in the blockchain\r\n print(\"BLOCK ADDED TO 90\")\r\n for block in self.chain:\r\n for key, value in block.items():\r\n print(key, value)\r\n print('\\n')\r\n\r\n return self.chain, self.transaction_pool # Return the new chain and the new transaction_pool\r", "def test_genesis_block(self):\n genesis_block = self.blockchain.last_block[\"index\"]\n self.assertEqual(genesis_block, 1)", "def test_single_fork_not_best(self):\n self.assertEqual(len(self.genesis_blocks), 1)\n manager = self.create_peer('testnet', tx_storage=self.tx_storage)\n\n # The initial score is the sum of the genesis\n score = self.genesis_blocks[0].weight\n for tx in self.genesis_txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 30 blocks in a row with no 
transactions\n blocks = add_new_blocks(manager, 30, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 5, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 1 blocks\n blocks = add_new_blocks(manager, 1, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Generate a block which will be a fork in the middle of the chain\n # Change the order of the transactions to change the hash\n fork_block1 = manager.generate_mining_block()\n fork_block1.parents = [fork_block1.parents[0]] + fork_block1.parents[:0:-1]\n fork_block1.resolve()\n fork_block1.verify()\n\n # Mine 8 blocks in a row\n blocks = add_new_blocks(manager, 8, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Fork block must have the same parents as blocks[0] as well as the same score\n self.assertEqual(set(blocks[0].parents), set(fork_block1.parents))\n\n # Propagate fork block.\n # This block belongs to case (ii).\n self.assertTrue(manager.propagate_tx(fork_block1))\n fork_meta1 = fork_block1.get_metadata()\n self.assertEqual(fork_meta1.voided_by, {fork_block1.hash})\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 5, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 5 blocks in a row\n # These blocks belong to case (i).\n blocks = add_new_blocks(manager, 5, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 2, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Propagate a block connected to the voided chain\n # These blocks belongs to case (iii).\n sidechain1 = add_new_blocks(manager, 3, parent_block_hash=fork_block1.hash)\n for block in sidechain1:\n meta = block.get_metadata(force_reload=True)\n self.assertEqual(meta.voided_by, {block.hash})\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 2, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Propagate a block connected to the voided chain\n # This block belongs to case (iv).\n fork_block3 = manager.generate_mining_block(parent_block_hash=fork_block1.hash)\n fork_block3.resolve()\n fork_block3.verify()\n self.assertTrue(manager.propagate_tx(fork_block3))\n fork_meta3 = fork_block3.get_metadata()\n self.assertEqual(fork_meta3.voided_by, {fork_block3.hash})\n\n self.assertConsensusValid(manager)", "def create_block(self, nonce, previous_hash) -> None:\n block = {\n 'block_number': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.transactions,\n 'nonce': nonce,\n 'previous_hash': previous_hash\n }\n\n self.transactions = []\n self.chain.append(block)", "def mine_block(previous: bytes, height: int, miner: bytes, transactions: List[Transaction], timestamp: int, difficulty: int, cutoff_time: int) -> 'Block':\n print('============================ \\n \\n \\n ============================')\n manager = mp.Manager()\n final_nonce = 
manager.Value('i', None)\n # Declare a new instance of a block, and change the nonce until it produces a correct block_id\n\n block = Block(miner=miner, transactions=transactions, timestamp=timestamp,\n block_id=None, nonce=0, previous=previous, height=height, difficulty=difficulty)\n processes: List[Process] = []\n\n found_event = Event()\n for _ in range(processors):\n process = Process(target=_mine_block, args=(\n block, final_nonce, found_event, cutoff_time))\n processes.append(process)\n\n for process in processes:\n\n process.start()\n\n found_event.wait()\n\n for process in processes:\n process.terminate()\n\n for process in processes:\n process.join()\n print('============= END =============== \\n \\n \\n ============================')\n if final_nonce.value is not None:\n block.nonce = final_nonce.value\n block.block_id = block.compute_block_id()\n return block\n else:\n return None", "def create_block(self, nonce, previous_hash):\n block = {'block_number': transaction_blocks.count() + 1,\n 'timestamp': ctime(t),\n 'transactions': self.transactions,\n 'nonce': nonce,\n 'previous_hash': previous_hash}\n\n # Reset the current list of transactions\n self.transactions = []\n self.chain.append(block)\n return block", "def create_block(self, complete_hash, nonce):\n print(\"Creating block with hash: '%s'\" % complete_hash)\n block = Block(complete_hash, nonce)\n for transaction in self.transactions:\n block.add_transaction(transaction)\n return block", "def _initBlock(o,block):\n o.block = block.clone().shift(*o.board.startPosition)", "def new_block(self, proof, previous_hash = None):\n #create a new Block & adds it to the chain.\n \n block = {\n 'index' : len(self.chain) + 1,\n 'timestamp' : time(),\n 'transactions' : self.pending_transactions,\n 'proof' : proof,\n 'previous_hash' : previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.pending_transactions = []\n\n self.chain.append(block)\n return block\n #pass", "def create_origin_block(self):\n # creating a new hash object and finding new hash with empty string.\n hash = hashlib.sha256()\n hash.update(''.encode('utf-8'))\n # Instantiating a new block with data 'Origin', and for previous hash we'll give the hash generated by empty string\n origin_block = Block('Origin', hash)\n # Mine the block with the difficulty level of the chain\n origin_block.mine(self.difficulty)\n # Appending it to the mined block list\n self.blocks.append(origin_block)", "def new_block(self, proof, previous_hash=None):\n\n # Create the block\n my_block = Block(proof=proof,\n previous_hash=previous_hash or self.hash(self.last_block))\n my_block.save()\n\n # Update current_transactions with this new block.\n my_block_trans = self.current_transactions_obj\n\n for trans in Transaction.objects.filter(block__isnull=True):\n trans.block = my_block\n trans.save()\n\n block = {\n 'index': my_block.id,\n 'timestamp': my_block.timestamp,\n 'transactions': list(Transaction.objects.filter(block=my_block).values()),\n 'proof': my_block.proof,\n 'previous_hash': my_block.previous_hash,\n }\n\n return block", "def mine(self, rewardAddress):\n lastBlock = self.getLastBlock()\n index = lastBlock.index + 1\n previousHash = lastBlock.hash\n\n nonce = self.generate(lastBlock)\n\n self.createTransaction( # Reward for the miner\n sender=\"0\", # The miner receive coins \"created\", so there is no sender\n recipient=rewardAddress,\n amount=1,\n )\n\n # Add the block to the new chain\n block = Block(index, self.__currentTransactionsList, 
nonce, previousHash)\n\n if self.addBlock(block):\n return block\n\n return None", "def new(cls, prev_hash, transactions, stop_mine):\n if not transactions:\n raise Exception(\"No transactions in block creation.\")\n root = MerkleTree(transactions).get_root()\n header = {\n \"prev_hash\": prev_hash,\n \"root\": root,\n \"timestamp\": datetime.datetime.utcnow().timestamp(),\n \"nonce\": os.urandom(algo.NONCE_LEN // 2).hex()\n }\n while not stop_mine.is_set():\n # Compute hash to meet target\n header_hash = algo.hash1_dic(header)\n if header_hash < Block.TARGET:\n return cls(header, transactions)\n header[\"nonce\"] = os.urandom(algo.NONCE_LEN // 2).hex()\n return None", "def createBlock(self, block: ghidra.program.model.mem.MemoryBlock, name: unicode, start: ghidra.program.model.address.Address, length: long) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def new_block(self, proof, previous_hash=None):\n \n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n # Add block to existing chain\n self.chain.append(block)\n return block", "def test_mine_empty_transaction_block(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n block = blockchain.mine(miner_address)\n\n # First we look that a new block could be mined\n self.assertIsNotNone(block)\n\n # Let's see if the block was added to the chain\n self.assertEqual(blockchain.last_block.hash, block.hash)\n\n # We need to check that the block contains only the reward transaction\n self.assertEqual(len(block.transactions), 1)\n\n reward_transaction = block.transactions[0]\n\n # We make sure the reward function has no sender, and gives away exactly 1 coin\n self.assertEqual(reward_transaction.sender, '0')\n self.assertEqual(reward_transaction.recipient, miner_address)\n self.assertEqual(reward_transaction.amount, 1)", "def test_adding_multiple_blocks(self, blockchain, genesis, block1, block2, block3):\n assert blockchain.get_depth(hash(block1)) == -float('inf')\n assert blockchain.get_depth(hash(block2)) == -float('inf')\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block1)\n # graph should look like this:\n # 0 <- 1\n assert hash(block1) in blockchain\n assert blockchain[hash(block1)] == block1\n assert blockchain._leaves == {hash(block1)}\n assert blockchain.get_virtual_block_parents() == {hash(block1)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1}\n assert blockchain._longest_chain == {hash(genesis), hash(block1)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.get_depth(hash(genesis)) == 1\n assert blockchain.get_depth(hash(block1)) == 0\n assert blockchain.get_depth(hash(block2)) == -float('inf')\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block2)\n # graph should look like this:\n # 0 <- 1\n # 0 <- 2\n assert hash(block2) in blockchain\n assert blockchain[hash(block2)] == block2\n assert blockchain._leaves == {hash(block1), hash(block2)}\n assert blockchain.get_virtual_block_parents() == {min(hash(block1), hash(block2))}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert 
blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1}\n assert blockchain._longest_chain == {hash(genesis), hash(block1)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.is_a_before_b(hash(block1), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block2)) is True\n assert blockchain.get_depth(hash(genesis)) == 1\n assert blockchain.get_depth(hash(block1)) == 0\n assert blockchain.get_depth(hash(block2)) == 0\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block3)\n # graph should look like this:\n # 0 <- 1 <- 3\n # 0 <- 2\n assert hash(block3) in blockchain\n assert blockchain[hash(block3)] == block3\n assert blockchain._leaves == {hash(block2), hash(block3)}\n assert blockchain.get_virtual_block_parents() == {hash(block3)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1, hash(block3): 2}\n assert blockchain._longest_chain == {hash(genesis), hash(block1), hash(block3)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.is_a_before_b(hash(block1), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(block3), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block3)) is True\n assert blockchain.get_depth(hash(genesis)) == 2\n assert blockchain.get_depth(hash(block1)) == 1\n assert blockchain.get_depth(hash(block2)) == 0\n assert blockchain.get_depth(hash(block3)) == 0", "def mine():\n block = app.miner(app.blockchain)\n\n response = {\n 'message': \"New block is mined!\",\n 'block': block.dump()\n }\n\n return jsonify(response), 200", "def create_from_transaction(tx, prev_hash):\n\n tx_hash = HashAssist.hash_value(tx.to_string_for_hashing())\n\n print(\"Mining nonce....\")\n nonce = proof.mint(prev_hash + tx_hash, WORK_FACTOR)\n header_hash = HashAssist.hash_value(prev_hash + tx_hash + nonce)\n\n return Block(header_hash, prev_hash, nonce, tx_hash, tx)", "def new_block(self, proof, previous_hash=None):\r\n block = {\r\n 'index': len(self.chain) + 1,\r\n 'timestamp': time(),\r\n 'transactions': self.current_transactions,\r\n 'proof': proof,\r\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\r\n }\r\n\r\n # reseta a atual lista de transacoes\r\n self.current_transactions = []\r\n\r\n self.chain.append(block)\r\n return block", "def create_block(self, proof, previous_hash=None):\n\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash,\n }\n\n # Reset current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def MakeBlock(self, *args):\n return _BRepAlgo.BRepAlgo_EdgeConnector_MakeBlock(self, *args)", "def test_fork_simple(self):\n bvh = self.BlockValidationHandler()\n new_block = self.btm.generate_block(previous_block=self.btm.chain_head,\n add_to_store=True)\n\n bv = 
self.create_block_validator(new_block, bvh.on_block_validated)\n bv.run()\n\n self.assertTrue(bvh.has_result())\n self.assertTrue(new_block.status == BlockStatus.Valid)\n self.assertTrue(bvh.result[\"commit_new_block\"])", "def build_block(self, parent, transactions=(), n_time=None):\n parent.calc_sha256()\n block_height = self.block_heights[parent.sha256] + 1\n block_time = (parent.nTime + 1) if n_time is None else n_time\n\n block = create_block(\n parent.sha256, create_coinbase(block_height), block_time)\n block.vtx.extend(transactions)\n make_conform_to_ctor(block)\n block.hashMerkleRoot = block.calc_merkle_root()\n block.solve()\n self.block_heights[block.sha256] = block_height\n return block", "def new_block(self, proof, previous_hash=None):\n\n\t\tblock = {\n\t\t\t'index': len(self.chain) + 1,\n\t\t\t'timestamp': time(),\n\t\t\t'transactions': self.current_transactions,\n\t\t\t'proof': proof,\n\t\t\t'previous_hash': previous_hash or self.hash(self.chain[-1]),\t\t\n\t\t}\n\n\t\t#Reset current list of transactions\n\t\tself.current_transactions = []\n\n\t\tself.chain.append(block)\n\t\treturn block", "def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions':self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n self.current_transactions = []\n self.chain.append(block)\n return block", "def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len( self.chain ) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'merkle': self.hash(self.current_transactions),\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n # Add the block to the chain\n self.chain.append( block )\n self._write_chain()\n\n return block", "def build_new_block(cls, data='', previous_block=None):\n if previous_block:\n new_index = previous_block.index+1\n previous_hash = previous_block.hash\n else:\n new_index = 0\n previous_hash = ''\n timestamp = int(time.time())\n block_hash = cls.build_block_hash(\n index=new_index,\n timestamp=timestamp,\n data=data,\n previous_hash=previous_hash\n )\n block = cls(\n index=new_index,\n previous_hash=previous_hash,\n data=data,\n timestamp=timestamp,\n block_hash=block_hash\n )\n\n return block", "def create_next_block(\n self,\n input_constants: Dict,\n prev_block: FullBlock,\n timestamp: uint64,\n difficulty: uint64,\n ips: uint64,\n seed: bytes = b\"\",\n ) -> FullBlock:\n test_constants: Dict[str, Any] = constants.copy()\n for key, value in input_constants.items():\n test_constants[key] = value\n\n assert prev_block.header_block.challenge\n\n return self._create_block(\n test_constants,\n prev_block.header_block.challenge.get_hash(),\n uint32(prev_block.height + 1),\n prev_block.header_hash,\n prev_block.header_block.challenge.total_iters,\n prev_block.weight,\n timestamp,\n uint64(difficulty),\n ips,\n seed,\n )", "def mine_block(self):\n\t\tlast_block = self.blockchain.last_block\n\t\tlast_proof = last_block['proof']\n\t\tproof = self.blockchain.proof_of_work(last_proof)\n\n\t\t# Forge the new Block by adding it to the chain\n\t\tprevious_hash = self.blockchain.hash(last_block)\n\t\tblock = self.blockchain.new_block(proof, previous_hash)\n\n\t\t# broadcast request for all neighbor to resolve conflict\n\t\tself.broadcast_new_block()\n\n\t\t# now add a special transaction that signifies the reward 
mechanism\n\t\tnew_transaction = {\n\t\t'node':self.node_identifier,\n\t\t'block_index':block['index'],\n\t\t'reward':self.MINE_REWARD\n\t\t}\n\t\tself.blockchain.new_transaction(new_transaction)\n\t\treturn proof", "def getBlock(self) -> ghidra.program.model.correlate.Block:\n ...", "def gen_new_block(self):\n block = BasicBlock()\n self.blocks.append(block)\n return block", "def create_block(self):\n return poet_transaction_block.PoetTransactionBlock()", "def get_best_block(self) -> Block:\n assert self.indexes is not None\n block_hash = self.indexes.height.get_tip()\n block = self.get_transaction(block_hash)\n assert isinstance(block, Block)\n assert block.get_metadata().validation.is_fully_connected()\n return block", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def get_best_block(self, node):\n block_height = node.getblockcount()\n blockhash = node.getblockhash(block_height)\n block = FromHex(CBlock(), node.getblock(blockhash, 0))\n block.calc_sha256()\n self.block_heights[block.sha256] = block_height\n return block", "def __create_blockchain_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s integer PRIMARY KEY AUTOINCREMENT,\n %s text,\n %s integer,\n %s real,\n %s text,\n %s text);\"\"\" %(TABLE_BLOCKCHAIN,\n COL_BLOCKCHAIN_BLOCKID,\n COL_BLOCKCHAIN_BLOCK,\n COL_BLOCKCHAIN_TRANS_COUNT,\n COL_BLOCKCHAIN_AMOUNT,\n COL_BLOCKCHAIN_TIME,\n COL_BLOCKCHAIN_BLOCK_HASH)\n self.__dbcursor.execute(cmd)", "def __init__(self):\n self.chain = [Block.genesis()]", "def new_block(self, proof, previous_hash=None):\n servers = [\n \"1.us.pool.ntp.org\",\n \"2.us.pool.ntp.org\",\n \"3.us.pool.ntp.org\"\n ]\n\n response = {}\n\n try:\n response = self.c.request('0.us.pool.ntp.org')\n except Exception:\n for server in servers:\n try:\n response = self.c.request(server)\n\n if response:\n break\n\n except Exception:\n print('\\n //// alternate ntp server didnt work')\n\n block = {\n 'message': 'New Block Forged',\n 'index': len(self.chain) + 1,\n 'timestamp': response.tx_time or time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.chain[-1]['hash'],\n }\n\n # Calculate the hash of this new Block\n block['hash'] = self.hash(block)\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def add_block(self, block):\n if block.index >= len(self.blockchain):\n self.blockchain.append(block)\n else:\n self.blockchain[block.index] = block\n self.write_to_disk()", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def seed(cls, block: Expr) -> Expr:\n return 
cls(BlockField.block_seed, block)", "def mine(self):\n if self.unconfirmed_transactions == []:\n return False\n\n transactions = self.unconfirmed_transactions\n for transaction in transactions:\n author = transaction['author']\n public_key_path = author + '_public.pem'\n content = transaction['content']\n signature = transaction['signature']\n verify = rsa_verify(content, signature, public_key_path)\n if verify == False:\n print('Transaction not verified.')\n return \n previous_block = self.last_block\n last_index = previous_block.index\n\n index = last_index + 1\n timestamp = time.time()\n previous_hash = previous_block.hash\n\n newblock = Block(index=index, transactions=transactions, timestamp=timestamp, previous_hash=previous_hash)\n proof = Blockchain.proof_of_work(newblock)\n\n self.add_block(newblock, proof)\n self.unconfirmed_transactions = []\n return newblock.index", "def new_block(self, body_blocks, snake_head):\n\t\tx = randint(0, 35)\n\t\ty = randint(1, 26)\n\t\tself.rect.x = (25 * x) + 1\n\t\tself.rect.bottom = 25 * y\n\t\t\n\t\t# If new block is on snake, get new block\n\t\tif self.rect.x == snake_head.rect.x and self.rect.bottom == snake_head.rect.bottom:\n\t\t\tself.new_block(body_blocks, snake_head)\n\t\t\n\t\t# If new block is on any body block, get new block\n\t\tif body_blocks:\n\t\t\tfor i in range(len(body_blocks)):\n\t\t\t\tif self.rect.x == body_blocks[i].rect.x and self.rect.bottom == body_blocks[i].rect.bottom:\n\t\t\t\t\tself.new_block(body_blocks, snake_head)", "def begin():\n return BeginBlock()", "def add_block(self, block):\n # Create a list of transaction objects\n transactions = [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']] \n chipsactions = [Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']] \n messsactions = [Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']] \n # Validate the proof of work of the block and store the result (True or False) in a variable\n proof_is_valid = Verification.valid_proof(\n transactions[:-1], chipsactions, messsactions, block['previous_hash'], block['proof'])\n # Check if previous_hash stored in the block is equal to the local blockchain's last block's hash and store the result in a block\n hashes_match = hash_block(self.chain[-1]) == block['previous_hash']\n if not proof_is_valid or not hashes_match:\n return False\n # Create a Block object\n converted_block = Block(\n block['index'], block['previous_hash'], transactions, chipsactions, messsactions, block['proof'], block['timestamp'])\n self.__chain.append(converted_block)\n stored_transactions = self.__open_transactions[:]\n stored_chipsactions = self.__open_chipsactions[:]\n stored_messsactions = self.__open_messsactions[:]\n # Check which open transactions were included in the received block and remove them\n # This could be improved by giving each transaction an ID that would uniquely identify it\n for itx in block['transactions']:\n for opentx in stored_transactions:\n if opentx.sender == itx['sender'] and opentx.recipient == itx['recipient'] and opentx.amount == itx['amount'] and opentx.signature == itx['signature']:\n try:\n self.__open_transactions.remove(opentx)\n except ValueError:\n print('Item was already removed')\n\n for itx in block['chipsactions']:\n for opentx in stored_chipsactions:\n if opentx.sender == itx['sender'] and opentx.recipient == 
itx['recipient'] and opentx.sender == itx['follow'] and opentx.recipient == itx['message'] and opentx.amount == itx['amount'] and opentx.signature == itx['signature']:\n try:\n self.__open_chipsactions.remove(opentx)\n except ValueError:\n print('Item was already removed')\n for itx in block['messsactions']:\n for opentx in stored_messsactions:\n if opentx.sender == itx['sender'] and opentx.sender == itx['follower'] and opentx.recipient == itx['message'] and opentx.signature == itx['signature']:\n try:\n self.__open_messsactions.remove(opentx)\n except ValueError:\n print('Item was already removed')\n self.save_data()\n return True", "def create_block(data: str, leading_zeros: int) -> Block:\n MAX_NONCE_VALUE = 2 ** 32\n\n nonce = 0\n created_block = None\n\n potential_block = Block(data, nonce)\n\n while potential_block.nonce != MAX_NONCE_VALUE:\n potential_block_hash = potential_block.get_hash()\n binary_block_hash = get_binary_sha256_hash(potential_block_hash)\n\n if binary_block_hash[:leading_zeros] == \"0\" * leading_zeros:\n potential_block.hash = potential_block_hash\n potential_block.store()\n created_block = potential_block\n\n break\n else:\n potential_block.nonce += 1\n\n return created_block", "async def new_block(request: Request) -> dict:\n block: dict = await request.json()\n block = await chain.add_block(block)\n response_block = Block(**block).to_dict()\n\n miner_ip = f\"{request.client.host}:{request.client.port}\"\n for node in chain.peers:\n async with httpx.AsyncClient() as client:\n _ = await client.get(f\"{node}/\")\n temp_chain = {f\"Block-{height}\": data.to_dict()\n for height, data in enumerate(chain.serialized)}\n return {\"miner_address\": miner_ip,\n \"latest_block\": response_block.dict(),\n \"new_chain\": temp_chain, }", "def addBlock(self, newBlock):\n newBlock.index = len(self.chain)\n newBlock.previousHash = self.chain[-1].hash\n newBlock.mineBlock(self.difficulty)\n self.chain.append(newBlock)\n self.writeBlocks()", "def _make_block(self, model):\n # TODO Make base class\n assert model is not None, 'Top level model must be initialized first'\n self.model = model\n # If block is already present, remove it\n if self.model.component(self.name) is not None:\n self.model.del_component(self.name)\n self.model.add_component(self.name, Block())\n self.block = self.model.__getattribute__(self.name)\n\n self.logger.info(\n 'Optimization block initialized for {}'.format(self.name))", "def _check_for_new_block() -> None:\n logging.debug(\"Checking for new block\")\n CAPACITY = block.get_capacity()\n\n r = util.get_db()\n with r.lock(\"blockchain:last_block:lock\"), \\\n r.lock(\"blockchain:miner_pid:lock\"), \\\n r.lock(\"blockchain:tx_pool:lock\"), \\\n r.lock(\"blockchain:utxo-block:lock\"):\n # NOTE: If a miner is running, we expect it to add a new block, so we\n # abort. If mining succeeds, this function will be called again by\n # new_recv_block(). 
If it fails (another valid block is received) this\n # will again be called by new_recv_block()\n miner_pidb = r.get(\"blockchain:miner_pid\")\n if miner_pidb is not None:\n logging.debug(\"Miner already running with PID %d\", util.btoui(miner_pidb))\n return\n\n tx_pool = {Transaction.loadb(tb) for tb in r.hvals(\"blockchain:tx_pool\")}\n if len(tx_pool) < CAPACITY:\n logging.debug(\"Cannot create new block yet (not enough transactions)\")\n return\n\n last_block = get_block()\n utxo_block = {TransactionInput.loadb(i): TransactionOutput.loadb(o) for i, o \\\n in r.hgetall(\"blockchain:utxo-block:\".encode() + last_block.current_hash).items()}\n new_block_tx: List[Transaction] = []\n # NOTE: Since there are >= CAPACITY transactions in the pool, and we\n # don't mind transaction inter-dependence in the same block, a new\n # block can be created, so this loop will terminate\n while True:\n for t in tx_pool:\n # Search for t.inputs in UTXO-block[last_block] as well as in new_block_tx\n if all(i in utxo_block or \\\n any(nt.id == i.transaction_id for nt in new_block_tx) for i in t.inputs):\n new_block_tx.append(t)\n if len(new_block_tx) == CAPACITY:\n new_block = Block(index=last_block.index + 1,\n previous_hash=last_block.current_hash,\n transactions=new_block_tx)\n # NOTE: We don't delete the new block_tx from the pool, because\n # mining might fail. They will be deleted eventually when they\n # enter the main branch.\n miner_pid = new_block.finalize()\n r.set(\"blockchain:miner_pid\", util.uitob(miner_pid))\n logging.debug(\"Miner started with PID %d\", miner_pid)\n return\n tx_pool.difference_update(new_block_tx)", "def route_mine_block():\n transaction_data = transaction_pool.transaction_data()\n transaction_data.append(Transaction.reward(wallet).to_json())\n blockchain.add_block(transaction_data)\n block = blockchain.chain[-1]\n pubsub.broadcast_block(block)\n transaction_pool.clear_transactions_added_to_blockchain(blockchain)\n return jsonify(block.to_json())", "def test_fork_different_genesis(self):\n bvh = self.BlockValidationHandler()\n\n # create a new valid chain 5 long from the current root\n new_head = self.btm.generate_chain(self.btm.chain_head, 5,\n {'add_to_store': True})\n self.btm.set_chain_head(new_head[-1])\n\n # generate candidate chain 5 long from it's own genesis\n new_block = self.btm.generate_chain(None, 5,\n {'add_to_cache': True})\n\n bv = self.create_block_validator(new_block[-1], bvh.on_block_validated)\n bv.run()\n\n self.assertTrue(bvh.has_result())\n self.assertTrue(new_block[-1].status == BlockStatus.Invalid)\n self.assertFalse(bvh.result[\"commit_new_block\"])", "def test_valid_balance_genesis(self):\n db = MockDatabase()\n prev = TestBlock(block_type=BlockTypes.CHECKPOINT, transaction={'balance': 0})\n result, errors = prev.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])\n db.add_block(prev)", "def mine(self):\n # Checking if there is anything to be mined \n if len(self.pool) > 0:\n # Getting data from the pools list and removing it from the list\n data = self.pool.pop()\n # Instantiating the block with the given data and hash of the last block in the blocks list\n block = Block(data, self.blocks[-1].hash)\n # mining the block on the given difficulty level\n block.mine(self.difficulty)\n # Adding the block to the chain\n self.add_to_chain(block)\n # Showing block details\n self.verbose(block)", "def create_block(world: World, block_id: str, x: int, y: int, *args):\n block_id = BLOCKS[block_id]\n if 
block_id == \"mystery_empty\":\n block = MysteryBlock()\n elif block_id == \"mystery_coin\":\n block = MysteryBlock(drop=\"coin\", drop_range=(3, 6))\n elif block_id == \"bounce_block\":\n block = BounceBlock()\n elif block_id == \"flag\":\n block = Flagpole()\n elif block_id == \"tunnel\":\n block = Tunnel()\n elif block_id == \"switch\":\n block = Switch()\n else:\n block = Block(block_id)\n\n world.add_block(block, x * BLOCK_SIZE, y * BLOCK_SIZE)", "def new_block(self, previous_hash):\n\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'information': self.current_information,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_information = []\n\n self.chain.append(block)\n return block", "def mine(self, block):\r\n for n in range(self.maxNonce):\r\n if int(block.generate_hash(), 16) <= self.chain.targetHash:\r\n self.chain.add(block)\r\n break\r\n else:\r\n block.nonce += 1", "def __init__(self, transactions=None):\n\n self.blocks = []\n if transactions:\n if type(transactions) is not list:\n raise Exception(\"Data must be a list of transactions!\")\n\n for i, tx in enumerate(transactions):\n if i == 0: # Create genesis block\n if not signature.verify(tx.from_pk, tx.to_string_for_hashing(), tx.signature):\n print(\"Genesis transaction signature is NOT valid.\")\n return\n prev_hash = \"0\" # Arbitrary prev_hash for genesis block\n new_block = Block.create_from_transaction(tx, prev_hash)\n self.blocks.append(new_block)\n else:\n if not self.validate_transaction(tx):\n print(\"Transaction is NOT valid.\")\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)", "def blockchain_test():\n\n init_hash = hashlib.md5('foobarbaz').hexdigest()\n chain = Chain(genesis=Node(data=None, hash_str=init_hash))\n\n genesis = chain.genesis\n\n assert chain\n assert chain.size == 1\n assert init_hash == genesis.hash\n\n chain.new_node(data='hello')\n\n expected_hash = hashlib.md5(init_hash + 'hello').hexdigest()\n\n assert chain.size == 2\n assert chain.last_node.hash == expected_hash\n\n chain.new_node(data='world')\n\n expected_hash = hashlib.md5(expected_hash + 'world').hexdigest()\n\n assert chain.size == 3\n assert chain.last_node.hash == expected_hash\n\n chain.new_node(data='007')\n\n expected_hash = hashlib.md5(expected_hash + '007').hexdigest()\n\n assert chain.size == 4\n assert chain.last_node.hash == expected_hash", "def add_block(self, block_pf):\n\n # test si il s'agit du bloc genesis\n if len(self.blocks) != 0:\n # check si previous H est coherent avant ajout a chaine\n if self.check_previousBlockH(block_pf.header['prevBlockH']):\n self.blocks.append(block_pf)\n else:\n print \"== Probleme de parent\"\n print \"= %s\" % block_pf.header['prevBlockH']\n print \"= %s\" % getHashBlock(self.get_topBlock())\n else:\n self.blocks.append(block_pf)", "def get_last_block():\n if namoto_length < 1:\n return None\n\n return namoto_blockchain[-1]", "def test_validate_chain_with_tempered_block_nonce(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n last_block = blockchain.mine(miner_address)\n\n # First we look that a new block could be mined\n self.assertIsNotNone(last_block)\n\n chain = blockchain.full_chain\n\n # Hack a block\n chain.append(Block(1, [], 1, last_block.hash))\n\n self.assertFalse(blockchain.validate_chain(blockchain.full_chain))", "def ui_create_blockchain(chain_type=None, chain_backend=None, 
network_id=None, genesis=None, external_bootnodes=None):\n\n backends = get_backends_info()\n name = w.prompt(\"Blockchain name\")\n cloud = w.menu(\"Choose cloud\", [\"{} ({})\".format(b[\"name\"], b[\"id\"]) for b in get_clouds()])\n\n cloud_id = re.findall(r'^.*\\((.*)\\)$', cloud)[0]\n\n if chain_backend is None:\n chain_backend = w.menu(\"Chooses a blockchain backend\", backends.keys())\n\n if chain_type is None:\n chain_type = w.menu(\"Choose a blockchain type\", backends[chain_backend]['chain_types'])\n\n account = w.prompt(\"Enter the account to which mining rewards will be send\")\n controller_flavour = w.menu(\"Choose a flavour for the controller\", get_node_flavours(cloud_id))\n network = w.menu(\"Choose a network for the blockchain\", get_networks(cloud_id))\n jumpbox = w.menu(\"Choose a jumpbox\", get_instances(cloud_id))\n jumpbox_keyfile = w.prompt(\"Enter the jumpbox key file\")\n\n add_blockchain(\n name=name,\n cloud_id=cloud_id,\n chain_backend=chain_backend,\n chain_type=chain_type,\n controller_flavour=controller_flavour,\n jumpbox=jumpbox,\n jumpbox_keyfile=jumpbox_keyfile,\n network=network,\n mining_account=account,\n genesis=genesis,\n network_id=network_id,\n external_bootnodes=external_bootnodes)", "def parse_one_block_blockcypher(blockchain, block_number):\n results = {}\n response = requests.get('https://api.blockcypher.com/v1/%s/main/blocks/%s' % (blockchain, block_number))\n if response.status_code == 200:\n r = json.loads(response.content.decode('latin1'))\n results[columns[3]] = r['fees'] * 1E-8 # convert to non-satoshi\n results[columns[0]] = r['height']\n results[columns[1]] = r['n_tx']\n results['time'] = r['time']\n results['nonce'] = r['nonce']\n results['blockchain'] = r[\"chain\"]\n return results\n else:\n return -1", "def parse_blockcypher(blockchain, first_block=None, n_block=200):\n r = []\n if not first_block:\n first_block = get_first_block(blockchain)\n for block_number in range(first_block, first_block - n_block, -1):\n block = parse_one_block_blockcypher(blockchain, block_number)\n if block != -1:\n r.append(block)\n else:\n print('Error after block number %s (%s blocks done)' % (block_number, first_block - block_number))\n break\n df = pd.DataFrame(r)\n df[columns[4]] = pd.to_datetime(df['time'], format=\"%Y-%m-%dT%H:%M:%SZ\")\n df[columns[2]] = df[columns[3]] / df[columns[1]]\n return df", "def mine(self):\n if not self.unconfirmed_transactions: \n return False\n \n last_block = self.last_block\n \n new_block = Block(index= last_block.index + 1, \n transactions = self.unconfirmed_transactions,\n timestamp = time.time(),\n previous_hash = last_block.hash)\n\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n self.unconfirmed_transactions = []\n return new_block.index", "def block_start(self, previous_block_id):\n previous_block = self._block_store[previous_block_id].get_block()\n previous_header = BlockHeader()\n previous_header.ParseFromString(previous_block.header)\n\n block_info = BlockInfo(\n block_num=previous_header.block_num,\n previous_block_id=previous_header.previous_block_id,\n signer_public_key=previous_header.signer_public_key,\n header_signature=previous_block.header_signature,\n timestamp=int(time.time()))\n\n return [self.create_batch(block_info)]", "def position_at_beginning(self, block):\n self._curblock = block\n self._lastop = 'head'", "async def add_block(\n self,\n block: FullBlock,\n peer: Optional[WSChiaConnection] = None,\n raise_on_disconnected: bool = False,\n ) -> Optional[Message]:\n if 
self.sync_store.get_sync_mode():\n return None\n\n # Adds the block to seen, and check if it's seen before (which means header is in memory)\n header_hash = block.header_hash\n if self.blockchain.contains_block(header_hash):\n return None\n\n pre_validation_result: Optional[PreValidationResult] = None\n if (\n block.is_transaction_block()\n and block.transactions_info is not None\n and block.transactions_info.generator_root != bytes([0] * 32)\n and block.transactions_generator is None\n ):\n # This is the case where we already had the unfinished block, and asked for this block without\n # the transactions (since we already had them). Therefore, here we add the transactions.\n unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash()\n unf_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(unfinished_rh)\n if (\n unf_block is not None\n and unf_block.transactions_generator is not None\n and unf_block.foliage_transaction_block == block.foliage_transaction_block\n ):\n # We checked that the transaction block is the same, therefore all transactions and the signature\n # must be identical in the unfinished and finished blocks. We can therefore use the cache.\n pre_validation_result = self.full_node_store.get_unfinished_block_result(unfinished_rh)\n assert pre_validation_result is not None\n block = dataclasses.replace(\n block,\n transactions_generator=unf_block.transactions_generator,\n transactions_generator_ref_list=unf_block.transactions_generator_ref_list,\n )\n else:\n # We still do not have the correct information for this block, perhaps there is a duplicate block\n # with the same unfinished block hash in the cache, so we need to fetch the correct one\n if peer is None:\n return None\n\n block_response: Optional[Any] = await peer.call_api(\n FullNodeAPI.request_block, full_node_protocol.RequestBlock(block.height, True)\n )\n if block_response is None or not isinstance(block_response, full_node_protocol.RespondBlock):\n self.log.warning(\n f\"Was not able to fetch the correct block for height {block.height} {block_response}\"\n )\n return None\n new_block: FullBlock = block_response.block\n if new_block.foliage_transaction_block != block.foliage_transaction_block:\n self.log.warning(f\"Received the wrong block for height {block.height} {new_block.header_hash}\")\n return None\n assert new_block.transactions_generator is not None\n\n self.log.debug(\n f\"Wrong info in the cache for bh {new_block.header_hash}, there might be multiple blocks from the \"\n f\"same farmer with the same pospace.\"\n )\n # This recursion ends here, we cannot recurse again because transactions_generator is not None\n return await self.add_block(new_block, peer)\n state_change_summary: Optional[StateChangeSummary] = None\n ppp_result: Optional[PeakPostProcessingResult] = None\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n # After acquiring the lock, check again, because another asyncio thread might have added it\n if self.blockchain.contains_block(header_hash):\n return None\n validation_start = time.time()\n # Tries to add the block to the blockchain, if we already validated transactions, don't do it again\n npc_results = {}\n if pre_validation_result is not None and pre_validation_result.npc_result is not None:\n npc_results[block.height] = pre_validation_result.npc_result\n\n # Don't validate signatures because we want to validate them in the main thread later, since we have a\n # cache available\n 
pre_validation_results = await self.blockchain.pre_validate_blocks_multiprocessing(\n [block], npc_results, validate_signatures=False\n )\n added: Optional[AddBlockResult] = None\n pre_validation_time = time.time() - validation_start\n try:\n if len(pre_validation_results) < 1:\n raise ValueError(f\"Failed to validate block {header_hash} height {block.height}\")\n if pre_validation_results[0].error is not None:\n if Err(pre_validation_results[0].error) == Err.INVALID_PREV_BLOCK_HASH:\n added = AddBlockResult.DISCONNECTED_BLOCK\n error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH\n elif Err(pre_validation_results[0].error) == Err.TIMESTAMP_TOO_FAR_IN_FUTURE:\n raise TimestampError()\n else:\n raise ValueError(\n f\"Failed to validate block {header_hash} height \"\n f\"{block.height}: {Err(pre_validation_results[0].error).name}\"\n )\n else:\n result_to_validate = (\n pre_validation_results[0] if pre_validation_result is None else pre_validation_result\n )\n assert result_to_validate.required_iters == pre_validation_results[0].required_iters\n (added, error_code, state_change_summary) = await self.blockchain.add_block(\n block, result_to_validate, None\n )\n if added == AddBlockResult.ALREADY_HAVE_BLOCK:\n return None\n elif added == AddBlockResult.INVALID_BLOCK:\n assert error_code is not None\n self.log.error(f\"Block {header_hash} at height {block.height} is invalid with code {error_code}.\")\n raise ConsensusError(error_code, [header_hash])\n elif added == AddBlockResult.DISCONNECTED_BLOCK:\n self.log.info(f\"Disconnected block {header_hash} at height {block.height}\")\n if raise_on_disconnected:\n raise RuntimeError(\"Expected block to be added, received disconnected block.\")\n return None\n elif added == AddBlockResult.NEW_PEAK:\n # Only propagate blocks which extend the blockchain (becomes one of the heads)\n assert state_change_summary is not None\n ppp_result = await self.peak_post_processing(block, state_change_summary, peer)\n\n elif added == AddBlockResult.ADDED_AS_ORPHAN:\n self.log.info(\n f\"Received orphan block of height {block.height} rh {block.reward_chain_block.get_hash()}\"\n )\n else:\n # Should never reach here, all the cases are covered\n raise RuntimeError(f\"Invalid result from add_block {added}\")\n except asyncio.CancelledError:\n # We need to make sure to always call this method even when we get a cancel exception, to make sure\n # the node stays in sync\n if added == AddBlockResult.NEW_PEAK:\n assert state_change_summary is not None\n await self.peak_post_processing(block, state_change_summary, peer)\n raise\n\n validation_time = time.time() - validation_start\n\n if ppp_result is not None:\n assert state_change_summary is not None\n await self.peak_post_processing_2(block, peer, state_change_summary, ppp_result)\n\n percent_full_str = (\n (\n \", percent full: \"\n + str(round(100.0 * float(block.transactions_info.cost) / self.constants.MAX_BLOCK_COST_CLVM, 3))\n + \"%\"\n )\n if block.transactions_info is not None\n else \"\"\n )\n self.log.log(\n logging.WARNING if validation_time > 2 else logging.DEBUG,\n f\"Block validation time: {validation_time:0.2f} seconds, \"\n f\"pre_validation time: {pre_validation_time:0.2f} seconds, \"\n f\"cost: {block.transactions_info.cost if block.transactions_info is not None else 'None'}\"\n f\"{percent_full_str} header_hash: {header_hash} height: {block.height}\",\n )\n\n # This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP\n peak = self.blockchain.get_peak()\n assert peak is not None\n\n # Removes all 
temporary data for old blocks\n clear_height = uint32(max(0, peak.height - 50))\n self.full_node_store.clear_candidate_blocks_below(clear_height)\n self.full_node_store.clear_unfinished_blocks_below(clear_height)\n if peak.height % 1000 == 0 and not self.sync_store.get_sync_mode():\n await self.sync_store.clear_sync_info() # Occasionally clear sync peer info\n\n state_changed_data: Dict[str, Any] = {\n \"transaction_block\": False,\n \"k_size\": block.reward_chain_block.proof_of_space.size,\n \"header_hash\": block.header_hash,\n \"height\": block.height,\n \"validation_time\": validation_time,\n \"pre_validation_time\": pre_validation_time,\n }\n\n if block.transactions_info is not None:\n state_changed_data[\"transaction_block\"] = True\n state_changed_data[\"block_cost\"] = block.transactions_info.cost\n state_changed_data[\"block_fees\"] = block.transactions_info.fees\n\n if block.foliage_transaction_block is not None:\n state_changed_data[\"timestamp\"] = block.foliage_transaction_block.timestamp\n\n if block.transactions_generator is not None:\n state_changed_data[\"transaction_generator_size_bytes\"] = len(bytes(block.transactions_generator))\n\n state_changed_data[\"transaction_generator_ref_list\"] = block.transactions_generator_ref_list\n if added is not None:\n state_changed_data[\"receive_block_result\"] = added.value\n\n self._state_changed(\"block\", state_changed_data)\n\n record = self.blockchain.block_record(block.header_hash)\n if self.weight_proof_handler is not None and record.sub_epoch_summary_included is not None:\n if self._segment_task is None or self._segment_task.done():\n self._segment_task = asyncio.create_task(self.weight_proof_handler.create_prev_sub_epoch_segments())\n return None", "def mine_transactions(self, address):\n transaction = Transaction(walletoffrom=None, walletofto=address, amount=self.reward)\n self.current_transactions.append(transaction)\n\n block = Block(target=self.target, transactions=self.current_transactions, previoushash=self.last_block().__hash__())\n\n\n self.chain.append(block)\n self.current_transactions = []" ]
[ "0.82950324", "0.72951174", "0.7135279", "0.7082487", "0.7036506", "0.7036506", "0.7006431", "0.6923667", "0.6863689", "0.6726204", "0.66985613", "0.6642569", "0.6592184", "0.6535945", "0.65345776", "0.6523206", "0.64736736", "0.6410874", "0.639564", "0.63902736", "0.6376658", "0.6365321", "0.6359219", "0.6341173", "0.6310709", "0.6300225", "0.62643796", "0.62553656", "0.6242157", "0.62271357", "0.62255484", "0.62226945", "0.6205295", "0.6198096", "0.61912143", "0.617212", "0.6164279", "0.61623704", "0.61264694", "0.6067108", "0.6050508", "0.6036968", "0.6024302", "0.6023027", "0.601436", "0.6004765", "0.59995323", "0.5995884", "0.5974266", "0.59574676", "0.59562176", "0.59497225", "0.5938102", "0.59289706", "0.59201753", "0.59194154", "0.5917267", "0.5905863", "0.59015906", "0.58840173", "0.5876119", "0.587564", "0.58740693", "0.5855957", "0.5842416", "0.5827762", "0.5807799", "0.58060884", "0.5797595", "0.57949734", "0.577104", "0.5759065", "0.5734132", "0.5732882", "0.5725509", "0.57216215", "0.5719081", "0.5717059", "0.57081157", "0.5705395", "0.5658371", "0.56572163", "0.56563246", "0.56501013", "0.56427705", "0.5642765", "0.5634932", "0.5628327", "0.56175643", "0.56172705", "0.5615158", "0.56093186", "0.55998695", "0.559344", "0.5570632", "0.5540853", "0.5537157", "0.55361414", "0.55318856", "0.5524084" ]
0.7289684
2
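The mining snippets in the record above all follow one pattern: gather pending transactions into a candidate block, then increment a nonce until the block's hash meets a difficulty target. A minimal self-contained sketch of that pattern (the field names and the four-leading-zeros difficulty are illustrative assumptions, not taken from any single snippet):

import hashlib
import json
import time

def hash_block(block):
    # Deterministic digest: serialize with sorted keys before hashing.
    return hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest()

def mine_block(previous_hash, transactions, difficulty=4):
    # Proof of work: bump the nonce until the hash has `difficulty` leading zeros.
    block = {
        "timestamp": time.time(),
        "transactions": transactions,
        "previous_hash": previous_hash,
        "nonce": 0,
    }
    while not hash_block(block).startswith("0" * difficulty):
        block["nonce"] += 1
    block["hash"] = hash_block(block)  # no "hash" key is set yet, so this equals the winning digest
    return block

With difficulty 4 the search typically needs on the order of 16**4 (about 65k) attempts, which is why several snippets above cap the nonce or hand the search to a separate miner process.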
Initializing Cin as 0, l2 as an empty list, and final as an empty string
def add(num1, num2, Cin):\n\tCin = 0  # carry-in initialized to 0\n\tl2 = []  # collected sum bits, least-significant first\n\tfinal = ""\n\t# add from the last index, i.e. least-significant bit first\n\tfor i in range(len(num1)-1, -1, -1):\n\t\tbit1 = num1[i]  # current bit of num1\n\t\tbit2 = num2[i]  # current bit of num2\n\t\tS1 = g.XOR(bit1, bit2)\n\t\tS2 = g.NAND(S1, Cin)\n\t\tS3 = g.OR(S1, Cin)\n\t\tSUM = g.AND(S2, S3)  # (S1 NAND Cin) AND (S1 OR Cin) == S1 XOR Cin\n\t\tC1 = g.AND(bit1, bit2)\n\t\tC2 = g.AND(S1, Cin)\n\t\tC3 = g.NOR(C1, C2)\n\t\tCout = g.NOT(C3)  # NOT(NOR(C1, C2)) == C1 OR C2\n\t\tCin = Cout  # carry feeds the next, more significant bit\n\t\tl2.append(SUM)\n\t\tfinal += str(l2[-1])  # accumulate the sum bits as a string, LSB-first\n\tfinal1 = final[::-1]  # reverse so the result reads MSB-first\n\treturn final1  # binary sum as a string (the final carry-out is dropped)
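The gate combination above is the standard full adder. A quick self-contained check of the two identities it relies on, using plain ints in place of the assumed g gates module:

# SUM  = (S1 NAND Cin) AND (S1 OR Cin)   which equals  S1 XOR Cin, with S1 = a XOR b
# Cout = NOT((a AND b) NOR (S1 AND Cin)) which equals  (a AND b) OR (S1 AND Cin)
for a in (0, 1):
    for b in (0, 1):
        for cin in (0, 1):
            s1 = a ^ b
            total = (1 - (s1 & cin)) & (s1 | cin)  # XOR built from NAND, OR, AND
            cout = (a & b) | (s1 & cin)            # carry-out
            assert total == (a + b + cin) % 2
            assert cout == (a + b + cin) // 2

Note that add() overwrites its Cin argument with 0 and drops the final carry-out, so a sum that overflows len(num1) bits loses its top bit.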
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, o0=(0, 0), o1=(0, 0)):\n self._output = [o0, o1]", "def __init__(self,l=None,c=True):\r\n\t\t\r\n\t\t# default None to zero\r\n\t\tif l is None:\r\n\t\t\tl = 0\r\n\t\t\t\r\n\t\tif l == []:\r\n\t\t\tl = 0\r\n\t\t\r\n\t\t# attempt to translate from string\r\n\t\ttry:\r\n\t\t\tl = Li._translate(l)\r\n\t\t\t\r\n\t\t# otherwise try to make a Term in a list\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tl = [Te(l)]\r\n\t\t\t\t\t\r\n\t\t\t# or assume already list of Terms\r\n\t\t\texcept:\r\n\t\t\t\tl = [Te(i) for i in l]\r\n\t\t\t\r\n\t\t# condense?\r\n\t\tif c:\r\n\t\t\tl = Li._condense(l)\r\n\t\t\t\r\n\t\t# deposit terms, skip zeroes\r\n\t\tfor i in l:\r\n\t\t\tif 0 not in i:\r\n\t\t\t\tself.append(i)", "def __init__(self, *args, **kwargs):\n super(AbsLoopinData, self).__init__(\n # All set outside\n ('linl_lis', LinlLis()),\n ('linh', Byte()),\n *args, **kwargs\n )", "def __init__(self):\n\t\tself.s1 = []\n\t\tself.s2 = []", "def __init__(self):\n self.l = []\n self.length = 0", "def __init__(self):\n self.l = []", "def __init__(self):\n self.s1 = list()\n self.s2 = list()", "def __init__(self):\n self.l = {}\n self.s = {}", "def init(self):\n logger.info(mm_cnofs.ackn_str)\n self.acknowledgements = mm_cnofs.ackn_str\n self.references = '\\n'.join((mm_cnofs.refs['mission'],\n mm_cnofs.refs['vefi']))\n\n return", "def __init__(self, *args, **kwargs):\n super(LinlLis, self).__init__(\n ('linl', Bits(maxlen=4)),\n ('lis', Bits(maxlen=4)),\n *args, **kwargs\n )", "def __init__(self):\n list.__init__([])\n self.name = ''\n self.ideal_value = None", "def __init__(self):\n self.l = []\n self.r = []", "def __init__(self):\n self.N_Chls = 0\n self.N_Chl_a = 0\n self.N_Chl_b = 0\n self.type = \"none\"", "def __init__(self,kl,l=1024):\n self.kl,self.l = kl,l", "def __init__(self, inlist):\n\n self.args = []\n while len(inlist) != 0:\n added = 0\n for i, v in enumerate(self.args):\n if ((str(inlist[0][1]) == str(self.args[i][1])) and\n (str(inlist[0][2]) == str(self.args[i][2]))):\n self.args[i] = (self.args[i][0] +\n inlist[0][0], inlist[0][1], inlist[0][2])\n inlist.remove(inlist[0])\n added = 1\n break\n if added != 1:\n self.args.append(inlist[0])\n inlist.remove(inlist[0])\n i = 0\n # This code is to remove empty parts from the list\n while i < len(self.args):\n if ((self.args[i][0] == 0) | (self.args[i][1] == 0) |\n (self.args[i][2] == 0)):\n self.args.remove(self.args[i])\n i -= 1\n i += 1", "def __init__(self):\n self.numeralList = self.nf.getOcrNumerals(1234567890, 10)", "def __init__(self):\n self.if0 = None\n self.if1 = None", "def __init__(self):\n self.if0 = None\n self.if1 = None", "def __init__(self, lstr, for_signature=None):\n self.lstr = lstr\n if for_signature:\n self.forsig = for_signature\n else:\n self.forsig = lstr", "def __init__(self):\n self.__list = None\n self.__length = 0", "def parse_ls(self,ins):\n global Creg\n if ins.instr == 'lb':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'lbu':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif 
ins.instr == 'lh':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lhu':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lw':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'dlw':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'dmfc1':\n if len(ins.args) == 2:\n self.need = [ins.args[1]] \n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'l.s':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'l.d':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'sb': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'sbu': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sh': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'shu': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n \n else:\n raise Exception(\"Invalid number of args for ins: \", 
ins.instr) \n \n elif ins.instr == 'sw': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = [ins.args[0]] + self.need\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'dsw': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = self.double_reg(ins.args[0]) + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'dsz': \n if len(ins.args) == 1:\n ins.args[0] = str(ins.args[0]) \n g = re.match(Creg, ins.args[0])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[0]] \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 's.s': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 's.d': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = self.double_reg(ins.args[0]) + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'move':\n if len(ins.args) == 2:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mov.d':\n if len(ins.args) == 2:\n self.need = self.double_reg(ins.args[1])\n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mov.s':\n if len(ins.args) == 2:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'li':\n if len(ins.args) == 2:\n self.gen = [ins.args[0]]\n self.ival = ins.args[1]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)", "def __init__(self, repr:polynomialRepr, refTraj:referenceTrajectory, nu:int,limL=None,limU=None):\n super(boxInputCstr,self).__init__(repr,refTraj,nu)\n \n self.limLCall = hasattr(limL,\"__call__\")\n self.limUCall = hasattr(limU,\"__call__\")\n \n # Save values if limL is not callable -> not a function of time\n if not self.limLCall:\n self.limL = -np.ones((self.nu,1),dtype=nfloat) if limL is None else limL\n self.limL = np.array(limL).reshape((nu,1))\n self.thisLimL = self.limL\n # Save values if limL is not callable -> not a function of time\n if not self.limUCall:\n self.limU = np.ones((self.nu,1),dtype=nfloat) if limU is None else limU\n self.limU = np.array(limU).reshape((nu,1))\n self.thisLimU = self.limU", "def constructOrc(self, noCh=None, instList=None):\n return None", "def __init__(self):\n self.s1=[]\n self.s2=[]", "def __init__(self):\n self.soul = self._cnew(py_object(self),self._cmethods)\n self.name = self.__class__.__name__\n # required to keep this object around in the C world\n Py_INCREF(self)", "def 
__init__(self):\r\n self.s1=[]\r\n self.s2=[]", "def __init__(self, *args):\n this = _libsbml.new_ListOfInitialAssignments(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init___0(self, world, list_):\n self.world = world\n self.list_ = list_", "def __init__(self, initializing_list, typecode) :\r\n global TypeRanges, NumericToArray\r\n array_typecode = NumericToArray[typecode][0]\r\n self.numeric_typecode = typecode\r\n self.impl = array.array(array_typecode)\r\n self.complex = (typecode=='F' or typecode=='D')\r\n for x in initializing_list :\r\n self.append(x)", "def __init__(self, *args):\n this = _libsbml.new_ListOfCompartments(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,kl,salt=64,l=1024):\n self.kl,self.s,self.l = kl,salt,l", "def __init__(self) -> None:\n self.normal_customer: List[str] = list()\n self.priority_customer: List[str] = list()", "def __init__(self):\n self.d = {}\n self.l = []", "def init(self, l):\n global lock\n lock = l", "def __init__(self, id, lcs, classname='unknown', generate_snippets=True):\n self.lcs = []\n self.lc_snippets = []\n self.id = id\n self.classname = classname\n for lc in lcs:\n self.lcs.append(lc)\n if generate_snippets:\n self.lc_snippets.extend(generate_lc_snippets(lc))", "def __init__(self):\n self.c_sect = []", "def __init__(self, bolsa,letras=[]):\n self.atril = []\n self.bolsa = bolsa\n if letras == []:\n self.initialize()\n else:\n self.cargar_atril(letras,bolsa)", "def __init__(self, ckt = None): \n self.unplaced = list(ckt)\n self.ckt = ckt\n self.ids = 0", "def __init__(self):\n self.inputs = []\n self.op = None\n self.const_attr = None\n self.name = \"\"", "def __init__(self):\n self.rho=[]\n self.te=[]\n self.ti=[]\n self.ne=[]\n self.ni=[]\n self.ni1=[]\n self.ni2=[]\n self.ni3=[]\n self.vtor=[]\n self.zeff=[]\n\n self.nion=1\n self.Z=[]\n self.A=[]\n self.coll_mode=[]", "def __init__(self):\n\t\tself.prim = None\n\t\tself.len = 0", "def __init__(self):\n self.m = 100007\n self.a = [None] * self.m", "def __init__(self):\n self.mylist1 = []\n self.mylist2 = []", "def get_init_code(self, obj):\n return []", "def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2", "def __init__(self):\n self.hi = []\n self.lo = []", "def __init__(self,ic,ia,informat):\n self.ped = {}\n self.pedlist = []\n self.mark = {}\n self.marklist = []\n self.sep = '\\t'\n if informat in ['Plink','plink']:\n self.ic = 6\n self.ia = 3\n self.nc = 1\n elif informat in ['DMU','dmu']:\n self.ic = 1\n self.ia = 1\n self.nc = 0\n elif not informat:\n self.ic = ic\n self.ia = ia\n self.nc = 0\n else:\n sys.stderr.write('Unknown input format: \"%s\"\\n' % informat)\n sys.exit(1)", "def __init__(self):\n self.s = []", "def __init__(self):\n self.s = []", "def __init__(self, numberType):\n\n self.id = List.__getNextId()\n self.__numberType = numberType\n if self.__numberType != List.NONE:\n self.__levelText = \"\\\\'02\\\\'00.\"\n self.__levelNumbers = \"\\\\'01\"\n else:\n self.__levelText = \"\\\\'01\\\\u8226 *\"\n self.__levelNumbers = \"\"", "def __init__(self):\n # list的行为更像是stack\n self.value = []", "def __init__(self, *args):\n this = _libsbml.new_IdList(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.data = None\n self.compiled = None", "def __init__(self):\n self.output = []\n self.ctl = clingo.Control() # Control object for the grounding/solving process", "def __init__(self, osi, other, init_strain, n_dim):\n self.osi = osi\n self.other 
= other\n self.init_strain = float(init_strain)\n self.n_dim = float(n_dim)\n if osi is not None:\n osi.n_mat += 1\n self._tag = osi.n_mat\n self._parameters = [self.op_type, self._tag, self.other.tag, self.init_strain, self.n_dim]\n if osi is None:\n self.built = 0\n if osi is not None:\n self.to_process(osi)", "def __init__(self, head: ListNode):\n self.l = []\n while head:\n self.l.append(head.val)\n head = head.next", "def __init__(self, *args):\n this = _libsbml.new_ListOfLocalParameters(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self._in = None\n self._out = None\n self._last_in_count = 0\n self._last_out_count = 0\n self._in_finished = False\n self._out_finished = False", "def __init__(self,L):\n list.__init__(self,L)\n self.list = L\n i,j = LTMatrix.getRowColumn(len(L) - 1)\n assert i == j, \"Not a LTMatrix\"\n self.dimension = i + 1", "def __init__(self):\n\n # The molecule-residue-spin object.\n self.mol = MoleculeList()\n\n # The interatomic data object.\n self.interatomic = InteratomList()\n\n # The data pipe type.\n self.pipe_type = None\n\n # Hybrid models.\n self.hybrid_pipes = []", "def init_c_code(self):\r\n subd = dict(\r\n zip(self.fgraph.inputs,\r\n [\"%%(i%i)s\" % i for i in xrange(len(self.fgraph.inputs))])\r\n + zip(self.fgraph.outputs,\r\n [\"%%(o%i)s\" % i for i in xrange(len(self.fgraph.outputs))]))\r\n\r\n for orphan in self.fgraph.variables: # fgraph.orphans:\r\n if orphan.owner is None and orphan not in self.fgraph.inputs:\r\n if isinstance(orphan, Constant):\r\n subd[orphan] = orphan.type.c_literal(orphan.data)\r\n else:\r\n raise ValueError(\r\n \"All orphans in the fgraph to Composite must\"\r\n \" be Constant instances.\")\r\n\r\n _c_code = \"{\\n\"\r\n self.nodenames = [\"%(nodename)s_\" + ('subnode%i' % j)\r\n for j, n in enumerate(self.fgraph.toposort())]\r\n\r\n i = 0\r\n for j, node in enumerate(self.fgraph.toposort()):\r\n for output in node.outputs:\r\n if output not in subd:\r\n i += 1\r\n name = \"V%%(id)s_tmp%i\" % i\r\n subd[output] = name\r\n _c_code += \"%s %s;\\n\" % (\r\n output.type.dtype_specs()[1], name)\r\n s = node.op.c_code(node,\r\n self.nodenames[j],\r\n [subd[input] for input in node.inputs],\r\n [subd[output] for output in node.outputs],\r\n dict(fail=\"%(fail)s\",\r\n id=\"%%(id)s_%i\" % j))\r\n _c_code += s\r\n _c_code += \"\\n\"\r\n _c_code += \"}\\n\"\r\n self._c_code = _c_code", "def _copy_catalog(self):\n\n # load the IOL into an astropy table\n # the table is in iol.catalog\n self.iol = axeiol.InputObjectList(self.in_sex)\n\n # check for an empty table\n if len(self.iol.catalog) < 1:\n _log.info(\"Empty catalog found\\n\")\n return None\n\n # create a new GOL that's a copy of the input list\n self.gol = deepcopy(self.iol.catalog) # just make a copy", "def __init__(self,obj):\n self.nature_libelle = obj['NatureLibelle']\n self.ins_nom = obj['InsNom']\n self.ins_numero_install = obj['InsNumeroInstall']\n self.equipement_id = obj['EquipementId']", "def __init__(self, lunit=\"nm\"):\n super().__init__(lunit)", "def __init__(self, list, store_none=True):\n super().__init__()\n self.list = list\n self.store_none = store_none", "def __init__(self, Ls, germs, prepStrs, effectStrs, aliases=None,\n sequenceRules=None):\n self.Ls = Ls[:]\n self.germs = germs[:]\n self.prepStrs = prepStrs[:]\n self.effectStrs = effectStrs[:]\n self.aliases = aliases.copy() if (aliases is not None) else None\n self.sequenceRules = sequenceRules[:] if (sequenceRules is not None) else None\n\n self.allstrs = 
[]\n self.allstrs_set = set()\n self.unindexed = [] # unindexed strings\n self._plaquettes = {}\n self._firsts = []\n self._baseStrToLGerm = {}\n super(LsGermsStructure, self).__init__()", "def __init__( self\n , _o_data\n ):\n self.o_data = _o_data", "def __init__(self, *args):\n this = _libsbml.new_InitialAssignment(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, Ls, germs, nMinorRows, nMinorCols, aliases=None,\n sequenceRules=None):\n self.Ls = Ls[:]\n self.germs = germs[:]\n self.nMinorRows = nMinorRows\n self.nMinorCols = nMinorCols\n self.aliases = aliases.copy() if (aliases is not None) else None\n self.sequenceRules = sequenceRules[:] if (sequenceRules is not None) else None\n\n self.allstrs = []\n self.allstrs_set = set()\n self.unindexed = []\n self._plaquettes = {}\n self._firsts = []\n self._baseStrToLGerm = {}\n super(LsGermsSerialStructure, self).__init__()", "def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())", "def __init__(self):\n self.a = []\n self.b = []", "def __init__(self, i1=None, i2=None, axial=None, label=None):\n self.i1 = i1 # will be 1 for center.\n self.i2 = i2\n self.setAxial(axial)\n if not label:\n self.label = \"ExCore\"\n self.makeLabel()\n else:\n self.fromLabel(label)\n self.firstChar = None", "def __init__(self, vnl=230.):\n self.__brackets = []\n self.__voltage_no_load = vnl", "def initialize(self, cwrap):\n pass", "def __init__(self, string1, string2):\r\n self.s1 = string1\r\n self.s2 = string2\r\n return", "def __init__(self, *args):\n this = _libsbml.new_ListOfCompartmentTypes(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _libsbml.new_CompFlatteningConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\r\n self.a = []\r\n self.b = []", "def __init__(self):\n self.capacity = 1000\n self.data = [None]*self.capcity", "def __init__(self):\r\n self.data = PositionalList()", "def __init__(self):\n self.b = []", "def __init__(self):\r\n self.lis = []", "def __init__(self):\n self.dummy = ListNode(-1)\n self.cnt = 0", "def __init__(self, *args):\n this = _ida_hexrays.new_carglist_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.output = []", "def __init__(self):\n self.i, self.pool = 0, list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')", "def initialize(self):\n self.muondEdx = []\n self.muondNdx = []\n self.muonmomentum = []\n self.piondEdx = []\n self.piondNdx = []\n self.pionmomentum = []\n self.kaondEdx = []\n self.kaondNdx = []\n self.kaonmomentum = []\n self.protdEdx = []\n self.protdNdx = []\n self.protmomentum = []\n self.elecdEdx = []\n self.elecdNdx = []\n self.elecmomentum = []", "def new_empty_ll():\n from linked_list import Linked_List\n this_empty_ll = Linked_List()\n return this_empty_ll", "def __init__(self, arg0, arg1=None, arg2=None):\n self.exclude_nullable = False\n self.exclude_units = False\n self.inserted = False\n self.deleted = \"\"\n self.replaced = \"\"\n self.prefix = \"\"\n self.suffix = \"\"\n if arg1 is None:\n self.__set_str(arg0)\n else:\n self.__set_vars(arg0, arg1, arg2)", "def __init__(self, m, c):\n self.m = m\n self.c = c", "def __init__(self, states, L):\n self._states = set(states)\n self._veclen = L", "def __init__(self, *args):\n this = _ida_hexrays.new_qlist_cinsn_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.normal = []\n self.reverse = []", "def __init__(self) -> None:\n 
self.network: list = list()\n self.arcs = 0", "def init(self):\n\n logger.info(mm_chain.ackn_str)\n self.acknowledgements = mm_chain.ackn_str\n self.references = mm_chain.refs['chain']\n\n return", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def __init__(self):\r\n self.strings = []", "def __init__(self):\n self.min_stack = []\n self.listHead = LNode(0, 0)", "def __init__(self, *args):\n this = _libsbml.new_ListOfUnits(*args)\n try: self.this.append(this)\n except: self.this = this", "def init(l):\n global lock\n lock = l", "def init(l):\n global lock\n lock = l" ]
[ "0.6156051", "0.60318625", "0.5904705", "0.5787834", "0.5713939", "0.56481445", "0.56018484", "0.5577135", "0.5547001", "0.55375147", "0.54676926", "0.5461734", "0.54217833", "0.5397691", "0.5375492", "0.5349343", "0.53472084", "0.53472084", "0.53247607", "0.53019875", "0.5295706", "0.528412", "0.52733594", "0.52634877", "0.5247873", "0.523901", "0.5220308", "0.5220042", "0.52189195", "0.52106845", "0.519744", "0.5163793", "0.51625115", "0.5158391", "0.5157653", "0.51575613", "0.5153854", "0.51425225", "0.5115596", "0.51155645", "0.51062965", "0.5104775", "0.5102329", "0.5078961", "0.50782114", "0.5077383", "0.50717133", "0.5062104", "0.5062104", "0.5058103", "0.5048187", "0.50481164", "0.5047126", "0.5044209", "0.50192446", "0.5018135", "0.50086784", "0.49863017", "0.4981689", "0.49713477", "0.49659592", "0.49644005", "0.4962494", "0.49623057", "0.4959928", "0.49558198", "0.49523944", "0.49479413", "0.49438202", "0.49336103", "0.49293098", "0.49242005", "0.49191064", "0.49175408", "0.49147007", "0.4913159", "0.4913016", "0.49114183", "0.49101967", "0.49069208", "0.49025312", "0.49017152", "0.4900963", "0.4900006", "0.48967075", "0.4894093", "0.48934117", "0.48776478", "0.48733816", "0.48724723", "0.48695272", "0.48689884", "0.48678043", "0.48565578", "0.4853295", "0.48529392", "0.48522082", "0.4851135", "0.48493215", "0.4845486", "0.4845486" ]
0.0
-1
NetflowFilters - a model defined in Swagger
def __init__(self, node_b=None, qos_type=None, device_interfaces=None, ports=None, protocol=None, ip_version=None, netflow_devices=None, top=None, app_type=None, nbar_application_names=None, node_a=None, conversation=None, if_names=None, direction=None):  # noqa: E501\n\tself._node_b = None\n\tself._qos_type = None\n\tself._device_interfaces = None\n\tself._ports = None\n\tself._protocol = None\n\tself._ip_version = None\n\tself._netflow_devices = None\n\tself._top = None\n\tself._app_type = None\n\tself._nbar_application_names = None\n\tself._node_a = None\n\tself._conversation = None\n\tself._if_names = None\n\tself._direction = None\n\tself.discriminator = None\n\tif node_b is not None:\n\t\tself.node_b = node_b\n\tif qos_type is not None:\n\t\tself.qos_type = qos_type\n\tif device_interfaces is not None:\n\t\tself.device_interfaces = device_interfaces\n\tif ports is not None:\n\t\tself.ports = ports\n\tif protocol is not None:\n\t\tself.protocol = protocol\n\tif ip_version is not None:\n\t\tself.ip_version = ip_version\n\tif netflow_devices is not None:\n\t\tself.netflow_devices = netflow_devices\n\tif top is not None:\n\t\tself.top = top\n\tif app_type is not None:\n\t\tself.app_type = app_type\n\tif nbar_application_names is not None:\n\t\tself.nbar_application_names = nbar_application_names\n\tif node_a is not None:\n\t\tself.node_a = node_a\n\tif conversation is not None:\n\t\tself.conversation = conversation\n\tif if_names is not None:\n\t\tself.if_names = if_names\n\tif direction is not None:\n\t\tself.direction = direction
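The initializer above only fills private attributes; in swagger-codegen Python output each public name is then exposed through a property/setter pair, which is what the self.node_b = node_b style assignments go through. A minimal sketch of that generated pattern, reduced to a single attribute (the allowed values shown are assumed examples, not taken from the real model):

class NetflowFiltersSketch(object):
    # Hypothetical one-attribute reduction of the generated model.
    def __init__(self, direction=None):
        self._direction = None
        self.discriminator = None
        if direction is not None:
            self.direction = direction  # routed through the setter below

    @property
    def direction(self):
        """Gets the direction of this NetflowFilters."""
        return self._direction

    @direction.setter
    def direction(self, direction):
        """Sets the direction of this NetflowFilters."""
        allowed_values = ["ingress", "egress"]  # assumed example values
        if direction not in allowed_values:
            raise ValueError(
                "Invalid value for `direction` ({0}), must be one of {1}"
                .format(direction, allowed_values)
            )
        self._direction = direction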
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_filters(self):", "def get_timeline_filters(self, req):", "def filter(self, filters):", "def search_model():\n search_condition = request.stream.read()\n try:\n search_condition = json.loads(search_condition if search_condition else \"{}\")\n except Exception:\n raise ParamValueError(\"Json data parse failed.\")\n\n model_lineage_info = _get_lineage_info(\n lineage_type=\"model\",\n search_condition=search_condition\n )\n\n return jsonify(model_lineage_info)", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def test_parse_filter_params_success(self):\n filter_params = {\n \"resolution\": \"daily\",\n \"time_scope_value\": \"-10\",\n \"time_scope_units\": \"day\",\n \"region\": FAKE.word(),\n \"payer_tenant_id\": FAKE.uuid4(),\n \"product_service\": FAKE.word(),\n }\n serializer = OCIFilterSerializer(data=filter_params)\n self.assertTrue(serializer.is_valid())", "def test_parse_filter_params_no_time(self):\n filter_params = {\n \"region\": FAKE.word(),\n \"payer_tenant_id\": FAKE.uuid4(),\n \"instance_type\": FAKE.word(),\n }\n serializer = OCIFilterSerializer(data=filter_params)\n self.assertTrue(serializer.is_valid())", "def getFilter(self, type: int) -> int:\n ...", "def __init__(self, filter_methods: ConfigNodePropertyArray=None, filter_enable_safe_user_agents: ConfigNodePropertyBoolean=None, filter_safe_user_agents: ConfigNodePropertyArray=None, filter_excluded_paths: ConfigNodePropertyArray=None): # noqa: E501\n self.openapi_types = {\n 'filter_methods': ConfigNodePropertyArray,\n 'filter_enable_safe_user_agents': ConfigNodePropertyBoolean,\n 'filter_safe_user_agents': ConfigNodePropertyArray,\n 'filter_excluded_paths': ConfigNodePropertyArray\n }\n\n self.attribute_map = {\n 'filter_methods': 'filter.methods',\n 'filter_enable_safe_user_agents': 'filter.enable.safe.user.agents',\n 'filter_safe_user_agents': 'filter.safe.user.agents',\n 'filter_excluded_paths': 'filter.excluded.paths'\n }\n\n self._filter_methods = filter_methods\n self._filter_enable_safe_user_agents = filter_enable_safe_user_agents\n self._filter_safe_user_agents = filter_safe_user_agents\n self._filter_excluded_paths = filter_excluded_paths", "def filter(self, *args, **kwargs):", "def testUsingFilterTool(self):\n pass", "def test_optional_filter_params(self):\n del self.internal_filter['max']\n del self.external_filter['max']\n\n # Serialize\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)\n\n # Deserialize\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in 
SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def test_all_filter_op_fields(self):\n for field in OCIFilterSerializer._opfields:\n field = \"and:\" + field\n filter_param = {field: [\"1\", \"2\"]}\n serializer = OCIFilterSerializer(data=filter_param)\n self.assertTrue(serializer.is_valid())\n for field in OCIFilterSerializer._opfields:\n field = \"or:\" + field\n filter_param = {field: [\"1\", \"2\"]}\n serializer = OCIFilterSerializer(data=filter_param)\n self.assertTrue(serializer.is_valid())", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def reference_filters(self, version, options):\n pass", "def __init__(self):\n\n super().__init__(\n filter_models=[\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(modalities={\"image\"}),\n ),\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(\n modalities={\"pos\", \"sensors\"}\n ),\n ),\n ],\n state_dim=3,\n )", "def test_rawfilter(self):\n credentials = Mock(base_url=\"\")\n manager = Manager('invoices', credentials)\n uri, params, method, body, headers, singleobject = manager._filter(\n Status=\"VOIDED\",\n raw='Name.ToLower()==\"test contact\"'\n )\n self.assertEqual(\n params,\n {'where': 'Name.ToLower()==\"test contact\"&&Status==\"VOIDED\"'}\n )", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "async def filter(self, **kwargs):\n\n pass", "def test_categorical_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def expose_models(app, HOST=\"localhost\", PORT=5000, API_PREFIX=\"/api\"):\n\n api = SAFRSAPI(app, host=HOST, port=PORT)\n api.expose_object(models.Category)\n api.expose_object(models.CustomerCustomerDemo)\n api.expose_object(models.OrderDetail)\n api.expose_object(models.Order)\n api.expose_object(models.Customer)\n api.expose_object(models.CustomerDemographic)\n api.expose_object(models.EmployeeAudit)\n api.expose_object(models.EmployeeTerritory)\n api.expose_object(models.Employee)\n api.expose_object(models.Product)\n api.expose_object(models.Region)\n api.expose_object(models.Shipper)\n api.expose_object(models.Supplier)\n api.expose_object(models.Territory)\n return api", "def test_query_filter_field(self):\n obj = self.provision_single_asset()\n # TODO: Write a positive test for this\n ret = self.get('widget', 200,\n params={'__filter': [\n {'field': 'created_at', 'name': 'name', 'op': 'eq'}]})\n assert len(ret['objects']) == 0", "def __init__(self, type: int, filter: int):\n ...", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = 
self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def init_filters(model: Model, settings: Model) -> None:\n filters = [\n {\"name\": \"Project\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Attachments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Priority\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Resolved\", \"filtration_type\": \"date\"},\n {\"name\": \"Labels\", \"filtration_type\": \"string\"},\n {\"name\": \"Created\", \"filtration_type\": \"date\"},\n {\"name\": \"Comments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Status\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Key\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Summary\", \"filtration_type\": \"string\"},\n {\"name\": \"Resolution\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Description\", \"filtration_type\": \"string\"},\n {\"name\": \"Components\", \"filtration_type\": \"string\"},\n ]\n for filter_ in filters:\n model.objects.create(\n name=filter_[\"name\"],\n filtration_type=filter_[\"filtration_type\"],\n settings=settings,\n )", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def _filter_in_request(self):\n pass", "def __init__(self, response_template=None, response_attribute_filter_default_value=False,\\\n parameter_sets=None, body_template=None, request_attribute_filter=None):\n\n self._response_attribute_filter_template = None\n\n #: response_template; required parameter\n if response_template is not None and \\\n (not isinstance(response_template, prestans.types.DataCollection) and\\\n not isinstance(response_template, prestans.types.BinaryResponse)):\n raise TypeError(\"response_template of type %s must be an instance of \\\n a prestans.types.DataCollection subclass\" % response_template.__class__.__name__)\n\n if response_template is not None and \\\n isinstance(response_template, prestans.types.DataCollection):\n self.response_attribute_filter_template = AttributeFilter.\\\n from_model(model_instance=response_template,\\\n default_value=response_attribute_filter_default_value)\n else:\n self.response_attribute_filter_template = None\n\n self._response_template = response_template\n\n #: parameter_sets turn a single object into a list\n if isinstance(parameter_sets, ParameterSet):\n parameter_sets = [parameter_sets]\n\n if isinstance(parameter_sets, list):\n for parameter_set in parameter_sets:\n if not isinstance(parameter_set, ParameterSet):\n raise TypeError(\"parameter_set of type %s must be an instance of \\\n 
prestans.parser.ParameterSet\" % parameter_set.__class__.__name__)\n\n self._parameter_sets = parameter_sets\n else:\n self._parameter_sets = list()\n\n #: body_template\n if body_template is not None and not \\\n isinstance(body_template, prestans.types.DataCollection):\n raise TypeError(\n \"body_template of type %s must be an instance of \\\n a prestans.types.DataCollection subclass\" % body_template.__class__.__name__)\n\n self._body_template = body_template\n\n #: request_attribute_filter\n if request_attribute_filter is not None and \\\n not isinstance(request_attribute_filter, AttributeFilter):\n raise TypeError(\"request_attribute_filter of type %s must an instance \\\n of prestans.parser.AttributeFilter\" % request_attribute_filter.__class__.__name__)\n\n self._request_attribute_filter = request_attribute_filter", "def __init__(self):\n self.swagger_types = {\n 'is_waiting': 'bool',\n 'is_active': 'bool',\n 'is_acd': 'bool',\n 'is_preferred': 'bool',\n 'is_screenshare': 'bool',\n 'is_cobrowse': 'bool',\n 'is_voicemail': 'bool',\n 'is_flagged': 'bool',\n 'is_monitored': 'bool',\n 'filter_wrap_up_notes': 'bool',\n 'match_all': 'bool'\n }\n\n self.attribute_map = {\n 'is_waiting': 'isWaiting',\n 'is_active': 'isActive',\n 'is_acd': 'isAcd',\n 'is_preferred': 'isPreferred',\n 'is_screenshare': 'isScreenshare',\n 'is_cobrowse': 'isCobrowse',\n 'is_voicemail': 'isVoicemail',\n 'is_flagged': 'isFlagged',\n 'is_monitored': 'isMonitored',\n 'filter_wrap_up_notes': 'filterWrapUpNotes',\n 'match_all': 'matchAll'\n }\n\n self._is_waiting = None\n self._is_active = None\n self._is_acd = None\n self._is_preferred = None\n self._is_screenshare = None\n self._is_cobrowse = None\n self._is_voicemail = None\n self._is_flagged = None\n self._is_monitored = None\n self._filter_wrap_up_notes = None\n self._match_all = None", "def tests_ti_document_get_filter(self):\n super().group_get_filter()", "def model(self) -> Type[Model]:", "def resolve(self, spec: \"ModelSpec\"):", "def filter(self, target_model):\n # return filter_dict_to_target_model(self._axl_data, target_model)\n super().__setattr__('_axl_data', filter_dict_to_target_model(self._axl_data, target_model))\n return self", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def ListModelInputs(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')", "def test_parse_filter(self):\n old_type = Filter(\n 'filter[student.school.title]', [\n Relationship('student', PersonSchema(), None),\n Relationship('school', StudentSchema(), None)],\n Attribute('title', SchoolSchema(), None), ('eq', ['test']))\n new_type = self.driver.parse(old_type)\n \n assert new_type.source == old_type\n assert old_type.relationships != new_type.relationships\n assert isinstance(new_type.relationships[0], Mapper)\n assert old_type.attribute != new_type.attribute\n assert isinstance(new_type.attribute, Column)\n assert old_type.value == new_type.value", "def process_type(self, swagger_type, context):\n pass", "def test_quantitative_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def get_filter_model(table: Table, allowed_filters: Sequence[str]) -> Type[BaseModel]:\n _check_columns_with_table(table, allowed_filters)\n\n annotations = {}\n\n for column in allowed_filters:\n column_obj = table.c[column]\n type_hint = _get_type(column_obj.type)\n annotations[column] = Optional[\n type_hint\n ] # optional as all columns may not be passed\n\n return type(table.name, (BaseModel,), {\"__annotations__\": annotations})", "def filter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"filter\")", "def test_categorical_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def display_representation(self, filters=None):\n attrs = OrderedDict()\n for field in get_fields(type(self)):\n name = field.field_name\n value = getattr(self, name)\n attrs[name] = value\n \n if filters is None:\n return attrs\n elif isinstance(filters, list):\n filter_attrs = OrderedDict()\n for attr in filters:\n if attr in attrs:\n filter_attrs[attr] = attrs[attr]\n return filter_attrs", "def filter(self, filter_dict):\n pass", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_stats(self):\n pass", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def __init__(self): # noqa: E501\n self.openapi_types = {\n }\n\n self.attribute_map = {\n }", "def test_search_collection_filters():\n col = Collection(search='forest', object_type=['layer'], filters={'provider': 'gee'}, app=['gfw'])\n assert len(col) > 1", "def related_view_filter():\n pass", "def test_tag_filter(self):\n request = RequestFactory().get('/?search=foobar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['name__icontains'], 'foobar')\n self.assertEquals(filter.qs.filters['status__startswith'], 'foobar')", "def test_bad_filter_json_format(admin_client, public_resource_with_metadata):\n query_filter = {'malformed': 'json'}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(query_filter), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n assert djangoresponse.status_code == 400\n assert \"Filter JSON parsing error\" in response['message']", "def _filter(self, _model, **kwargs):\n 
return _model.objects.filter(**kwargs)", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(NetflowFilters, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def filter(self, filter_id):\r\n self.require_collection()\r\n url = '{0}/{1}'.format(self.get_url(), filter_id)\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def __call__(self, *filters):\n return self.client._get_and_filter(Domain, *filters)", "def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass", "def to_dict(self):\n result = {}\n\n if hasattr(super(EnhancedWatermarkFilter, self), \"to_dict\"):\n result = super(EnhancedWatermarkFilter, self).to_dict()\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "def filters(self, value):\n if not isinstance(value, dict):\n raise TypeError(\"input must be a dictionary\")\n\n self._filters = value", "def filters(self):\n return self.__filters", "def AdaptiveFilter(model=\"lms\", **kwargs):\n # check if the filter size was specified\n if \"n\" not in kwargs:\n raise ValueError('Filter size is not defined (n=?).')\n # create filter according model\n if model.upper() not in FILTERS.keys():\n raise ValueError('Unknown model of filter {}'.format(model))\n return FILTERS[model.upper()](**kwargs)", "def test_collection_get_with_filters(self, app, filter_payload, total):\n base_path = self.get_base_path_with_query_str(filter_payload)\n request = app.get(base_path, headers=self.headers, status=200)\n result = request.json\n assert 'data' in result\n assert 'total' in result\n assert result['total'] == len(result['data']) == total", "def model(inputs, is_training):\n\n tf.logging.info(FLAGS.model_structure)\n tf.logging.info(FLAGS.model_edge_weights)\n structure = json.loads(FLAGS.model_structure)\n\n if FLAGS.use_object_input:\n feature_shape = inputs[0].shape\n original_inputs = inputs[0]\n object_inputs = inputs[1]\n else:\n feature_shape = inputs.shape\n original_inputs = inputs\n object_inputs = None\n\n batch_size = feature_shape[0] // FLAGS.num_frames\n original_num_frames = FLAGS.num_frames\n num_frames = original_num_frames\n\n grouping = {-3: [], -2: [], -1: [], 0: 
[], 1: [], 2: [], 3: []}\n for i in range(len(structure)):\n grouping[structure[i][0]].append(i)\n\n stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])\n\n assert stem_count != 0\n stem_filters = 128 // stem_count\n\n if grouping[-2]:\n # Instead of loading optical flows as inputs from data pipeline, we are\n # applying the \"Representation Flow\" to RGB frames so that we can compute\n # the flow within TPU/GPU on fly. It's essentially optical flow since we\n # do it with RGBs.\n flow_inputs = rf.rep_flow(\n original_inputs,\n batch_size,\n original_num_frames,\n num_iter=40,\n is_training=is_training,\n bottleneck=1,\n scope='rep_flow')\n streams = []\n\n for i in range(len(structure)):\n with tf.variable_scope('Node_' + str(i)):\n if structure[i][0] == -1:\n inputs = asn.rgb_conv_stem(original_inputs,\n original_num_frames,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -2:\n inputs = asn.flow_conv_stem(flow_inputs,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -3:\n # In order to use the object inputs, you need to feed your object\n # input tensor here.\n inputs = object_conv_stem(object_inputs,\n data_format)\n streams.append(inputs)\n else:\n block_number = structure[i][0]\n\n combined_inputs = [streams[structure[i][1][j]]\n for j in range(0, len(structure[i][1]))]\n\n tf.logging.info(grouping)\n nodes_below = []\n for k in range(-3, structure[i][0]):\n nodes_below = nodes_below + grouping[k]\n\n peers = []\n if FLAGS.attention_mode:\n lg_channel = -1\n tf.logging.info(nodes_below)\n for k in nodes_below:\n tf.logging.info(streams[k].shape)\n lg_channel = max(streams[k].shape[3], lg_channel)\n\n for node_index in nodes_below:\n attn = tf.reduce_mean(streams[node_index], [1, 2])\n\n attn = tf.layers.dense(\n inputs=attn,\n units=lg_channel,\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n peers.append(attn)\n\n combined_inputs = fusion_with_peer_attention(\n combined_inputs,\n index=i,\n attention_mode=FLAGS.attention_mode,\n attention_in=peers,\n use_5d_mode=False,\n data_format=data_format)\n\n graph = asn.block_group(\n inputs=combined_inputs,\n filters=structure[i][2],\n block_fn=block_fn,\n blocks=layers[block_number],\n strides=structure[i][4],\n is_training=is_training,\n name='block_group' + str(i),\n block_level=structure[i][0],\n num_frames=num_frames,\n temporal_dilation=structure[i][3],\n data_format=data_format)\n\n streams.append(graph)\n\n outputs = asn.multi_stream_heads(streams,\n grouping[3],\n original_num_frames,\n num_classes,\n data_format)\n\n return outputs", "def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")", "def search_api(request):\n data = ApiViewFilters(request.GET, queryset=ApiView.objects.all())\n return render(request, 'template.html', {'filter': data})", "def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n 
@doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator", "def test_no_op(self):\n request = RequestFactory().get('/?search=&tags=&status=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters, {})", "def parse_filters(func, *args, **kwargs):\n request = args[1]\n filters = request.query_params.get(\"filters\", None)\n\n if not filters:\n return func(*args, **kwargs)\n cleaned_filters = {}\n try:\n filters = json.loads(filters)\n for field, operations in filters.items():\n for operation, value in operations.items():\n cleaned_filters[\"%s__%s\" % (field, operation)] = value\n except (ValueError, AttributeError):\n return Response(\n {\n \"errors\": [\n \"Filters incorrectly formatted. Required format: \"\n \"{'filters': {'fieldname': { 'operation': 'value'}}\"\n ]\n },\n status=400,\n )\n\n try:\n # NOTE(adriant): This feels dirty and unclear, but it works.\n # Positional argument 3 is filters, so we just replace it.\n args = list(args)\n args[2] = cleaned_filters\n return func(*args, **kwargs)\n except FieldError as e:\n return Response({\"errors\": [str(e)]}, status=400)", "async def getQueryFilters(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getQueryFilters()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/query-options/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/query-options/\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", 
"def swagger():\n return jsonify(current_app.spec.to_dict())", "def __init__(self, samples=None):\n\n self.openapi_types = {\n 'samples': List\n }\n\n self.attribute_map = {\n 'samples': 'samples'\n }\n\n self._Samples = Samples", "def parse_filters(func, *args, **kwargs):\n request = args[1]\n filters = request.query_params.get('filters', None)\n\n if not filters:\n return func(*args, **kwargs)\n cleaned_filters = {}\n try:\n filters = json.loads(filters)\n for field, operations in filters.items():\n for operation, value in operations.items():\n cleaned_filters['%s__%s' % (field, operation)] = value\n except (ValueError, AttributeError):\n return Response(\n {'errors': [\n \"Filters incorrectly formatted. Required format: \"\n \"{'filters': {'fieldname': { 'operation': 'value'}}\"\n ]},\n status=400\n )\n\n try:\n # NOTE(adriant): This feels dirty and unclear, but it works.\n # Positional argument 3 is filters, so we just replace it.\n args = list(args)\n args[2] = cleaned_filters\n return func(*args, **kwargs)\n except FieldError as e:\n return Response({'errors': [str(e)]}, status=400)", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def GetModelInference(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def _process_model_like_filter(model, query, filters):\n if query is None:\n return query\n\n if filters:\n for key in sorted(filters):\n column_attr = getattr(model, key)\n if 'property' == type(column_attr).__name__:\n continue\n value = filters[key]\n if not (isinstance(value, (str, int))):\n continue\n query = query.filter(\n column_attr.op('LIKE')(u'%%%s%%' % value))\n return query", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def __init__(self, api_groups=None, attribute_restrictions=None, non_resource_ur_ls=None, resource_names=None, resources=None, verbs=None):\n self.swagger_types = {\n 'api_groups': 'list[str]',\n 'attribute_restrictions': 'RuntimeRawExtension',\n 'non_resource_ur_ls': 'list[str]',\n 'resource_names': 'list[str]',\n 'resources': 'list[str]',\n 'verbs': 'list[str]'\n }\n\n self.attribute_map = {\n 'api_groups': 'apiGroups',\n 'attribute_restrictions': 'attributeRestrictions',\n 'non_resource_ur_ls': 'nonResourceURLs',\n 'resource_names': 'resourceNames',\n 'resources': 'resources',\n 'verbs': 'verbs'\n }\n\n self._api_groups = api_groups\n self._attribute_restrictions = attribute_restrictions\n self._non_resource_ur_ls = non_resource_ur_ls\n self._resource_names = resource_names\n self._resources = resources\n self._verbs = verbs", "def filters(self, filters):\n\n self._filters = filters", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 
'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def filter(self, filters):\r\n # because http.Request needs params to be a dict of strings to strings\r\n # (roughly) and since BitBucket wants repeated parameters to express\r\n # OR, we'll do the quoting by hand ourselves\r\n def flatten_conditions(filters):\r\n for key, val in filters.items():\r\n if isinstance(val, (list, tuple)):\r\n for v in val:\r\n yield (port.to_b(key), port.to_b(v))\r\n else:\r\n yield (port.to_b(key), port.to_b(val))\r\n\r\n to_encode = tuple(flatten_conditions(filters))\r\n qs = port.urlencode(to_encode)\r\n\r\n url = '{0}/?{1}'.format(self.get_url(), qs)\r\n return http.Request('GET', url), parsers.parse_json", "def custom_filter(title):\n\n class Wrapper(admin.FieldListFilter):\n \"\"\"\n custom_filter :: wrapper\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n instance = admin.FieldListFilter.create(*args, **kwargs)\n instance.title = title\n\n return instance\n\n return Wrapper", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def api_field_from_django_field(cls, f, default=CharField):\n if isinstance(f, JSONField):\n return JSONApiField\n \n return super(PandaModelResource, cls).api_field_from_django_field(f, default)", "def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def test_is_pipeline(model):\n assert type(model) == Pipeline", "def test_filter_by_atribute(admin_client, public_resource_with_metadata, private_resource_with_metadata):\n query_filter = {\"availability\": [\"public\"]}\n djangoresponse = 
admin_client.get('/discoverapi/?filter={}'.format(json.dumps(query_filter)), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n short_ids = [x['short_id'] for x in json.loads(response['resources'])]\n assert djangoresponse.status_code == 200\n assert public_resource_with_metadata.short_id in short_ids\n assert private_resource_with_metadata.short_id not in short_ids", "def test_default_filter(self):\n request = RequestFactory().get('/?foo=bar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.data.getlist('status'), ['active', 'paused'])\n self.assertEquals(filter.data.getlist('tags'), ['foo'])\n self.assertEquals(filter.data.getlist('foo'), ['bar'])", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(AreaResource, self).build_filters(filters)\n \n if \"level\" in filters:\n orm_filters[\"layout__level\"] = int(filters[\"level\"])\n \n return orm_filters", "def __init__(self,\n examples: types.Channel,\n model: types.Channel,\n blessing: Optional[types.Channel] = None,\n name: Optional[Text] = None):\n blessing = blessing or types.Channel(\n type=standard_artifacts.ModelBlessing,\n artifacts=[standard_artifacts.ModelBlessing()])\n spec = ModelValidatorSpec(\n examples=examples,\n model=model,\n blessing=blessing)\n super(ModelValidator, self).__init__(spec=spec, name=name)" ]
[ "0.60868007", "0.5681885", "0.54847294", "0.54048103", "0.52722216", "0.52648157", "0.5253178", "0.525117", "0.5230509", "0.5183991", "0.5142966", "0.5128541", "0.5121736", "0.508809", "0.50717574", "0.5034174", "0.5003462", "0.49971294", "0.49885327", "0.4967403", "0.4960733", "0.49457967", "0.49071184", "0.4900119", "0.48781753", "0.48752365", "0.4860993", "0.48507938", "0.4838076", "0.48282456", "0.48219705", "0.4816044", "0.48084906", "0.48042163", "0.48031846", "0.48030466", "0.47970462", "0.47908872", "0.47907233", "0.47897115", "0.47839585", "0.47786912", "0.47695613", "0.47661868", "0.47590464", "0.47590464", "0.47571534", "0.474969", "0.47435808", "0.4738991", "0.4738854", "0.4737496", "0.4737496", "0.4737496", "0.4737496", "0.4737496", "0.47321653", "0.47138837", "0.47110772", "0.47053927", "0.46997574", "0.46904582", "0.4680224", "0.46766227", "0.4667556", "0.4663374", "0.46510628", "0.46498546", "0.46450716", "0.4639857", "0.46338934", "0.46296695", "0.46239236", "0.46238947", "0.46206707", "0.4618885", "0.46152908", "0.46117225", "0.46095395", "0.46054882", "0.45998722", "0.45853817", "0.45812023", "0.45778984", "0.45750648", "0.4572183", "0.45711887", "0.4569598", "0.45692232", "0.4563737", "0.45604947", "0.45598", "0.45591882", "0.45591322", "0.4558254", "0.4557864", "0.45461848", "0.45336127", "0.4532889", "0.4530207", "0.4528316" ]
0.0
-1
Sets the node_b of this NetflowFilters.
def node_b(self, node_b):\n self._node_b = node_b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_node(self, b):\n return b == self.__node_b", "def setB(self, b):\n\t\tself.b = int(b)", "def b(self, b):\n\n self._b = b", "def add_bilink(self, nodeport_a, nodeport_b, bilink):", "def set_bias_for_node(node: Node, value: np.ndarray):\n bias = get_bias_for_node(node)\n if bias is None:\n raise Exception('Can\\'t set bias for node {} because node does not have a bias'.format(node.name))\n set_node_value(bias, value)", "def _onSetParameterBIgnoreBounds(self, value):\n self._parameters['b'] = value\n self._logger.info(\"Parameter 'b' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def node_a(self, node_a):\n\n self._node_a = node_a", "def __init__(self, node_b=None, qos_type=None, device_interfaces=None, ports=None, protocol=None, ip_version=None, netflow_devices=None, top=None, app_type=None, nbar_application_names=None, node_a=None, conversation=None, if_names=None, direction=None): # noqa: E501 # noqa: E501\n\n self._node_b = None\n self._qos_type = None\n self._device_interfaces = None\n self._ports = None\n self._protocol = None\n self._ip_version = None\n self._netflow_devices = None\n self._top = None\n self._app_type = None\n self._nbar_application_names = None\n self._node_a = None\n self._conversation = None\n self._if_names = None\n self._direction = None\n self.discriminator = None\n\n if node_b is not None:\n self.node_b = node_b\n if qos_type is not None:\n self.qos_type = qos_type\n if device_interfaces is not None:\n self.device_interfaces = device_interfaces\n if ports is not None:\n self.ports = ports\n if protocol is not None:\n self.protocol = protocol\n if ip_version is not None:\n self.ip_version = ip_version\n if netflow_devices is not None:\n self.netflow_devices = netflow_devices\n if top is not None:\n self.top = top\n if app_type is not None:\n self.app_type = app_type\n if nbar_application_names is not None:\n self.nbar_application_names = nbar_application_names\n if node_a is not None:\n self.node_a = node_a\n if conversation is not None:\n self.conversation = conversation\n if if_names is not None:\n self.if_names = if_names\n if direction is not None:\n self.direction = direction", "def nbf(self, nbf):\n\n self._nbf = nbf", "def setEntityValue(self, b):\n return self._set(entityValue=b)", "def setEntityValue(self, b):\n return self._set(entityValue=b)", "def _onSetParameterB(self, value):\n self._parameters['b'] = min(max(value, self._parameters['lower']), self._parameters['upper']) # Limit at upper and lower\n self._logger.info(\"Parameter ba' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def set_node(self, name, state):\n self.source_net.nodes[name] = state", "def set_node(self, node):\n self.__node = node", "def __init__(self, a_node, b_node, name=None):\n BinaryMatrixOp.__init__(self, a_node, b_node, name)", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def SetActive(self, b):\r\n\r\n self.active = b", "def set_bunit(self, bunit):\n self.bunit = bunit", "def set_bribe(self, bribe_amount):\r\n self.bribe = bribe_amount", "def add_biport(self, node, biport):", "def __init__(self, node_a, node_b):\n self.node_a = node_a\n self.node_b = node_b\n self.base_color = 'blue'\n self.tint_color = 'white'\n self.tint = 0\n self.options = []", "def subject_b(self, subject_b):\n\n self._subject_b = subject_b", "def 
player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating", "def update_b(color, new_b):\n\n color.update_b(new_b)", "def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id", "def apply_to(self, b):\n raise NotImplementedError(\"base class called\")", "def update_node(self, node):\n markov_blanket_vals = tuple(\n [self.state.data[var.index] for var in node.markov_blanket])\n gibbs_dist = self.gibbs_distributions[node][markov_blanket_vals]\n self.state.data[node.index] = gibbs_dist.sample(None)", "def setRevertable(self, b):\n\n self.revertable = b", "def bvh_tree_file(self, bvh_tree_file):\n\n self._bvh_tree_file = bvh_tree_file", "def setBorder(self, b):\n self.border = fn.mkPen(b)\n self.update()", "def node_count(self, node_count):\n\n self._node_count = node_count", "def percent_b(self, percent_b: float):\n\n self._percent_b = percent_b", "def set_blists(self, blists):\n self.blists = blists[:]", "def _set_bias(self):\n # Alternative\n # self.B = self.TPinv.T.matmul(self.Bc)\n self.Bc = self.TP.T.matmul(self.B)", "def node_version(self, node_version):\n\n self._node_version = node_version", "def solve_b(self, sess, x_b_np, y_b_np, fdict=None):\r\n tconfig = self.config.transfer_config\r\n steps = tconfig.ft_optimizer_config.max_train_steps\r\n batch_size = tconfig.ft_optimizer_config.batch_size\r\n rnd = np.random.RandomState(0)\r\n # Re-initialize the fast weights.\r\n # self.reset_b(sess)\r\n if fdict is None:\r\n fdict = {}\r\n if batch_size == -1:\r\n fdict[self.inputs_b] = x_b_np\r\n fdict[self.labels_b] = y_b_np\r\n\r\n # print('solve_b : fdict')\r\n # for k, v in fdict.items():\r\n # print(k)\r\n # print(v)\r\n\r\n cost_b = sess.run(self.cost_b, feed_dict=fdict)\r\n return cost_b", "def set(self, node, value):\n self.val[node] = value", "def setBDTValues(self, NTrees, MinNodeSize, MaxDepth, AdaBoostBeta):\n\n self.BDT_NTrees = NTrees\n self.BDT_MinNodeSize = MinNodeSize\n self.BDT_MaxDepth = MaxDepth\n self.BDT_AdaBoostBeta = AdaBoostBeta", "def set_working_node(self, node):\n self.working_node = node", "def toggle_airborne(self, b=None):\n self.airborne = b if b != None else not self.airborne\n if not self.airborne:\n self.airtime = 0", "def _adjustBlock(self, b):\n raise NotImplementedError", "def setParameterNode(self, parameterNode):\r\n # framework\r\n profbox()\r\n self.parameterNode = parameterNode", "def bx(self, bx):\n\n self._bx = bx", "def bcp_set(self, **kwargs):\n pass", "def set_weights_for_node(node: Node, value: np.ndarray):\n weights = get_weights_for_node(node)\n if weights is None:\n raise Exception('Can\\'t set weights for node {} because node does not have weights'.format(node.name))\n set_node_value(weights, value)", "def nodes(self, nodes_array):\n self.nodes_set = nodes_array", "def set_branch(self, value):\n self.update(value)", "def node_id(self, node_id):\n\n self._node_id = node_id", "def __init__(self, node_a, node_b, id, edge_value=\"null\"):\n self.__node_a = node_a\n self.__node_b = node_b\n self.__edge_value = edge_value\n self.__id = id", "def set_nodeset(self, nodeset):\n self.nodeset = set(nodeset) # overwrite the existing nodeset with the input nodeset\n\n self.__check_validity() # check if graph is valid - throws exception if not", "def __init__(self, b=0, drop_rate=0.5, pretrained=True):\n\n # Calls the super for the nn.Module.\n super(Classifier, self).__init__()\n\n # Sets the drop rate for the dropout layers.\n self.drop_rate = drop_rate\n\n # Loads the EfficientNet encoder.\n if pretrained:\n 
self.encoder = EfficientNet.from_pretrained(f\"efficientnet-b{str(b)}\")\n else:\n self.encoder = EfficientNet.from_name(f\"efficientnet-b{str(b)}\")\n self.encoder_pool = nn.AdaptiveAvgPool2d(1)\n\n # Defines a hidden layer.\n self.hidden = nn.Linear(2560, 512)\n\n # Defines the output layer of the neural network.\n self.classifier = nn.Linear(512, 2)", "def add_node_pairs(self, node_a,node_b):\r\n \r\n if node_b is not None : \r\n self.nodes[node_a].append(node_b)", "def replace_node(self, network_node: Node, node: Node) -> None:\n index = self.network.index(network_node)\n self.network[index] = node", "def swap_nodes(self, a, b):\n if a == b:\n return\n if len(self) < 2:\n return\n\n nodeA = nodeB = None\n curr_node = self._header\n\n while curr_node is not None and not (nodeA and nodeB):\n if curr_node._element == a and not nodeA:\n nodeA = curr_node\n elif curr_node._element == b and not nodeB:\n nodeB = curr_node\n curr_node = curr_node._next\n\n if curr_node is None:\n raise Empty(\"Not in list\")\n\n precessorA = nodeA._prev\n successorA = nodeA._next\n precessorB = nodeB._prev\n successorB = nodeB._next\n\n precessorA._next = successorA._prev = nodeB\n precessorB._next = successorB._prev = nodeA\n\n nodeA._prev, nodeB._prev = nodeB._prev, nodeA._prev\n nodeA._next, nodeB._next = nodeB._next, nodeA._next", "def set_node_status(self, status):\n self._node.status = status", "def set_node_id(self, node_id):\n self._node_id = node_id", "def addBranch(self, value, node):\n self.branches[value] = node", "def node_data(self, node_data):\n\n self._node_data = node_data", "def eval_step_b(self, sess, task_b_data):\n raise NotImplemented()", "def set_node_value(node: Node, value: np.ndarray):\n if node.type != 'Const':\n raise Exception('Can\\'t set value for non-constant node {}'.format(node.name))\n data_type = np.float32\n if node.out_port(0).is_data_type_defined():\n data_type = node.out_port(0).get_data_type()\n node.out_port(0).data.set_value(np.array(value).astype(data_type))", "def player_b_games(self, player_b_games):\n\n self._player_b_games = player_b_games", "def bn(self):\n return self.add_layer(bn)", "def block(self, b):\n return self.flatten(b)", "def beta_channel(self, value):\n self._data[ATTR_BETA_CHANNEL] = bool(value)\n self.save()", "def player_b_name(self, player_b_name):\n\n self._player_b_name = player_b_name", "def __init__(self, input_dim, hidden_dim, bias=True, node_n=48, dtype=float):\n super(GCNGRU_Block, self).__init__()\n self.hidden_dim = hidden_dim\n self.node_n = node_n\n\n self.gc1 = GraphConvolution(input_dim + hidden_dim, 2*self.hidden_dim, node_n=node_n, bias=bias)\n self.gc2 = GraphConvolution(input_dim + hidden_dim, self.hidden_dim, node_n=node_n, bias=bias)", "def mask_propagation(\n cls, node: NNCFNode, graph: NNCFGraph, tensor_processor: Type[NNCFPruningBaseTensorProcessor]\n ) -> None:\n raise NotImplementedError", "def set_boost(self, boost):\r\n self._boost = float(boost)\r\n return self", "def update_b(self, theta, force=False):\n self.b = self.eval_b(self.theta)\n self.b_eval_cnt += 1", "def set_nodes(self, nodes):\n self._drv_nodes = nodes", "def setBuildFromTokens(self, b):\n return self._set(buildFromTokens=b)", "def setBuildFromTokens(self, b):\n return self._set(buildFromTokens=b)", "def setGreater(self,Node):\n self.greater=Node", "def SetButton(self, b):\r\n \r\n self.button = b", "def eval_step_b(self, sess, task_b_data):\r\n fdict = self._prerun(sess, None, task_b_data)\r\n prediction_b, y_b = sess.run([self.prediction_b_all, 
self.labels_b_v_all],\r\n feed_dict=fdict)\r\n\r\n return prediction_b, y_b", "def setParameterNode(self, parameterNode):\n #framework\n profbox()\n self.parameterNode = parameterNode", "def bne(self, arg):\n\n self.pc += arg if not self.p & const.FLAG_ZERO else 0\n self.pc = c_uint16(self.pc).value", "def toggled(self, b):\n self.group.setVisible(b)\n\n for line in (self.rLine, self.gLine, self.bLine):\n line.setVisible(b)\n\n self.parent.image.timeLine.setVisible(not b)", "def set_bytes(self, b):\n if not self._readonly:\n self._bytes = b\n else:\n raise ReadOnlyError(\"This memory element cannot be written to.\")", "def eval_step_b_old_and_new(self, sess, task_b_data):\n raise NotImplemented()", "def brelu(self, x):\n return F.relu(x + self.bias)", "def reset_b(self, sess):\r\n sess.run(self._init_ops)", "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def update_nodes(nodes, bb):\n \n for node in nodes:\n node.set(\"label\", update_bb_string(node.get_attributes()[\"label\"], bb))\n node.set_name(update_node_name(node.get_name(), bb))", "def partial_fusion(self):\n if self.partially_fused:\n return\n\n if self.fully_fused:\n # TODO: we actually can, all we need to do is insert the properly initialized post_bn back\n # init is not trivial, so not implemented for now\n raise NotImplementedError(\"QARepVGGBlock can't be converted to partially fused from fully fused\")\n\n kernel, bias = self._get_equivalent_kernel_bias_for_branches()\n\n self.rbr_reparam.weight.data = kernel\n self.rbr_reparam.bias.data = bias\n\n self.__delattr__(\"branch_3x3\")\n self.__delattr__(\"branch_1x1\")\n if hasattr(self, \"identity\"):\n self.__delattr__(\"identity\")\n if hasattr(self, \"alpha\"):\n self.__delattr__(\"alpha\")\n if hasattr(self, \"id_tensor\"):\n self.__delattr__(\"id_tensor\")\n\n self.partially_fused = True\n self.fully_fused = False", "def set_edge_param(self, key_a, key_b, **kwargs):\n for param_key, param_value in kwargs.items():\n self.vertices[key_a].edges_out[key_b].params[param_key] = param_value", "def append(self, bts_node: BTSNode):\n pass", "def player_b_points(self, player_b_points):\n\n self._player_b_points = player_b_points", "def bv(self, bv):\n\n self._bv = bv", "def set_child(self,b):\n if b.isChecked() == True:\n self.mother = False\n self.child = True\n else:\n self.mother = True\n self.child = False", "def set_bbox(self, bbox):\n self.bbox = bbox", "def bairro(self, bairro):\n self._bairro = bairro", "def __init__(self, boolee):\n\n super(BinaryColor, self).__init__(1)\n self.boolee = bool(boolee)", "def add(self, a, b):\n a, b = (a, b) if a in self.node_id else (b, a)\n target_id = self.node_id[a]\n self.node_id[b] = target_id\n self.groups[target_id] |= set([b])", "def get_b(self):\n return self._b", "def __init__(self, b=0, drop_rate=0.5, pretrained=True):\n\n # Calls the super for the nn.Module.\n super(SelectiveNet, self).__init__()\n\n # Loads the EfficientNet encoder.\n if pretrained:\n self.encoder = EfficientNet.from_pretrained(f\"efficientnet-b{str(b)}\")\n else:\n self.encoder = EfficientNet.from_name(f\"efficientnet-b{str(b)}\")\n self.encoder_pool = nn.AdaptiveAvgPool2d(1)\n\n # Defines a hidden layer.\n self.hidden = nn.Linear(2560, 512)\n\n # Initialises the classifier for generating predictions.\n self.classifier = nn.Linear(512, 2)\n\n # Initialises the selective branch for generating selection scores.\n self.selective_hidden = nn.Linear(512, 512)\n self.selective_batch_norm = nn.BatchNorm1d(512)\n self.selective_regression = 
nn.Linear(512, 1)\n\n # Initialises the auxiliary output used by the model during training.\n self.auxiliary_output = nn.Linear(512, 2)\n\n # Stores the dropout rate in the object.\n self.drop_rate = drop_rate", "def set_node(self, index, node):\r\n self.loc.coord[index] = node", "def set_branch(self, branch):\n if branch in self.valid_branches:\n self.branch = branch\n self.load_settings()\n self.connect()\n else:\n raise Exception('Error BranchConfig: invalid branch')", "def set_state(self, uNodeState, uNewState):\n uNodeState['coincidences'] = uNewState['coincidences']\n uNodeState['temporal_groups'] = uNewState['temporal_groups']\n uNodeState['PCG'] = uNewState['PCG']", "def flipNodeColor(rbnode):\n try:\n if (rbnode is not None):\n if (rbnode['color'] == node.RED):\n rbnode['color'] = node.BLACK\n else:\n rbnode['color'] = node.RED\n except Exception as exp:\n error.reraise(exp, 'RBT:flipNodeColors')" ]
[ "0.61645985", "0.6144432", "0.60478383", "0.5791615", "0.576705", "0.5688763", "0.5581127", "0.5452997", "0.5451288", "0.53915596", "0.53915596", "0.5351376", "0.5198766", "0.5175632", "0.5073717", "0.5065744", "0.50484663", "0.50223225", "0.50185555", "0.50114703", "0.5000813", "0.4966118", "0.49446964", "0.49384293", "0.4902644", "0.48818833", "0.48490784", "0.48289916", "0.4784288", "0.47530612", "0.4751366", "0.47012603", "0.4699706", "0.4685422", "0.4680813", "0.46805158", "0.4658784", "0.4644128", "0.4638441", "0.46336788", "0.46333966", "0.4599557", "0.45975435", "0.45896214", "0.4589527", "0.45814702", "0.45774698", "0.45730573", "0.45701262", "0.45634377", "0.4561738", "0.45396173", "0.45274663", "0.45236534", "0.45206842", "0.45182052", "0.45125824", "0.45033985", "0.44992024", "0.44811493", "0.44803402", "0.44756672", "0.44691542", "0.44557244", "0.44523495", "0.44479734", "0.44408458", "0.44385558", "0.44384804", "0.4434442", "0.4430557", "0.4430557", "0.44305503", "0.44304433", "0.44291955", "0.4427394", "0.44241196", "0.44214687", "0.4413445", "0.4404936", "0.44032365", "0.4401558", "0.43996304", "0.4397979", "0.4397235", "0.43961585", "0.43930447", "0.43923667", "0.43907273", "0.43878424", "0.43807405", "0.43698114", "0.4363411", "0.43618184", "0.4358285", "0.43582085", "0.43559042", "0.435366", "0.43518505", "0.43492848" ]
0.8324408
0
Sets the qos_type of this NetflowFilters.
def qos_type(self, qos_type):\n self._qos_type = qos_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qos(self, qos: int):\n if qos is not None and qos > 2: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be a value less than or equal to `2`\") # noqa: E501\n if qos is not None and qos < 0: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._qos = qos", "def set_qos(self, qos, set_specs_args):\n self._impl.set_qos(qos.id, set_specs_args)\n return self._unify_qos(qos)", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def add_qos(self, qos):\n \n qos_id = qos[\"ovsdb:qos-entries\"][0][\"qos-id\"]\n self.qos_dict[qos_id] = qos", "def change_qos(self, arg, qos):\n\n if isinstance(arg, (list, tuple)):\n for job_id in arg:\n self.change_qos(job_id, qos)\n\n elif isinstance(arg, int):\n cmd = 'update job {} QOS={}'.format(arg, qos)\n self.scontrol(cmd)\n\n elif str(arg).lower() == 'all':\n self._queue = None\n for job_id, attrs in self.queue.items():\n status = attrs[self.QCOL_STATUS].lower()\n if status == 'pd':\n self.change_qos(job_id, qos)\n\n else:\n e = ('Could not change qos of: {} with type {}'\n .format(arg, type(arg)))\n logger.error(e)\n raise ExecutionError(e)", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos_interfaces__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos_interfaces__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def set_qos(self, qos_id, set_specs_args):\n aname = \"cinder_v%s.set_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.set_keys(qos_id,\n set_specs_args)", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos_elements__qos, is_container='container', 
yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos_elements__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def set_qos_stat_type(self, iface, ptype):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def qos(self) -> int:\n return self._qos", "def associate_qos(self, qos_id, vol_type_id):\n url = \"qos-specs/%s/associate\" % qos_id\n url += \"?vol_type_id=%s\" % vol_type_id\n resp, body = self.get(url)\n self.validate_response(schema.associate_qos, resp, body)\n return rest_client.ResponseBody(resp, body)", "def set_qos(self, on_ok):\n self._channel.basic_qos(\n prefetch_count=self._prefetch_count, callback=on_ok)", "def disassociate_qos(self, qos_id, vol_type_id):\n url = \"qos-specs/%s/disassociate\" % qos_id\n url += \"?vol_type_id=%s\" % vol_type_id\n resp, body = self.get(url)\n self.validate_response(schema.disassociate_qos, resp, body)\n return rest_client.ResponseBody(resp, body)", "def resource_type(self):\n return 'qos'", "def set_qos_key(self, qos_id, **kwargs):\n put_body = json.dumps({\"qos_specs\": kwargs})\n resp, body = self.put('qos-specs/%s' % qos_id, put_body)\n body = json.loads(body)\n self.validate_response(schema.set_qos_key, resp, body)\n return rest_client.ResponseBody(resp, body)", "def qos(self):\n if self == SubscribeResult.qos0:\n rv = 0\n elif self == SubscribeResult.qos1:\n rv = 1\n elif self == SubscribeResult.qos2:\n rv = 2\n else:\n raise TypeError()\n\n return rv", "def SetType(self, ct_type):\r\n\r\n self._type = ct_type", "def qos_associate_type(self, qos_specs, vol_type_id):\n aname = \"cinder_v%s.qos_associate_type\" % self.version\n with atomic.ActionTimer(self, aname):\n tuple_res = self._get_client().qos_specs.associate(qos_specs,\n vol_type_id)\n return (tuple_res[0].status_code == 202)", "def qos_associate_type(self, qos_specs, vol_type_id):\n self._impl.qos_associate_type(qos_specs, vol_type_id)\n return self._unify_qos(qos_specs)", "def test_400_enable_qos(self):\n if self._get_openstack_release() >= self.trusty_mitaka:\n unit = self.n_ovs_sentry\n set_default = {'enable-qos': 'False'}\n set_alternate = {'enable-qos': 'True'}\n self.d.configure('neutron-api', set_alternate)\n self._wait_and_check(sleep=60)\n qos_plugin = 'qos'\n config = u._get_config(\n self.neutron_api_sentry, '/etc/neutron/neutron.conf')\n service_plugins = config.get(\n 'DEFAULT',\n 'service_plugins').split(',')\n if qos_plugin not in service_plugins:\n message = \"{} not in service_plugins\".format(qos_plugin)\n amulet.raise_status(amulet.FAIL, msg=message)\n\n config = u._get_config(\n unit,\n '/etc/neutron/plugins/ml2/openvswitch_agent.ini')\n extensions = config.get('agent', 'extensions').split(',')\n if qos_plugin not in extensions:\n message = \"qos not in extensions\"\n amulet.raise_status(amulet.FAIL, msg=message)\n\n 
u.log.debug('Setting QoS back to {}'.format(\n set_default['enable-qos']))\n self.d.configure('neutron-api', set_default)\n self._wait_and_check()\n u.log.debug('OK')", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def get_qos(self, qos_id):\n return self._unify_qos(self._impl.get_qos(qos_id))", "def get_port_qos_rxrate(self, iface, qos):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def qos_disassociate_type(self, qos_specs, vol_type_id):\n aname = \"cinder_v%s.qos_disassociate_type\" % self.version\n with atomic.ActionTimer(self, aname):\n tuple_res = self._get_client().qos_specs.disassociate(qos_specs,\n vol_type_id)\n return (tuple_res[0].status_code == 202)", "def get_qos_rule_type_details(self, rule_type, filters=None):\n if not self._has_neutron_extension('qos'):\n raise exc.OpenStackCloudUnavailableExtension(\n 'QoS extension is not available on target cloud'\n )\n\n if not self._has_neutron_extension('qos-rule-type-details'):\n raise exc.OpenStackCloudUnavailableExtension(\n 'qos-rule-type-details extension is not available '\n 'on target cloud'\n )\n\n return self.network.get_qos_rule_type(rule_type)", "def get_qos(self, qos_id):\n aname = \"cinder_v%s.get_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.get(qos_id)", "def set_type(self, type):\n self._type = type", "def test_qos_specs(self):\n qos = {'maxIOPS': 1000, 'maxBWS': 2048}\n snapshot = fake_snapshot.fake_snapshot_obj(\n self.ctx, **{'volume': self.volume,\n 'provider_id': self.snapshot_id,\n 'volume_size': 8})\n extraspecs = {}\n self.driver._get_volumetype_qos = mock.MagicMock()\n self.driver._get_volumetype_qos.return_value = qos\n self.driver._get_volumetype_extraspecs = mock.MagicMock()\n self.driver._get_volumetype_extraspecs.return_value = extraspecs\n\n props = self.driver.initialize_connection_snapshot(\n snapshot,\n self.connector)\n\n self.assertEqual(1000, int(props['data']['iopsLimit']))\n self.assertEqual(2048, int(props['data']['bandwidthLimit']))", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def set_type(self, type: int):\r\n self.type = type\r\n self.canvas.itemconfig(self.item, image=self._get_image())", "def set_port_data(self, port_ofid, qos):\n \n # qos_id = qos[\"ovsdb:qos-entries\"][0][\"qos-id\"]\n self.port_dict[port_ofid] = qos", "def show_qos(self, qos_id):\n url = \"qos-specs/%s\" % qos_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_qos, resp, body)\n return rest_client.ResponseBody(resp, body)", "def platform_type(self, platform_type):\n self._platform_type = platform_type", "def attr_type(self, attr_type):\n\n self._attr_type = attr_type", "def roof_type(self, roof_type):\n\n self._roof_type = roof_type", "def pressure_type(self, pressure_type):\n\n self._pressure_type = pressure_type", "async def qos(\n self,\n prefetch_size: int = 0,\n prefetch_count: int = 0,\n connection_global: bool = False,\n ):\n await self.channel.basic_qos(\n prefetch_size=prefetch_size,\n prefetch_count=prefetch_count,\n connection_global=connection_global,\n )", "def qos_disassociate_type(self, qos_specs, vol_type_id):\n self._impl.qos_disassociate_type(qos_specs, vol_type_id)\n return self._unify_qos(qos_specs)", "def get_qos(self):\n return self.qos", "def setQualifierType(self, *args):\n return _libsbml.CVTerm_setQualifierType(self, *args)", "def create_qos(self, **kwargs):\n post_body = json.dumps({'qos_specs': kwargs})\n resp, body 
= self.post('qos-specs', post_body)\n body = json.loads(body)\n self.validate_response(schema.show_qos, resp, body)\n return rest_client.ResponseBody(resp, body)", "def event_purpose_category_type(self, event_purpose_category_type):\n\n self._event_purpose_category_type = event_purpose_category_type", "def update_qos(tenant_id, qos_id, new_qos_name=None):\n session = db.get_session()\n try:\n qos = (session.query(network_models_v2.QoS).\n filter_by(tenant_id=tenant_id).\n filter_by(qos_id=qos_id).one())\n if new_qos_name:\n qos[\"qos_name\"] = new_qos_name\n session.merge(qos)\n session.flush()\n return qos\n except exc.NoResultFound:\n raise c_exc.QosNotFound(qos_id=qos_id,\n tenant_id=tenant_id)", "def set_review_type(self, review_type):\n if review_type not in [APPROVE, REQUEST_REVISION]:\n raise RuntimeError(\n \"%s.review_type should be set to either %s or %s, not %s\"\n % (self.__class__.__name__, APPROVE, REQUEST_REVISION, review_type)\n )\n\n index = self.findText(review_type)\n if index != -1:\n self.setCurrentIndex(index)", "def publication_type(self, publication_type):\n\n self._publication_type = publication_type", "def setDistributionType(self, distribution_type):\n self._distribution_type = distribution_type", "def __init__(__self__, *,\n event_type: Optional[pulumi.Input[Sequence[pulumi.Input['FilterEventTypeItem']]]] = None):\n if event_type is not None:\n pulumi.set(__self__, \"event_type\", event_type)", "def remove_qos(tenant_id, qos_id):\n session = db.get_session()\n try:\n qos = (session.query(network_models_v2.QoS).\n filter_by(tenant_id=tenant_id).\n filter_by(qos_id=qos_id).one())\n session.delete(qos)\n session.flush()\n return qos\n except exc.NoResultFound:\n pass", "def list_qos_rule_types(self, filters=None):\n if not self._has_neutron_extension('qos'):\n raise exc.OpenStackCloudUnavailableExtension(\n 'QoS extension is not available on target cloud'\n )\n\n # Translate None from search interface to empty {} for kwargs below\n if not filters:\n filters = {}\n return list(self.network.qos_rule_types(**filters))", "def type(self, type: str):\n\n self._type = type", "def cabletype(self, cabletype):\n\n self._cabletype = cabletype", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\")\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\")\n\n self._type = type", "def discount_type(self, discount_type):\n\n self._discount_type = discount_type", "def mode(self, mode_type: str):\r\n self._mode = mode_type.lower()\r\n self.mode_hist.append(mode_type)\r\n\r\n if self.mode_hist[-2] != mode_type and self._daq:\r\n msg = Message(\"mode\", mode_type, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', 
'@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': 
u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': 
u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, 
u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def del_qos(self, qos_id):\n \n del self.qos_dict[qos_id]", "def type(self, type):\n self._type = type", "def type(self, type):\n self._type = type", "def quality(self, quality):\n\n self._quality = quality", 
"def create_qos_queue(self, body=None):\r\n return self.post(self.qos_queues_path, body=body)", "def fusion_api_update_qos_aggregated_configuration(self, body=None, uri=None, api=None, headers=None):\n param = '/qos-aggregated-configuration'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def set_animal_type(self, type):\n self.__animal_type = type", "def setType(self,newtype):\n\t\tself.type = newtype;", "def show_association_qos(self, qos_id):\n url = \"qos-specs/%s/associations\" % qos_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_association_qos, resp, body)\n return rest_client.ResponseBody(resp, body)" ]
[ "0.69267035", "0.665194", "0.6260876", "0.6227411", "0.61854005", "0.6163835", "0.61467403", "0.6086317", "0.6053018", "0.60085255", "0.5876559", "0.5848173", "0.56665426", "0.56485814", "0.5635002", "0.559807", "0.53704786", "0.52488047", "0.5220447", "0.5210421", "0.5194985", "0.5194985", "0.5189316", "0.5151207", "0.5147622", "0.5135847", "0.50896573", "0.50822896", "0.5080055", "0.50793546", "0.5078152", "0.50558156", "0.5050343", "0.50451326", "0.5040653", "0.5029146", "0.5008852", "0.5002538", "0.5001706", "0.4966876", "0.4904841", "0.48928344", "0.48670027", "0.4859665", "0.48571408", "0.48276332", "0.482127", "0.4803668", "0.47964898", "0.4793705", "0.47868755", "0.47744712", "0.47718507", "0.47718507", "0.47717896", "0.4769012", "0.47476274", "0.47476274", "0.47476274", "0.47382164", "0.47382164", "0.47382164", "0.4731924", "0.47249907", "0.47249907", "0.47245094", "0.47214797", "0.47144705", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096437", "0.47096118", "0.47096118", "0.47096118", "0.4708321", "0.469588", "0.46865728" ]
0.8767736
0
Sets the device_interfaces of this NetflowFilters.
def device_interfaces(self, device_interfaces):\n    self._device_interfaces = device_interfaces
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces", "def netflow_devices(self, netflow_devices):\n\n self._netflow_devices = netflow_devices", "def ifaces(self, ifaces):\n \n self._ifaces = ifaces", "def update_interfaces_config(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n all_devices = devices[\"other_devices\"]\n all_devices.update(devices[\"dpdk_devices\"])\n all_devices.update(devices[\"kernel_devices\"])\n\n current_ifcs = {}\n interfaces = {}\n if \"interfaces\" in node:\n current_ifcs = node[\"interfaces\"]\n if current_ifcs:\n for ifc in current_ifcs.values():\n dvid = ifc[\"pci_address\"]\n if dvid in all_devices:\n VppPCIUtil.vpp_create_interface(\n interfaces, dvid, all_devices[dvid]\n )\n node[\"interfaces\"] = interfaces\n\n self.updateconfig()", "def _set_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interfaces_openconfig_qos_interfaces__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interfaces must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interfaces_openconfig_qos_interfaces__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interfaces_openconfig_qos_elements__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interfaces must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interfaces_openconfig_qos_elements__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interfaces_openconfig_qos__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise 
ValueError({\n 'error-string': \"\"\"interfaces must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interfaces_openconfig_qos__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def devices(self, devices):\n\n self._devices = devices", "def devices(self, devices):\n\n self._devices = devices", "def update_interfaces(self, interfaces):\n for i in interfaces:\n self.update_interface(i)", "def _config_interfaces(self):\n self.interfaces['loopback'] = \"127.0.0.1\"\n self.interfaces['internal'] = \"127.0.0.1\"\n self.interfaces['external'] = \"0.0.0.0\"\n self.interfaces[\"any\"] = \"0.0.0.0\"\n self.interfaces[\"localhost\"] = \"127.0.0.1\"", "def _set_interface(self, cfg, itf):\n self.interface = None\n for i in range(cfg.bNumInterfaces):\n x = cfg[(i,0)]\n if x.bInterfaceNumber == itf:\n self.interface = x\n endpoints = sorted([ep.bEndpointAddress for ep in self.interface])\n self.ep_out, self.ep_in = endpoints[:2]", "def plug_vifs(self, instance, network_info):\n LOG.debug('plug_vifs called for instance', instance=instance)\n try:\n for viface in network_info:\n self.vif_driver.plug(instance, viface)\n self.start_firewall(instance, network_info)\n except Exception as ex:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE('Failed to configure container network'\n ' for %(instance)s: %(ex)s'),\n {'instance': instance.name, 'ex': ex},\n instance=instance)", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_interfaces__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_interfaces__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_elements__qos_interfaces_interface, 
yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_elements__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def setAdaptationInterfaceProperties(self, logicalinterface):\n logicalinterface.setDevice(self.getDevice())\n logicalinterface.setBlade(self.getBlade())\n logicalinterface.setPort(self.getPort())", "def fusion_api_configure_appliance_interfaces(self, body=None, api=None, headers=None):\n return self.interfaces.configure(body, api, headers)", "def get_port_interfaces(self, oid):\n path = '/servers/%s/os-interface' % oid\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('List port interfaces for server %s: %s' % \n (oid, truncate(res)))\n nets = res[0]['interfaceAttachments']\n for item in nets:\n item[u'name'] = None\n return nets", "def interfaces(self):\n if self._interfaces is None:\n self._interfaces = list(x[\"interface\"] for x in self._interfaces_detailed_list())\n\n return self._interfaces", "def 
setNewNativeInterfaceProperties(self, interface):\n interface.setDevice(self)\n # interface.removable = False\n if interface not in self.interfaces:\n self.interfaces.append(interface)\n if interface not in self.logicalinterfaces:\n self.logicalinterfaces.append(interface)", "def set_interface(self, interface):\n if not interface_exists(interface):\n raise ValueError(f\"Interface {interface} is invalid.\")\n self.interface = interface", "def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError", "def devicenodes(self, devicenodes):\n\n self._devicenodes = devicenodes", "def get(self, context, device_id, filters):\n interfaces_obj = dbapi.net_interfaces_get_by_device(\n context, device_id, filters)\n return jsonutils.to_primitive(interfaces_obj), 200, None", "def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]]:\n return pulumi.get(self, \"network_interfaces\")", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def interfaces(self, site_id, element_id, interface_id, data, tenant_id=None, api_version=\"v4.15\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}/elements/{}/interfaces/{}\".format(api_version,\n tenant_id,\n site_id,\n element_id,\n interface_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def add_interface(self, inf):\n self.interfaces[inf] = {'ip': 'unassigned', 'status': 'shutdown', 'connect': ['none', 'none']}", "def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:\n return pulumi.get(self, \"network_interfaces\")", "def set_device(self, device):\n self.device = device", "def plug_vifs(self, instance, network_info):\n for vif in network_info:\n self.vif_driver.plug(instance, vif)", "def plug_vifs(self, instance, network_info):\n for vif in network_info:\n self.vif_driver.plug(instance, vif)", "def add_port_interfaces(self, oid, net_id, fixed_ips=None):\n data = {\n \"interfaceAttachment\": {\n \"net_id\": net_id\n }\n }\n if fixed_ips is not None:\n data[\"interfaceAttachment\"][\"fixed_ips\"] = fixed_ips\n path = '/servers/%s/os-interface' % oid\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Add port interface for server %s: %s' % \n (oid, truncate(res)))\n return res[0]['interfaceAttachment']", "def configure_interfaces(channels=None):\n\n # set default channels, if not defined\n if channels is None:\n channels = [1, 6, 11]\n\n # list for the interfaces that support monitor mode\n # [[phy_id, mon_id, channel_id], ...]\n monitor_list = []\n\n # check all wi-fi interfaces\n for w_id in pyw.winterfaces():\n\n # get the interface info\n # es: Card(phy=0, dev='wlan0', ifindex=2)\n w_card = pyw.getcard(w_id)\n\n # activate the interface\n pyw.up(w_card)\n\n # check if it supports monitor mode\n if 'monitor' in 
pyw.devmodes(w_card):\n\n if pyw.modeget(w_card) == 'monitor':\n # it is already in monitor mode\n m_card = w_card\n\n else:\n m_card = set_monitor(w_card)\n\n if m_card is not None:\n # activate the monitor interface\n pyw.up(m_card)\n\n # set the first unset suitable channel\n for ch in channels:\n is_ch_set = set_channel_verified(m_card, ch)\n if is_ch_set:\n monitor_list.append(['phy{}'.format(m_card.phy), m_card.dev, ch])\n channels.remove(ch)\n break\n\n if len(channels) < 1:\n break\n\n return monitor_list", "def _plug_interface(self, context, tenant_id, net_id, port_id,\n remote_interface_id):\n LOG.debug(_(\"QuantumRestProxyV2: _plug_interface() called\"))\n\n # update attachment on network controller\n try:\n port = super(QuantumRestProxyV2, self).get_port(context, port_id)\n mac = port[\"mac_address\"]\n\n for ip in port[\"fixed_ips\"]:\n if ip.get(\"subnet_id\") is not None:\n subnet = super(QuantumRestProxyV2, self).get_subnet(\n context, ip[\"subnet_id\"])\n gateway = subnet.get(\"gateway_ip\")\n if gateway is not None:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\"network\":\n {\"id\": net_id,\n \"gateway\": gateway,\n }\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n\n if mac is not None:\n resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)\n data = {\"attachment\":\n {\"id\": remote_interface_id,\n \"mac\": mac,\n }\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2:Unable to update remote network: \"\n \"%s\"), e.message)\n raise", "def app_network_interface_list(self, **kwargs):\n return self._get(\n _name=APINames.Application,\n _method=\"networkInterfaceList\",\n response_class=NetworkInterfaceList,\n **kwargs\n )", "def set_device_ids(self, device_ids):\n if not all(isinstance(device_id, str) for device_id in device_ids):\n raise ApiError(\"One or more invalid device IDs\")\n self._update_criteria(\"device.id\", device_ids)\n return self", "def SetWirelessInterface(self, interface):\n print \"setting wireless interface %s\" % (str(interface))\n self.wifi.wireless_interface = noneToBlankString(interface)\n self.wired.wireless_interface = noneToBlankString(interface)\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n config.set(\"Settings\",\"wireless_interface\", interface)\n configfile = open(self.app_conf, \"w\")\n config.write(configfile)", "def adapters(self, adapters):\n self.logger.debug(\"In 'adapters' setter.\")\n\n self._adapters = adapters", "def set_interface(self, ifname):\n \n if not self._slave_dhcp_process is None:\n raise Exception('DhcpClientAlreadyStarted')\n \n self._ifname = ifname", "def iface_config(self, iface, *args, **kwargs):\n if not set(kwargs).issubset({'intf_ip_addr', 'netns', 'adminMode'}):\n raise NotImplementedError(\"Method is not implemented for current kwargs.\")\n if kwargs.get('netns', False):\n # Create network namespaces for current iface\n self.create_namespaces(iface)\n del kwargs['netns']\n if 'intf_ip_addr' in kwargs:\n kwargs['ipAddr'] = \"{}/24\".format(kwargs['intf_ip_addr'])\n if iface in self.namespaces:\n self._lhost.ui.enter_namespace(self.namespaces[iface])\n self._lhost.ui.modify_ports([iface], **kwargs)\n if iface in self.namespaces:\n self._lhost.ui.exit_namespace()", "def set_interface(self, iface):\n\t\tf = os.path.join(self.config_dir, \"iface-%s\" % 
LibvirtFile.TEMPLATE_FILE)\n\t\tself.iface_xml = cziso.fill_template(f, iface=iface)", "def ifaces_init(*ifnames):\n for ifname in ifnames:\n _set_eth_admin_state(ifname, schema.InterfaceState.ABSENT)", "def get_interfaces(self):\n interfaces = _parse_interfaces(self.do('addr', 'show'),\n filters=PHYSICAL_INTERFACES)\n\n interfaces.sort(key=lambda x: x.ifname)\n for i in interfaces:\n if i.ifname not in self.host_mapping:\n generic_name = 'ge%d' % self.next_generic_index\n self.host_mapping[i.ifname] = generic_name\n self.next_generic_index += 1\n\n # change ifname to generic version\n i.ifname = self.host_mapping[i.ifname]\n self.generic_mapping = dict((v, k) for k, v in\n self.host_mapping.iteritems())\n\n return interfaces", "def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_device(devices[0])\n else:\n devices = [torch.device('cpu')]", "def network_adapter_types(self, network_adapter_types):\n\n self._network_adapter_types = network_adapter_types", "def target_interfaces(self):", "def target_interfaces(self):", "def setFilters(self, filters):\n self.__filters = filters", "def remove_interfaces(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n\n tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']\n nat_config = {\"nat_zone\": \"0\"}\n\n for table_name in tables:\n table_dict = config_db.get_table(table_name)\n if table_dict:\n for table_key_name in table_dict:\n if isinstance(table_key_name, str) is False:\n continue\n\n config_db.set_entry(table_name, table_key_name, nat_config)", "def get_net_interfaces():\n import netifaces\n return netifaces.interfaces()", "def ports(self, ports):\n\n self._ports = ports", "def set_interface(interface, name=''):\n if not interface:\n raise ValueError('interface is empty')\n\n global interfaces\n logger.debug('connection_name: \"{}\" -> {}.{}'.format(\n name,\n interface.__module__,\n interface.__class__.__name__\n ))\n interfaces[name] = interface", "def get_ipv4_interfaces(device_name):\n interfaces = {}\n if DEBUG:\n print note + \"Entering into get_ipv4_interfaces function\"\n # Needs to be fixed. 
Get list of interfaces first, then IP addresses, then VLAN, then ACLs\n config_element = nipper_xml.find(\"./report/part/[@ref='CONFIGURATION']\")\n\n for section in config_element.findall('./section'):\n device_item = None\n\n for i in section.get('title').split():\n if device_name == i:\n device_item = device_name\n if DEBUG:\n print \"\\t\" + note + \"Set Device: %s\" % device_name\n\n if device_item is not None:\n interface_element = section.find(\"./section/[@ref='CONFIGURATION.ADDRESSES']/section/\"\n \"[@ref='ADDRESSES.IPV4']\")\n if interface_element is not None:\n headings = []\n items = []\n for heading in interface_element.findall(\"./table/[@title='IPv4 addresses']/headings/heading\"):\n headings.append(heading.text)\n if DEBUG:\n print \"\\t\" + note + \"Set Heading: %s\" % heading.text\n for item in interface_element.findall(\"./table/[@title='IPv4 addresses']/tablebody\"\n \"/tablerow/tablecell/item\"):\n items.append(item.text)\n if DEBUG:\n print \"\\t\" + note + \"Set Item: %s\" % item.text\n i = 0\n interface_id = None\n if DEBUG:\n print \"\\t\" + note + \"Heading List: %s\" % headings\n print \"\\t\" + note + \"Items List: %s\" % items\n for item in items:\n if i > (len(headings) - 1):\n i = 0\n if DEBUG:\n print \"\\t\" + info + \"Heading: %s\\t Item: %s\" % (headings[i], item)\n if i is 0:\n interface_id = item\n interfaces[interface_id] = {}\n interfaces[interface_id].update({headings[i]: item})\n i += 1\n\n interfaces_element = section.find(\"./section/[@ref='CONFIGURATION.INTERFACES']/section/\"\n \"[@ref='ETHINTERFACESLAYER3']\")\n if interfaces_element is not None:\n headings = []\n for heading in interfaces_element.findall(\"./table/[@title='Layer 3 Ethernet Interfaces']\"\n \"/headings/heading\"):\n headings.append(heading.text)\n for tr in interfaces_element.findall(\"./table/[@title='Layer 3 Ethernet Interfaces']\"\n \"/tablebody/tablerow\"):\n items = []\n for i in tr.findall(\"./tablecell/item\"):\n items.append(i.text)\n if 'Zone' in headings:\n interfaces[items[headings.index('Interface')]].update({'Zone': items[headings.index('Zone')]})\n if 'VLAN' in headings:\n interfaces[items[headings.index('Interface')]].update({'VLAN': items[headings.index('VLAN')]})\n if DEBUG:\n print info + \"Interfaces object: \"\n print interfaces\n raw_input(warn + \"Press any key to continue\")\n return interfaces", "def plug_vifs(self, instance, network_info):\n for vif in network_info:\n vcloud_network_utils.plug_vif(\n self._vcenterapi,\n instance,\n vif,\n self.ovsport_info)", "def __set_port_list(self):\n\n self._coms = [str(i.device) for i in sorted(self.ports)]", "def SetWiredInterface(self, interface):\n print \"setting wired interface %s\" % (str(interface))\n self.wired.wired_interface = noneToBlankString(interface)\n self.wifi.wired_interface = noneToBlankString(interface)\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n config.set(\"Settings\",\"wired_interface\", interface)\n config.write(open(self.app_conf, \"w\"))", "def restricted_interfaces_enabled(self, restricted_interfaces_enabled):\n\n self._restricted_interfaces_enabled = restricted_interfaces_enabled", "def set_scanning_filter(self, **kwargs):\n for k, v in kwargs.get(\"filters\", {}).items():\n if k == \"UUIDs\":\n self._filters[k] = Variant(\"as\", v)\n elif k == \"RSSI\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Pathloss\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Transport\":\n self._filters[k] = Variant(\"s\", v)\n elif k == \"DuplicateData\":\n 
self._filters[k] = Variant(\"b\", v)\n elif k == \"Discoverable\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Pattern\":\n self._filters[k] = Variant(\"s\", v)\n else:\n logger.warning(\"Filter '%s' is not currently supported.\" % k)\n\n if \"Transport\" not in self._filters:\n self._filters[\"Transport\"] = Variant(\"s\", \"le\")", "def attach_interface_to_namespace(node, namespace, interface):\n cmd = f\"ip link set {interface} netns {namespace}\"\n\n ret_code, _, stderr = exec_cmd(node, cmd, timeout=5, sudo=True)\n if ret_code != 0:\n raise RuntimeError(f\"Could not attach interface, reason:\\n{stderr}\")\n\n cmd = f\"ip netns exec {namespace} ip link set {interface} up\"\n\n ret_code, _, stderr = exec_cmd(node, cmd, timeout=5, sudo=True)\n if ret_code != 0:\n raise RuntimeError(\n f\"Could not set interface state, reason:\\n{stderr}\"\n )", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = 
YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', 
yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def get_interfaces(self):\n raise NotImplementedError()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['interface'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def plug_vifs(self, instance, network_info):\n raise NotImplementedError()", "def update_ifaces_configs(self):\n # Nothing to be done if no reordering has occurred.\n reordered = self.udev.reordered_devices\n if not reordered:\n return\n\n # Skip if we have already completed this stage\n if self.ifaces_confs:\n return\n\n # Generate candidate list of iface conf files, with\n # associated rule, that need to be processed.\n reordered_files = tuple((r, os.path.join(self.syspaths.ifaces_dir,\n r['from']))\n for r in reordered)\n\n ifaces_confs = self._process_candidate_conf_files(reordered_files)\n\n # Process the main interfaces file, and if it was modified, then\n # include it in the list of interface conf objects to be tracked\n conf = ConfFile(self.syspaths.ifaces_file, self.syspaths)\n conf.replace(self.remap_renamer)\n if conf.dirty:\n ifaces_confs.append(conf)\n\n # At this stage changes have been prepared but are not yet\n # committed to disk\n self._ifaces_confs = ifaces_confs", "def network_interface_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"network_interface_ids\")", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.net_interfaces_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def plug_vifs(self, instance, network_info):\n self._plug_vifs(instance, network_info)", "def set_config(self, existing_l3_interfaces_facts):\n config = self._module.params.get(\"config\")\n want = []\n if config:\n for w in config:\n w.update({\"name\": normalize_interface(w[\"name\"])})\n want.append(remove_empties(w))\n have = deepcopy(existing_l3_interfaces_facts)\n self.init_check_existing(have)\n resp = self.set_state(want, have)\n return to_list(resp)", "def interfaces(self) -> list:\n\t\tinterfaces = os.getenv('APP_INTERFACES', 'mqtt').lower()\n\n\t\t# Parses it into a list.\n\t\treturn re.sub(r'\\ ', '', interfaces).split(',')", "def interfaces(self):\n # TODO: make not a property\n int_list = self.get_interfaces()\n\n # Put loopbacks before physical interfaces\n type_index = {\"loopback\": 0, \"physical\": 1}\n # TODO: extend this based on medium category, etc\n\n int_list = sorted(int_list, key=lambda x: x.id)\n int_list = sorted(int_list, key=lambda x: type_index[x.category])\n return int_list", "def magma_setdevice(dev):\n\n _libmagma.magma_setdevice(dev)", "def setDeviceConfig(self, device_config_dict):\n ip_address = str(device_config_dict[\"IP Address\"])\n port = int(device_config_dict[\"Port No\"])\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # A single string is used for the AF_UNIX address family. A pair (host, port) is used for the\n # AF_INET address family, where host is a string representing either a hostname in Internet domain\n # notation like 'daring.cwi.nl' or an IPv4 address like '100.50.200.5', and port is an integer.\n #E.g., self.sock.connect(('192.168.1.155', 7777)) #raspberry ip = 192.168.1.155 and port = 7777\n self.sock.connect((ip_address, port))\n except socket.error,msg:\n dlg = wx.MessageDialog(None, str(msg), 'Info',wx.OK)\n dlg.ShowModal()\n raise", "def test_interfaces():\n with patch.object(\n salt.utils.network, \"win_interfaces\", MagicMock(return_value=True)\n ):\n assert win_network.interfaces()", "def get_host_interfaces(self, context, host_uuid):\n result = {}\n interfaces = self._get_cgtsclient().iinterface.list(host_uuid)\n for interface in interfaces:\n if interface.networktype != \"data\":\n continue\n providernets = interface.providernetworks\n result[interface.uuid] = {'uuid': interface.uuid,\n 'mtu': interface.imtu,\n 'vlans': '',\n 'network_type': interface.networktype,\n 'providernets': providernets}\n return result", "def list_interfaces(self, instance_name):\n return ['A_VIF']", "def setMAC( self, intf, mac ):\n result = self.cmd( 'ifconfig', intf, 'down' )\n result += self.cmd( 'ifconfig', intf, 'hw', 'ether', mac )\n result += self.cmd( 'ifconfig', intf, 'up' )\n return result", "def updateDeviceManagementInterface(self, serial: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['devices', 'configure', 'managementInterface'],\n 'operation': 'updateDeviceManagementInterface'\n }\n resource = f'/devices/{serial}/managementInterface'\n\n body_params = ['wan1', 'wan2', ]\n payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}\n action = {\n \"resource\": resource,\n \"operation\": \"update\",\n \"body\": payload\n }\n return action", "def all_interfaces():\n max_possible = 128 # arbitrary. raise if needed.\n number_of_bytes = max_possible * 32\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n names = array.array('B', '\\0' * number_of_bytes)\n outbytes = struct.unpack('iL', fcntl.ioctl(\n s.fileno(),\n 0x8912, # SIOCGIFCONF\n struct.pack('iL', number_of_bytes, names.buffer_info()[0])\n ))[0]\n namestr = names.tostring()\n interfaces = {}\n\n for i in range(0, outbytes, 40):\n name = namestr[i:i+16].split('\\0', 1)[0]\n ip = namestr[i+20:i+24]\n interfaces[name] = format_ip(ip)\n return interfaces", "def setSwitchedInterfaceProperties(self, logicalinterface):\n device = self.getDevice()\n peerdevice = logicalinterface.getDevice()\n if device:\n self.logger.debug(\"Set device of interface %s to %s due to switchTo from %s\" \\\n % (logicalinterface.getIdentifier(), device.getIdentifier(), self.getIdentifier()))\n logicalinterface.setDevice(device)\n elif peerdevice:\n self.logger.debug(\"Set device of interface %s to %s due to switchTo from %s\" \\\n % (self.getIdentifier(), peerdevice.getIdentifier(), logicalinterface.getIdentifier()))\n self.setDevice(peerdevice)\n layer = self.getLayer()\n peerlayer = logicalinterface.getLayer()\n if layer:\n self.logger.debug(\"Set layer of interface %s to %s due to switchTo from %s\" \\\n % (logicalinterface.getIdentifier(), layer.getIdentifier(), self.getIdentifier()))\n logicalinterface.setLayer(layer)\n elif peerlayer:\n self.logger.debug(\"Set layer of interface %s to %s due to switchTo from %s\" \\\n % (self.getIdentifier(), peerlayer.getIdentifier(), logicalinterface.getIdentifier()))\n self.setLayer(peerlayer)\n switchmatrix = self.getSwitchMatrix()\n peerswitchmatrix = logicalinterface.getSwitchMatrix()\n if switchmatrix:\n self.logger.debug(\"Set switch matrix of interface %s to %s due to switchTo from %s\" \\\n % (logicalinterface.getIdentifier(), switchmatrix.getIdentifier(), self.getIdentifier()))\n logicalinterface.setSwitchMatrix(switchmatrix)\n elif logicalinterface.getSwitchMatrix():\n self.logger.debug(\"Set switch matrix of interface %s to %s due to switchTo from %s\" \\\n % (self.getIdentifier(), peerswitchmatrix.getIdentifier(), logicalinterface.getIdentifier()))\n self.setSwitchMatrix(peerswitchmatrix)\n # Do not set label; this may not be equal for switch matrices with a swapping capability.", "def _set_lsp_config_exclude_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_name\",lsp_config_exclude_interfaces.lsp_config_exclude_interfaces, yang_name=\"lsp-config-exclude-interfaces\", rest_name=\"lsp-config-exclude-interfaces\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-name', extensions=None), is_container='list', yang_name=\"lsp-config-exclude-interfaces\", rest_name=\"lsp-config-exclude-interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_exclude_interfaces must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_name\",lsp_config_exclude_interfaces.lsp_config_exclude_interfaces, yang_name=\"lsp-config-exclude-interfaces\", rest_name=\"lsp-config-exclude-interfaces\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-name', extensions=None), is_container='list', yang_name=\"lsp-config-exclude-interfaces\", rest_name=\"lsp-config-exclude-interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__lsp_config_exclude_interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def getdevips(self, devs):\r\n\t\tprint \"Try to detect network interface names and default adapter:\"\n\t\tresult = {}\r\n\t\tif (os.name == 'posix') or (os.name == 'mac'):\r\n\t\t\tdefault = 'any'\r\n\t\t\tfor dev in devs:\r\n\t\t\t\ttry:\r\n\t\t\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\t\t\tip = socket.inet_ntoa(fcntl.ioctl(\r\n\t\t\t\t\t\ts.fileno(),\r\n\t\t\t\t\t\t0x8915, # SIOCGIFADDR\r\n\t\t\t\t\t\tstruct.pack('256s', dev[:15])\r\n\t\t\t\t\t)[20:24])\r\n\t\t\t\t\tprint dev, ip\r\n\t\t\t\t\tresult[ip] = dev\r\n\t\t\t\t\tif (default == 'any'): default = dev\r\n\t\t\t\texcept IOError:\r\n\t\t\t\t\tpass\r\n\telif (os.name == 'nt') and (\"wmi\" in imported):\r\n\t\t\tdefault = None\r\n\t\t\tdev_names = {}\r\n\t\t\tif (\"dnet\" in imported):\r\n\t\t\t\tdef store_cb(*data): dnet_devs.append( data )\r\n\t\t\t\tdnet_devs = []\r\n\t\t\t\tdnet.intf().loop(store_cb)\r\n\t\t\t\t#dnet.intf().loop(dnet_devs.append)\r\n\t\t\t\tfor dev in dnet_devs:\r\n\t\t\t\t\tif \"addr\" not in dev[0]: continue\r\n\t\t\t\t\tdev_names[str(dev[0][\"addr\"]).rsplit(\"/\")[0]] = dev[0][\"name\"]\r\n\t\t\tfor interface in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):\r\n\t\t\t\t#print interface\r\n\t\t\t\tdev = \"\\\\Device\\\\NPF_\" + interface.SettingID\r\n\t\t\t\t(ip, info) = interface.IPAddress\r\n\t\t\t\tdesc, mac = interface.Description, interface.MACAddress\r\n\t\t\t\tprint dev, ip, dev_names.get(ip, ''), \"\\n(\", desc, \")\"\r\n\t\t\t\tresult[ip] = dev\r\n\t\t\t\tif not default: default = dev\r\n\t\telse:\t# 'nt' (WITHOUT wmi), 'os2', 'ce', 'java', 'riscos'\r\n\t\t\tdefault = None\r\n\t\t\tprint \"(none)\"\r\n\r\n\t\treturn (result, default)", "def _set_fcoe_intf_ifindex(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"fcoe-intf-ifindex\", rest_name=\"fcoe-intf-ifindex\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fcoe_intf_ifindex must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"fcoe-intf-ifindex\", rest_name=\"fcoe-intf-ifindex\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__fcoe_intf_ifindex = t\n if hasattr(self, '_set'):\n self._set()", "def set_filters(self, filters):\n obj = []\n for fltr in filters:\n obj.append(fltr.jobject)\n javabridge.call(self.jobject, \"setFilters\", \"([Lweka/filters/Filter;)V\", obj)", "def _get_interfaces(self):\n return self.__interfaces", "def _get_interfaces(self):\n return self.__interfaces", "def _get_interfaces(self):\n return self.__interfaces", "def configure_stackwise_virtual_interfaces(device, svl_links):\n # build a list of commands to send\n # Add stackwise-virtual as first element in the list\n # Add domain only if domain argument has been provided\n command_list = []\n for interface, link_id in svl_links.items():\n command_list.append(f'interface {interface}')\n command_list.append(f'stackwise-virtual link {link_id}')\n try:\n output = device.configure(command_list)\n except SubCommandFailure:\n raise SubCommandFailure('Failed to configure stackwise-virtual interfaces')\n return output" ]
[ "0.68550044", "0.63056403", "0.6121295", "0.6114787", "0.60211456", "0.59780586", "0.5934186", "0.58451796", "0.58451796", "0.57590055", "0.5745199", "0.56535774", "0.54265004", "0.5393894", "0.5360232", "0.53313124", "0.53304845", "0.53277284", "0.5235097", "0.51627266", "0.5103836", "0.5072573", "0.50651103", "0.5025339", "0.50048065", "0.50018364", "0.49787417", "0.49764925", "0.49457002", "0.49392918", "0.49241656", "0.48937786", "0.4876594", "0.4876594", "0.48551086", "0.48446572", "0.48406434", "0.48397315", "0.4838599", "0.48290312", "0.48142713", "0.48060298", "0.47930375", "0.47919804", "0.47907615", "0.4781418", "0.47553492", "0.4748775", "0.47305405", "0.47305405", "0.47214672", "0.4712725", "0.4690413", "0.4685942", "0.4683309", "0.4678268", "0.46635145", "0.464186", "0.46350127", "0.4612359", "0.46099314", "0.46088547", "0.45988986", "0.45988986", "0.45988986", "0.45988986", "0.45988986", "0.45988986", "0.4598671", "0.45941576", "0.45941576", "0.45941576", "0.45941576", "0.45941576", "0.45941576", "0.45882192", "0.4587771", "0.45713365", "0.45652178", "0.4560637", "0.45536277", "0.45513305", "0.45493004", "0.45484284", "0.45428348", "0.45340437", "0.45218658", "0.4521172", "0.4520841", "0.4505594", "0.44988802", "0.44947258", "0.44936553", "0.44900867", "0.44875935", "0.44874927", "0.4481431", "0.4481431", "0.4481431", "0.44742662" ]
0.81078243
0
Sets the ports of this NetflowFilters.
def ports(self, ports): self._ports = ports
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_ports(self, ports, **kwargs):\n pass", "def modify_rstp_ports(self, ports, **kwargs):\n pass", "def https_ports(self, https_ports):\n\n self._https_ports = https_ports", "def http_ports(self, http_ports):\n\n self._http_ports = http_ports", "def make_external_ports(self, ports):\n\n self._set_unconnected_ports()\n for ip_name, _ports in ports.items():\n for _port in _ports:\n self._set_port(self._ips[ip_name], _port)", "def setport(self, port):\n self.__port = port", "def modify_mstp_ports(self, ports, instance=0, **kwargs):\n pass", "def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})", "def port(self, port):\n\n self._port = port", "def port(self, port):\n\n self._port = port", "def port(self, port):\n\n self._port = port", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerPortArgs']]]]:\n return pulumi.get(self, \"ports\")", "def ports(self):\n return port.PortCollection(\n self._conn,\n utils.get_sub_resource_path_by(self, \"Ports\"),\n redfish_version=self.redfish_version,\n )", "def port(self, port):\n if port is not None and port > 65535:\n raise ValueError(\"Invalid value for `port`, must be a value less than or equal to `65535`\")\n if port is not None and port < 1:\n raise ValueError(\"Invalid value for `port`, must be a value greater than or equal to `1`\")\n\n self._port = port", "def get_ports(self):\n return self._ports", "def connect_walker_ports(self, port1: Port, port2: Port) -> None:\n self.port_end.req_ports = port1\n self.port_end.req_ports = port2", "def set_ports_pool(self, being: int, end: int):\n self.ports_pool = (being, end)\n return self", "def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']", "def __set_port_list(self):\n\n self._coms = [str(i.device) for i in sorted(self.ports)]", "def set_port(self, party_port) -> None:\n\n self._port = party_port", "def setPort(self, port):\n libxml2mod.xmlURISetPort(self._o, port)", "def netflow_devices(self, netflow_devices):\n\n self._netflow_devices = netflow_devices", "def set_login_port(self, port: int):\n assert 0 < port < 65535\n self.login_udp_port = port\n return self", "def __init__(self, env, name, num_ports):\n self.env = env\n self.ports = [Port(self.env, \"{}-port{}\".format(name, i))\n for i in range(num_ports)]\n self.name = name", "def exposed_ports(self) -> list[\"Port\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"exposedPorts\", _args)\n _ctx = Port(_ctx)._select_multiple(\n _description=\"description\",\n _port=\"port\",\n _protocol=\"protocol\",\n )\n return _ctx.execute_sync(list[Port])", "def port(self, port: int):\n if port is not None and port < 0: # noqa: E501\n raise ValueError(\"Invalid value for `port`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._port = port", "def get_ports(cls):\n return cls._open_ports.copy()", "def port_in(self, port_in):\n\n self._port_in = port_in", "def port_in(self, port_in):\n\n self._port_in = port_in", "def bind_ports(self, ip, ports): #{\n if isinstance(ports, int):\n ports = [ports]\n for p in ports:\n try:\n if p==0:\n port = self.socket.bind_to_random_port(\"tcp://%s\" % ip)\n else:\n self.socket.bind(\"tcp://%s:%i\" % (ip, p))\n port = p\n except zmq.ZMQError:\n # bind raises this if the port is not free\n continue\n except zmq.ZMQBindError:\n # bind_to_random_port raises this if no port could be found\n continue\n else:\n break\n else:\n raise zmq.ZMQBindError('Could not find an available port')\n\n url = 'tcp://%s:%i' % (ip, port)\n self.bound.add(url)\n self._ready = True\n\n return port", "def _set_port(self, ip, port_name):\n\n inst_args = getattr(self, ip.top_name)\n try:\n name = [key for key in inst_args.keys() if key[2:] == port_name][0]\n except IndexError:\n raise ValueError(f'port: \"{port_name}\" does not exist in ip: '\n f'{ip.top_name}')\n sig = inst_args[name]\n sig.name = port_name\n setattr(self, port_name, sig)\n self._ports.append(sig)", "def all_ports(self, **kwargs) -> t.Any:\n\n return tools.all_ports(**kwargs)", "def _ports(self):\n try:\n return self._graph.node[self.node_id][\"_ports\"]\n except KeyError:\n log.debug(\"No interfaces initialised for %s\" % self)\n return", "def port_mapping(self, port_mapping):\n\n self._port_mapping = port_mapping", "def setFilters(self, filters):\n self.__filters = filters", "def _set_unconnected_ports(self):\n for name, ip in self._ips.items():\n count = 0\n inst_args = getattr(self, name)\n ports = ip.get_ports()\n for port in ports:\n full_name = (port_direction_to_prefix(port.direction)\n + port.name)\n if full_name not in inst_args.keys():\n sig = Signal(len(port), name=full_name)\n inst_args[full_name] = sig\n count += 1", "def port1(self, port1):\n\n self._port1 = port1", "def port_list(self):\n return self._port_list", "def set_userside_ports(userside_ports):\n global JUPYTER_PORT, JS9HELPER_PORT, HTTPSERVER_PORT, CARTA_PORT, CARTA_WS_PORT\n JUPYTER_PORT, JS9HELPER_PORT, HTTPSERVER_PORT, CARTA_PORT, CARTA_WS_PORT = userside_ports", "def port_not(self, port_not):\n\n self._port_not = port_not", "def port_not(self, port_not):\n\n self._port_not = port_not", "def create_vlan_ports(self, ports=None, vlans=None, tagged='Tagged'):\n pass", "def _set_port(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(False,port.port, yang_name=\"port\", rest_name=\"port\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name=\"port\", rest_name=\"port\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"port must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(False,port.port, yang_name=\"port\", rest_name=\"port\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name=\"port\", rest_name=\"port\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__port = t\n if hasattr(self, '_set'):\n self._set()", "def port_configure(self,port,**config):\n if not port in self.ports:\n self.ports[port] = {}\n\n for k,v in config.items():\n self.ports[port][k] = v", "def configure_dcbx_pfc(self, ports, **kwargs):\n pass", "def setPortRef(self, *args):\n return _libsbml.Port_setPortRef(self, *args)", "def AssignPorts(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[List[str], None]\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"assignPorts\", payload=payload, response_object=None)", "def configure_dcbx_ets(self, ports, **kwargs):\n pass", "def set_incoming_port(self, nIncomingPort):\n\t\tcall_sdk_function('PrlPortFwd_SetIncomingPort', self.handle, nIncomingPort)", "def set_vports(self, vports_dict: dict) -> str:\n if not vports_dict:\n return\n vports_list = []\n for host in vports_dict[\"virtualList\"]:\n vports_list.append(host[\"ip\"] + \",\" + host[\"inPort\"] +\n \",\" + host[\"outPort\"] + \",\" + host[\"protocol\"])\n return self._req_post(self._URLS['SetVports'], data={\n 'list': '~'.join(vports_list)})", "def configure_dcbx_app(self, ports, **kwargs):\n pass", "def set_input(self, *arg, **kw):\n # Convert arguments into keyword arguments\n for i, a in enumerate(arg):\n kw[str(i)] = a\n\n for name, value in six.iteritems(kw):\n if name not in self._inputs:\n raise ValueError(\"Invalid port name '{0}'\".format(name))\n\n if isinstance(value, Port):\n port = value\n else:\n port = Task.create_source(value).get_output()\n port.connect(self._inputs[name])\n\n self._dirty = True\n return self", "def bind_acl_to_ports(self, acl_name=None, ports=None):\n pass", "def getViewPorts(self):\n return self._viewPorts", "def AssignPorts(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[List[str], None]\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('assignPorts', payload=payload, response_object=None)", "def list_ports(self):\n return self.ironic_client.port.list()", "def filters(self):\n return {\n 'port_channels': port_channels\n }", "def dummy_set_comm_port(port):\n pass", "def _set_nport(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=nport.nport, is_container='container', presence=False, yang_name=\"nport\", rest_name=\"nport\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Adds N port(s) to the PG'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"nport must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=nport.nport, is_container='container', presence=False, yang_name=\"nport\", rest_name=\"nport\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Adds N port(s) to the PG'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__nport = t\n if hasattr(self, '_set'):\n self._set()", "def __init__(\n self,\n user_defined_ports=None,\n telnet_port=None,\n http_port=None,\n https_port=None,\n ftp_port=None,\n tr069_port=None,\n ssh_port=None,\n ):\n self.user_defined_ports = user_defined_ports\n self.telnet_port = telnet_port\n self.http_port = http_port\n self.https_port = https_port\n self.ftp_port = ftp_port\n self.tr069_port = tr069_port\n self.ssh_port = ssh_port", "def __init__(self, ips, ports):\n self._ips = ips\n self._ports = ports\n self._scanner = nmap.PortScanner()", "def setPortRef(self, *args):\n return _libsbml.SBaseRef_setPortRef(self, *args)", "def add_port(cls, port, ser):\n cls._open_ports[port] = ser", "def port_gt(self, port_gt):\n\n self._port_gt = port_gt", "def port_gt(self, port_gt):\n\n self._port_gt = port_gt", "async def set_port(self, port: int) -> None:\n self.port = port\n _LOGGER.info(\"Setting port to %s\", port)\n if self._server:\n self._server.stop()\n await self._start_server()", "def ports(self) -> List[int]:\n if self.head_port:\n return [self.head_port]\n else:\n ports = []\n for replica in self.pod_args['pods'][0]:\n if isinstance(replica.port, list):\n ports.extend(replica.port)\n else:\n ports.append(replica.port)\n return ports", "def portals(self, portals):\n\n self._portals = portals", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def set_flow_control_type(self, ports=None, control_type=None):\n pass", "def get_ports(self) -> tuple:\n raise NotImplementedError", "def _create_vports(self):\n vports = self._api.select_vports()\n imports = []\n for port in self._api.config.ports:\n if port.name not in vports.keys():\n index = len(vports) + len(imports) + 1\n imports.append({\n 'xpath': '/vport[%i]' % index,\n 'name': port.name,\n 'rxMode': 'captureAndMeasure',\n 'txMode': 'interleaved'\n })\n self._import(imports)\n for name, vport in self._api.select_vports().items():\n self._api.ixn_objects[name] = vport['href']", "def forward_ports(app, ports,ip):\n for p in ports[0:(len(ports)-1)]:\n\tprint p\n os.system('iptables -t nat -A PREROUTING -i eth0 -p tcp --dport %d -j DNAT --to %s:%d' % (p, ip, p))\n # the last port in ports is for remote access on 22 of LXC\n os.system('iptables -t nat -A PREROUTING -i eth0 -p tcp --dport %d -j DNAT --to %s:22' % (ports[len(ports)-1], ip))\n print \"Done port forwarding.\"", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def connect_icache(self, port: Port) -> None:\n self.port_end.req_ports = port", "def set_redirect_port(self, nRedirectPort):\n\t\tcall_sdk_function('PrlPortFwd_SetRedirectPort', self.handle, nRedirectPort)", "def port(self, portNumber, factory, interface):\n p = Port(portNumber, factory, interface=interface)\n p.startListening()\n self.ports.append(p)\n return p", "def add_port(self, port):\n self._ports.add(port)", "def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def __init__(__self__, *,\n from_port: pulumi.Input[int],\n to_port: pulumi.Input[int]):\n pulumi.set(__self__, \"from_port\", from_port)\n pulumi.set(__self__, \"to_port\", to_port)", "def set_port(self, port, zero_it=True): ###\n if port == 0:\n raise ValueError(\"LPT ports are numbered 1, 2 and 3\")\n elif port == 1:\n port = 0x3BC\n elif port == 2:\n port = 0x378\n elif port == 3:\n port = 0x278\n\n self.port = port\n if self.port != None:\n self.port = int(self.port)\n if self.verbose:\n print(\"LPT address is set to 0x%03X\" % self.port)\n if zero_it:\n self.state(0)", "def server_port(self, server_port):\n\n self._server_port = server_port", "def port2(self, port2):\n\n self._port2 = port2", "def configure_dcbx_cn(self, ports, **kwargs):\n pass", "def port_lte(self, port_lte):\n\n self._port_lte = port_lte", "def port_lte(self, port_lte):\n\n self._port_lte = port_lte", "def hdr_ports(self, val):\n assert val < 4\n self._hdr = (self._hdr & ~APv6Udp.HDR_PORTS_MASK) | ((val & 0b11) << APv6Udp.HDR_PORTS_SHIFT)", "def create_port(self, fields=None, filters=None, mask=None, entity=None):\n entity = entity or self.entity.type.name\n fields = self.fields if fields is None else fields\n parameters = {k: None for k in self.context_types.input.rows}\n port = formfield.to_port(\n entity, fields,\n filters=filters,\n mask=mask,\n parameters=parameters,\n db=self.db)\n port = annotate_port(self.domain, port)\n return port", "def incoming_connections_ports(self) -> Sequence[str]:\n return pulumi.get(self, \"incoming_connections_ports\")", "def port_gte(self, port_gte):\n\n self._port_gte = port_gte", "def port_gte(self, port_gte):\n\n self._port_gte = port_gte", "def create_port(self, body=None):\r\n return self.post(self.ports_path, body=body)", "def patch_ports(cls, pair):\n if pair[0] in ('remote_port', 'local_port'):\n return pair[0], pair[1] and int(pair[1]) or None\n return pair", "def pick_port(*ports):\n sockets = []\n\n def find_free_port(port):\n if port:\n return port\n else:\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except OSError as e:\n # [Errno 97] Address family not supported by protocol\n # Likely indicates we are in an IPv6-only environment (BEAM-10618). Try\n # again with AF_INET6.\n if e.errno == 97:\n s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n else:\n raise e\n\n sockets.append(s)\n s.bind(('localhost', 0))\n return s.getsockname()[1]\n\n ports = list(map(find_free_port, ports))\n # Close sockets only now to avoid the same port to be chosen twice\n for s in sockets:\n s.close()\n return ports", "def add_port(self, port):\n self._main_model.add_port(port)", "def set(self, domain, port, path, regproc, protocol):\n self._set_attrs(domain, port, path, regproc, protocol)", "def list_ports(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('ports', self.ports_path, retrieve_all,\r\n **_params)", "def get_ports(self) -> tuple:\n return self._current_dev_manager.get_ports()", "def list_ports(self, filters=None):\n # If pushdown filters are specified and we do not have batched caching\n # enabled, bypass local caching and push down the filters.\n if filters and self._PORT_AGE == 0:\n return self._list_ports(filters)\n\n if (time.time() - self._ports_time) >= self._PORT_AGE:\n # Since we're using cached data anyway, we don't need to\n # have more than one thread actually submit the list\n # ports task. Let the first one submit it while holding\n # a lock, and the non-blocking acquire method will cause\n # subsequent threads to just skip this and use the old\n # data until it succeeds.\n # Initially when we never got data, block to retrieve some data.\n first_run = self._ports is None\n if self._ports_lock.acquire(first_run):\n try:\n if not (first_run and self._ports is not None):\n self._ports = self._list_ports({})\n self._ports_time = time.time()\n finally:\n self._ports_lock.release()\n # Wrap the return with filter_list so that if filters were passed\n # but we were batching/caching and thus always fetching the whole\n # list from the cloud, we still return a filtered list.\n return _utils._filter_list(self._ports, None, filters or {})" ]
[ "0.75998574", "0.6880327", "0.6804348", "0.6704648", "0.65429854", "0.6481354", "0.6428148", "0.61421347", "0.60838145", "0.60838145", "0.60838145", "0.6080848", "0.6070129", "0.60178405", "0.59989095", "0.5934444", "0.59169525", "0.5844672", "0.58260345", "0.5825795", "0.58092695", "0.5793928", "0.5792854", "0.5792121", "0.57907724", "0.57517576", "0.5748377", "0.5669408", "0.5631242", "0.5631242", "0.56156343", "0.5611547", "0.5610101", "0.56072325", "0.559134", "0.5586939", "0.5569657", "0.5561371", "0.55453587", "0.55393696", "0.5534739", "0.5534739", "0.5531219", "0.5526197", "0.55086666", "0.54911053", "0.5489787", "0.54893386", "0.5442844", "0.540348", "0.53849804", "0.53802013", "0.5377759", "0.53768814", "0.5373417", "0.5365748", "0.5348553", "0.53322613", "0.53212005", "0.52962536", "0.52868384", "0.52785397", "0.52774435", "0.5267468", "0.5264245", "0.5264245", "0.52590823", "0.52395374", "0.5225505", "0.52080035", "0.5198163", "0.5197591", "0.51967734", "0.5187909", "0.518584", "0.5163736", "0.51587903", "0.5154481", "0.51536596", "0.5146249", "0.5138474", "0.5136456", "0.5136166", "0.5118614", "0.51181924", "0.51106405", "0.51106405", "0.51038915", "0.5103035", "0.51004857", "0.5075864", "0.5075864", "0.5071941", "0.5068811", "0.50573385", "0.504956", "0.5045255", "0.503256", "0.5029999", "0.5024209" ]
0.8199667
0
Sets the protocol of this NetflowFilters.
def protocol(self, protocol): self._protocol = protocol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switch_protocol(self):\n with self._lock:\n if self.protocol == 'rtmp':\n self._protocol = 'hls'\n else:\n self._protocol = 'rtmp'", "def fill_protocol(self, data):\n self.protocol = get_optional_value(data, self.PROTOCOL, \"http\")\n self.protocol = self.protocol or \"http\"", "def protocol_not(self, protocol_not):\n\n self._protocol_not = protocol_not", "def protocol_in(self, protocol_in):\n\n self._protocol_in = protocol_in", "def privacy_protocol(self, privacy_protocol):\n\n self._privacy_protocol = privacy_protocol", "def auth_protocol(self, auth_protocol):\n\n self._auth_protocol = auth_protocol", "def protocol(self) -> Optional[pulumi.Input[Union[str, 'Protocol']]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> str:\n return self.__parameters.protocol", "def transportprotocol(self, transportprotocol) :\n\t\ttry :\n\t\t\tself._transportprotocol = transportprotocol\n\t\texcept Exception as e:\n\t\t\traise e", "def FlowStatIpProtocol(self):\n\t\treturn self._get_attribute('flowStatIpProtocol')", "def protocol(self):\n raise UnsupportedCall(f\"'{self.__class__.__name__}' object has no attribute 'protocol'\")", "def auth_protocol_not(self, auth_protocol_not):\n\n self._auth_protocol_not = auth_protocol_not", "def registerProtocol(self, protocol):\r\n assert self._protocol is None\r\n verifyObject(IProtocol, protocol)\r\n self._protocol = protocol", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self):\n return self._protocol", "def protocol(self) -> NetworkProtocol:\n if hasattr(self, \"_protocol\"):\n return self._protocol\n _args: list[Arg] = []\n _ctx = self._select(\"protocol\", _args)\n return _ctx.execute_sync(NetworkProtocol)", "def __init__(self, protocol):\r\n self._protocol = protocol", "def privacy_protocol_not(self, privacy_protocol_not):\n\n self._privacy_protocol_not = privacy_protocol_not", "def protocol_id(self, protocol_id):\n self._protocol_id = protocol_id", "def protocol_id(self, protocol_id):\n\n self._protocol_id = protocol_id", "def protocol(self) -> str:\n return pulumi.get(self, \"protocol\")", "def v_protocol(self):\n return self._protocol", "def v_protocol(self):\n return self._protocol", "def protocol_version(self, protocol_version):\n\n self._protocol_version = protocol_version", "def ip_protocol(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"ip_protocol\")", "def protocol(self) -> Optional['ListenerProtocol']:\n return pulumi.get(self, \"protocol\")", "def ip_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_protocol\")", "def protocol(self) -> Optional[pulumi.Input['TargetServerProtocol']]:\n return pulumi.get(self, \"protocol\")", "def protocol(self):\n self._recv_protocol()\n return self._protocol", "def protocol(self) -> Optional[pulumi.Input[Union[str, 'GatewayRouteConfigProtocol']]]:\n return pulumi.get(self, \"protocol\")", "def get_protocol(self):\n if self.ssl:\n return \"https\"\n else:\n return \"http\"", "def test_update_firewall_rule_protocol(self):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.UpdateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--protocol', 'any'],\r\n {'protocol': None, })", "def set_protocol(cls, interface_name, proto='provision'): # pragma: no cover\n if proto not in cls.supported_proto:\n return\n try:\n ret = cls.get_logical_ifname(interface_name, proto)\n if not ret:\n return\n os.system('uci set network.%s.proto=%s' % (ret, proto))\n os.system('uci commit network')\n os.system('/etc/init.d/network reload')\n if proto == cls.supported_proto[1]:\n os.system('sysctl -w net.ipv6.conf.%s.autoconf=0' % interface_name)\n os.system('sysctl -w net.ipv6.conf.%s.use_tempaddr=2' % interface_name)\n cls.logger.debug(\"set %s[%s] DCHP protocol to %s\", interface_name, ret, proto)\n except OSError as e:\n cls.logger.error(\"Got exception:%s\" % str(e))", "def protocol(self):\n return 'https' if self.allow_https and self.is_secure else 'http'", "def select_protocol(self, protocol):\n # Not all probes support sending SWJ sequences.\n assert isinstance(protocol, DebugProbe.Protocol)\n if protocol == DebugProbe.Protocol.SWD:\n self.switch_to_swd()\n elif protocol == DebugProbe.Protocol.JTAG:\n self.switch_to_jtag()\n elif protocol == DebugProbe.Protocol.DEFAULT:\n raise ValueError(\"cannot send SWJ sequence for default protocol\")\n else:\n assert False, \"unhandled protocol %s in SWJSequenceSender\" % protocol", "def unregisterProtocol(self, protocol):\r\n self._protocol = None", "def protocol_not_in(self, protocol_not_in):\n\n self._protocol_not_in = protocol_not_in", "def set_protocol(name):\n\n global global_serializer, global_deserializer\n global_serializer = get_serializer(name)\n global_deserializer = get_deserializer(name)", "def getProtocol(self, _):\r\n return self._protocol", "def protocol(self):\n return self._host[CONF_PROTOCOL]", "def protocol(self):\n ...", "def streaming_protocol(self) -> pulumi.Input[Union[str, 'LiveEventInputProtocol']]:\n return pulumi.get(self, \"streaming_protocol\")", "def protocol(self):\n return helpers.get_protocol()", "def protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"protocols\")", "def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)", "def _set_protocol_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'staticp': {'value': 0}, u'ldp': {'value': 2}, u'rsvp': {'value': 1}},), is_leaf=True, yang_name=\"protocol-type\", rest_name=\"protocol-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-protocol-type', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"protocol_type must be of a type compatible with mpls-protocol-type\"\"\",\n 'defined-type': \"brocade-mpls-operational:mpls-protocol-type\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'staticp': {'value': 0}, u'ldp': {'value': 2}, u'rsvp': {'value': 1}},), is_leaf=True, yang_name=\"protocol-type\", rest_name=\"protocol-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-protocol-type', is_config=False)\"\"\",\n })\n\n self.__protocol_type = t\n if hasattr(self, '_set'):\n self._set()", "def get_http_protocol(self):\n if self.cfg.ssl:\n return \"https\"\n else:\n return \"http\"", "def protocol(self):\n\n raise NotImplementedError()", "def protocol(self, code: str) -> str:\n return 'https'", "def protocol(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self):\n return self._config[\"security.protocol\"]", "def auth_protocol_in(self, auth_protocol_in):\n\n self._auth_protocol_in = auth_protocol_in", "def FlowAggregatedStatIpProtocol(self):\n\t\treturn self._get_attribute('flowAggregatedStatIpProtocol')", "def protocol(self) -> typing.Optional[\"RedirectProtocol\"]:\n return self._values.get('protocol')", "def protocol(self) -> typing.Optional[\"RedirectProtocol\"]:\n return self._values.get('protocol')", "def get_protocols(self):\r\n\r\n return None", "def trafficProtocol(self):\n #\n # TODO: Reimplement this if possible.\n #\n return client.trafficProtocol(self)", "def setProtocolOptions(self,\n version=None,\n utf8validateIncoming=None,\n acceptMaskedServerFrames=None,\n maskClientFrames=None,\n applyMask=None,\n maxFramePayloadSize=None,\n maxMessagePayloadSize=None,\n autoFragmentSize=None,\n failByDrop=None,\n echoCloseCodeReason=None,\n serverConnectionDropTimeout=None,\n openHandshakeTimeout=None,\n closeHandshakeTimeout=None,\n tcpNoDelay=None,\n perMessageCompressionOffers=None,\n perMessageCompressionAccept=None,\n autoPingInterval=None,\n autoPingTimeout=None,\n autoPingSize=None):", "def setProtocolOptions(self,\n versions=None,\n webStatus=None,\n utf8validateIncoming=None,\n maskServerFrames=None,\n requireMaskedClientFrames=None,\n applyMask=None,\n maxFramePayloadSize=None,\n maxMessagePayloadSize=None,\n autoFragmentSize=None,\n failByDrop=None,\n echoCloseCodeReason=None,\n openHandshakeTimeout=None,\n closeHandshakeTimeout=None,\n tcpNoDelay=None,\n perMessageCompressionAccept=None,\n autoPingInterval=None,\n autoPingTimeout=None,\n autoPingSize=None,\n serveFlashSocketPolicy=None,\n flashSocketPolicy=None,\n allowedOrigins=None,\n allowNullOrigin=False,\n maxConnections=None,\n trustXForwardedFor=0):", "def protocols(self) -> Optional[pulumi.Input['ServiceProtocolsArgs']]:\n return pulumi.get(self, \"protocols\")", "def protocols(self) -> Optional[pulumi.Input['ServiceProtocolsArgs']]:\n return pulumi.get(self, \"protocols\")", "def protocol_type(self, value):\n self._write(MX_PROTOCOL_TYPE, value)", "def protocols(self):\n if self._protocols is None:\n uri = \"/loadbalancers/protocols\"\n resp, body = self.method_get(uri)\n self._protocols = [proto[\"name\"] for proto in body[\"protocols\"]]\n return self._protocols", "def resetProtocolOptions(self):", "def resetProtocolOptions(self):", "def set(self, domain, port, path, regproc, protocol):\n self._set_attrs(domain, port, path, regproc, protocol)", "def getProtocol(self) -> str:\n ...", "def protocol(self):\n\n if '://' in self.host:\n scheme, host = self.host.split('://', 1)\n return scheme\n elif self.port == 21:\n return 'ftp'\n elif self.port == 22:\n return 'sftp'\n elif self.port == 990:\n return 'ftps'\n else:\n # Uncertain, assume FTP.\n return 'ftp'", "def privacy_protocol_in(self, privacy_protocol_in):\n\n self._privacy_protocol_in = privacy_protocol_in", "def default_protocol(self):\n return \"sftp://\"", "def startProtocol(self):\n self.transport = LossyTransport(self.transport, self.lossPr)\n DatagramProtocol.transport = self.transport", "def startProtocol(self):\n self.transport = LossyTransport(self.transport, self.lossPr)\n\n DatagramProtocol.transport = self.transport", "def proxy_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_protocol\")", "def proxy_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_protocol\")", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"protocol\")", "def backend_protocol(self) -> Optional[pulumi.Input[Union[str, 'BackendProtocol']]]:\n return pulumi.get(self, \"backend_protocol\")", "def set_flow_control_type(self, ports=None, control_type=None):\n pass", "def _get_protocol_type(self):\n return self.__protocol_type", "def protocol(ctx: Context, protocol_specification_path: str):\n _generate_item(ctx, \"protocol\", protocol_specification_path)", "def app_protocol(self):\n if settings.INAPP_REQUIRE_HTTPS:\n return 'https'\n else:\n return 'https' if self.is_https else 'http'", "def unregisterProtocol(self, protocol):\r\n assert protocol in self._protocols\r\n del self._protocols[protocol]\r\n\r\n if not self._protocols:\r\n self.stop()", "def test_protocols_updated(self):\n assert self.connection_config.protocols == {self.new_protocol_id}", "def get_network_protocols(self):\n return self.mycam.devicemgmt.GetNetworkProtocols()", "def get_secgrp_protocol_param ( params ) :\n protocol = params.get( 'type' )\n if protocol:\n protocol = protocol.lower( )\n else :\n protocol = 'tcp'\n\n return protocol", "def transportprotocol(self) :\n\t\ttry :\n\t\t\treturn self._transportprotocol\n\t\texcept Exception as e:\n\t\t\traise e", "def __init__(__self__, *,\n protocol: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None):\n if protocol is not None:\n pulumi.set(__self__, \"protocol\", protocol)\n if url is not None:\n pulumi.set(__self__, \"url\", url)", "def set(self, proto=\"\", **attrs):\n self._setattrs(handle=self.handle, proto=proto, **attrs)", "def protocol(self):\n return self._info.next # pylint: disable=E1101", "def ip_protocol(self) -> str:\n protocol = f\"ipv{self.ip_address.version}\"\n\n log.debug(\"Host %s: IP protocol for paramiko is %s.\", self.host)\n return protocol", "def protocol_name(self):\n self._protocol_name = 'kerberos'\n return self._protocol_name", "def _recv_protocol(self):\n if not self._protocol_recv:\n try:\n data = self._read_bytes(1, timeout=1.0)\n if len(data) == 0:\n self.close()\n raise PipeClosed()\n peer_protocol = struct.unpack('>B', data)[0]\n self._protocol = min(self._protocol or pickle.HIGHEST_PROTOCOL, peer_protocol)\n self._protocol_recv = True\n self._serializer = _PickleSerializer(self._protocol)\n except (OSError, socket.error):\n self.close()\n raise PipeClosed()" ]
[ "0.67306274", "0.66096395", "0.65845597", "0.62627715", "0.6239148", "0.6161418", "0.6159605", "0.605314", "0.5974029", "0.59525305", "0.5931918", "0.59305394", "0.58953136", "0.5892481", "0.5892481", "0.5892481", "0.5892481", "0.5892481", "0.5873497", "0.58330894", "0.576807", "0.57388973", "0.5723781", "0.5700271", "0.56941366", "0.5680959", "0.5680959", "0.5670976", "0.56595737", "0.5644223", "0.5625467", "0.5597638", "0.556794", "0.55645037", "0.55454016", "0.5514749", "0.5510851", "0.5510177", "0.55046123", "0.55014", "0.54914963", "0.54875356", "0.54862666", "0.5448288", "0.5445137", "0.5422055", "0.54131335", "0.5391081", "0.5386604", "0.538402", "0.53673786", "0.53410345", "0.5316977", "0.53167063", "0.53149176", "0.53044367", "0.5284418", "0.52568716", "0.52568716", "0.52557176", "0.52373046", "0.5236397", "0.5224467", "0.5190374", "0.5190374", "0.5188291", "0.5185871", "0.51682144", "0.51682144", "0.51275563", "0.5126919", "0.51234174", "0.51158166", "0.50939816", "0.50738513", "0.50655967", "0.5060802", "0.5060802", "0.50406915", "0.50399786", "0.50399786", "0.5039634", "0.5031717", "0.5021926", "0.50164026", "0.5014972", "0.501354", "0.49940515", "0.49808827", "0.4969908", "0.49588406", "0.49557772", "0.49376753", "0.49374774", "0.49211937", "0.48962173", "0.4891465" ]
0.74954563
3
Sets the ip_version of this NetflowFilters.
def ip_version(self, ip_version): self._ip_version = ip_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vip(self, vip):\n\n self._vip = vip", "def protocol_version(self, protocol_version):\n\n self._protocol_version = protocol_version", "def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n except AttributeError :\n if len(version) == 2 : # 2-tuple\n self.version = version\n else :\n try :\n self.version = [int(p) for p in str(float(version)).split(\".\")]\n except :\n self.version = [int(p) for p in IPP_VERSION.split(\".\")]", "def ip(self, ip):\n\n self._ip = ip", "def ip(self, ip):\n\n self._ip = ip", "def version(self, version):\n if self.local_vars_configuration.client_side_validation and version is None: # noqa: E501\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def flow_encoding_version(self, flow_encoding_version):\n\n self._flow_encoding_version = flow_encoding_version", "def ip(self, ip):\n self._ip = ip\n return self", "def set_ip(self, party_ip) -> None:\n\n self._ip = party_ip", "def version(self, version):\n \n self._version = version", "def ip(self, ip: str):\n\n self._ip = ip", "def node_version(self, node_version):\n\n self._node_version = node_version", "def ip_address(self, ip_address):\n\n self._ip_address = ip_address", "def ip_address(self, ip_address):\n\n self._ip_address = ip_address", "def ip_address(self, ip_address):\n\n self._ip_address = ip_address", "def version(self, version: int):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n self._version = version", "def version(self, version):\n self._version = version", "def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version", "def setIP(self, idx, ip):\n self.ip[int(idx)-1] = ip", "def version(self, version: str):\n\n self._version = version", "def version(self, version: str):\n\n self._version = version", "def version(self, version):\n if self.local_vars_configuration.client_side_validation and version is None: # noqa: E501\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n version is not None and len(version) > 64):\n raise ValueError(\"Invalid value for `version`, length must be less than or equal to `64`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n version is not None and len(version) < 1):\n raise ValueError(\"Invalid value for `version`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._version = version", "def virtual_router_ip(self, virtual_router_ip):\n self._virtual_router_ip = virtual_router_ip", "def hxdp_build_version(self, hxdp_build_version):\n\n self._hxdp_build_version = hxdp_build_version", "def carrier_settings_version(self, carrier_settings_version):\n\n self._carrier_settings_version = carrier_settings_version", "def hypervisor_version(self, hypervisor_version):\n\n self._hypervisor_version = hypervisor_version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def setPackageVersion(self, *args):\n return _libsbml.ISBMLExtensionNamespaces_setPackageVersion(self, *args)", "def ip_gt(self, ip_gt):\n\n self._ip_gt = ip_gt", "def set_version(self, protocol_version):\n self.version = protocol_version\n self.version_bytes = str(protocol_version).encode(\"latin1\")\n self.version_header = self.version_bytes + PROTOCOL_3x_HEADER\n if protocol_version == 3.2: # 3.2 behaves like 3.3 with type_0d\n # self.version = 3.3\n self.dev_type = \"type_0d\"\n elif protocol_version == 3.4:\n self.dev_type = \"v3.4\"", "def serialization_version(self, serialization_version):\n\n self._serialization_version = serialization_version", "def ip_lt(self, ip_lt):\n\n self._ip_lt = ip_lt", "def os_version(self, os_version):\n\n self._os_version = os_version", "def ip_lte(self, ip_lte):\n\n self._ip_lte = ip_lte", "def fw_version(self, fw_version):\n if self.local_vars_configuration.client_side_validation and fw_version is None: # noqa: E501\n raise ValueError(\"Invalid value for `fw_version`, must not be `None`\") # noqa: E501\n\n self._fw_version = fw_version", "def vport(self, vport):\n\n self._vport = vport", "def version(self, version: str):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def app_version_id(self, app_version_id):\n\n self._app_version_id = app_version_id", "def ip_not(self, ip_not):\n\n self._ip_not = ip_not", "def firmware_version(self, firmware_version: str):\n\n self._firmware_version = firmware_version", "def ip_in(self, ip_in):\n\n self._ip_in = ip_in", "def ip_gte(self, ip_gte):\n\n self._ip_gte = ip_gte", "def hx_version(self, hx_version):\n\n self._hx_version = hx_version", "def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")", "def version(self, version):\n self._version = utils.VersionParser().parse(version)", "def avatar_version(self, avatar_version):\n\n self._avatar_version = avatar_version", "def version_name(self, version_name):\n\n self._version_name = version_name", "def update_version(self, version):\n self.version = CPE.escape_for_cpe23_fs(version)", "def set_version(self, version=None, version_fun: Callable[[], str] = None):\n def version_compute():\n fun = version_fun\n if fun is None:\n fun = default_version_hash\n\n if version is None:\n return fun()\n else:\n return version\n\n self.version = version_compute\n return self", "def hdp_version(self, hdp_version):\n\n self._hdp_version = hdp_version", "def policy_target_version(self, policy_target_version):\n\n self._policy_target_version = policy_target_version", "def version_in(self, version_in):\n\n self._version_in = version_in", "def setAddress(self, ip_address):\n # type: (str)->None\n\n self._validator.validate_one(\n 'address', VALID_OPTS['address'], ip_address)\n self._ifAttributes['address'] = ip_address", "def setIpValue(self, ip_id, value): \n v = value\n # Eliminating basic outliers\n if ip_id not in self.dGrph:\n print(ip_id, ' does not exist in graph. Value not set.')\n return\n if ip_id in self.dGrph:\n if self.dGrph[ip_id][0] != 'i':\n print('Cannot set value of any other node. Value not set.')\n return\n \n if value >= 1:\n v = 1\n else:\n v = 0\n \n self.dGrph[ip_id][1] = v", "def host_ip(self, host_ip):\n\n self._host_ip = host_ip", "def __init__(self, ip, mask):\n self.vip = ip\n self.mask = mask", "def set_os_version(self, nVmOsVersion):\n\t\tcall_sdk_function('PrlVmCfg_SetOsVersion', self.handle, nVmOsVersion)", "def ip_address(self, ip_address):\n if ip_address is not None and len(ip_address) > 15:\n raise ValueError(\"Invalid value for `ip_address`, length must be less than or equal to `15`\") # noqa: E501\n\n self._ip_address = ip_address", "def product_version(self, product_version):\n\n self._product_version = product_version", "def ip_whitelist(self, ip_whitelist):\n if self.local_vars_configuration.client_side_validation and ip_whitelist is None: # noqa: E501\n raise ValueError(\"Invalid value for `ip_whitelist`, must not be `None`\") # noqa: E501\n\n self._ip_whitelist = ip_whitelist", "def set_version(self, version, dataset_name=None):\n if dataset_name is None:\n self._version = version\n return self._version\n\n # resolve dataset name\n dataset = self.__getitem__(dataset_name)\n if dataset is None:\n raise KeyError(\"Dataset %s does not exist\" % dataset_name)\n dataset.attrs[\"version\"] = version\n return version", "def define_ip_header(self, version=4, tos=None, ttl=None, proto=None,\n src=None, dst=None):\n\n if version == 4:\n ip_pkt = IP()\n else:\n ip_pkt = IPv6()\n\n if src:\n ip_pkt.src = src\n\n if dst:\n ip_pkt.dst = dst\n\n if tos:\n ip_pkt.tos = tos\n\n if ttl:\n ip_pkt.ttl = ttl\n\n if proto:\n ip_pkt.proto = proto\n\n return ip_pkt", "def version(self, version):\n if version is None:\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n if version is not None and len(version) < 1:\n raise ValueError(\"Invalid value for `version`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._version = version", "def SetVersion(self, addonVersion):\n self._addonVersion = addonVersion", "def set_ip_opt(self, opt, value):\r\n if isinstance(opt, str):\r\n o = globals()[self.ip_opt_prefix+opt]\r\n elif isinstance(opt, list) or isinstance(opt, tuple):\r\n o = globals()[self.ip_opt_prefix+opt[self.v6]]\r\n else:\r\n raise TypeError('opt argument is of wrong type: '+repr(opt))\r\n self.setsockopt(self.ip_proto, o, value)", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def add_ip(self, inf, ip):\n self.interfaces[inf]['ip'] = ip", "def hyperv_host_count(self, hyperv_host_count):\n\n self._hyperv_host_count = hyperv_host_count", "def vm_count(self, vm_count):\n\n self._vm_count = vm_count", "def vm_count(self, vm_count):\n\n self._vm_count = vm_count", "def set_ip(self, ip: str, host_addr: str) -> None:\n self.config[\"linkIp\"] = ip\n self.config[\"ngapIp\"] = ip\n self.config[\"gtpIp\"] = ip", "def version_not(self, version_not):\n\n self._version_not = version_not", "def set_version(self, version):\n\n def update_version(version, filepath):\n with open(filepath, \"r\") as stream:\n contents = stream.read()\n\n new_contents = _fix_contents_version(contents, version)\n assert contents != new_contents\n with open(filepath, \"w\") as stream:\n stream.write(new_contents)\n\n update_version(version, os.path.join(\".\", \"package.json\"))\n update_version(version, os.path.join(\".\", \"src\", \"setup.py\"))\n update_version(\n version, os.path.join(\".\", \"src\", \"robocorp_code\", \"__init__.py\")\n )", "def pipeline_versions(self, pipeline_versions):\n if (self.local_vars_configuration.client_side_validation and\n pipeline_versions is not None and not isinstance(pipeline_versions, int)):\n raise ValueError(\"Parameter `pipeline_versions` must be an integer\") # noqa: E501\n\n self._pipeline_versions = pipeline_versions", "def update_vip(self, vip, body=None):\r\n return self.put(self.vip_path % (vip), body=body)", "def update(self):\n ip = get_ip()\n if ip != self.ip:\n self.ip = ip\n self.ind.set_label(ip)", "def registry_version(self, registry_version):\n\n self._registry_version = registry_version" ]
[ "0.63388056", "0.58770186", "0.5694638", "0.5656582", "0.5656582", "0.56357116", "0.55922115", "0.55871147", "0.5562405", "0.55404204", "0.5522308", "0.55216855", "0.5502596", "0.5502596", "0.5502596", "0.5491824", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54739237", "0.54739237", "0.5411744", "0.53880847", "0.5387309", "0.5387309", "0.53805697", "0.5354861", "0.53097206", "0.5217018", "0.52063066", "0.5170861", "0.5170861", "0.5170861", "0.5157323", "0.51314837", "0.51236016", "0.512241", "0.51113796", "0.5109428", "0.50805163", "0.5079737", "0.50548834", "0.50435114", "0.50374776", "0.5014268", "0.5008908", "0.4990676", "0.49800235", "0.49574375", "0.4956861", "0.49538416", "0.49473533", "0.49361208", "0.492533", "0.49252263", "0.49102154", "0.48707208", "0.4864863", "0.48641056", "0.48594674", "0.48506615", "0.48493358", "0.48416892", "0.48133242", "0.4796161", "0.47869933", "0.47829315", "0.47795796", "0.47677746", "0.47336802", "0.47195074", "0.4708246", "0.47041672", "0.46916178", "0.46652567", "0.46652567", "0.46638855", "0.46604908", "0.46500793", "0.4647262", "0.46422783", "0.4628358", "0.46179077" ]
0.8065355
0
Sets the netflow_devices of this NetflowFilters.
def netflow_devices(self, netflow_devices): self._netflow_devices = netflow_devices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def devices(self, devices):\n\n self._devices = devices", "def devices(self, devices):\n\n self._devices = devices", "def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_device(devices[0])\n else:\n devices = [torch.device('cpu')]", "def device_interfaces(self, device_interfaces):\n\n self._device_interfaces = device_interfaces", "def devicenodes(self, devicenodes):\n\n self._devicenodes = devicenodes", "def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError", "def set_device(num):\n safe_call(backend.get().af_set_device(num))", "def setFilters(self, filters):\n self.__filters = filters", "def set_device(self, device):\n self.device = device", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def set_toggle_devices_enabled(self, track, xclip, ident, value = None):\n for device in track.devices:\n if(hasattr(device, 'parameters')):\n self._parent._device_actions.set_device_on_off(device, track, xclip, ident);", "def configure_devices(self, ports):\n\n new_devices = []\n \n # for each port create a new Device and start the underlying thread\n for p in ports:\n new_device = Device(p)\n self.configured_devices[new_device.id] = new_device\n new_devices.append(new_device)\n new_device.start()\n\n return new_devices", "def flows(self, flows):\n\n self._flows = flows", "def magma_setdevice(dev):\n\n _libmagma.magma_setdevice(dev)", "def set_device_ids(self, device_ids):\n if not all(isinstance(device_id, str) for device_id in device_ids):\n raise ApiError(\"One or more invalid device IDs\")\n self._update_criteria(\"device.id\", device_ids)\n return self", "def _import_devices(self) -> None:\n self._devices.clear()\n\n # Exctract all devices\n for device in self._udev.list_devices():\n # Skip devices without mapping\n if not device.device_node or self.helper.hide_virtual_device(device):\n continue\n self._devices[device.sys_name] = Device.import_udev(device)", "def setup_devices(self, devices):\n \n self.devices = devices\n \n barrier = ReusableBarrier(len(devices))\n lock = Lock()\n aux_dict = {}\n\n for device in devices:\n device.barrier = barrier\n device.global_lock = lock\n for location in device.sensor_data: \n if location not in aux_dict:\n aux_dict[location] = Semaphore() \n \n for device in devices:\n device.device_semaphores = aux_dict\n\n self.setup_master_thread()", "def set_filters(self, filters):\n obj = []\n for fltr in filters:\n obj.append(fltr.jobject)\n javabridge.call(self.jobject, \"setFilters\", \"([Lweka/filters/Filter;)V\", obj)", "def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces", "def modify_devices(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n other_devices = devices[\"other_devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n if other_devices:\n self._modify_other_devices(\n node, other_devices, kernel_devices, dpdk_devices\n )\n\n # Get the devices again for this node\n self._get_device(node)\n devices = node[\"devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = 
devices[\"dpdk_devices\"]\n\n klen = len(kernel_devices)\n if klen > 0:\n print(\"\\nThese devices are safe to be used with VPP.\\n\")\n VppPCIUtil.show_vpp_devices(kernel_devices)\n question = (\n \"\\nWould you like to use any of these \" \"device(s) for VPP [y/N]? \"\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd = {}\n for dit in kernel_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n question = \"Would you like to bind the driver {} for {} [y/N]? \".format(\n driver, dvid\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n logging.debug(\n \"Binding device {} to driver {}\".format(\n dvid, driver\n )\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\n \"Could not bind device {}\".format(dvid)\n )\n dpdk_devices[dvid] = device\n del kernel_devices[dvid]\n\n dlen = len(dpdk_devices)\n if dlen > 0:\n print(\"\\nThese device(s) are already using DPDK.\\n\")\n VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)\n question = \"\\nWould you like to remove any of \"\n question += \"these device(s) [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to remove {} [y/N]? \".format(dvid)\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl[dvid] = device\n for dit in vppdl.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n kernel_devices[dvid] = device\n del dpdk_devices[dvid]\n\n interfaces = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n VppPCIUtil.vpp_create_interface(interfaces, dvid, device)\n node[\"interfaces\"] = interfaces\n\n self._update_auto_config()\n self.updateconfig()", "def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):\n self.num_inference_steps = num_inference_steps\n timesteps = (\n np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)\n .round()[::-1][:-1]\n .copy()\n .astype(np.int64)\n )\n self.timesteps = torch.from_numpy(timesteps).to(device)\n self.model_outputs = [\n None,\n ] * self.config.solver_order\n self.lower_order_nums = 0", "def refresh_port_filters(self, own_devices, other_devices):\n # These data structures are cleared here in order to avoid\n # losing updates occurring during firewall refresh.\n devices_to_refilter = self.devices_to_refilter\n global_refresh_firewall = self.global_refresh_firewall\n self.devices_to_refilter = set()\n self.global_refresh_firewall = False\n LOG.info(_LI(\"Going to refresh for devices: %s.\"),\n len(devices_to_refilter))\n if global_refresh_firewall:\n LOG.info(_LI(\"Refreshing firewall for all filtered devices.\"))\n 
self.firewall.clean_port_filters(other_devices)\n self.refresh_firewall()\n else:\n own_devices = (own_devices & devices_to_refilter)\n other_devices = (other_devices & devices_to_refilter)\n self.firewall.clean_port_filters(other_devices)\n if own_devices:\n LOG.info(_LI(\"Refreshing firewall for %d own devices.\"),\n len(own_devices))\n self.refresh_firewall(own_devices)\n if other_devices:\n LOG.info(_LI(\"Refreshing firewall for %d other devices.\"),\n len(other_devices))\n self.prepare_firewall(other_devices)\n LOG.info(_LI(\"Finished refresh for devices: %s.\"),\n len(devices_to_refilter))", "def ports(self, ports):\n\n self._ports = ports", "def set_devices(sys_device_ids):\n # Set the CUDA_VISIBLE_DEVICES environment variable\n import os\n visible_devices = ''\n for i in sys_device_ids:\n visible_devices += '{}, '.format(i)\n os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices\n # Return wrappers.\n # Models and user defined Variables/Tensors would be transferred to the\n # first device.\n device_id = 0 if len(sys_device_ids) > 0 else -1\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO", "def set_devices_for_ml(sys_device_ids):\n import os\n\n all_ids = []\n for ids in sys_device_ids:\n all_ids += ids\n unique_sys_device_ids = list(set(all_ids))\n unique_sys_device_ids.sort()\n if -1 in unique_sys_device_ids:\n unique_sys_device_ids.remove(-1)\n\n # Set the CUDA_VISIBLE_DEVICES environment variable\n\n visible_devices = ''\n for i in unique_sys_device_ids:\n visible_devices += '{}, '.format(i)\n os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices\n\n # Return wrappers\n\n relative_device_ids = []\n TVTs, TMOs = [], []\n for ids in sys_device_ids:\n relative_ids = []\n for id in ids:\n if id != -1:\n id = find_index(unique_sys_device_ids, id)\n relative_ids.append(id)\n relative_device_ids.append(relative_ids)\n\n # Models and user defined Variables/Tensors would be transferred to the\n # first device.\n TVTs.append(TransferVarTensor(relative_ids[0]))\n TMOs.append(TransferModulesOptims(relative_ids[0]))\n return TVTs, TMOs, relative_device_ids", "def set_device_type(device: str = \"cuda\"):\n DefaultDeviceType._default_device_type = device", "def flowers(self, flowers):\n\n self._flowers = flowers", "def device_count(self, device_count):\n\n self._device_count = device_count", "def initialize_devices(self):\n for k in self.devices:\n dev = self.devices[k]\n print('Starting %s' % dev.properties['name'])\n dev.initialize_driver()\n # print('Error initializing %s' % dev.properties['name'])\n if 'defaults' in dev.properties:\n defaults_file = dev.properties['defaults']\n defaults = from_yaml_to_dict(defaults_file)[dev.properties['name']]\n dev.apply_values(defaults)\n if dev.properties['type'] == 'daq':\n self.daqs[dev.properties['name']] = {'input': [],\n 'output': [],\n 'monitor': [], } # Creates an entry for every different DAQ.", "def __set_port_list(self):\n\n self._coms = [str(i.device) for i in sorted(self.ports)]", "def set_device_rules(self, rules, rule_objs):\n self.logger.debug(\"set_device_rules: rules: {}\".format(rules))\n self._load_device_rules(rules, rule_objs=rule_objs)\n self._determine_cli_command_list()\n self._determine_get_method_list()", "def set_udfs(self):\n\n flowcell_type = self.process.all_inputs()[0].udf.get('Flowcell Type')\n\n for key, val in self.process_settings[flowcell_type].items():\n self.process.udf[key] = val\n self.process.put()\n\n for art in self.artifacts:\n for key, val in 
self.artifact_settings[flowcell_type].items():\n art.udf[key] = val\n art.put()", "def set_device_parameters(request):\n def fin():\n request.cls.device.close()\n request.addfinalizer(fin)\n\n request.cls.driver = ros.ROSDriver\n request.cls.patched_driver = PatchedROSDevice\n request.cls.vendor = 'ros'\n parent_conftest.set_device_parameters(request)", "def __http_update_device_list(self):\n\n # Make sure we are (still) logged in\n self.__login_if_required()\n\n # Fetch all devices from Govee\n req = {\n 'key': '',\n 'transaction': self.__current_milli_time(),\n 'view': 0\n }\n res = self.__http_post(req, '/device/rest/devices/v1/list')\n\n # Response:\n \"\"\"\n {\n \"devices\": [\n {\n \"device\": \"AA:BB:CC:DD:EE:FF:11:22\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"CC:DD:EE:FF:11:22\\\",\\\"bleName\\\":\\\"ihoment_H6159_XXXX\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6159\\\",\\\"device\\\":\\\"AA:BB:CC:DD:EE:FF:11:22\\\",\\\"deviceName\\\":\\\"Kitchen light\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Kitchen light\",\n \"goodsType\": 0,\n \"sku\": \"H6159\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n },\n {\n \"device\": \"A2:B2:C3:D4:E5:F6:77:88\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"C3:D4:E5:F6:77:88\\\",\\\"bleName\\\":\\\"ihoment_H6163_YYYY\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6163\\\",\\\"device\\\":\\\"A2:B2:C3:D4:E5:F6:77:88\\\",\\\"deviceName\\\":\\\"Living room\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Living room\",\n \"goodsType\": 0,\n \"sku\": \"H6163\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n }\n ],\n \"message\": \"\",\n \"status\": 200\n }\n \"\"\"\n\n # Check response status\n if res['status'] != 200:\n raise GoveeException('Govee answered with device list status {}'.format(res['status'])) \n\n for raw_device in res['devices']:\n identifier = raw_device['device']\n sku = raw_device['sku']\n if not identifier or not sku:\n continue\n name = raw_device['deviceName']\n device_settings = json.loads(raw_device['deviceExt']['deviceSettings'])\n device_settings_keys = device_settings.keys()\n if not 'address' in device_settings_keys and not 'topic' in device_settings_keys:\n continue\n topic = device_settings['topic']\n\n if identifier in self.__devices.keys():\n device = self.__devices[identifier]\n device._name = name\n else:\n device_factory = self.__get_device_factory(sku)\n if not device_factory:\n continue\n last_device_data = json.loads(raw_device['deviceExt']['lastDeviceData'])\n if 'online' in last_device_data.keys():\n if last_device_data['online']:\n iot_connected = dev.IotConnectionStatus.ONLINE\n else:\n iot_connected = dev.IotConnectionStatus.OFFLINE\n elif not 'wifiName' in device_settings:\n iot_connected = dev.IotConnectionStatus.NO_IOT\n else:\n iot_connected = dev.IotConnectionStatus.UNKNOWN\n device = device_factory.build(self, identifier, topic, sku, name, 
iot_connected)\n if device:\n self.__devices[identifier] = device\n self.on_new_device(self, device, raw_device)", "def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]", "def set_device_group(self, devicegroup, devices, exclusive=False):\n # TODO: Implement 'exclusive'\n self._logger.debug(\"Set device-group to '%s'\" % devicegroup)\n if issubclass(devices.__class__, base.PanDevice):\n devices = [devices]\n device_refresh_needed = False\n for device in devices:\n if device.serial is None or device.devicegroup is None:\n device_refresh_needed = True\n break\n if device_refresh_needed:\n self.refresh_devices_from_panorama(devices)\n # All devices have serial numbers now, so start setting devicegroup\n for device in devices:\n # If the device was in a group, and that group changed, pull it out of the current group\n if device.devicegroup != devicegroup and \\\n device.devicegroup is not None:\n self._logger.debug(\"Moving device %s out of device-group %s\" % (device.hostname, device.devicegroup))\n self.set_config_changed()\n self.xapi.delete(\n pandevice.XPATH_DEVICE_GROUPS +\n \"/entry[@name='%s']/devices\"\n \"/entry[@name='%s']\"\n % (device.devicegroup, device.serial)\n )\n device.devicegroup = None\n # If assigning device to a new group\n if devicegroup is not None:\n self.set_config_changed()\n self._logger.debug(\"Moving device %s into device-group %s\" % (device.hostname, devicegroup))\n self.xapi.set(\n pandevice.XPATH_DEVICE_GROUPS +\n \"/entry[@name='%s']/devices\" % (devicegroup,),\n \"<entry name='%s'/>\" % (device.serial,)\n )\n device.devicegroup = devicegroup", "def set_cameras(self, cameras):\n self._cameras = cameras[:]", "def set_nodes(self, nodes):\n self._drv_nodes = nodes", "def update_device_list(self):\n\n # Update devices via HTTP request (basic device data - no status)\n self.__http_update_device_list()\n\n # Fetch status for each known device via MQTT\n for gdev in self.__devices.values():\n gdev.request_status()", "def prepare_firewall(self, device_ids):\n LOG.info(_LI(\"Prepare firewall rules for %s ports.\"), len(device_ids))\n self._process_port_set(device_ids)", "def config_device(self, cfg):\n\n\t\tif self.host is not None:\n\t\t\tself.tell(\"Configuring device\")\n\n\t\tself._dev = cfg\n\n\t\tself.do_checklist([])", "def network_adapter_types(self, network_adapter_types):\n\n self._network_adapter_types = network_adapter_types", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []", "def __init__(self, host=\"127.0.0.1\", port=5037):\n self._devices = []", "def set_cloud_environment(\n self,\n vms: List[Dict[str, Union[str, List[str]]]],\n fw_rules: List[Dict[str, str]],\n ):\n # Validate data\n self._validated = False\n self._validate_data(vms, fw_rules)\n\n # Store data\n self._vms = vms\n self._fw_rules = fw_rules\n self._vm_count = len(vms)\n\n # Generate graph\n self._generate_tag_owners()\n self._generate_vertexes()\n self._generate_ribs()", "def set_cuda(self, is_cuda):\n self.is_cuda = is_cuda", "def set_scanning_filter(self, **kwargs):\n for k, v in kwargs.get(\"filters\", {}).items():\n if k == \"UUIDs\":\n self._filters[k] = Variant(\"as\", v)\n elif k == \"RSSI\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Pathloss\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Transport\":\n self._filters[k] = Variant(\"s\", v)\n elif k == \"DuplicateData\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Discoverable\":\n 
self._filters[k] = Variant(\"b\", v)\n elif k == \"Pattern\":\n self._filters[k] = Variant(\"s\", v)\n else:\n logger.warning(\"Filter '%s' is not currently supported.\" % k)\n\n if \"Transport\" not in self._filters:\n self._filters[\"Transport\"] = Variant(\"s\", \"le\")", "def filters(self, filters):\n\n self._filters = filters", "def device(self, device):\n\n self._device = device", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def network_ids(self, network_ids):\n\n self._network_ids = network_ids", "def product_groups(self, product_groups):\n\n self._product_groups = product_groups", "def set_rng_device_and_dtype(\n self, device: torch.device = torch.device('cpu'), dtype: torch.dtype = torch.float32\n ) -> None:\n if self.device != device or self.dtype != dtype:\n self.make_samplers(device, dtype)\n self.device = device\n self.dtype = dtype", "def initialize_devices(self):\n if not self.loaded_devices:\n raise Exception('Devices have to be loaded before being initialized.')\n\n for dev in self.devices:\n d = self.devices[dev]['dev'] # This is the Device instance\n d.initialize_driver()", "def set_device(sys_device_id):\n device_id = -1\n cuda = (sys_device_id != -1)\n if cuda:\n # CUDA_VISIBLE_DEVICE is a list, and device_id is the index of its members.\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = str(sys_device_id)\n device_id = 0\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO", "def _assign_port_to_device(self):\n for i in range(0, len(self.stlink_devices)):\n self.stlink_devices[i]['usb_port'] = self.get_port_from_serial(self.stlink_devices[i]['serial'])", "def _update_all_devices(self):\n self.all_devices = []\n self.all_devices.extend(self.keyboards)\n self.all_devices.extend(self.mice)\n self.all_devices.extend(self.gamepads)\n self.all_devices.extend(self.other_devices)", "def device_class(self, device_class):\n # type: (string_types) -> None\n\n if device_class is not None:\n if not isinstance(device_class, string_types):\n raise TypeError(\"Invalid type for `device_class`, type has to be `string_types`\")\n\n self._device_class = device_class", "def set_channels(self, channels):\n if channels is None:\n self.channels = None\n return\n if isinstance(channels, Channels):\n self.channels = channels.create_channel_group(name='all')\n elif isinstance(channels, ChannelData):\n if not isinstance(channels, ChannelGroup):\n group_class = self.info.get_channel_group_class()\n self.channels = group_class(\n channels, indices=np.arange(channels.size), name='all')\n else:\n self.channels = channels.copy()\n else:\n raise ValueError(f\"Channels must be {Channels} or {ChannelData}, \"\n f\"not {channels}.\")", "def set_device(self, cuda=True):\n if cuda and torch.cuda.is_available():\n self.cuda = True\n self.device = torch.device('cuda')\n else:\n self.cuda = False\n self.device = torch.device('cpu')\n\n if self.verbose:\n if not cuda:\n print('Using CPU device')\n elif not self.cuda:\n print('CUDA is not available. 
Defaulting to CPU device')\n else:\n print('Using CUDA device')\n\n self.encoder.to(self.device)\n self.decoder.to(self.device)\n self.critic.to(self.device)", "def device(self, serial):\n self._devices = []", "def devices(self):\n\n return self.__devices", "def set_net_batch(network, batch_size):\n if trt.__version__[0] >= '7':\n shape = list(network.get_input(0).shape)\n shape[0] = batch_size\n network.get_input(0).shape = shape\n return network", "def devices(self):\n return self._devices", "def devices(self):\n return self._devices", "def devices(self):\n return self._devices", "def devices(self):\n return self._devices", "def load_dev(self, mode='train', devices='abc'):\n if not os.path.exists(self.dev_matrix_h5_path):\n print(self.dev_matrix_h5_path + \"not exists!\")\n sys.exit()\n\n with h5py.File(self.dev_matrix_h5_path, 'r') as f:\n data = []\n label = []\n for device in devices:\n data.append(np.array(f[mode][device]['data'].value))\n label.append(np.array(f[mode][device]['label'].value))\n # concat data and label from multi devices as required, along \"batch\" axis\n\n datas = np.concatenate(data, axis=0)\n labels = np.concatenate(label, axis=0)\n if self.verbose:\n print(\"[LOGGING]: Loading\", mode, devices, \"of shape: \", datas.shape)\n return datas, labels", "def get_devices(self):\n devices = self.get(\"event/device\")", "def list_devices(cls, filters={}):\n return cls.dbdriver.list_devices(filters)", "def load_devices(self, source=None):\n if source is not None:\n return\n init = self.measure['init']\n devices_file = init['devices']\n devices_list = from_yaml_to_devices(devices_file)\n for dev in devices_list:\n self.devices[dev.properties['name']] = dev\n if 'outputs' in dev.properties:\n self.output_devices.append(dev)\n print('Added %s to the experiment' % dev)\n if dev.properties['type'] == \"Rotation Stage\":\n self.rotation_stages.append(dev.properties['name'])", "def refresh_devices(self, devices=(), only_connected=False, expand_vsys=True, include_device_groups=True, add=False):\n logger.debug(self.hostname + \": refresh_devices called\")\n try:\n # Test if devices is iterable\n test_iterable = iter(devices)\n except TypeError:\n # This probably means a single device was passed in, not an iterable.\n # Convert to an iterable with a single item.\n devices = (devices,)\n # Remove None from list of devices\n devices = [x for x in devices if x is not None]\n # Get the list of managed devices\n if only_connected:\n cmd = \"show devices connected\"\n else:\n cmd = \"show devices all\"\n devices_xml = self.op(cmd)\n devices_xml = devices_xml.find(\"result/devices\")\n\n # Filter to only requested devices\n if devices:\n filtered_devices_xml = ET.Element(\"devices\")\n for serial, vsys in [(d.serial, d.vsys) for d in devices]:\n if serial is None:\n continue\n entry = devices_xml.find(\"entry[@name='%s']\" % serial)\n if entry is None:\n raise err.PanDeviceError(\"Can't find device with serial %s attached to Panorama at %s\" %\n (serial, self.hostname))\n multi_vsys = yesno(entry.findtext(\"multi-vsys\"))\n # Create entry if needed\n if filtered_devices_xml.find(\"entry[@name='%s']\" % serial) is None:\n entry_copy = deepcopy(entry)\n # If multivsys firewall with vsys defined, erase all vsys in filtered\n if multi_vsys and vsys != \"shared\" and vsys is not None:\n entry_copy.remove(entry_copy.find(\"vsys\"))\n ET.SubElement(entry_copy, \"vsys\")\n filtered_devices_xml.append(entry_copy)\n # Get specific vsys\n if vsys != \"shared\" and vsys is not None:\n vsys_entry = 
entry.find(\"vsys/entry[@name='%s']\" % vsys)\n if vsys_entry is None:\n raise err.PanDeviceError(\"Can't find device with serial %s and\"\n \" vsys %s attached to Panorama at %s\" %\n (serial, vsys, self.hostname)\n )\n vsys_section = filtered_devices_xml.find(\"entry[@name='%s']/vsys\" % serial)\n vsys_section.append(vsys_entry)\n devices_xml = filtered_devices_xml\n\n # Manipulate devices_xml so each vsys is a separate device\n if expand_vsys:\n original_devices_xml = deepcopy(devices_xml)\n for entry in original_devices_xml:\n multi_vsys = yesno(entry.findtext(\"multi-vsys\"))\n if multi_vsys:\n serial = entry.findtext(\"serial\")\n for vsys_entry in entry.findall(\"vsys/entry\"):\n if vsys_entry.get(\"name\") == \"vsys1\":\n continue\n new_vsys_device = deepcopy(entry)\n new_vsys_device.set(\"name\", serial)\n ET.SubElement(new_vsys_device, \"vsysid\").text = vsys_entry.get(\"name\")\n ET.SubElement(new_vsys_device, \"vsysname\").text = vsys_entry.findtext(\"display-name\")\n devices_xml.append(new_vsys_device)\n\n # Create firewall instances\n firewall_instances = firewall.Firewall.refresh_all_from_xml(devices_xml, refresh_children=not expand_vsys)\n\n if not include_device_groups:\n if add:\n self.removeall(firewall.Firewall)\n self.extend(firewall_instances)\n return firewall_instances\n\n # Create device-groups\n\n # Get the list of device groups\n devicegroup_xml = self.op(\"show devicegroups\")\n devicegroup_xml = devicegroup_xml.find(\"result/devicegroups\")\n\n devicegroup_instances = DeviceGroup.refresh_all_from_xml(devicegroup_xml, refresh_children=False)\n\n for dg in devicegroup_instances:\n dg_serials = [entry.get(\"name\") for entry in devicegroup_xml.findall(\"entry[@name='%s']/devices/entry\" % dg.name)]\n # Find firewall with each serial\n for dg_serial in dg_serials:\n all_dg_vsys = [entry.get(\"name\") for entry in devicegroup_xml.findall(\"entry[@name='%s']/devices/entry[@name='%s']\"\n \"/vsys/entry\" % (dg.name, dg_serial))]\n # Collect the firewall serial entry to get current status information\n fw_entry = devicegroup_xml.find(\"entry[@name='%s']/devices/entry[@name='%s']\" % (dg.name, dg_serial))\n if not all_dg_vsys:\n # This is a single-context firewall\n dg_vsys = \"vsys1\"\n fw = next((x for x in firewall_instances if x.serial == dg_serial and x.vsys == dg_vsys), None)\n if fw is None:\n # It's possible for device-groups to reference a serial/vsys that doesn't exist\n continue\n # Move the firewall to the device-group\n dg.add(fw)\n firewall_instances.remove(fw)\n fw.state.connected = yesno(fw_entry.findtext(\"connected\"))\n fw.state.unsupported_version = yesno(fw_entry.findtext(\"unsupported-version\"))\n fw.state.set_shared_policy_synced(fw_entry.findtext(\"shared-policy-status\"))\n else:\n # This is a multi-context firewall\n for dg_vsys in all_dg_vsys:\n fw = next((x for x in firewall_instances if x.serial == dg_serial and x.vsys == dg_vsys), None)\n if fw is None:\n # It's possible for device-groups to reference a serial/vsys that doesn't exist\n continue\n # Move the firewall to the device-group\n dg.add(fw)\n firewall_instances.remove(fw)\n fw.state.connected = yesno(fw_entry.findtext(\"connected\"))\n fw.state.unsupported_version = yesno(fw_entry.findtext(\"unsupported-version\"))\n fw.state.set_shared_policy_synced(fw_entry.findtext(\"shared-policy-status\"))\n\n if add:\n for dg in devicegroup_instances:\n found_dg = self.find(dg.name, DeviceGroup)\n if found_dg is not None:\n # Move the firewalls to the existing devicegroup\n 
found_dg.removeall(firewall.Firewall)\n found_dg.extend(dg.children)\n else:\n # Devicegroup doesn't exist, add it\n self.add(dg)\n # Add firewalls that are not in devicegroups\n self.removeall(firewall.Firewall)\n self.extend(firewall_instances)\n\n return firewall_instances + devicegroup_instances", "def config_training_instance(self):\n # Compute the average of the gradients main_train_device\n tower_grads = []\n\n # Distribute the model onto available GPUs\n for i in range(self.num_gpus):\n with tf.device(\"/gpu:{}\".format(i)):\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate)\n batch_size_instance = self.batch_size // self.num_gpus\n\n # Split data between GPUs\n inputs_instance = self.inputs[i * batch_size_instance:(i + 1) *\n batch_size_instance]\n labels_instance = self.labels[i * batch_size_instance:(i + 1) *\n batch_size_instance]\n\n logits = self.model(inputs_instance)\n trainable_variables = self.model.trainable_variables\n model_loss = self.construct_loss(logits=logits,\n labels=labels_instance)\n network_regularizers, regularizer_loss, exporter, costs = self.embed_morphnet(\n input_boundary=[inputs_instance.op],\n output_boundary=[logits.op],\n morphnet_regularization_strength=self.\n morphnet_regularization_strength_placeholder,\n morphnet_cost_thresholds=self.morphnet_target_cost_thresholds)\n total_loss = model_loss + regularizer_loss\n\n grads = optimizer.compute_gradients(\n total_loss, var_list=trainable_variables)\n tower_grads.append(grads)\n\n # Usually we would use the first GPU\n if i == 0:\n # Evaluate model (with test logits, for dropout to be disabled)\n self.logits_train_instance = logits\n self.model_loss_train_instance = model_loss\n self.probs_train_instance = tf.nn.softmax(logits)\n self.correct_pred_train_instance = tf.equal(\n tf.argmax(logits, 1), tf.argmax(labels_instance, 1))\n self.accuracy_train_instance = tf.reduce_mean(\n tf.cast(self.correct_pred_train_instance, tf.float32))\n\n self.network_regularizer_train_instance = network_regularizers\n self.regularizer_loss_train_instance = regularizer_loss\n self.total_loss_train_instance = total_loss\n self.exporter_train_instance = exporter\n self.cost_train_instance = costs\n\n # Compute the average of the gradients main_train_device\n with tf.device(self.main_train_device):\n grads = self.average_gradients(tower_grads)\n self.train_op = optimizer.apply_gradients(grads, global_step=None)", "def release_all_devices(cls, devclass: Optional[Type] = None) -> int:\n cls.Lock.acquire()\n try:\n remove_devs = set()\n for devkey in cls.Devices:\n if devclass:\n dev = cls._get_backend_device(cls.Devices[devkey][0])\n if dev is None or not isinstance(dev, devclass):\n continue\n dispose_resources(cls.Devices[devkey][0])\n remove_devs.add(devkey)\n for devkey in remove_devs:\n del cls.Devices[devkey]\n return len(remove_devs)\n finally:\n cls.Lock.release()", "def collectNet(self):\n network = self.options.net\n # net option from the config file is a string\n if isinstance(network, basestring):\n network = [network]\n # in case someone uses 10.0.0.0,192.168.0.1 instead of\n # --net 10.0.0.0 --net 192.168.0.1\n if isinstance(network, (list, tuple)) and \",\" in network[0]:\n network = [n.strip() for n in network[0].split(\",\")]\n count = 0\n devices = []\n if not network:\n network = yield self.config().callRemote(\"getDefaultNetworks\")\n\n if not network:\n self.log.warning(\"No networks configured\")\n defer.returnValue(None)\n\n for net in network:\n try:\n nets = yield 
self.config().callRemote(\n \"getNetworks\", net, self.options.subnets\n )\n if not nets:\n self.log.warning(\"No networks found for %s\", net)\n continue\n ips = yield self.discoverIps(nets)\n devices += ips\n count += len(ips)\n except Exception as ex:\n self.log.exception(\n \"Error performing net discovery on %s: %s\", net, ex\n )\n self.log.info(\"Working on devices: %s\", devices)\n\n foundDevices = []\n for device in devices:\n result = yield self.discoverDevice(\n device, self.options.deviceclass, self.options.productionState\n )\n if result is not None:\n foundDevices.append(result)\n defer.returnValue(foundDevices)", "def setup_networks(self, configs):\n self.__networks = self.setup_components(configs, 'scale_client.networks')", "def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def populate_netdevices(netdevices, engine):\n results = deque()\n for devobj in netdevices:\n id, _, name = devobj['Name'].split(' / ')\n title = devobj.get('nodeName', name)\n status = engine.store_json(id, title, devobj)\n results.append(status)\n\n # True if all store_json() calls returned None\n return set(results) == {None}", "def serial_dev(self, serial_dev):\n self._serial_dev = serial_dev\n return self", "def _SetDeviceSerial(self, device_serial):\n self._device_address = (\"127.0.0.1:%s\" % self._adb_port if\n self._adb_port else \"\")\n self._device_serial = (device_serial if device_serial else\n self._device_address)", "def setup(self, channels):\n self.channels = channels[:]", "def set_device(device: Union[str, torch.device]) -> torch.device:\n err_msg = None\n if isinstance(device, torch.device):\n pass\n elif device == 'auto':\n cuda = torch.cuda.is_available()\n device = torch.device('cuda' if cuda else 'cpu')\n elif device == 'gpu':\n cuda = torch.cuda.is_available()\n if cuda:\n device = torch.device('cuda')\n else:\n err_msg = ('Device set to \"gpu\", but could not access '\n 'any CUDA-enabled GPU. Please make sure that '\n 'a GPU is available and CUDA is installed '\n 'on this machine.')\n elif device == 'cpu':\n device = torch.device('cpu')\n else:\n err_msg = f'Unknown device \"{device}\". Try \"auto\".'\n if err_msg is not None:\n logger = get_logger(__name__, verbose=0)\n logger.error(f'Unknown device \"{device}\". 
Try \"auto\".')\n import sys\n sys.exit(1)\n return device", "def set_classes(self, classes):\n\t\tif type(classes) is not list or len(classes) == 0:\n\t\t\traise TypeError('NNetClassify.set_classes: classes should be a list with a length of at least 1')\n\t\tself.classes = classes", "def init_devices(self):\n self.hp_nb = int(self.rs_nb* self.hp_proportion/(1- self.hp_proportion))\n self.defense_cost = self.hp_nb * self.hp_unit_cost\n rs_devices = [True for i in range(self.rs_nb)] #rs --> True\n hp_devices = [False for i in range(self.hp_nb)] #hp --> False\n self.devices = rs_devices + hp_devices\n shuffle(self.devices)", "def modify_mstp_ports(self, ports, instance=0, **kwargs):\n pass", "def _set_device_parameter(self, target, *parameters, value):\n if self.is_connected:\n key = target.name + \".\" + parameters[0]\n if not key in self._device_parameters:\n for parameter in parameters:\n if not hasattr(target, parameter):\n raise Exception(f\"The instrument {self.name} does not have parameters {parameter}.\")\n target.set(parameter, value)\n self._device_parameters[key] = value\n elif self._device_parameters[key] != value:\n for parameter in parameters:\n target.set(parameter, value)\n self._device_parameters[key] = value\n else:\n raise Exception(f\"There is no connection to the instrument {self.name}.\")", "def __init__(self, connections, dev_cfg):\n super().__init__(connections, dev_cfg)\n\n self.enabled = True\n self.values = parse_values(self, self.connections, [\"ON\", \"OFF\"])\n\n\n self.known_inputs = []\n #grab inputs and register them one by one\n for (conn, subdict) in self.comm.items():\n #grab EnableSrc subdictionary and register\n if IN_ENABLE_SRC in subdict:\n self.connections[conn].register(subdict[IN_ENABLE_SRC], self.on_message)\n #grab InputSrc and register them one by one\n if IN_INPUT in subdict:\n for (param_name, src_list) in subdict[IN_INPUT].items():\n # make sure we got the list containing the InputSrc\n if isinstance(src_list, list):\n for src in src_list:\n def create_msg_handler(conn_src= conn + '_' + src,\n l_conn = conn, l_param_name = param_name,\n l_src = src):\n self.known_inputs.append(conn_src)\n def msg_handler(msg):\n if self.enabled:\n self.process_message(msg, conn_src)\n else:\n self.log.info(\"Actuator is disabled, ignoring command!\")\n self.connections[l_conn].register( {l_param_name : l_src },\n msg_handler)\n create_msg_handler()", "def set_gpu(gpus):\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus", "def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})", "def __init__(self, env, name, num_ports, forwarding_table=None):\n NetworkDevice.__init__(self, env, name, num_ports)\n env.process(self.listen_for_messages(self.forward_messages))\n if forwarding_table is None:\n self.forwarding_table = {}\n else:\n self.forwarding_table = forwarding_table", "def devices(self, query=None):\n if query is not None:\n query = clean(query, self.devices_parameters)\n query = \"?\" + urllib.parse.urlencode(query)\n else:\n query = \"\"\n return self.get(\"/devices\" + query)", "def modify_devices_tags(self, data):\n data = clean(data, self.tags_parameters)\n return self.put(\"/devices/tags\", data)", "def setup_net(self):\n pass", "def network_with_devices():\n new_names = Names()\n new_devices = Devices(new_names)\n new_network = Network(new_names, new_devices)\n\n [SW1_ID, SW2_ID, OR1_ID] = 
new_names.lookup([\"Sw1\", \"Sw2\", \"Or1\"])\n\n # Add devices\n new_devices.make_device(SW1_ID, new_devices.SWITCH, 0)\n new_devices.make_device(SW2_ID, new_devices.SWITCH, 0)\n new_devices.make_device(OR1_ID, new_devices.OR, 2)\n\n return new_network", "def set_cpus(self, num_cpus: int) -> None:\n if self.batch:\n if self.launcher in [\"pbs\", \"cobalt\"]:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n if hasattr(self.batch_settings, \"set_ncpus\"):\n self.batch_settings.set_ncpus(num_cpus)\n if self.launcher == \"slurm\":\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n if hasattr(self.batch_settings, \"set_cpus_per_task\"):\n self.batch_settings.set_cpus_per_task(num_cpus)\n\n for db in self.dbnodes:\n db.run_settings.set_cpus_per_task(num_cpus)\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for mpmd in db.run_settings.mpmd:\n mpmd.set_cpus_per_task(num_cpus)", "def device_type(self, device_type):\n allowed_values = [\"active\", \"inactive\", \"all\"]\n if device_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `device_type` ({0}), must be one of {1}\"\n .format(device_type, allowed_values)\n )\n\n self._device_type = device_type", "async def async_set_filters(self, filters, state_mode):\n if filters not in ON_OFF_LIST:\n return\n self._filter = filters.lower()\n self._state_mode = state_mode\n await self.async_send_cmd()", "def _update_device_types(self):\n device_types = self.adapter.device_types()\n for device_type in device_types.items:\n key = device_type.id\n self._make_up_to_date('/device_types', key, device_type)", "def devices(self):\n return list(self._device_types)", "def get_devices_per_node(self):\n\n for i in self._nodes.items():\n node = i[1]\n # Update the interface data\n\n self._get_device(node)\n\n self.updateconfig()" ]
[ "0.6428995", "0.6428995", "0.5585992", "0.54547", "0.5365216", "0.5246433", "0.51710474", "0.50898916", "0.50661755", "0.5038719", "0.5004575", "0.49756554", "0.4974928", "0.49667272", "0.4923213", "0.48840016", "0.48089606", "0.48036066", "0.47994307", "0.4792203", "0.47901726", "0.4782577", "0.4773169", "0.4769025", "0.47646013", "0.47079116", "0.46879467", "0.46842295", "0.46741813", "0.4669408", "0.46553808", "0.4608002", "0.46060514", "0.45830584", "0.45808038", "0.4573042", "0.4560529", "0.45579633", "0.45570612", "0.45560634", "0.4540685", "0.4539058", "0.4530926", "0.4530926", "0.45146832", "0.45125598", "0.45095572", "0.45090798", "0.4505834", "0.4505425", "0.44832516", "0.44818762", "0.44748285", "0.44705057", "0.4469013", "0.44478425", "0.4417022", "0.4412502", "0.44085225", "0.43985775", "0.43983296", "0.43914813", "0.4382239", "0.4381371", "0.4381371", "0.4381371", "0.4381371", "0.43787235", "0.43621767", "0.43466678", "0.43457147", "0.43456984", "0.43334356", "0.43304116", "0.4305116", "0.4288129", "0.4286651", "0.42765963", "0.42751813", "0.4257212", "0.42333862", "0.42234895", "0.42222977", "0.4219313", "0.42186984", "0.4214467", "0.42085272", "0.42081615", "0.42079145", "0.42066514", "0.4201807", "0.41993472", "0.41928545", "0.41910693", "0.41826662", "0.41744733", "0.41729075", "0.41662467", "0.4160596", "0.41599384" ]
0.86378056
0
Sets the top of this NetflowFilters.
def top(self, top): self._top = top
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bb_top(self, bb_top: float):\n\n self._bb_top = bb_top", "def top(self):\n # Sets our Z value to one.\n self.setZValue(1)\n # Set every colliding items Z value to 0\n for sibling in self.collidingItems():\n sibling.setZValue(0)", "def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))", "def set_top(self,top_name):\n self.top_name = top_name", "def top(self, top):\n self.ptr.top(top)", "def top(self, top):\n # type: (float) -> None\n\n if top is not None:\n if not isinstance(top, (float, int)):\n raise TypeError(\"Invalid type for `top`, type has to be `float`\")\n\n self._top = top", "def top_type(self, top_type):\n\n self._top_type = top_type", "def _set_top(self, user_n, item_n):\n self.user_n = user_n\n self.item_n = item_n", "def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)", "def top_bar(self, top_bar):\n\n self._top_bar = top_bar", "def setTopP(self, value):\n return self._set(topP=value)", "def setTopP(self, value):\n return self._set(topP=value)", "def setTopP(self, value):\n return self._set(topP=value)", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def page_top(self):\n self._pos = 0\n self._display()", "def top_attire_color(self, top_attire_color):\n\n self._top_attire_color = top_attire_color", "def do_top(self, arg):\n if self.curindex == 0:\n self.error('Oldest frame')\n return\n self._select_frame(0)", "def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")", "def draw_top(self):\n return group()", "def page_top(self):\n self._npos = 0\n self.display()", "def margin_top(self, value):\n self._margin_top = value", "def top(self, value):\n\n pass", "def top(self) -> None:\n # We remove ourselves from the list then insert ourselves to the end of the list\n current_index = ALL_WINDOWS.index(self)\n ALL_WINDOWS.pop(current_index)\n ALL_WINDOWS.append(self)", "def top(self):\n return super().peek()", "def _set_top_preps(self) -> None :\n prep_dict = self._system.getPReps(1, 20)\n prep_address_list = prep_dict['preps']\n for each_prep in prep_address_list:\n self._top_preps.put(each_prep['address'])", "def _reset_top_preps(self) -> None:\n if self._system.getIISSInfo()[\"nextPRepTerm\"] > self._block_height_week.get() + (7 * 43200):\n self._block_height_week.set(self._system.getIISSInfo()[\"nextPRepTerm\"])\n for i in range(len(self._top_preps)):\n self._top_preps.pop()\n self._set_top_preps()", "def set_top_container (self, top_container_id):\n instance = self.get_instance()\n instance['sub_container']['top_container']['ref'] = '/repositories/2/top_containers/%s' % top_container_id", "def top(self):", "def top(self) -> int:\n top = self.stack.pop()\n self.stack.append(top)\n for i in range(len(self.stack) - 1):\n self.stack.append(self.stack.pop())\n return top", "def __init__(self):\n self.top = None", "def __init__(self):\n self.top = None", "def __init__(self):\n self.top = None", "def move_top ( self ):\n list, index = self.get_info()\n self.value = [ list[index] ] + list[:index] + list[index+1:]", "def top_layer(self):\n return self._top", "def __init__(self, top_k):\n super(StreamTopkAccuracy, self).__init__(\n reset_at=\"stream\", emit_at=\"stream\", mode=\"eval\", top_k=top_k\n )\n self.top_k = top_k", "def set_top_widget(self, widg):\r\n if widg in self.widgets:\r\n 
self.widgets.remove(widg)\r\n self.widgets.insert(0, widg)\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if not i == widg:\r\n i.unfocus()", "def setTopK(self, value):\n return self._set(topK=value)", "def setTopK(self, value):\n return self._set(topK=value)", "def top(self):\n return self._top", "def top(self):\n return self._top", "def top(self):\n\n return self._top", "def top(self) -> int:\n last = self.pop()\n self.push(last)\n return last", "def getDepthToTop(self):\n return DEFAULT_ZTOR", "def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))", "def set_top_border(self, val):\n self.tborder = val", "def TopSnappable(self, b=True):\r\n \r\n return self.SetFlag(self.optionTopSnapped, b)", "def isTop(self):\n return self.top", "def getDepthToTop(self):\n return constants.DEFAULT_ZTOR", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def resize_top(self, new_z, padding=None):\n self.upper_vertex[2] = new_z + padding", "def bb_top(self) -> float:\n return self._bb_top", "def top(self, **kwargs):\n return self.client.api.top(self.id, **kwargs)", "def top(self) -> Optional[FloatObject]:\n return self.get(\"/Top\", None)", "def finalize_top(self, top):\n if not self.use_pr:\n return top\n\n # Expose AXI interface to the top level, and then wrap the entire\n # user design.\n top.add_port('axil_clk', 'axil_clk', parent_sig=False, dir='in')\n top.add_port('axil_rst_n', 'axil_clk', parent_sig=False, dir='in')\n top.add_port('M_AXI_araddr', 'M_AXI_araddr', width=32, parent_sig=False, dir='in')\n top.add_port('M_AXI_arready', 'M_AXI_arready', parent_sig=False, dir='out')\n top.add_port('M_AXI_arvalid', 'M_AXI_arvalid', parent_sig=False, dir='in')\n top.add_port('M_AXI_awaddr', 'M_AXI_awaddr', width=32, parent_sig=False, dir='in')\n top.add_port('M_AXI_awready', 'M_AXI_awready', parent_sig=False, dir='out')\n top.add_port('M_AXI_awvalid', 'M_AXI_awvalid', parent_sig=False, dir='in')\n top.add_port('M_AXI_bready', 'M_AXI_bready', parent_sig=False, dir='in')\n top.add_port('M_AXI_bresp', 'M_AXI_bresp', width=2, parent_sig=False, dir='out')\n top.add_port('M_AXI_bvalid', 'M_AXI_bvalid', parent_sig=False, dir='out')\n top.add_port('M_AXI_rdata', 'M_AXI_rdata', width=32, parent_sig=False, dir='out')\n top.add_port('M_AXI_rready', 'M_AXI_rready', parent_sig=False, dir='in')\n top.add_port('M_AXI_rresp', 'M_AXI_rresp', width=2, parent_sig=False, dir='out')\n top.add_port('M_AXI_rvalid', 'M_AXI_rvalid', parent_sig=False, dir='out')\n top.add_port('M_AXI_wdata', 'M_AXI_wdata', width=32, parent_sig=False, dir='in')\n top.add_port('M_AXI_wready', 'M_AXI_wready', parent_sig=False, dir='out')\n top.add_port('M_AXI_wstrb', 'M_AXI_wstrb', width=4, parent_sig=False, dir='in')\n top.add_port('M_AXI_wvalid', 'M_AXI_wvalid', parent_sig=False, dir='in')\n if self.enable_wishbone:\n top.add_port('wbm_cyc_o', 'wbm_cyc_o', parent_sig=False, dir='in')\n top.add_port('wbm_stb_o', 'wbm_stb_o', parent_sig=False, dir='in')\n top.add_port('wbm_we_o ', 'wbm_we_o ', parent_sig=False, dir='in')\n top.add_port('wbm_sel_o', 'wbm_sel_o', parent_sig=False, dir='in', width=4)\n top.add_port('wbm_adr_o', 'wbm_adr_o', parent_sig=False, dir='in', width=32)\n top.add_port('wbm_dat_o', 'wbm_dat_o', parent_sig=False, dir='in', width=32)\n top.add_port('wbm_dat_i', 'wbm_dat_i', parent_sig=False, 
dir='out', width=32)\n top.add_port('wbm_ack_i', 'wbm_ack_i', parent_sig=False, dir='out')\n top.add_port('wb_clk_i', 'wb_clk_i', parent_sig=False, dir='in')\n top.add_port('wb_rst_i', 'wb_rst_i', parent_sig=False, dir='in')\n top.instantiate_child_ports()\n # With PR, we're not going to be using this module as top. Instead, let's\n # rename is `user_top` which will be instantiated within a high-level static top-level.\n # The assumpion is that the static top-level is already routed and included in a project and need\n # not be generated here.\n top.name = 'user_top'\n return top", "def top(self):\n # type: () -> float\n return self._top", "def __init__(self) -> None:\n self.top = []", "def top_option():\n active = get_active_window()\n Width=get_middle_Width(active)\n Height=get_top_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def top(self) -> int:\n return self.topvalue", "def right_top(self, right_top):\n\n self._right_top = right_top", "def top(self):\r\n return self.topele", "def left_top(self, left_top):\n\n self._left_top = left_top", "def top_tolerance(self, top_tolerance):\n if top_tolerance is None:\n raise ValueError(\"Invalid value for `top_tolerance`, must not be `None`\") # noqa: E501\n\n self._top_tolerance = top_tolerance", "def set_top_unique_num(self, number):\n self.top_unique_num = number", "def setUp(self):\n self.tb = gr.top_block()", "def reveal_top_card(self):\n if self.get_length() != 0:\n if not self.get_topmost_card().get_exposed():\n self.get_topmost_card().flip_card()", "def top_code(self, code):\n self.code_for_top = (self.y, code)\n self.y += self.unit*1.5", "def top(self) -> Face:\n return self.bodies[0].faces[self._top_index]", "def top(self):\n with self.mutating:\n top = self.queue[0]\n return top", "def reset_stack_arm(top):\n if top is not None and top.name in ['sandwichtop', 'sandwichtop_no_label']:\n if top.ey > 0:\n top.reset_y()", "def getContainerTop(self):\n return self.containerTop", "def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})", "def top(self) -> int:\n r = self.data.get()\n self.push(r)\n return r", "def showTopView(self):\r\n if(self.dataController.fileLoaded == True): \r\n self.dataController.showTopView()\r\n self.midsagittalView = True\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False", "def __init__(self, top_k: int = 1) -> None:\n self.top_k = top_k", "def top(self):\n return self[0]", "def top(self, *args):\n return _ida_hexrays.history_t_top(self, *args)", "def top(self) -> int:\n return self.topEle", "def showTopStories(self):\n\t\tif self.newestOrTop == \"newest\":\n\t\t\tself.newestOrTop = \"top\"\n\t\t\tprint \"Getting the latest stories from HN...\"\n\t\t\tself.refreshStories()\n\t\telse:\n\t\t\tinput = raw_input(\"Already showing top stories. 
Press Return to continue.\")\n\t\tself.printStories()", "def top10(self, top10: List[Word]):\n\n self._top10 = top10", "def IsTopSnappable(self):\r\n \r\n return self.HasFlag(self.optionTopSnapped)", "def __init__(self, top_k):\n super(MinibatchTopkAccuracy, self).__init__(\n reset_at=\"iteration\", emit_at=\"iteration\", mode=\"train\", top_k=top_k\n )\n self.top_k = top_k", "def test_set_pin_to_top(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n\n old_subs, _ = gather_subscriptions(user)\n sub = old_subs[0]\n stream_id = sub[\"stream_id\"]\n new_pin_to_top = not sub[\"pin_to_top\"]\n result = self.api_post(\n user,\n \"/api/v1/users/me/subscriptions/properties\",\n {\n \"subscription_data\": orjson.dumps(\n [{\"property\": \"pin_to_top\", \"stream_id\": stream_id, \"value\": new_pin_to_top}]\n ).decode()\n },\n )\n self.assert_json_success(result)\n\n updated_sub = get_subscription(sub[\"name\"], user)\n\n self.assertIsNotNone(updated_sub)\n self.assertEqual(updated_sub.pin_to_top, new_pin_to_top)", "def pop_top_card(self):\n return self.pop_card(top=True)", "def top(self):\n print(self.arr[-1])", "def top(self):\n if self.is_empty():\n raise RuntimeError(\"Attempt to get a top of the empty stack!\")\n return self._items[-1]", "def populateRigTopNode(self):\n topNode = getRigTopNode()\n if topNode:\n self.gtUIInst.rigNode_lineEdit.setText(topNode)", "def top(self, **kwargs) -> Dict[str, Any]:", "def setDefaultFilter(self):\n self.logsItem.setDefaultFilter()", "def get_top(self):\n elements = self.S.get_maximal_elements()\n data = {}\n alot = Nat().get_top()\n for e in elements:\n data[e] = alot\n return Multiset(data, self.S)", "def top(self):\n return self.q1.return_top()", "def reset_params(self):\n self.blur = -1\n self.closing = -1\n self.thresh = -1", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)", "def top(self):\n if len(self._data) == 0:\n raise StackError(\"Stek je prazan. Ne moze se izvrsiti funkcija top.\")\n else:\n return self._data[-1]", "def top(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Stack is empty')\n\t\treturn self._head._element", "def getPageTop(self):\n return self.pageTop", "def top(self):\n if self.stack == []:\n return None\n return self.stack[-1]" ]
[ "0.67293346", "0.66441184", "0.66229576", "0.6613329", "0.6542814", "0.65164036", "0.6361207", "0.6278201", "0.62754864", "0.620966", "0.60119075", "0.60119075", "0.60119075", "0.59842026", "0.59842026", "0.5849825", "0.58208567", "0.5782378", "0.5741379", "0.5739408", "0.57287025", "0.566843", "0.5667478", "0.56568104", "0.563995", "0.55853176", "0.5583333", "0.55805856", "0.5565484", "0.5547596", "0.54751736", "0.54751736", "0.54751736", "0.547507", "0.54726505", "0.5461233", "0.5456641", "0.5438895", "0.5438895", "0.54158473", "0.54158473", "0.5395914", "0.5366002", "0.5349473", "0.53421146", "0.5336722", "0.5315375", "0.53001595", "0.5278686", "0.52574795", "0.52574795", "0.52574795", "0.52383727", "0.5179078", "0.5176599", "0.5171165", "0.5154903", "0.51532704", "0.5151186", "0.51316684", "0.51274616", "0.50798523", "0.50778294", "0.5066972", "0.5044994", "0.5037498", "0.5028384", "0.50264156", "0.502536", "0.50227505", "0.50157017", "0.49987474", "0.49842262", "0.49773675", "0.4971475", "0.49577063", "0.49498942", "0.49471873", "0.49448514", "0.4943205", "0.49013695", "0.4872212", "0.48693797", "0.48467165", "0.48431656", "0.484052", "0.48389143", "0.48282108", "0.48141563", "0.47959903", "0.47955388", "0.4794489", "0.4793759", "0.4788958", "0.47856095", "0.4785122", "0.47813162", "0.47766408", "0.47758728" ]
0.73888093
0
Sets the app_type of this NetflowFilters.
def app_type(self, app_type):
    self._app_type = app_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _app_type(self):\n return self._event['app_type']", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def set_type(self, type):\n self._type = type", "def set_type(self, type: int):\r\n self.type = type\r\n self.canvas.itemconfig(self.item, image=self._get_image())", "def set_application(self, app):\n \n self.app = app", "def item_group_type(self, item_group_type):\n\n self._item_group_type = item_group_type", "def image_type(self, image_type: ImageType):\n\n self._image_type = image_type", "def setDataSetType(self, type):\n self.__data_set_type__ = type", "def set_input_type(self, input_type):\n if input_type is not None: self._input_type.value = input_type\n return self", "def set_execution_type(self, type):\n self.execution_type = type", "def set_type(self, rtype=ALL_USERS):\r\n self.type = rtype", "def type(self, type: str):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def package_type(self, package_type):\n\n self._package_type = package_type", "def type(self, type):\n self._type = type", "def type(self, type):\n self._type = type", "def type(self, type):\n allowed_values = [\"android\", \"ios\"]\n if type.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for type -> \" + type)\n self._type = \"outdated_sdk_version\"\n else:\n self._type = type", "def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if type is not None and len(type) < 1:\n 
raise ValueError(\"Invalid value for `type`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type", "def __init__(__self__, *,\n event_type: Optional[pulumi.Input[Sequence[pulumi.Input['FilterEventTypeItem']]]] = None):\n if event_type is not None:\n pulumi.set(__self__, \"event_type\", event_type)", "def platform_type(self, platform_type):\n self._platform_type = platform_type", "def set_type(self, type_balle):\n self.type_balle = type_balle", "def set_animal_type(self, type):\n self.__animal_type = type", "def engine_type(self, engine_type):\n\n self._engine_type = engine_type", "def set_input_type_class(self, input_type_class):\n if input_type_class is not None:\n self._input_type.expected = (input_type_class,)\n return self", "def set_type(self, value):\n self._set_one_attribute(self.AttributeNames.TYPE, value)\n return self", "def SetType(self, ct_type):\r\n\r\n self._type = ct_type", "def item_data_type(self, item_data_type):\n\n self._item_data_type = item_data_type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\")\n\n self._type = type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\")\n\n self._type = type", "def set_review_type(self, review_type):\n if review_type not in [APPROVE, REQUEST_REVISION]:\n raise RuntimeError(\n \"%s.review_type should be set to either %s or %s, not %s\"\n % (self.__class__.__name__, APPROVE, REQUEST_REVISION, review_type)\n )\n\n index = self.findText(review_type)\n if index != -1:\n self.setCurrentIndex(index)", "def set_app(self, app):\n self._app = app\n\n # Let the subclass choose the authentication method.\n self._authenticator = self._set_authenticator()", "def setDocumentType(self,value):\n self.PDFreactorConfiguration.in1[\"documentType\"] = value", "def entity_type(self, entity_type: str):\n\n self._entity_type = entity_type", "def attr_type(self, attr_type):\n\n self._attr_type = attr_type", "def msa_app_type(self) -> Optional[pulumi.Input[Union[str, 'MsaAppType']]]:\n return pulumi.get(self, \"msa_app_type\")", "def entity_type(self, entity_type):\n\n self._entity_type = entity_type", "def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n if type is not None and len(type) < 1:\n raise ValueError(\"Invalid value for `type`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._type = type", "def set_type(self, type, asset=None):\n self._set_property('pc:type', type, asset)", "def change_type(self, change_type):\n\n self._change_type = change_type", "def bot_type(self, bot_type):\n\n self._bot_type = bot_type", "def type(self, type):\n allowed_values = [\"None\", \"File\", \"FileManagerFile\", \"BusOb\", \"History\", \"Other\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\" # noqa: E501\n .format(type, allowed_values)\n )\n\n 
self._type = type", "def set_type(self, typ):\n if typ in range(5):\n self._type = typ\n\n else:\n raise ValueError(\n \"ERROR: Invalid input. Please give a numerical value \"\n \"between 0 and 4 ( both inclusive ) \")", "def task_type(self, task_type):\n\n self._task_type = task_type", "def entity_type(self, entity_type):\n self._entity_type = entity_type", "def view_type(self, view_type):\n\n self.container['view_type'] = view_type", "def fs_type(self, fs_type):\n\n self._fs_type = fs_type", "def setType(self, newType):\n self._itemType = newType", "def setType(self,newtype):\n\t\tself.type = newtype;", "def set_application(self, app_id):\n if self._use_channel_info:\n self._channel = \"\"\n self._channel_name = app_id\n self._is_forced_val = True\n self._forced_count = 0", "def set_feedback_type(self, feedback_type):\r\n return self._arm.set_feedback_type(feedback_type)", "def type(self, type):\n\n self.container['type'] = type", "def type(self, type):\n\n self.container['type'] = type", "def SetDocumentType(self, document_type):\n if document_type:\n self._doc_type = document_type.lower()\n else:\n self._doc_type = u'event'", "def set_auth_type(self, auth_type):\n pass", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def setMappingType(self, mapping_type):\n \n self.mapping_type = mapping_type", "def set_type(self, val):\n if not contain_in_list_equal(val, PARAM_TYPES):\n raise ArgumentError(\"[WARNING] `type`, should be \" + \", \".join(PARAM_TYPES))\n self._type = val\n pass", "def experiment_type(self, new_type: str) -> None:\n self._db_data.experiment_type = new_type", "def setType(self, type):\n\t\tif not self.Loaded:\n\t\t\tself.type = type\n\t\t\tself.loader = NetLoader.getNetwork(type)\n\t\t\tself.isTypeSet = True", "def _set_filter_type(filter):\n if filter == 'nat':\n return '-N'\n if filter == 'options':\n return '-O'\n if filter == 'filter':\n return '-R'", "def program_type(self, value):\n\n types = {\n 'executableProgram': '1'\n }\n\n self._program_type = types[value]", "def app(self, app):\n\n self._app = app", "def put(self):\n logging.info(\"PUT method for API for ApplicationTypes not supported.\")\n pass" ]
[ "0.6110402", "0.6007707", "0.6007707", "0.5881547", "0.5826859", "0.5705285", "0.5672125", "0.5645346", "0.5516108", "0.5502036", "0.5485557", "0.54566836", "0.5454073", "0.5433174", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.5431337", "0.54295516", "0.54295516", "0.5422906", "0.54210323", "0.54210323", "0.54210323", "0.54210323", "0.54202926", "0.541741", "0.541741", "0.541741", "0.5415377", "0.5402667", "0.539673", "0.5390782", "0.53883487", "0.5376631", "0.5342713", "0.5327699", "0.5326127", "0.52950364", "0.52950364", "0.52740175", "0.5267209", "0.5253098", "0.5222035", "0.52208465", "0.52180153", "0.5216041", "0.51923484", "0.51889914", "0.5188924", "0.5183844", "0.51751685", "0.517325", "0.5161823", "0.5150919", "0.51468587", "0.5143236", "0.51399195", "0.51367676", "0.5130178", "0.51232284", "0.51110417", "0.51110417", "0.51045257", "0.5103409", "0.50999707", "0.50999707", "0.50999707", "0.50999707", "0.508586", "0.5078654", "0.50677264", "0.5065488", "0.50620145", "0.5060752", "0.5037088", "0.50338477" ]
0.8083322
0
Sets the nbar_application_names of this NetflowFilters.
def nbar_application_names(self, nbar_application_names):
    self._nbar_application_names = nbar_application_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)", "def set_name(self, application_name):\r\n self._name = application_name", "def app_names(self):\n return self.get_app_names()", "def app_name(self, value):\n self._app_name = value", "def config_bucket_names(self, config_bucket_names: ConfigNodePropertyArray):\n\n self._config_bucket_names = config_bucket_names", "def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')", "def axesnames(self, axesnames):\n if axesnames is None:\n self._axesnames = None\n else:\n assert isinstance(axesnames, list), 'axesnames must be list'\n self._axesnames = axesnames\n debug('ControllerStartup.axesnames = %s', itemstostr(self._axesnames))", "def category_names(self, category_names):\n\n self._category_names = category_names", "def RAppNames(self):\n\t\tnames=[]\n\t\tfor item in range(self.rApps.Count):\n\t\t\tnames.append(self.rApps.Item(item).Name)\n\t\treturn names", "def set_pinnames(self, names):\n self.pnames = names", "def reset_name_labels(infr):\n infr.print('reset_name_labels', 1)\n orig_names = infr.get_node_attrs('orig_name_label')\n infr.set_node_attrs('name_label', orig_names)", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1", "def set_index_names(self, names, axis=0):\n self.get_axis(axis).names = names", "def tag_names(self, tag_names):\n\n self._tag_names = tag_names", "def nvmf_namespace_num(self, nvmf_namespace_num):\n\n self._nvmf_namespace_num = nvmf_namespace_num", "def set_fnames(self, fnames):\n self.fnames = fnames[:]", "def set_application(self, app):\n \n self.app = app", "def setName(self, *args):\n return _libsbml.FluxBound_setName(self, *args)", "def set_title_bar_visible(self, visible):\n self.widget.SetTitleBarVisible(visible)", "def set_FilterName(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterName', value)", "def apps(self, apps):\n\n self._apps = apps", "def get_app_names(self):\n groups = self['__store']\n lookup = {\n g.group_id: g.name[2:]\n for g in groups\n if (g.name.startswith('a_'))\n }\n return set(map(lookup.get, self.get_app_ids()))", "def setProgramName(self, *args):\n return _libsbml.SBMLWriter_setProgramName(self, *args)", "def merge_nonjunk_into_new_name(self, event=None):\n # Delete all original names\n aid_list = self.all_aid_list\n aid_list_filtered = ut.filterfalse_items(\n aid_list, self.ibs.get_annot_isjunk(aid_list)\n )\n # Rename annotations\n self.ibs.set_annot_names_to_same_new_name(aid_list_filtered)\n self.update_callback()\n self.backend_callback()\n self.show_page()", "def set_all_inactive(self):\n for name in self.get_names():\n self.set_inactive(name)", "def clean_name(self) -> None:\n\n for regex, group in 
self.parse_generic_regex:\n m = regex.match(self.app_name)\n\n if m:\n self.app_name = m.group(group).strip()\n return", "def setFilters(self, filters):\n self.__filters = filters", "def applications(self, applications: List[ApplicationRequestResponse]):\n\n self._applications = applications", "def set_git_filter_attribute(self, filtername):\n self._filter = filtername", "def set_atom_labels(self, labels):\n self.set_attribute(\"atom_labels\", labels)", "def setCollectorsNames(self, collectors_names):\n networkx.set_node_attributes(self, \n values=collectors_names,\n name='fullname')", "def setIndexNames(self):\n self.xi = self.i1\n self.yi = self.i2", "def names(self, names):\n\n self._names = names", "def program_ids(self, program_ids):\n\n self._program_ids = program_ids", "def namespace_resource_whitelist(self, namespace_resource_whitelist):\n\n self._namespace_resource_whitelist = namespace_resource_whitelist", "def set_num_images(self,num_images):\n for roi in self.rois:\n roi.set_num_images(num_images)\n self.num_images = num_images", "def get_app_names(self):\n return [name for name in self._proxies.keys() if not name.startswith('_')]", "def nvmf_namespace_num_not(self, nvmf_namespace_num_not):\n\n self._nvmf_namespace_num_not = nvmf_namespace_num_not", "def set_nAxis(self, newval):\n rest_val = str(newval)\n return self._setAttr(\"nAxis\", rest_val)", "def set_application(self, app_id):\n if self._use_channel_info:\n self._channel = \"\"\n self._channel_name = app_id\n self._is_forced_val = True\n self._forced_count = 0", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def set_apps(self, new_apps):\n self.remove_apps()\n for app_id in new_apps:\n self.add_app(Webapp.objects.get(pk=app_id))\n index_webapps.delay(new_apps)", "def setName(self, *args):\n return _libsbml.OutwardBindingSite_setName(self, *args)", "def set_name(self, new_name):\n self._name = new_name\n\n if self._uimanager is None:\n return\n\n gtk_action_group = self.get_data('gtk_action_group')\n self._uimanager.remove_action_group(gtk_action_group)\n new_gtk_action_group = self.create_gtk_action_group(self._uimanager)\n\n for action in gtk_action_group.list_actions():\n gtk_action_group.remove_action(action)\n new_gtk_action_group.add_action(action)", "def setnames(self, *args, **kwargs):\n return _coordsys.coordsys_setnames(self, *args, **kwargs)", "def set_all_active(self):\n for name in self.get_names():\n self.set_active(name)", "def app_name(self, app_name):\n if app_name is None:\n raise ValueError(\"Invalid value for `app_name`, must not be `None`\") # noqa: E501\n\n self._app_name = app_name", "def set_num_images(self,num_images):\n if num_images != self.num_images:\n self.counts = [{} for _ in range(num_images)]\n\n for _ in range(num_images,len(self.thresholds)): # delete unneeded thresholds\n self.thresholds.pop()\n for _ in range(len(self.thresholds), num_images): # make new thresholds\n self.thresholds.append(self.default_threshold)\n\n for _ in range(num_images,len(self.autothreshs)): # delete unneeded autothreshs\n self.autothreshs.pop()\n for _ in range(len(self.autothreshs), num_images): # make new autothreshs\n self.autothreshs.append(self.default_autothresh)\n\n self.num_images = num_images", "def namespace_name(self, namespace_name):\n\n self._namespace_name = namespace_name", "def setName(self, *args):\n return _libsbml.Input_setName(self, *args)", "def nvmf_namespace_num_not_in(self, nvmf_namespace_num_not_in):\n\n self._nvmf_namespace_num_not_in = 
nvmf_namespace_num_not_in", "def module_name(self, module_name=''):\n\n self._module_name = module_name", "def set_amiSheetNames(self):\n\n self.pres_sheetname = None\n self.edit_sheetname = None\n self.notransfer_sheetname = None\n\n for sheet in self.wb.sheet_names():\n sheet_lower = sheet.lower()\n #Check if two sheets get identfied by regex below?\n if re.match(\"(original|preservation|file|full|archive)\",\n sheet_lower):\n self.pres_sheetname = sheet\n elif re.match(\"edit\", sheet_lower):\n self.edit_sheetname = sheet\n elif re.match(\"not transferred\", sheet_lower):\n self.notransfer_sheetname = sheet", "def update_namespaces_info(self):\n namespaces = BlockDev.nvdimm_list_namespaces(idle=True)\n\n self._namespaces = dict((namespace.dev, namespace) for namespace in namespaces)", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n for instance in self.instances:\n instance.set_ns_prefix(ns_for_name)", "def set_vendor_names(self, vendor_names):\n if not all(isinstance(vendor_name, str) for vendor_name in vendor_names):\n raise ApiError(\"One or more invalid vendor names\")\n self._update_criteria(\"vendor_name\", vendor_names)\n return self", "def set_vendor_names(self, vendor_names):\n if not all(isinstance(vendor_name, str) for vendor_name in vendor_names):\n raise ApiError(\"One or more invalid vendor names\")\n self._update_criteria(\"vendor_name\", vendor_names)\n return self", "def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in", "def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in", "def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in", "def application_ids(self, application_ids):\n if self.local_vars_configuration.client_side_validation and application_ids is None: # noqa: E501\n raise ValueError(\"Invalid value for `application_ids`, must not be `None`\") # noqa: E501\n\n self._application_ids = application_ids", "def set_documents_names(cls, input_list_names: List[str]) -> None:\n cls.documents_names = input_list_names", "def resource_names(self, resource_names):\n\n self._resource_names = resource_names", "def set_product_names(self, product_names):\n if not all(isinstance(product_name, str) for product_name in product_names):\n raise ApiError(\"One or more invalid product names\")\n self._update_criteria(\"product_name\", product_names)\n return self", "def set_product_names(self, product_names):\n if not all(isinstance(product_name, str) for product_name in product_names):\n raise ApiError(\"One or more invalid product names\")\n self._update_criteria(\"product_name\", product_names)\n return self", "def set_labelname(self, labelname):\n self.options['labelname'] = labelname", "def getApplicationName(self) -> unicode:\n ...", "def setNamespaces(self, *args):\n return _libsbml.SBase_setNamespaces(self, *args)", "def nsg_ids(self, nsg_ids):\n self._nsg_ids = nsg_ids", "def setChannelNames(self, n1, n2):\n\t\tfor i, val in enumerate(self.headervals):\n\t\t\ts = val[0]\n\t\t\ts = s.replace(\"%ch1%\", n1)\n\t\t\ts = s.replace(\"%ch2%\", n2)\n\t\t\tself.headervals[i][0] = s\n\t\t\tself.SetStringItem(i, 0, s)", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def setName(self, *args):\n return _libsbml.SBase_setName(self, *args)", "def setIndexNames(self):\n self.theta = self.i1\n self.radial = self.i2", "def get_sidebar_saved_filter_names(self):\n radio_options = [\n {\"label\": f\"{x['name']}\", \"value\": f\"{x['name']}\"} for x in self._filters\n ]\n 
radio_options.insert(0, {\"label\": \"None\", \"value\": \"None\"})\n return [\n dbc.FormGroup(\n children=[\n dbc.Label(\"Saved Filters\", className=\"mr-2\"),\n dbc.RadioItems(\n options=radio_options,\n value=\"None\",\n id=\"filter_radioitems_input\",\n ),\n ]\n ),\n ]", "def set_ns_prefix(\n self, ns_for_name: Dict[str, Tuple[str, str]], c_ns: str, f_ns: str\n ) -> None:\n self.c_prefix = c_ns\n self.f_prefix = f_ns", "def selected_applications(self) -> Optional[pulumi.Input['NamespacedNamesArgs']]:\n return pulumi.get(self, \"selected_applications\")", "def selected_applications(self) -> Optional[pulumi.Input['NamespacedNamesArgs']]:\n return pulumi.get(self, \"selected_applications\")", "def remove_apps(self, app_names):\n groups = self['__store']\n for name in app_names:\n supergroup = groups.first(name='a_' + name)\n if supergroup:\n supergroup.remove_subgroup(self)", "def filters(self, filters):\n\n self._filters = filters", "def _generateApplicationName(self, obj, **args):\n result = []\n try:\n result.append(obj.getApplication().name)\n except:\n pass\n return result", "def SetTitleBarVisible(self, visible):\n if self._title_bar_visible != visible:\n self._title_bar_visible = visible\n def closure(pane):\n left = self._title_bar_orientation == wx.VERTICAL\n pane.CaptionVisible(visible, left)\n self._PaneInfoOperation(closure)", "def reset_namelist_menu(self):\n new_nml = wx.Menu() # build new menu\n\n # add single element, don't bind it to anything\n nmlItem = new_nml.Append(wx.ID_ANY, '--No File Loaded--', '--No File Loaded--')\n\n # replace the second menu, index=1\n self.menubar.Replace(self.nml_menu_index, new_nml, '&Namelists')\n\n self.namelist = None # there is no longer a current namelist\n self.statusbar.SetStatusText(\"Namelist: --No File Loaded--\", 1)", "def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0", "def update_playbook_name(self, old_playbook, new_playbook):\n for key in [name for name in self.workflows.keys() if name.playbook == old_playbook]:\n self.update_workflow_name(old_playbook, key.workflow, new_playbook, key.workflow)", "def setSearchFieldnames(self, fieldnames):\n self._search_fieldnames = fieldnames", "def setName(self, *args):\n return _libsbml.FluxObjective_setName(self, *args)", "def octopus_names(self, msg, args):\r\n self.names.send_names(msg, args)", "async def async_set_filters(self, filters, state_mode):\n if filters not in ON_OFF_LIST:\n return\n self._filter = filters.lower()\n self._state_mode = state_mode\n await self.async_send_cmd()", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n self.c_prefix, self.f_prefix = ns_for_name[self.name]", "def updateNINADeviceNameList(self, deviceNames):\n self.ui.ninaDeviceList.clear()\n self.ui.ninaDeviceList.setView(QListView())\n for deviceName in deviceNames:\n self.ui.ninaDeviceList.addItem(deviceName)\n return True", "def clear_name_labels(infr):\n infr.print('clear_name_labels()', 1)\n # make distinct names for all nodes\n distinct_names = {node: -aid for node, aid in infr.get_node_attrs('aid').items()}\n infr.set_node_attrs('name_label', distinct_names)", "def set_title_bar_orientation(self, orientation):\n self.widget.SetTitleBarOrientation(_ORIENTATION_MAP[orientation])", "def on_show_wrong_name(self):\n self._set_filter_value(\n 'showWrongNameState', self.wrong_name_btn.isChecked())", "def setAllAxisLabels(self, labels):\n 
self.__axis_labels__ = labels", "def set_program_name(program_name):\n global _PROGRAM_NAME\n _PROGRAM_NAME = program_name", "def setName(self, *args):\n return _libsbml.Event_setName(self, *args)", "def setScope(self, fileBasename):\n self.fileBasename = fileBasename\n scopeNamespace = self.defaultNamespacePrefix + fileBasename + '/'\n \n # Annotations go to a different namespace\n annotationScopeNamespace = self.annotationsNamespacePrefix + fileBasename + '/'\n \n self.log.debug('Adding namespace for {0}: {1}'.format(fileBasename, scopeNamespace))\n \n self.namespaces['scope'] = Namespace(scopeNamespace)\n self.annotationNamespaces['scope'] = Namespace(annotationScopeNamespace)\n self.graph.namespace_manager.bind('', self.namespaces['scope'])\n self.annotationGraph.namespace_manager.bind('', self.annotationNamespaces['scope'])", "def rename_global(self, *args):\n return _ida_hexrays.vdui_t_rename_global(self, *args)", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")" ]
[ "0.5579578", "0.5503502", "0.51913893", "0.5184525", "0.51769996", "0.5006621", "0.4993549", "0.48435128", "0.48244205", "0.4778834", "0.4738988", "0.47321886", "0.47280967", "0.47159183", "0.4684673", "0.46767935", "0.46593088", "0.46463352", "0.4644338", "0.46052152", "0.46007124", "0.4590774", "0.45867503", "0.45630264", "0.45449847", "0.4543776", "0.45381227", "0.45350233", "0.45310223", "0.44987962", "0.44887957", "0.4467481", "0.44550285", "0.44250607", "0.44234362", "0.442327", "0.44222078", "0.44208315", "0.4407079", "0.44008118", "0.44003808", "0.439531", "0.43908563", "0.43667203", "0.43663105", "0.4363239", "0.43470952", "0.43338808", "0.43307793", "0.4316715", "0.43042699", "0.42954576", "0.42883915", "0.42857784", "0.42853257", "0.428252", "0.428252", "0.42641428", "0.42641428", "0.42641428", "0.42607987", "0.42412603", "0.4239188", "0.42317963", "0.42317963", "0.42164075", "0.4215616", "0.42100593", "0.42057082", "0.4194936", "0.41947708", "0.41943473", "0.41886243", "0.41866484", "0.41777965", "0.41766044", "0.41766044", "0.41729876", "0.4170599", "0.41620934", "0.41618422", "0.41604915", "0.41566014", "0.41563863", "0.4155399", "0.41526586", "0.41505563", "0.41447338", "0.41428405", "0.41356567", "0.41342127", "0.4133628", "0.41280094", "0.41234577", "0.41172105", "0.41157782", "0.41093168", "0.41040304", "0.41022962", "0.41022962" ]
0.86759675
0
Sets the node_a of this NetflowFilters.
def node_a(self, node_a):
    self._node_a = node_a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_node(self, node):\n self.__node = node", "def from_node(self, a):\n return a == self.__node_a", "def nodes(self, nodes_array):\n self.nodes_set = nodes_array", "def set_node(self, name, state):\n self.source_net.nodes[name] = state", "def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Zeroslike(%s)\" % node_A.name\n return new_node", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Zeroslike(%s)\" % node_A.name\r\n return new_node", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node", "def node_b(self, node_b):\n\n self._node_b = node_b", "def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node", "def a(self, a):\n\n self._a = a", "def nodes(self, nodes):\n\n self._nodes = nodes", "def setA(self, a):\n\t\tself.a = int(a)", "def set_node(self, index, node):\r\n self.loc.coord[index] = node", "def __init__(self, node_a, node_b):\n self.node_a = node_a\n self.node_b = node_b\n self.base_color = 'blue'\n self.tint_color = 'white'\n self.tint = 0\n self.options = []", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def node_data(self, node_data):\n\n self._node_data = node_data", "def set_nodes(self, nodes):\n self._drv_nodes = nodes", "def set_node_attribute(\n node: MatterNode,\n endpoint: int,\n cluster_id: int,\n attribute_id: int,\n value: Any,\n) -> None:\n attribute_path = f\"{endpoint}/{cluster_id}/{attribute_id}\"\n node.endpoints[endpoint].set_attribute_value(attribute_path, value)", "def node_id(self, node_id):\n\n self._node_id = node_id", "def set_node(self, uri, info):\n\t\tself.node_uri = uri\n\t\tself.node_info = info", "def node_info(self, node_info):\n\n self._node_info = node_info", "def set(self, node, value):\n self.val[node] = value", "def set_node_id(self, node_id):\n self._node_id = node_id", "def nodes(self, nodes):\n self.nodes_ = nodes\n self.last_sequence_ind = int(self.nodes_.shape[0] - 1)\n logging.debug(\n \"Segment - Nodes {n_shape} set.\".format(n_shape=self.nodes_.shape)\n )", "async def set_nodes(self, node_callq: Dict):\n for svc in self._services:\n await svc.set_nodes(node_callq)", "def set_apex_node(self):\n if self.opt == 'CT':\n self.epi_apex_node = self.mesh_poly.GetPoints().GetPoint(3604)\n self.endo_apex_node = self.mesh_poly.GetPoints().GetPoint(3579)\n else:\n self.endo_apex_node = None # we do not know this\n self.epi_apex_node = self.mesh_poly.GetPoints().GetPoint(0)", "def node_count(self, node_count):\n\n self._node_count = node_count", "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def _update_nodes_ids(self, change=None):\n self._nodes_filter.val_range = self.nodes_range\n self.nodes_ids = self._nodes_filter.val_ids\n self._update_edges_filtered(change)", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Sqrt(%s)\" % (node_A.name)\r\n return new_node", "def _onSetParameterA(self, value):\n self._parameters['a'] = value\n self._logger.info(\"Parameter 'a' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def setattr(self, node, attr, value):\n node.set(attr, value)", "def 
_mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)", "def set_nodes_values(self, node_dict):\n\n # Requires nodes to have type defined in lookup array\n raise Exception(\"Not yet implemented.\")", "def node_version(self, node_version):\n\n self._node_version = node_version", "def setup_ant(self):\n self.visited_nodes[1:] = []\n self.actual_node = self.start_pos", "def set_nodeset(self, nodeset):\n self.nodeset = set(nodeset) # overwrite the existing nodeset with the input nodeset\n\n self.__check_validity() # check if graph is valid - throws exception if not", "def __init__(__self__, *,\n nodes: pulumi.Input[Sequence[pulumi.Input[str]]]):\n pulumi.set(__self__, \"nodes\", nodes)", "def change_node(self, node: dict):\n # check if it is not overriding existing node\n if node.get('id') is not None:\n if node['id'] not in self._nodes:\n raise ValueError('tried to change non-existing node %s' % node['id'])\n else:\n raise ValueError('no id for node provided')\n\n # change attributes\n id_ = node['id']\n del node['id']\n for attribute in node:\n self._nodes[id_][attribute] = node[attribute]", "def nodes(self, nodes):\n if nodes is None:\n raise ValueError(\"Invalid value for `nodes`, must not be `None`\") # noqa: E501\n\n self._nodes = nodes", "def nodes(self):\n new = self.copy()\n new._filter = [\"nodes\"]\n return new", "def __copy__(self, fa):\n assert isinstance(fa, FA)\n\n self.node_index = fa.node_index\n self.start_node = fa.start_node\n self.states = fa.states\n self.graph = fa.graph", "def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)", "def onSelectArefNode(self):\n self.ArefNode = self.Aref_Selector.currentNode()\n if not self.ArefNode:\n self.SetLayoutViewer(None, 'Red')\n return \n self.SetLayoutViewer(self.ArefNode, 'Red')", "def __init__(self, node_a, node_b, id, edge_value=\"null\"):\n self.__node_a = node_a\n self.__node_b = node_b\n self.__edge_value = edge_value\n self.__id = id", "def update_node(node, attribute, value):\n node.set(attribute, value)\n return", "def __init__(self, node_b=None, qos_type=None, device_interfaces=None, ports=None, protocol=None, ip_version=None, netflow_devices=None, top=None, app_type=None, nbar_application_names=None, node_a=None, conversation=None, if_names=None, direction=None): # noqa: E501 # noqa: E501\n\n self._node_b = None\n self._qos_type = None\n self._device_interfaces = None\n self._ports = None\n self._protocol = None\n self._ip_version = None\n self._netflow_devices = None\n self._top = None\n self._app_type = None\n self._nbar_application_names = None\n self._node_a = None\n self._conversation = None\n self._if_names = None\n self._direction = 
None\n self.discriminator = None\n\n if node_b is not None:\n self.node_b = node_b\n if qos_type is not None:\n self.qos_type = qos_type\n if device_interfaces is not None:\n self.device_interfaces = device_interfaces\n if ports is not None:\n self.ports = ports\n if protocol is not None:\n self.protocol = protocol\n if ip_version is not None:\n self.ip_version = ip_version\n if netflow_devices is not None:\n self.netflow_devices = netflow_devices\n if top is not None:\n self.top = top\n if app_type is not None:\n self.app_type = app_type\n if nbar_application_names is not None:\n self.nbar_application_names = nbar_application_names\n if node_a is not None:\n self.node_a = node_a\n if conversation is not None:\n self.conversation = conversation\n if if_names is not None:\n self.if_names = if_names\n if direction is not None:\n self.direction = direction", "def activate_nodes(graph, nodes, record_to=None):\n for agent in nodes:\n graph.nodes[agent][\"agent\"].activate()\n if record_to is not None:\n # |= is union + assignment\n record_to |= set(nodes)", "def setParameterNode(self, parameterNode):\r\n # framework\r\n profbox()\r\n self.parameterNode = parameterNode", "def txa(self):\n\n self.a = self.x\n self.set_zn(self.a)", "def visit_node(self, node: OnnxNode, network: Network):\n pass", "def add_node(self, server_address, set_register_connection=False):\n print(\"node \", server_address, set_register_connection, \" added to stream nodes\")\n server_ip, server_port = server_address\n server_ip = Node.parse_ip(server_ip)\n self.nodes.append(Node(server_address=(server_ip, server_port), set_register=set_register_connection))", "def __init__(self, nodes):\n\n self._nodes = nodes", "def add_node_pairs(self, node_a,node_b):\r\n \r\n if node_b is not None : \r\n self.nodes[node_a].append(node_b)", "def add_nodes_from(self, nodes):\n self._Impl._nodes[\"all_nodes\"] = cudf.Series(nodes)", "def visit(self, node):\n try:\n node_type = node.xml_type\n except AttributeError:\n raise ValueError('Not a valid Amara node %r' % node)\n\n try:\n visit = self._dispatch[node_type]\n except KeyError:\n # unknown node type, try and get a \"pretty\" name for the error\n #FIXME: Not ported for Amara 2\n node_types = {}\n for name in dir(Node):\n if name.endswith('_NODE'):\n node_types[getattr(Node, name)] = name\n node_type = node_types.get(node.node_type, node.node_type)\n raise ValueError('Unknown node type %r' % node_type)\n else:\n visit(self, node)", "def setNodeTypeFlag(*args, display: bool=True, threadSafe: bool=True, q=True, query=True,\n **kwargs)->Union[bool, Any]:\n pass", "def set_node_value(node: Node, value: np.ndarray):\n if node.type != 'Const':\n raise Exception('Can\\'t set value for non-constant node {}'.format(node.name))\n data_type = np.float32\n if node.out_port(0).is_data_type_defined():\n data_type = node.out_port(0).get_data_type()\n node.out_port(0).data.set_value(np.array(value).astype(data_type))", "def replace_node(self, network_node: Node, node: Node) -> None:\n index = self.network.index(network_node)\n self.network[index] = node", "def player_a_id(self, player_a_id):\n\n self._player_a_id = player_a_id", "def setContextNode(self, node):\n if node is None: node__o = None\n else: node__o = node._o\n libxml2mod.xmlXPathSetContextNode(self._o, node__o)", "def setAnchor(self,a):\n self.anchor = a", "def setNs(self, node):\n if node is None: node__o = None\n else: node__o = node._o\n libxml2mod.xmlSetNs(node__o, self._o)", "def add_node(self, name, node):\n self.nodes.setdefault(name, 
node)", "def prepare_node(self, node):\n # Every change at the position of node will be recognized\n aexpr(lambda: node.position, globals(), locals())\\\n .on_change(lambda obs, oldv, newv: self.set_node_position(node, *newv))", "def set_nodeprops(self, nodeprops):\n assert isinstance(nodeprops, dict), \"nodeprops must be a dictionary, even if empty\"\n self.nodeprops = nodeprops", "def set_node_colors(self, node_colors):\n\n self.node_colors = node_colors", "def init_adjacency(self, A):\n A[A==0] = self.INFINITY", "def add(self, a, b):\n a, b = (a, b) if a in self.node_id else (b, a)\n target_id = self.node_id[a]\n self.node_id[b] = target_id\n self.groups[target_id] |= set([b])", "def move_ant(self, node_to_visit):\n self.actual_node = node_to_visit\n self.remember_visited_node(node_to_visit)", "def node_data(self, node_data):\n self.node_data_ = node_data\n self.label = node_data.label\n self.node_type = node_data.node_type\n self.arity = node_data.arity\n self.min_depth = node_data.min_depth\n self.child_type = node_data.child_type\n self.numpy_func = node_data.numpy_func\n self.tensorflow_func = node_data.tensorflow_func", "def _add_node_attributes(self):\n ensemble_mapping = SankeyLayout._ensemble_map(\n df=self.supergraph.gf.df, nxg=self.nxg, columns=SankeyLayout._COLUMNS\n )\n for idx, key in enumerate(ensemble_mapping):\n nx.set_node_attributes(self.nxg, name=key, values=ensemble_mapping[key])\n\n dataset_mapping = {}\n for run in self.runs:\n dataset_mapping[run] = SankeyLayout._dataset_map(\n df=self.supergraph.gf.df,\n nxg=self.nxg,\n tag=run,\n columns=SankeyLayout._COLUMNS,\n )\n nx.set_node_attributes(\n self.nxg, name=self.supergraph.tag, values=dataset_mapping[run]\n )", "def add_node(self, name, node):\n\n self.nodes[name] = fold_constant(node)", "def node_selector(self, node_selector: Dict[str, str]):\n\n self._node_selector = node_selector", "def __initilization(self,node_set):\n \n print \"*********************************\"\n \n for x in node_set:\n x.node_vol=np.transpose(np.matrix([cmath.exp(0), cmath.exp(complex(0,math.pi*2/3)), cmath.exp(complex(0,-math.pi*2/3))]))\n \n print \"Forward/Backward Algorithm Initialization Done!\"", "def set_node_position(self, node, x, y, z=0):\n pass", "def set_node_positions(self):", "def set_node(self, node_id):\n info = self._get_info(self.EXPECTED)\n if node_id in info:\n self._node_id = node_id\n return True\n return False", "def set_working_node(self, node):\n self.working_node = node", "def assign_aov(self, aov: AOV):\n\t\tif aov not in self.assigned_aovs:\n\t\t\tfor material in self.materials:\n\t\t\t\tshader_node_tree = material.node_tree\n\t\t\t\tassert shader_node_tree is not None, \"Material must have a node tree\"\n\t\t\t\taov.add_to_shader(shader_node_tree)\n\n\t\tself.assigned_aovs.append(aov)", "def network_node_event(self, node, value):\n if node.node_id == self.node.node_id:\n self.node_event(value)", "def set_node_value(self, node_name, node_dict: dict):\n\n message = SetValueMessage(id=node_name, values=node_dict)\n requests.post(self.channel, data=message.json(), params=\"set_node\")", "def set_node_attributes(G, attr_name):\n if attr_name == 'k-index':\n core_number = nx.core_number(G)\n nx.set_node_attributes(G, core_number, name=attr_name)\n else:\n print('Unknown attribute name:', attr_name)", "def __decorate_nodes(nodes, space):\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n 
__decorate_nodes(pred_nodes, space)", "def set_node_attributes(graph: BaseGraph, attributes: Dict) -> None:\n return set_node_attributes(graph.graph, attributes)", "def setAn(self, an):\n self.an = an", "def setSelectModeNode(self):\n self._nodeSelectMode = True\n self._dataSelectMode = False\n self._elemSelectMode = False", "def setModeAddNode(self):\n self.scene().mode = fsScene.MODE_ADDNODE", "def setName(self, name):\n libxml2mod.xmlNodeSetName(self._o, name)", "def setnodeequation(self, node_p=None, eqn=None):\n node_p = self.getnodenamed(node_p) # Verify pointer.\n\n # (node_bn* node, const char* eqn)\n cnetica.SetNodeEquation_bn.argtypes = [c_void_p, c_char_p]\n cnetica.SetNodeEquation_bn.restype = None\n cnetica.SetNodeEquation_bn(node_p, ccharp(eqn))", "def set_attribute(self, node, attribute, value):\n name = '{}.{}'.format(node, attribute)\n try:\n attr_type = mc.getAttr(name, typ=True)\n if 'string' in attr_type:\n mc.setAttr(name, value, typ='string')\n elif 'float3' in attr_type:\n mc.setAttr(\n name, value[0][0], value[0][1], value[0][2], typ='float3'\n )\n else:\n mc.setAttr(name, value)\n except Exception:\n return False\n return True", "def __init__(self, nodes=None):\r\n self.nodes = nodes", "def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)", "def _start_oef_node(self, network_node):", "def changeAlias(self, alias, node):", "def setGoalNode(self, newGoal):\r\n\t\tself.goalNode = newGoal", "def setParameterNode(self, parameterNode):\n #framework\n profbox()\n self.parameterNode = parameterNode", "def reset_adadelta_variables(t_A=self.t_A):\n A0 = np.zeros_like(t_A.get_value()).astype(theano.config.floatX)\n t_ada_Eg2.set_value(A0)\n t_ada_dA2.set_value(A0)\n t_A.set_value(A0)", "def setSoon(self, name, node):\n\t\tself.soon.append([name,node])\n\t\treturn node", "def subject_a(self, subject_a):\n\n self._subject_a = subject_a" ]
[ "0.58251303", "0.5789575", "0.5745441", "0.5615295", "0.557767", "0.5573292", "0.5538027", "0.55000454", "0.5496195", "0.54542196", "0.5382919", "0.5307776", "0.51498705", "0.5089265", "0.507674", "0.5060496", "0.5041549", "0.50394356", "0.4960041", "0.49577066", "0.4915388", "0.49085498", "0.48717454", "0.48525095", "0.48383656", "0.4815578", "0.48139805", "0.4796978", "0.47719133", "0.47489136", "0.47413674", "0.47269842", "0.47154218", "0.47145554", "0.47014403", "0.46970356", "0.46957958", "0.4686159", "0.467854", "0.46636456", "0.46595368", "0.46554968", "0.46542323", "0.46529564", "0.4645061", "0.46418694", "0.46412218", "0.4639481", "0.46379566", "0.46304575", "0.46232083", "0.46192", "0.46151447", "0.45892093", "0.45880267", "0.458375", "0.4583277", "0.4581848", "0.4576636", "0.45745653", "0.4568855", "0.4558642", "0.4555589", "0.45540637", "0.45465854", "0.45455712", "0.45261517", "0.45236143", "0.4517946", "0.45161986", "0.45100585", "0.45055103", "0.4500091", "0.44957745", "0.4490823", "0.44875136", "0.4485832", "0.44767147", "0.44753247", "0.4474591", "0.4468988", "0.44688228", "0.44654262", "0.44617787", "0.44608086", "0.44580472", "0.44498742", "0.44497445", "0.4449663", "0.44473362", "0.44437596", "0.44424003", "0.4437457", "0.443177", "0.4429841", "0.44292328", "0.44273746", "0.4410529", "0.44081038", "0.44060445" ]
0.79782516
0
Sets the conversation of this NetflowFilters.
def conversation(self, conversation):
    self._conversation = conversation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_conversation(self, conversation):\r\n self.conversation = conversation", "def set_gift_conversation(self, conversation_string):\r\n self.gift_conversation = conversation_string", "def update(self, conversation):\n self.content_type = \"application/json\"\n self.method = \"PATCH\"\n entity = Conversation(json.loads(self.send(conversation).content))\n self._initialize_collection_properties(entity)\n return entity", "def conversation_participant_name(self, conversation_participant_name):\n\n self._conversation_participant_name = conversation_participant_name", "def set_convert(self, connection_conv):\n self.convert = connection_conv", "def conversation_participant_uuid(self, conversation_participant_uuid):\n\n self._conversation_participant_uuid = conversation_participant_uuid", "def conversation(self, thread):\r\n assert isinstance(thread, int) and 0 <= thread < len(self._threads), \"Thread {} don't exists at channel {}!\".\\\r\n format(thread, self.name)\r\n return self._threads[thread][\"conversation\"]", "def conversations(self):\n if self._conversations is None:\n self._conversations = Conversations(self)\n return self._conversations", "def sent(self, sent):\n\n self._sent = sent", "def handle_chat_received(self, peer: Peer):\n if not self._conversation_view:\n return\n\n if peer is not self._conversation_view.peer():\n # The active conversation is different than the one receiving the message\n index = self.__convs_list.model().index_of(peer)\n if index is not None:\n model_index = self.__convs_list.model().index(index, 0, QModelIndex())\n self.__convs_list.model().setData(model_index, QBrush(Qt.red), Qt.ForegroundRole)", "def start_conversation(self, event):\n if self._border.get_background_color(False) == globals.GROUP_ODD_COLOR:\n background_color = globals.CONVERSATION_EVEN_COLOR\n else:\n background_color = globals.CONVERSATION_ODD_COLOR\n \n self._conv.append(Conversation(self, bg=background_color))", "def initialise_conversation_model(self):\n self.conversation = model.conversation.ConversationSystem()\n #\n # Set all as alive\n for name in 'abcde':\n self.conversation.addKnowledge(['{0}-alive'.format(name)])\n #\n # And set the requires\n self.conversation.convertPresentToRequires('{0}-alive')", "def set_chatbot(self, chatbot):\n super(MultiLogicAdapter, self).set_chatbot(chatbot)\n\n for adapter in self.adapters:\n adapter.set_chatbot(chatbot)", "def get_conversations(self):\n\t\treturn self.conversations", "def sentmodel(sent_data):\n\n # with tf.variable_scope(\"sent\", reuse=tf.AUTO_REUSE):\n with tf.variable_scope(\"sent\"):\n sent_data = tf.expand_dims(sent_data, -1)\n filter_sizes = [2, 3, 5]\n filter_bitsent = mul_filtercnn(filter_sizes, sent_data, 'sent')\n \n fc_sent = tf.identity(tf.layers.conv1d(\\\n inputs=filter_bitsent,\\\n filters=1,\\\n kernel_size=1,\\\n padding=\"same\",\\\n activation=tf.nn.sigmoid),name=\"fc_sent\")\n return fc_sent", "def channel(self, channel):\n allowed_values = [\"sms\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and channel not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `channel` ({0}), must be one of {1}\" # noqa: E501\n .format(channel, allowed_values)\n )\n\n self._channel = channel", "def setRxConvolved(self, rx_convolved):\n \n self.rx_convolved = rx_convolved", "def filter(self, message):\n conversations = Conversations()\n conversation = conversations.get_conversation(message.from_user.id)\n if conversation is None:\n return False\n\n return conversation.type == 
self.conversation_type", "def set_sentences(self, sentences):\n self._sentences = sentences", "def list(self, request, *args, **kwargs):\n return super(ConversationViewSet, self).list(request, *args, **kwargs)", "def get_gift_conversation(self):\r\n return self.gift_conversation", "def __init__(self, request_url, client, options):\n super(ConversationRequest, self).__init__(request_url, client, options)", "def set_channel(cls, channel):\n cls.channel = channel", "def _set_channel_(self, channel):\n self._channel = channel", "async def set_filter(self, filter_name: str, **kwargs: Any) -> None:\n\n # valid filter?\n if filter_name not in self._telescope.filters:\n raise ValueError(\"Invalid filter name.\")\n\n # log and send event\n if filter_name != self._telescope.filter_name:\n # set it\n logging.info(\"Setting filter to %s\", filter_name)\n await self._change_motion_status(MotionStatus.SLEWING, interface=\"IFilters\")\n await asyncio.sleep(3)\n await self._change_motion_status(MotionStatus.POSITIONED, interface=\"IFilters\")\n self._telescope.filter_name = filter_name\n\n # send event\n await self.comm.send_event(FilterChangedEvent(filter_name))\n logging.info(\"New filter set.\")", "def conversation_participant_arn(self, conversation_participant_arn):\n\n self._conversation_participant_arn = conversation_participant_arn", "def show_conversation(self, conversation_item):\n #\n # Handle any special knowledge which might arise\n self.handle_special_knowledge()\n #\n if conversation_item.conversation_text.strip():\n new_item = textentry.TextEntry(\n 'person_{0}'.format(conversation_item.person),\n self.markup_text(conversation_item.conversation_text),\n width=S['text-entry-width'],\n fontname='computerfont',\n color=S['vdu-colour'],\n )\n self.tabbed.add_dialog_item(new_item)\n #\n try:\n self.awaiting_conversations.remove(conversation_item)\n except KeyError:\n pass", "def _set_send_community(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=send_community.send_community, is_container='container', presence=False, yang_name=\"send-community\", rest_name=\"send-community\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Send community attribute to this neighbor', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"send_community must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=send_community.send_community, is_container='container', presence=False, yang_name=\"send-community\", rest_name=\"send-community\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Send community attribute to this neighbor', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__send_community = t\n if hasattr(self, '_set'):\n self._set()", "def set_voice_actor(sim_info: SimInfo, voice_actor: Union[int, CommonVoiceActorType]) -> None:\n sim_info.voice_actor = int(voice_actor)", "async def set_chat(self, args):\n value = args if isinstance(args, bool) else args.lower() in 
('yes', 'true', '1')\n if self.chat == value:\n return\n self.chat = value\n if self.chat_message is not None:\n await self.delete_message(self.chat_message)\n await self.set_trigger('chat_init', None)\n await self.set_trigger('chat', None)\n tag = 'chat' if self.chat else 'chat_init'\n self.chat_message = await self.send_tag(tag, emoji.TRIGGERS[tag], 'Chat enabled' if self.chat else 'Chat muted')\n if not self.chat:\n await self.shell_terminate_all(self.shell_chat)", "def create(self, request, *args, **kwargs):\n self.serializer_class = ConversationDetailSerializer\n return super(ConversationViewSet, self).create(request, *args, **kwargs)", "def setDiffusivity(self, diff): # real signature unknown; restored from __doc__\n pass", "def set_git_filter_attribute(self, filtername):\n self._filter = filtername", "def setInputSentence(self, sentence):\n self.inputSentence = sentence", "def update_conversation(self, convo, packet):\n convo['end_ts'] = packet['ts']\n convo['bytes'] += int(packet['ip']['len']) + 14\n convo['out_bytes'] += int(packet['ip']['len']) + 14\n convo['pkt_cnt'] += 1", "def messaging(self, value: bool):\n if type(value) is not bool:\n raise TypeError(\"Value must be of type 'bool' ('{}' given)\".format(type(value)))\n\n self._messaging = value", "def setConversionFactor(self, *args):\n return _libsbml.Model_setConversionFactor(self, *args)", "def create(self, request, *args, **kwargs):\n self.serializer_class = ConversationDetailSerializer\n return super(PublicChatViewSet, self).create(request, *args, **kwargs)", "def flow(self, flow):\n\n self._flow = flow", "def recipient(self, recipient):\n\n self._recipient = recipient", "def _set_message(self):\n\n type_to_message = {\n MESSAGE_TYPES['join'] : (\n u\"User '%s' has joined the chat.\" % \n self.author\n ),\n MESSAGE_TYPES['leave'] : (\n u\"User '%s' has left the chat.\" % \n self.author\n ),\n }\n\n message = self.message\n if type_to_message.has_key(self.type):\n message = type_to_message[self.type]\n self.message = message", "def stop_conversation(self):\n self.redis.publish('conversation_stopped_by_operator', json.dumps(self.operator_token))\n self.conversation_started = False", "def connect(self, telegram_bot, message_sender):\n\n self.__telegram_bot = telegram_bot\n self.__message_sender = message_sender", "def set_actor(self, vtk_act):\n assert isinstance(vtk_act, vtkActor)\n self.vtk_act = vtk_act", "def _set_communities(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"communities\", rest_name=\"communities\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"communities must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"communities\", rest_name=\"communities\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__communities = t\n if hasattr(self, '_set'):\n self._set()", "async def set_activity(self, msg, activity=None, *args):\n await 
client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=activity))", "def conan_channel(self, conan_channel):\n\n self._conan_channel = conan_channel", "def set_channel_status(self, keyfind, valfind, updatedict, origin):\n\n self.get_channel_obj(keyfind, valfind, origin).set_status(updatedict)", "def sent_by_recipient_id(self, sent_by_recipient_id):\n\n self._sent_by_recipient_id = sent_by_recipient_id", "def step_impl_the_msg_to_is_set_to_respondent(context):\n step_impl_the_msg_to_is_set_to(context, context.bdd_helper.respondent_id)", "def add_conversation(self,conversation):\n\t\tif isinstance(conversation,Conversation):\n\t\t\tself.conversations.append(conversation)\n\t\telse:\n\t\t\traise NotAConversation(\"Must pass Conversation object, not {}\".format(type(conversation)))", "def get_conversations(self, name: str) -> ConversationDict:\n if self.conversations:\n pass\n elif not self.single_file:\n filename = f\"{self.filename}_conversations\"\n data = self._load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self._load_singlefile()\n return self.conversations.get(name, {}).copy() # type: ignore[union-attr]", "def set_interactions(self, interactions: dict):\n self._interactions = interactions", "def set_cur_flow(self, flow):\n self.cur_flow = flow", "def filter(self, filter):\n self._filter = filter", "def set_community(self, community):\n assert community is None or isinstance(community, Community), type(community)\n self._community = community\n if community:\n self._dispersy = community.dispersy", "def filter(self, message):\n conversations = Conversations()\n return conversations.get_conversation(message.from_user.id) is not None", "def setDescriptorChannels(self, dch): # real signature unknown; restored from __doc__\n pass", "def setFilters(self, filters):\n self.__filters = filters", "def setOutputFilterChannel(self, Channel, filt, stringOnly=0):\n msg = ''\n if filt == 'Through' or filt == 'INF':\n msg = \"OUTPut\"+str(Channel)+\":FILTer:LPASs:FREQuency INF\"\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def switchToChat(self):\n \n self.lastView = self.currentView\n self.currentView = 1\n self.stacked.setCurrentIndex(1)\n self.show()", "def send_chat(self, text, sender, target, whisper=False):\n self.connection.send({'type': 'chat', 'sender': sender, 'target': target,\n 'text': text, 'whisper': whisper})", "def __init__(self, graph_conv, mlp=None):\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n if isinstance(mlp, chainer.Link):\n self.mlp = mlp\n if not isinstance(mlp, chainer.Link):\n self.mlp = mlp", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def setCoalesceSentences(self, value):\n return self._set(coalesceSentences=value)", "def conversation(self, line, teamchat):\n # print(type(line))\n if (line.split(\" \")[0] == \"[chat]:\" or line.split(\" \")[0] == \"[teamchat]:\") 
and line.split(\" \")[1] != \"***\":\n if teamchat:\n result = re.search(\"\\[teamchat\\]: (\\d+):(.+):(.+): (.+)\", line)\n else:\n result = re.search(\"\\[chat\\]: (\\d+):(.+):(.+): (.+)\", line)\n name = result.groups()[2]\n ide = result.groups()[0]\n message = result.groups()[-1]\n team_chat = result.groups()[1]\n info = [name, message, ide, team_chat]\n return info\n #[chat]: 0:-2:LeveL 5: mo\n else:\n info = [\"NONE\", \"NONE\", \"NONE\"]\n return info", "def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner", "def set_channel(self, channel):\n self.response['channel'] = channel", "def __init__(self, kernel_size=11, log_t=False):\n super(Attention, self).__init__()\n assert kernel_size % 2 == 1, \"Kernel size should be odd for 'same' conv.\"\n padding = (kernel_size - 1) // 2\n self.conv = nn.Conv1d(1, 1, kernel_size, padding=padding)\n self.log_t = log_t", "def community(self, community):\n\n self._community = community", "def retrieve(self, request, *args, **kwargs):\n return super(ConversationViewSet, self).retrieve(request, *args, **kwargs)", "async def SetSuggestionChannel(self, ctx, ch=None):\r\n\r\n\t\tif not ch:\r\n\t\t\tch = ctx.channel\r\n\r\n\t\tif ch != 0:\t\r\n\t\t\tch = self.settings.Get(ctx, 'channel', ch)\r\n\t\t\tself.settings.BotConfig('SuggestionChannel', ch.id)\r\n\t\telse:\r\n\t\t\tself.settings.BotConfig('SuggestionChannel', ch)\r\n\r\n\t\tawait ctx.send('Setting Suggestion Channel to: **' + str(ch) + '**')", "def set_input_space(self, space):\n\n setup_deconv_detector_layer_c01b(layer=self,\n input_space=space,\n rng=self.mlp.rng)\n\n rng = self.mlp.rng\n\n detector_shape = self.detector_space.shape\n\n\n self.output_space = self.detector_space\n\n logger.info('Output space: {0}'.format(self.output_space.shape))", "def channel(self, channel):\n allowed_values = [\"whatsapp\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and channel not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `channel` ({0}), must be one of {1}\" # noqa: E501\n .format(channel, allowed_values)\n )\n\n self._channel = channel", "def activity(self, activity):\n if activity is None:\n raise ValueError(\"Invalid value for `activity`, must not be `None`\") # noqa: E501\n\n self._activity = activity", "def conv_block(self, output_channel, filter_, activate):\n return self.add_layer(conv_block, output_channel, filter_, activate)", "def community_not(self, community_not):\n\n self._community_not = community_not", "def ready_to_talk(self, conversation_item):\n self.health_panels[conversation_item.person].set_wants_to_talk(conversation_item)\n self.awaiting_conversations.add(conversation_item)\n #\n # Make sure the right people are visible\n for name, panel in self.health_panels.items():\n if name in conversation_item.present:\n panel.show_person()\n panel.set_drinking_state(healthpanel.S_TALKING)\n else:\n panel.hide_person()\n panel.set_drinking_state(healthpanel.S_OFF)", "def chan_spacing(self, chan_spacing):\n if chan_spacing is None:\n raise ValueError(\"Invalid value for `chan_spacing`, must not be `None`\") # noqa: E501\n\n self._chan_spacing = chan_spacing", "def message_change():\n d = curdoc()\n _remove_fig(d)\n _remove_selection(d)\n message_name = d.get_model_by_name(MESSAGE_SELECTION).value\n d.get_model_by_name(GRAPH_SELECTION).options = GRAPH_OPTIONS if message_name != DEFAULT_UNSELECTED else [DEFAULT_UNSELECTED]\n 
d.get_model_by_name(GRAPH_SELECTION).value = DEFAULT_UNSELECTED", "def changeConvolutionalDepth(self,depth):\n self.conv_depth = depth", "def setConformance(self, conformance):\n self.PDFreactorConfiguration.in1[\"conformance\"] = conformance", "def setSelectionfilter(self, scenefilter):\n self._selectionFilter = scenefilter\n sceneviewerfilter = self._sceneviewer.getScenefilter()\n if self._selectionFilter is not None:\n scenefiltermodule = self._context.getScenefiltermodule()\n scenefilter = scenefiltermodule.createScenefilterOperatorAnd()\n scenefilter.appendOperand(sceneviewerfilter)\n if self._selectionFilter is not None:\n scenefilter.appendOperand(self._selectionFilter)\n else:\n scenefilter = sceneviewerfilter\n self._scenepicker.setScenefilter(scenefilter)", "async def set_channel(self, ctx: commands.Context):\n if ctx.message.author.id != conf.user:\n return None\n\n new_channel = ctx.channel.id\n\n conf.channel = new_channel\n\n log.info(f\"Bot channel set to channel with: #{ctx.channel} (ID:{ctx.channel.id})\")\n await ctx.message.channel.send(f\"✅ Set bot channel for {ctx.message.author} to #{ctx.channel}\")", "def toggle_sentence_window_mode(self):\n self.__sentence_mode = True", "async def setTransactionLogChannel(self, ctx, tlog : discord.Channel):\n server_dict = self.get_server_dict(ctx)\n\n try:\n server_dict.setdefault('Transaction Channel', tlog.id)\n self.save_data()\n await self.bot.say(\":white_check_mark: Transaction log channel now set to {0}\".format(tlog.mention))\n except:\n await self.bot.say(\":x: Error setting transaction log channel to {0}\".format(tlog.mention))", "def chf(self, chf):\n\n self.logger.debug(\"In 'chf' setter.\")\n\n self._chf = chf", "def on_netconf_message(self, stream_message):\n raise NotImplementedError", "def step_impl_the_msg_to_is_set_to_respondent_as_string_not_array(context):\n context.bdd_helper.message_data[\"msg_to\"] = context.bdd_helper.respondent_id", "def setPeerToPeerNetwork(self, peerToPeerNetwork):\r\n raise NotImplementedError()", "def update_flow(self, flow):\r\n self.flow = flow", "def set_scope(self, scope):\n self.vis.set_scope(scope)", "def send_msg(self, recipient, message):\n bus = SessionBus()\n purple = bus.get(\n \"im.pidgin.purple.PurpleService\",\n \"/im/pidgin/purple/PurpleObject\"\n )\n my_id = purple.PurpleAccountsGetAllActive()[0]\n conv = purple.PurpleConversationNew(1, my_id, recipient)\n conv_im = purple.PurpleConvIm(conv)\n purple.PurpleConvImSend(conv_im, message)", "def set_webhook(self, webhook):\n self.webhook = webhook\n return" ]
[ "0.7668855", "0.6350055", "0.52989596", "0.5078657", "0.50487155", "0.50432694", "0.49181578", "0.49066126", "0.4869592", "0.48333606", "0.4763982", "0.47448006", "0.47074327", "0.46813306", "0.46577984", "0.45800743", "0.4569655", "0.45474747", "0.45351785", "0.44976678", "0.44866642", "0.44683528", "0.44654623", "0.44592154", "0.4428278", "0.44042638", "0.44039187", "0.43915802", "0.43814868", "0.4380441", "0.43688998", "0.43616566", "0.43590555", "0.4353243", "0.43489712", "0.43224216", "0.43209448", "0.43164194", "0.43126595", "0.43074578", "0.43021914", "0.42887422", "0.42621318", "0.42519858", "0.4247591", "0.42462793", "0.42445093", "0.42420685", "0.4228236", "0.42159948", "0.42108694", "0.42046246", "0.4203355", "0.42026946", "0.42023817", "0.42017564", "0.41990653", "0.41965613", "0.41845015", "0.41840908", "0.41796687", "0.41650203", "0.41406333", "0.4129434", "0.4129434", "0.4129434", "0.4129434", "0.4129434", "0.4129434", "0.4129434", "0.4129434", "0.4114578", "0.4114266", "0.410628", "0.41020054", "0.40996027", "0.4096011", "0.40927157", "0.40885866", "0.40866458", "0.40833426", "0.4068673", "0.40664655", "0.40632433", "0.40610957", "0.4057461", "0.40391412", "0.40349567", "0.4025243", "0.40224966", "0.40191934", "0.40131012", "0.40049523", "0.39950278", "0.3993351", "0.39834866", "0.3976552", "0.39761692", "0.39744127", "0.39722586" ]
0.72746265
1
Sets the if_names of this NetflowFilters.
def if_names(self, if_names):
    """Sets the if_names of this NetflowFilters."""
    self._if_names = if_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ifaces_init(*ifnames):\n for ifname in ifnames:\n _set_eth_admin_state(ifname, schema.InterfaceState.ABSENT)", "def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)", "def setFilters(self, filters):\n self.__filters = filters", "def set_filters(self, filters):\n obj = []\n for fltr in filters:\n obj.append(fltr.jobject)\n javabridge.call(self.jobject, \"setFilters\", \"([Lweka/filters/Filter;)V\", obj)", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def filters(self, filters):\n\n self._filters = filters", "def _set_ifname(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"ifname\", rest_name=\"ifname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ifname must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"ifname\", rest_name=\"ifname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__ifname = t\n if hasattr(self, '_set'):\n self._set()", "def set_FilterName(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterName', value)", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def names(self, names):\n\n self._names = names", "def ifaces(self, ifaces):\n \n self._ifaces = ifaces", "def update_filters(self, filters: str) -> None:\r\n\r\n log.debug(f'Updating filters to {filters}')\r\n\r\n parts = filters.split('&')\r\n\r\n for part in parts:\r\n value, key = part.split('=')\r\n\r\n if value == 'iv':\r\n self.__payload['prevMinIV'] = self.__payload['minIV']\r\n self.__payload['minIV'] = key.strip()\r\n elif value == 'exiv':\r\n self.__payload['exMinIV'] = key.strip()\r\n else:\r\n log.debug(f'Dont know filter: \"{part}\", ignoring...')\r\n\r\n self.__filters_string = filters", "def tag_names(self, tag_names):\n\n self._tag_names = tag_names", "def SetNames(self, names):\n # parse the names (a semicolon seperated list of names)\n if isinstance(names, str):\n names = names.split(';')\n if self.__names != names:\n self.__names = names\n self.Modified()", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def set_pinnames(self, names):\n self.pnames = 
names", "def setSearchFieldnames(self, fieldnames):\n self._search_fieldnames = fieldnames", "def set_fnames(self, fnames):\n self.fnames = fnames[:]", "def addNames(self, names):\n for name in names:\n self.tags.setdefault(name, ModelTag(name))", "def set_git_filter_attribute(self, filtername):\n self._filter = filtername", "def set_scanning_filter(self, **kwargs):\n for k, v in kwargs.get(\"filters\", {}).items():\n if k == \"UUIDs\":\n self._filters[k] = Variant(\"as\", v)\n elif k == \"RSSI\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Pathloss\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Transport\":\n self._filters[k] = Variant(\"s\", v)\n elif k == \"DuplicateData\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Discoverable\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Pattern\":\n self._filters[k] = Variant(\"s\", v)\n else:\n logger.warning(\"Filter '%s' is not currently supported.\" % k)\n\n if \"Transport\" not in self._filters:\n self._filters[\"Transport\"] = Variant(\"s\", \"le\")", "def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces", "def input_names(self):\n raise NotImplementedError(\n 'Derived ExternalGreyBoxModel classes need to implement the method: input_names'\n )", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def setFilters(self, regex=None):\n if regex is not None:\n try:\n self.__regex = re.compile(regex)\n except Exception as e:\n return\n\n self.__all_filters = (self.__regex,)\n\n self.__customFilterEnabled = any(self.__all_filters)\n self.invalidateFilter()", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "async def set_filter(self, filter_name: str, **kwargs: Any) -> None:\n\n # valid filter?\n if filter_name not in self._telescope.filters:\n raise ValueError(\"Invalid filter name.\")\n\n # log and send event\n if filter_name != self._telescope.filter_name:\n # set it\n logging.info(\"Setting filter to %s\", filter_name)\n await self._change_motion_status(MotionStatus.SLEWING, interface=\"IFilters\")\n await asyncio.sleep(3)\n await self._change_motion_status(MotionStatus.POSITIONED, interface=\"IFilters\")\n self._telescope.filter_name = filter_name\n\n # send event\n await self.comm.send_event(FilterChangedEvent(filter_name))\n logging.info(\"New filter set.\")", "def _set_interface_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, 
restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name=\"interface-name\", rest_name=\"interface-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='union', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface_name must be of a type compatible with union\"\"\",\n 'defined-type': \"brocade-fcoe-ext:union\",\n 'generated-type': \"\"\"YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name=\"interface-name\", rest_name=\"interface-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='union', is_config=True)\"\"\",\n })\n\n self.__interface_name = t\n if hasattr(self, '_set'):\n self._set()", "def setChannelNames(self, n1, n2):\n\t\tfor i, val in enumerate(self.headervals):\n\t\t\ts = val[0]\n\t\t\ts = s.replace(\"%ch1%\", n1)\n\t\t\ts = s.replace(\"%ch2%\", n2)\n\t\t\tself.headervals[i][0] = s\n\t\t\tself.SetStringItem(i, 0, s)", "def setIndexNames(self):\n self.xi = self.i1\n self.yi = self.i2", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def setName(self, *args):\n return _libsbml.Input_setName(self, *args)", "def set_interface(self, ifname):\n \n if not self._slave_dhcp_process is None:\n raise Exception('DhcpClientAlreadyStarted')\n \n self._ifname = ifname", "async def async_set_filters(self, filters, state_mode):\n if filters not in ON_OFF_LIST:\n return\n self._filter = filters.lower()\n self._state_mode = state_mode\n await self.async_send_cmd()", "def setName(self, name):\n # type: (str)->None\n self._validator.validate_one('name', VALID_OPTS['name'], name)\n self._ifAttributes['name'] = str(name)", "def set_inputs(self, inputs):\n for name, config in _iteritems(inputs):\n self.add_input(name, config[\"file\"], config[\"type\"] if \"type\" in config else None)", "def set_attributes(self):\n\n self.input_file = None # the InputFile object\n self.namelist = None # the currently selected namelist\n self.file_loaded = False # is an input file loaded or not", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def add_interface(self, inf):\n self.interfaces[inf] = {'ip': 'unassigned', 'status': 'shutdown', 'connect': ['none', 'none']}", "def name_in(self, name_in):\n\n self._name_in = name_in", "def name_in(self, name_in):\n\n self._name_in = name_in", "def name_in(self, name_in):\n\n self._name_in = name_in", "def set_index_names(self, names, axis=0):\n self.get_axis(axis).names = names", "def 
names(self, *names):\n assert len(names) == len(self._preds)\n self._names = names\n return self", "def set_sensitive_to_filter(self, sensitive_name, sensitive_val):\n self.name += str(sensitive_val)\n self.sensitive_filter = sensitive_val\n self.sensitive_for_metric = sensitive_name", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def _set_interface(self, cfg, itf):\n self.interface = None\n for i in range(cfg.bNumInterfaces):\n x = cfg[(i,0)]\n if x.bInterfaceNumber == itf:\n self.interface = x\n endpoints = sorted([ep.bEndpointAddress for ep in self.interface])\n self.ep_out, self.ep_in = endpoints[:2]", "def set_match_names(self, names):\n result = self.copy()\n result.match_names = names\n return result", "def set_documents_names(cls, input_list_names: List[str]) -> None:\n cls.documents_names = input_list_names", "def SetFilters(self):\n \n try:\n \n for Element in self.OptionMenuList:\n \n Element.destroy()\n \n except:\n \n pass\n \n self.OptionMenuList = []\n self.Variables = []\n self.ImagingTypeList = []\n \n self.DefaultList = ['',\n 'All Types',\n 'All Wavelength',\n 'All Powers',\n 'All Gratings',\n 'All Objectifs',\n 'All Durations',\n 'All N. Acquisis.',\n 'All Sample IDs',\n 'All Samples',\n 'All Substrates',\n 'All Sam. Info',\n 'All Sub. Info']\n \n if not self.Condensensed == None:\n \n for i in range(1,len(self.Condensensed)):\n \n #create the variable for this drop down\n self.Variables.append(StringVar())\n \n #create the two lists\n self.ImagingTypeList.append([self.DefaultList[i]])\n \n for j in range(0, len(self.Condensensed[i])):\n \n self.ImagingTypeList[-1].append(self.Condensensed[i][j][0])\n \n #Create the two elements\n self.OptionMenuList.append(ttk.OptionMenu(self.FilterFrame,\n self.Variables[-1],\n self.ImagingTypeList[-1][0],\n *self.ImagingTypeList[-1],\n command = self.Filter))\n\n #set it\n self.OptionMenuList[-1].grid(column = (i-1)%6, row = (i-1)/6, sticky = 'ew')\n\n\n for i in range(6):\n\n self.FilterFrame.grid_columnconfigure(i, weight = 1)", "def names(filter=None):", "def configure_filters(app):\n\n for (name, filter) in _filters.iteritems():\n app.jinja_env.filters[name] = filter", "def get_interface_names(ip_to_interface_map, bind_ip_list):\n interface_name_list = set()\n\n for ip_address in bind_ip_list:\n interface_name_list.add(ip_to_interface_map[ip_address])\n\n return interface_name_list", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def resource_names(self, resource_names):\n\n self._resource_names = resource_names", "def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)", "def build_ip_filters(self):\n if is_list_empty(self.data['ipfilter']['myfilter']) is False:\n for item in self.data['ipfilter']['myfilter']:\n self.cidr_filter_list.append(item)\n else:\n logger.warning(\n \"my filter field is empty in the given input file , rules for the same will not be created in \"\n \"Nginx configuration\")", "def setName(self, *args):\n return _libsbml.ListOfMembers_setName(self, *args)", "def update_interfaces(self, interfaces):\n for i in interfaces:\n self.update_interface(i)", "def set_all_active(self):\n for name in self.get_names():\n self.set_active(name)", "def 
load_all_filters(self, interp=True, lamb=None):\n raise NotImplementedError", "def set_product_names(self, product_names):\n if not all(isinstance(product_name, str) for product_name in product_names):\n raise ApiError(\"One or more invalid product names\")\n self._update_criteria(\"product_name\", product_names)\n return self", "def set_product_names(self, product_names):\n if not all(isinstance(product_name, str) for product_name in product_names):\n raise ApiError(\"One or more invalid product names\")\n self._update_criteria(\"product_name\", product_names)\n return self", "def enableInputImages(self, **inputImages):\n self.logger.debug('Updating enabled input images types with %s', inputImages)\n self.inputImages.update(inputImages)\n self.logger.debug('Enabled input images types: %s', self.inputImages)", "def setNameservers(self, nameserver):\n # type: (tp.Any)->None\n\n self.validateOne('nameservers', self._valid['nameservers'], nameserver)\n self._ifAttributes['nameservers'] = nameserver", "def set_all_inactive(self):\n for name in self.get_names():\n self.set_inactive(name)", "def set_interface(interface, name=''):\n if not interface:\n raise ValueError('interface is empty')\n\n global interfaces\n logger.debug('connection_name: \"{}\" -> {}.{}'.format(\n name,\n interface.__module__,\n interface.__class__.__name__\n ))\n interfaces[name] = interface", "def setName(self, *args):\n return _libsbml.SBase_setName(self, *args)", "def setFilterTrackingCookies(self, filterTrackingCookies):\n if filterTrackingCookies == self.__filterTrackingCookies:\n return\n \n self.__filterTrackingCookies = filterTrackingCookies\n self.__saveTimer.changeOccurred()", "def setName(self, *args):\n return _libsbml.FluxBound_setName(self, *args)", "def webhook_headers(self, webhook_headers: \"Dict[str, List[str]]\"):\n self._attrs[\"webhookHeaders\"] = webhook_headers", "def webhook_headers(self, webhook_headers: \"Dict[str, List[str]]\"):\n self._attrs[\"webhookHeaders\"] = webhook_headers", "def name(self, operator: Enum, name: list | str):\n if isinstance(name, list) and operator not in self.list_types:\n raise RuntimeError(\n 'Operator must be CONTAINS, NOT_CONTAINS, IN'\n 'or NOT_IN when filtering on a list of values.'\n )\n\n self._tql.add_filter('name', operator, name, TqlType.STRING)", "def name(self, operator: Enum, name: list | str):\n if isinstance(name, list) and operator not in self.list_types:\n raise RuntimeError(\n 'Operator must be CONTAINS, NOT_CONTAINS, IN'\n 'or NOT_IN when filtering on a list of values.'\n )\n\n self._tql.add_filter('name', operator, name, TqlType.STRING)", "def enableAllInputImages(self):\n self.logger.debug('Enabling all input image types')\n for imageType in getInputImageTypes():\n self.inputImages[imageType] = {}\n self.logger.debug('Enabled input images types: %s', self.inputImages)", "def set_config(self, existing_l3_interfaces_facts):\n config = self._module.params.get(\"config\")\n want = []\n if config:\n for w in config:\n w.update({\"name\": normalize_interface(w[\"name\"])})\n want.append(remove_empties(w))\n have = deepcopy(existing_l3_interfaces_facts)\n self.init_check_existing(have)\n resp = self.set_state(want, have)\n return to_list(resp)", "def set_inputs(self, inputs):\n self.attributes[\"inputs\"] = inputs", "def category_names(self, category_names):\n\n self._category_names = category_names", "def _set_classifers(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = 
YANGDynClass(v,base=yc_classifers_openconfig_qos_interfaces__qos_interfaces_interface_input_classifers, is_container='container', yang_name=\"classifers\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"classifers must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_classifers_openconfig_qos_interfaces__qos_interfaces_interface_input_classifers, is_container='container', yang_name=\"classifers\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__classifers = t\n if hasattr(self, '_set'):\n self._set()", "def set_Names(self, value):\n super(GetTokenDetailsInputSet, self)._set_input('Names', value)", "async def _async_update_ipv6_filter_states(self, filter_states: FilterStatesList):\n if self.token is None:\n await self.async_initialize_token()\n\n val_enabled = '*'.join([str(fs.enabled) for fs in filter_states.entries])\n val_del = '*'.join(['0' for fs in filter_states.entries])\n val_idd = '*'.join([str(fs.idd) for fs in filter_states.entries])\n\n params = OrderedDict()\n params['act'] = 1\n params['dir'] = 0\n params['enabled'] = val_enabled\n params['allow_traffic'] = ''\n params['protocol'] = ''\n params['src_addr'] = ''\n params['src_prefix'] = ''\n params['dst_addr'] = ''\n params['dst_prefix'] = ''\n params['ssport'] = ''\n params['seport'] = ''\n params['dsport'] = ''\n params['deport'] = ''\n params['del'] = val_del\n params['idd'] = val_idd\n params['sIpRange'] = ''\n params['dsIpRange'] = ''\n params['PortRange'] = ''\n params['TMode'] = self._ipv6_filters_time.TMode\n if self._ipv6_filters_time.TMode == 1:\n params['TRule'] = self._ipv6_filters_time.XmlGeneralTime\n elif self._ipv6_filters_time.TMode == 2:\n params['TRule'] = self._ipv6_filters_time.XmlDailyTime\n else:\n params['TRule'] = 0\n\n await self._async_ws_set_function(CMD_SET_IPV6_FILTER_RULE, params)", "def setup_fq_checkboxes(self):\n checked_fqs = self.get_settings_value(\"checkedfqs\", [])\n if len(checked_fqs) > 0: # else there is not saved state... 
take gui defaults\n for checkbox in self.fq_checkboxes.keys():\n ls_type = self.fq_checkboxes[checkbox]\n checkbox.setChecked(ls_type.name in checked_fqs)", "def add_filter(self, name: str, value: any):\n self.filters[name] = value", "def setFilenames(self, filenames):\n\t\tself.filenames = filenames\n\t\tif len(filenames) == 0:\n\t\t\treturn\n\n\t\tif not self.dimensions:\n\t\t\tself.retrieveImageInfo(filenames[0])\n\n\t\tif not self.checkImageDimensions(filenames):\n\t\t\traise Logging.GUIError(\"Image dimensions do not match\", \\\n\t\t\t\t\t\t\t\t\t\"Some of the selected files have differing dimensions, \\\n\t\t\t\t\t\t\t\t\tand cannot be imported into the same dataset.\")\t\t \n\t\tself.getReadersFromFilenames()\n\t\tself.numberOfImages = len(filenames)\n\t\tif self.is3D:\n\t\t\tif self.readers:\n\t\t\t\tself.numberOfImages = 0\n\t\t\t\tfor rdr in self.readers:\n\t\t\t\t\tself.numberOfImages += rdr.GetNumberOfSubFiles()", "def set_inputs(self,inputs):\n raise NotImplementedError(\"Robot.set_inputs\")", "def filter_methods(self, filter_methods: ConfigNodePropertyArray):\n\n self._filter_methods = filter_methods", "def set_filter_address(self, addresses):\r\n if isinstance(addresses, basestring):\r\n addresses = [addresses]\r\n self.filter_src_addresses = addresses", "def _set_classifers(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_classifers_openconfig_qos_elements__qos_interfaces_interface_input_classifers, is_container='container', yang_name=\"classifers\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"classifers must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_classifers_openconfig_qos_elements__qos_interfaces_interface_input_classifers, is_container='container', yang_name=\"classifers\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__classifers = t\n if hasattr(self, '_set'):\n self._set()", "def _set_classifers(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_classifers_openconfig_qos__qos_interfaces_interface_input_classifers, is_container='container', yang_name=\"classifers\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"classifers must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_classifers_openconfig_qos__qos_interfaces_interface_input_classifers, is_container='container', yang_name=\"classifers\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__classifers = t\n if hasattr(self, '_set'):\n 
self._set()", "def __init__(self, name=None, os_type=None, ip_addr=None, iflist=None):\n self.name = name\n self.os_type = os_type\n self.ip_addr = ip_addr\n self.iflist = iflist", "def list_interfaces(self, instance_name):\n return ['A_VIF']", "def _config_interfaces(self):\n self.interfaces['loopback'] = \"127.0.0.1\"\n self.interfaces['internal'] = \"127.0.0.1\"\n self.interfaces['external'] = \"0.0.0.0\"\n self.interfaces[\"any\"] = \"0.0.0.0\"\n self.interfaces[\"localhost\"] = \"127.0.0.1\"", "def _interfacesToNames(interfaces):\n if interfaces is ALL_IMPLEMENTED:\n names = ALL_IMPLEMENTED_DB\n else:\n _checkConflictingNames(interfaces)\n names = u','.join(map(qual, interfaces))\n return names", "def _set_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interfaces_openconfig_qos_interfaces__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interfaces must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interfaces_openconfig_qos_interfaces__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def set_param_names(self, param_names, k_params=None):\n if k_params is not None:\n self.k_params = k_params\n else:\n k_params = self.k_params\n\n if k_params == len(param_names):\n self.data.xnames = param_names\n else:\n raise ValueError('param_names has the wrong length')", "def setup_images(self, images):\n if isinstance(images, str):\n images = [images]\n self._image_list.extend(images)", "def on_filter_instances(self):\n self._set_filter_value(\n 'filterInstances', self.filter_instances_btn.isChecked())", "def setName(self, *args):\n return _libsbml.InSpeciesTypeBond_setName(self, *args)", "def setName(self, *args):\n return _libsbml.Port_setName(self, *args)", "def set_headers(self, headers):\n\n if isinstance(headers, dict):\n headers = headers.items()\n\n # NOTE(kgriffs): We can't use dict.update because we have to\n # normalize the header names.\n _headers = self._headers\n for name, value in headers:\n _headers[name.lower()] = value" ]
[ "0.5829484", "0.5813386", "0.5766594", "0.5461771", "0.5420651", "0.53666186", "0.5335555", "0.5328612", "0.5256119", "0.5160971", "0.5098839", "0.50947213", "0.5075196", "0.5055912", "0.5040678", "0.4927203", "0.49013257", "0.48951134", "0.48855725", "0.48845008", "0.48448634", "0.48366588", "0.4816613", "0.47895256", "0.47626862", "0.47608158", "0.47430158", "0.474297", "0.47307447", "0.4720284", "0.47145936", "0.47005755", "0.46765488", "0.46715927", "0.4653011", "0.4636435", "0.46211722", "0.46136385", "0.46115148", "0.4603834", "0.4603834", "0.4603834", "0.45888752", "0.458216", "0.45526895", "0.4549324", "0.45347264", "0.45080665", "0.44994038", "0.44965968", "0.44929805", "0.44849342", "0.44552934", "0.44514441", "0.44455838", "0.44298443", "0.44287884", "0.4428769", "0.44106784", "0.43983433", "0.4381562", "0.43700334", "0.43700334", "0.43656805", "0.43547535", "0.43522602", "0.43450275", "0.43410534", "0.43408144", "0.43407354", "0.43402642", "0.43402642", "0.4337457", "0.4337457", "0.43365663", "0.43301228", "0.43180773", "0.4314473", "0.4309218", "0.42953813", "0.429496", "0.42937025", "0.42887992", "0.42781815", "0.42764187", "0.4274433", "0.42730406", "0.42687953", "0.42652488", "0.42644775", "0.42642128", "0.42633152", "0.42561498", "0.4254411", "0.42521363", "0.42512086", "0.42465836", "0.4238584", "0.42383975", "0.4230643" ]
0.8094472
0
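The two complete records above make the scoring layout explicit: document_score is the retrieval score of the positive document, negative_scores runs parallel to negatives, and document_rank counts how many negatives outscore the positive — here 0.8094472 beats every negative, giving rank 0, while the previous record's 0.72746265 is beaten only by the 0.7668855 negative, giving rank 1. A minimal sketch of recomputing that rank; the record dict and its field names mirror this dump, and everything else (the function name, strict-inequality tie handling) is an assumption:

    # Hedged sketch: recompute document_rank from the string-encoded scores.
    # Assumes a dict carrying the same field names as the rows in this dump.
    def recompute_rank(record):
        doc_score = float(record["document_score"])
        neg_scores = (float(s) for s in record["negative_scores"])
        # Rank = number of negatives scoring strictly higher than the positive;
        # 0 therefore means the positive document outscored every negative.
        return sum(score > doc_score for score in neg_scores)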
Sets the direction of this NetflowFilters.
def direction(self, direction):
    """Sets the direction of this NetflowFilters."""
    self._direction = direction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)", "def set_direction(self, new_dir):\n self.__direction = new_dir", "def setDirection(self,stepDir = 2):\n pass", "def setdirection(self, *args, **kwargs):\n return _coordsys.coordsys_setdirection(self, *args, **kwargs)", "def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]", "async def async_set_direction(self, direction: str) -> None:\n if direction == DIRECTION_FORWARD:\n self._device.fan_dir = SENSEME_DIRECTION_FORWARD\n else:\n self._device.fan_dir = SENSEME_DIRECTION_REVERSE", "def direction(self, direction):\n allowed_values = [\"supports\", \"does_not_support\"] # noqa: E501\n if direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `direction` ({0}), must be one of {1}\" # noqa: E501\n .format(direction, allowed_values)\n )\n\n self._direction = direction", "def set_direction(self, direction: str) -> None:\n if direction == \"forward\":\n self._bond.setDirection(self._deviceId, Directions.FORWARD)\n elif direction == \"reverse\":\n self._bond.setDirection(self._deviceId, Directions.REVERSE)\n self._attributes['current_direction'] = direction", "def set_port_direction(self, port, direction):\n\n if port == 1:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, direction)\n self.__port_b_direction = direction\n else:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, direction)\n self.__port_a_direction = direction\n return", "def set_direction(self, direction):\n\n def same_axis(direction1, direction2):\n y_axis = [Direction.Y_POSITIVE, Direction.Y_NEGATIVE]\n x_axis = [Direction.X_POSITIVE, Direction.X_NEGATIVE]\n return ((direction1 in x_axis and direction2 in x_axis)\n or (direction1 in y_axis and direction2 in y_axis))\n\n if direction is None:\n return\n elif not same_axis(self.direction, direction):\n self.direction = direction", "def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction", "def setDirection (self, ra, dec):\n self._response.setDirection(ra, dec)", "def setRobotDirection(self, direction):\n self.direction = direction", "def setRobotDirection(self, direction):\n self.direction = direction", "def SetLayoutDirection(*args, **kwargs):\n return _gdi_.DC_SetLayoutDirection(*args, **kwargs)", "def direction(self, direction):\n _api.check_in_list(['horizontal', 'vertical'], direction=direction)\n if hasattr(self, '_direction') and direction != self._direction:\n # remove previous artists\n self._selection_artist.remove()\n if self._interactive:\n self._edge_handles.remove()\n self._direction = direction\n self.new_axes(self.ax)\n if self._interactive:\n self._setup_edge_handles(self._handle_props)\n else:\n self._direction = direction", "def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError", "def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError", "def set_direction(self, right_or_left):\r\n if right_or_left == \"r\":\r\n self.__direction = self.__direction - 7\r\n elif right_or_left == \"l\":\r\n self.__direction = self.__direction + 7", "def Direction(self, direction):\r\n \r\n self.dock_direction = direction\r\n return self", "def 
set_direction(self, direction: int) -> None: \r\n self.direction = direction\r\n if (direction == Directions.turn_left or\r\n direction == Directions.turn_right):\r\n self.stop_timer = time.time() + self.driving_time_turning\r\n else:\r\n self.stop_timer = time.time() + self.driving_time", "def dock_direction_set(self, value):\r\n \r\n self._dock_direction = value", "def direction(self):\n return self._direction.copy()", "def steer(self, direction):\n\n if -1 <= direction <= 1:\n target_position = self.steering_limit * direction\n self.brick_pi.set_motor_position(\n self.motor_steer, -target_position)", "def shiftDir(self, direction, n):\n assert Direction.isDir(direction), \"incorrect type of arg direction: should be a Direction, is {}\".format(type(direction))\n assert isinstance(n, AxisDistance), 'incorrect type of arg n: should be type AxisDistance, is type {}'.format(type(n))\n direction = Direction(direction)\n self.x += direction.dx * n\n self.y += direction.dy * n\n return self", "def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def setBitDirection(self, bit_mask):\n DPxSetDinDataDir(bit_mask)", "def set_dir(self, dir, resistor=None):\n self.IN = mraa.DIR_IN\n self.OUT = mraa.DIR_OUT\n self.PULL_UP = mraa.DIR_OUT_HIGH\n self.PULL_DOWN = mraa.DIR_OUT_LOW\n if dir not in (mraa.DIR_OUT, mraa.DIR_IN):\n # incorrect arguments passed in\n raise Exception(\"Incorrect pin direction dir={}. Use 'gpio.IN' or 'gpio.OUT'\".format(dir))\n elif resistor not in (None, self.PULL_UP, self.PULL_DOWN):\n # incorrect arguments passed in\n raise Exception(\"Incorrect resistor={}. 
Use 'UP' or 'Down'\".format(resistor))\n elif dir is self.IN:\n self.dir = dir\n self.gpio_pin.dir(self.IN)\n if resistor is not None:\n raise Warning('default', 'Pin dir is {} but should be \\'None\\' when using resistor'.format(dir))\n elif resistor is not None:\n self.resistor = resistor\n self.dir = dir\n # default to only output\n if resistor is self.PULL_UP:\n self.gpio_pin.dir(mraa.DIR_OUT_HIGH)\n else:\n self.gpio_pin.dir(mraa.DIR_OUT_LOW)\n else:\n self.resistor = resistor\n self.dir = dir\n # default to only output\n self.gpio_pin.dir(mraa.DIR_OUT)", "def direction(self):\n return self.cfg.direction", "def direction(self) -> int:\n return self._direction", "def direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"direction\")", "def get_direction(self):\r\n return self.__direction", "def sendDirection(self,direction):\n self.broadcaster.sendDirection(direction)", "def getDirection(self, direction: str):\n return direction", "def move(self, direction):\r\n self.stored_direction = direction", "def set_study_direction(self, study_id: int, direction: study.StudyDirection) -> None:\n raise NotImplementedError", "def direction(self) -> Optional[str]:\n return self._direction", "def get_direction(self):\n return self.direction", "def sendDirection(self,direction):\n x,y = direction\n data = _RobotCommunicator.DIRECTION_HEADER + \\\n pack(_RobotCommunicator.DIRECTION_FORMAT,x,y)\n self.udpSock.sendto(data,self.addr)", "def _set_vrf_label_direction(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'out': {'value': 1}, u'in': {'value': 0}},), is_leaf=True, yang_name=\"vrf-label-direction\", rest_name=\"vrf-label-direction\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='direction', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"vrf_label_direction must be of a type compatible with direction\"\"\",\n 'defined-type': \"brocade-bgp-operational:direction\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'out': {'value': 1}, u'in': {'value': 0}},), is_leaf=True, yang_name=\"vrf-label-direction\", rest_name=\"vrf-label-direction\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='direction', is_config=False)\"\"\",\n })\n\n self.__vrf_label_direction = t\n if hasattr(self, '_set'):\n self._set()", "def optimization_force_direction(self, optimization_force_direction):\n\n self._optimization_force_direction = optimization_force_direction", "def direction(self) -> str:\n return pulumi.get(self, \"direction\")", "def test_direction(self):\n\n # Default initialized direction is forward.\n self.assertEqual(self.group_tr.getDirection(),\n OCIO.TRANSFORM_DIR_FORWARD)\n\n for direction in OCIO.TransformDirection.__members__.values():\n self.group_tr.setDirection(direction)\n self.assertEqual(self.group_tr.getDirection(), direction)\n\n # Wrong type tests.\n for invalid in (None, 1, 'test'):\n with self.assertRaises(TypeError):\n self.group_tr.setDirection(invalid)", "def direction(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"direction\")", "def set_direct(self, direct):\n self._direct = direct", "def update_player_direction(self,direction):\n pass", "def direction(self):\n return(copysign(1, self.volume))", "def direction(self):\n return self._dir", "def _set_integration_direction(self, T0, Tend):\n if Tend is None:\n # Use the default which is increasing from 0K\n return\n if T0 > Tend:\n self._integration_direction = \"decreasing\"\n else:\n self._integration_direction = \"increasing\"", "def setDirect(self, direct):\n self._direct = direct", "def direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"direction\")", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def _change_fleet_direction(self):\n for alien in self.aliens.sprites():\n alien.rect.x -= self.settings.fleet_approach_speed\n self.settings.fleet_direction *= -1", "def reset_movement(self):\n self.direction = [0, 0]", "def turn(self, dir):\n if dir.upper() == 'R':\n if self.direction == 3:\n self.direction = 0\n else:\n self.direction += 1\n if dir.upper() == 'L':\n if self.direction == 0:\n self.direction = 3\n else:\n self.direction -= 1", "def direction(self):\n if self._is_hit:\n return Direction.NOT_MOVING\n return self._dir", "def _change_fleet_direction(self):\n for alien in self.aliens.sprites():\n alien.rect.y += self.settings.fleet_drop_speed\n self.settings.fleet_direction *= -1", "def _change_fleet_direction(self):\n for alien in self.aliens.sprites():\n alien.rect.y += self.settings.fleet_drop_speed\n self.settings.fleet_direction *= -1", "def setup_direction(args, dir_file, net):\n print('-------------------------------------------------------------------')\n print('setup_direction')\n print('-------------------------------------------------------------------')\n \n # Setup env for preventing lock on h5py file for newer h5py versions\n os.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n \n # Skip if the direction file already exists\n if exists(dir_file):\n if args.no_resume:\n os.remove(dir_file)\n else: \n f = h5py.File(dir_file, 'r')\n if (args.y and 'ydirection' in f.keys()) or 'xdirection' in f.keys():\n f.close()\n print (\"%s is already setted up\" % dir_file)\n return\n f.close()\n\n # Create the plotting directions\n f = h5py.File(dir_file,'w') # create file, fail if exists\n if not args.dir_file:\n print(\"Setting up the plotting directions...\")\n if args.model_file2:\n net2 = model_loader.load(args.dataset, args.model, args.model_file2)\n xdirection = create_target_direction(net, net2, args.dir_type)\n else:\n xdirection = create_random_direction(net, args.dir_type, args.xignore, args.xnorm)\n h5_util.write_list(f, 'xdirection', xdirection)\n\n if args.y:\n if args.same_dir:\n ydirection = xdirection\n elif args.model_file3:\n net3 = model_loader.load(args.dataset, args.model, args.model_file3)\n ydirection = create_target_direction(net, net3, args.dir_type)\n else:\n ydirection = create_random_direction(net, args.dir_type, args.yignore, args.ynorm)\n h5_util.write_list(f, 'ydirection', ydirection)\n\n f.close()\n print (\"direction file created: %s\" % dir_file)", "def getDirection(self):\n return self.ray.direction", "def move(self, direction):\n pass", "def change_direction(self, direction):\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.key == pygame.K_UP:\r\n if self.direction == [0, 1]:\r\n self.direction == [0, 
1]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [0, -1]\r\n return self.direction\r\n elif event.key == pygame.K_DOWN:\r\n if self.direction == [0, -1]:\r\n self.direction == [0, -1]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [0, 1]\r\n return self.direction\r\n elif event.key == pygame.K_LEFT:\r\n if self.direction == [1, 0]:\r\n self.direction == [1, 0]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [-1, 0]\r\n return self.direction\r\n elif event.key == pygame.K_RIGHT:\r\n if self.direction == [-1, 0]:\r\n self.direction == [-1, 0]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [1, 0]\r\n return self.direction", "def _change_fleet_direction(self):\n\t\tfor alien in self.aliens.sprites():\n\t\t\talien.rect.y += self.settings.fleet_drop_speed\n\t\tself.settings.fleet_direction*=-1", "def current_direction(self) -> str:\n if self._device.fan_dir == SENSEME_DIRECTION_FORWARD:\n return DIRECTION_FORWARD\n return DIRECTION_REVERSE", "def _change_fleet_direction(self):\n\t\tfor auto in self.autos.sprites():\n\t\t\tauto.rect.y -= self.settings.fleet_rise_speed\n\t\tself.settings.fleet_direction *= -1", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def direction(self):\n norm=math.sqrt(self.x**2 + self.y**2 + self.z**2)\n return Vector3(self.x/norm, self.y/norm, self.z/norm)", "def direction(self) -> np.ndarray:\n return self._direction", "def turn_direction(self, turn_direction):\n allowed_values = [\"UNKNOWN\", \"STRAIGHT\", \"RIGHT\", \"LEFT\", \"SLIGHT_RIGHT\", \"SLIGHT_LEFT\", \"SHARP_LEFT\", \"SHARP_RIGHT\"] # noqa: E501\n if turn_direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `turn_direction` ({0}), must be one of {1}\" # noqa: E501\n .format(turn_direction, allowed_values)\n )\n\n self._turn_direction = turn_direction", "def turn(self, turnDir):\n if turnDir == 0: # left\n if self.dir == \"N\":\n self.dir = \"W\"\n elif self.dir == \"W\":\n self.dir = \"S\"\n elif self.dir == \"S\":\n self.dir = \"E\"\n elif self.dir == \"E\":\n self.dir = \"N\"\n else:\n raise ValueError(\"invalid dir %s\" % self.dir)\n elif turnDir == 1: # right\n if self.dir == \"N\":\n self.dir = \"E\"\n elif self.dir == \"E\":\n self.dir = \"S\"\n elif self.dir == \"S\":\n self.dir = \"W\"\n elif self.dir == \"W\":\n self.dir = \"N\"\n else:\n raise ValueError(\"invalid dir %s\" % self.dir)\n else:\n raise ValueError(\"invalid turnDir %d\" % turnDir)", "def _directionUpdated(self, *args, **kwargs):\n # Invert direction to manipulate the 'source' pointing to\n # the center of the viewport\n x, y, z = - self._light.direction\n\n # Horizontal plane is plane xz\n azimuth = int(round(numpy.degrees(numpy.arctan2(x, z))))\n altitude = int(round(numpy.degrees(numpy.pi/2. 
- numpy.arccos(y))))\n\n if azimuth != self.getAzimuthAngle():\n self.setAzimuthAngle(azimuth)\n\n if altitude != self.getAltitudeAngle():\n self.setAltitudeAngle(altitude)", "def set_flip(self, flipconv):\n if flipconv is None:\n flipconv = 'astro' # default\n if flipconv == 'astro': self._flip = -1\n elif flipconv == 'geo': self._flip = 1\n else: raise ValueError(\"flipconv must be 'astro', 'geo' or None for default.\")", "def _change_fleet_direction(self): \n for alien in self.aliens.sprites():\n alien.rect.y += self.settings.fleet_drop_speed\n self.settings.fleet_direction *= -1", "def filter_direction(frame, direction):\n return frame[frame['direction'] == direction].copy()", "def traffic_direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"traffic_direction\")", "def getDirection(self):\n return self.listener.direction", "def change_direction(self, event):\n if event.keysym in self.mapping:\n self.vector = self.mapping[event.keysym]", "def directionRight(self):\n return self.__directionRight", "def reset_directions(directions, dir='from'):\n if dir == 'from':\n # reverse the direction, so it plots \"to\"\n print(\"reversing direction\")\n directions = (directions + 90) * -1\n elif dir == 'to':\n # don't reverse, so it plots \"to\"\n directions = (directions - 90) * -1\n else:\n raise ValueError('dir has to be either \"from\" or \"to\"')\n return directions", "def wind_direction(self):\n return self.flow_field.wind_direction", "def update_direction(self, update_data: dict):\n if self.on_update_direction:\n self.on_update_direction(self, update_data)", "def setOrientation(self, direction=None, up=None):\n if direction is None: # Use current direction\n direction = self.direction\n else:\n assert len(direction) == 3\n direction = numpy.array(direction, copy=True, dtype=numpy.float32)\n direction /= numpy.linalg.norm(direction)\n\n if up is None: # Use current up\n up = self.up\n else:\n assert len(up) == 3\n up = numpy.array(up, copy=True, dtype=numpy.float32)\n\n # Update side and up to make sure they are perpendicular and normalized\n side = numpy.cross(direction, up)\n sidenormal = numpy.linalg.norm(side)\n if sidenormal == 0.:\n raise RuntimeError('direction and up vectors are parallel.')\n # Alternative: when one of the input parameter is None, it is\n # possible to guess correct vectors using previous direction and up\n side /= sidenormal\n up = numpy.cross(side, direction)\n up /= numpy.linalg.norm(up)\n\n self._side = side\n self._up = up\n self._direction = direction\n self.notify()", "def directionLeft(self):\n return self.__directionLeft", "def turn_right(self):\n temp = self.direction[0]\n self.direction[0] = -self.direction[1]\n self.direction[1] = temp", "def initialize_direction(self):\n\n self.mu = 2. 
* np.random.rand(1)[0] - 1.", "def connection_port_direction(self, connection_port_direction: str):\n allowed_values = [\"BIDIRECTIONAL\", \"INPUT\", \"OUTPUT\", \"UNIDENTIFIED_OR_UNKNOWN\"] # noqa: E501\n if connection_port_direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_port_direction` ({0}), must be one of {1}\"\n .format(connection_port_direction, allowed_values)\n )\n\n self._connection_port_direction = connection_port_direction", "def set_flip(self, val):\n self.flip = val", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def direction(self):\n return None if not bool(self.relation) else (self.s_end <= self.o_start)", "def move(self):\n if (self._dir is Direction.UP) and (self._y_pos is 0):\n self._dir = Direction.DOWN\n elif (self._dir is Direction.DOWN) and (self._y_pos+self._len is self._bs):\n self._dir = Direction.UP\n elif (self._dir is Direction.LEFT) and (self._x_pos is 0):\n self._dir = Direction.RIGHT\n elif (self._dir is Direction.RIGHT) and (self._x_pos+self._len is self._bs):\n self._dir = Direction.LEFT\n self.change_pos(self._dir)\n return self._dir", "def generate_direction(self):\n random_enum = random.randint(1, 4)\n random_direction = flow_processing_input.Direction(random_enum)\n assert isinstance(random_direction, flow_processing_input.Direction)\n return random_direction", "def generate_direction(self):\n random_enum = random.randint(1, 4)\n random_direction = flow_processing_input.Direction(random_enum)\n assert isinstance(random_direction, flow_processing_input.Direction)\n return random_direction", "def setFlow(self, edge, value):\r\n self.flow[edge] = value\r\n self.flow[edge[::-1]] = - value", "def setScrollDirection(self,loc=None):\n self.target_location = loc\n self.cardinal_direction = getCardinalDirection((self.cx,self.cy), self.target_location)\n self.distance_to_target = straightDistance((self.cx,self.cy),self.target_location)\n\n print(self.target_location)\n print(self.cardinal_direction)\n print(self.distance_to_target)", "def rotate(self, direction):\n \n # If the direction is actually different, then rotate the polygons\n if direction != self.direction:\n self.fdirection = direction\n\n # Set bounding box to change\n self.bounds_changed = True\n\n # Set marker to move\n self.moved = True", "def direction(self):\n return atan2d(self.y, self.x)", "def flip_direction(direction):\n if direction==\"NORTH\": return \"SOUTH\"\n if direction==\"SOUTH\": return \"NORTH\"\n if direction==\"WEST\": return \"EAST\"\n if direction==\"EAST\": return \"WEST\"\n elif isinstance(direction, float):\n return (direction + np.pi)%(2*np.pi)", "def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction", "def set_wheel(self, wheel):\n self.wheel_turn = clamp(wheel, -1, 1)", "def set_velocity(self):\r\n if self.direction == 'left':\r\n self.x_vel = -2\r\n else:\r\n self.x_vel = 2\r\n\r\n self.y_vel = 0" ]
[ "0.7139598", "0.7040138", "0.7006834", "0.69987583", "0.6970682", "0.68825686", "0.6802282", "0.6730239", "0.63998103", "0.6399511", "0.6314273", "0.6307516", "0.63062197", "0.63062197", "0.62925655", "0.62565696", "0.6163797", "0.6163797", "0.60774386", "0.60496444", "0.60301095", "0.6010736", "0.58777267", "0.58369535", "0.58177066", "0.5744206", "0.5712969", "0.57048905", "0.56974995", "0.569339", "0.56756824", "0.55516684", "0.5548559", "0.5531756", "0.5524502", "0.5523332", "0.5478372", "0.54772294", "0.5473961", "0.5464224", "0.54449505", "0.5411518", "0.53751576", "0.5364947", "0.5320105", "0.53164256", "0.5311447", "0.52833796", "0.52137816", "0.5190701", "0.51799893", "0.51502645", "0.5118015", "0.51053506", "0.50988775", "0.50943595", "0.5079006", "0.5079006", "0.50743353", "0.5060379", "0.5054466", "0.5041406", "0.5037527", "0.50282156", "0.50253236", "0.5025275", "0.50234663", "0.50015944", "0.498578", "0.49789345", "0.4975466", "0.49619", "0.49551105", "0.49423465", "0.49351662", "0.49291435", "0.49269098", "0.49261913", "0.4919293", "0.48848897", "0.48571607", "0.4826444", "0.48227593", "0.48227575", "0.48063415", "0.4798793", "0.4794354", "0.47839078", "0.47740433", "0.47473195", "0.47456637", "0.47456637", "0.4744422", "0.47443956", "0.47322384", "0.47145346", "0.47122648", "0.47092816", "0.4707786", "0.47063607" ]
0.7080141
1
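The key-handling snippets in the negatives above all circle one idiom: reject a turn that would reverse the current heading, otherwise update the direction vector. Note that the repeated `self.direction == [0, -1]`-style lines in the snake snippet are no-op comparisons; the code only works because the early `return` keeps the old value. A minimal corrected sketch of the same guard follows, with hypothetical names (`Snake`, `turn`) that are not taken from any snippet above:

class Snake:
    """Direction guard: ignore turns that would reverse the current heading."""

    def __init__(self):
        self.direction = [1, 0]  # start moving right

    def turn(self, new_direction):
        dx, dy = new_direction
        # A 180-degree reversal is exactly the negated current vector.
        if [dx, dy] == [-self.direction[0], -self.direction[1]]:
            return self.direction  # keep the old heading
        self.direction = [dx, dy]
        return self.direction

snake = Snake()
assert snake.turn([-1, 0]) == [1, 0]  # reversal ignored
assert snake.turn([0, 1]) == [0, 1]   # normal turn applied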
Returns the model properties as a dict
def to_dict(self):
    result = {}

    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    if issubclass(NetflowFilters, dict):
        for key, value in self.items():
            result[key] = value

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
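The `to_dict` row above is the stock swagger-codegen serializer: it walks the class's `swagger_types` map and recurses into nested models and containers via their own `to_dict`. A self-contained sketch of that recursion, where the `Tag`/`Pet` classes are hypothetical stand-ins and plain `.items()` replaces the original's `six.iteritems`:

class Tag:
    swagger_types = {"name": "str"}

    def __init__(self, name):
        self.name = name

    def to_dict(self):
        return {attr: getattr(self, attr) for attr in self.swagger_types}


class Pet:
    swagger_types = {"id": "int", "tags": "list[Tag]"}

    def __init__(self, id, tags):
        self.id = id
        self.tags = tags

    def to_dict(self):
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into model elements, pass primitives through.
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result


print(Pet(1, [Tag("indoor")]).to_dict())
# -> {'id': 1, 'tags': [{'name': 'indoor'}]}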
Returns the string representation of the model
def to_str(self):
    return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n\n return self.raw_field", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
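The `to_str` row above delegates formatting to `pprint.pformat` over the `to_dict` output, so nested models print as plain dicts; the next row's `__repr__` then reuses it. A tiny sketch with a hypothetical `Filters` class:

import pprint


class Filters:
    def to_dict(self):
        return {"direction": "ingress", "ports": [80, 443]}

    def to_str(self):
        # pformat gives a stable, readable rendering of the dict form.
        return pprint.pformat(self.to_dict())


print(Filters().to_str())
# -> {'direction': 'ingress', 'ports': [80, 443]}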
For `print` and `pprint`
def __repr__(self):
    return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n pass", "def _print_custom(self):\n pass", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def print(self):\n # Your implementation here", "def p(value):\n pp.pprint(value)", "def static_print(*args, __p=print, **kwargs):\n __p(*args, **kwargs)", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def pprint(self):\n print(self.pprint_str())", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def print_(self, s: str) -> None:", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def test_print(chikin):\n chikin.print()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def out(*args):\r\n print(*args)", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def DumpPprint(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import pprint\n \n text = pprint.pformat(data)\n \n return text", "def repl_print_statements():\n pass", "def test_03_pass_print(self):\n print('Hello World!')", "def p(self):\n self.printstdout = True", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def print(self):\r\n self.print_avec_separateur()", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def print(self):\n print(self.pretty_str())", "def 
test_print4(self):\n writer = StringIO()\n collatz_print(writer, 1, 1, 1)\n self.assertEqual(writer.getvalue(), \"1 1 1\\n\")", "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def use_pypprint_for_implicit_print(self) -> None:\n if self.implicit_print is not None:\n self.implicit_print.func.id = \"pypprint\" # type: ignore\n # Make sure we import it later\n self.undefined.add(\"pypprint\")", "def test_print(self):\n writer = StringIO()\n collatz_print(writer, 1, 10, 20)\n self.assertEqual(writer.getvalue(), \"1 10 20\\n\")", "def pprint(self):\n return pformat(repr(self))", "def printer(message):\n if VERBOSITY:\n pprint(message)", "def rec_print(p):\n if len(p) == 0:\n return\n t = p.pop(0)\n print t\n rec_print(p)", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def printc(*a, **kw):\n print(*a, **kw)", "def pr(x):\n Card.print_pretty_cards(x)", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def safe_print(*objs, errors=\"replace\"):\n\tprint(*(to_stdout(str(o), errors) for o in objs))", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def _print(self, *args):\n return _ida_hexrays.qstring_printer_t__print(self, *args)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n 
return s\n\n print pprintStr(self)", "def hook_print():\n sys.stdout = PrintHook()", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def print_list(self):\r\n pass", "def debugprint(obj, depth=-1, print_type=False,\r\n file=None, ids='CHAR', stop_on_name=False):\r\n if file == 'str':\r\n _file = StringIO()\r\n elif file is None:\r\n _file = sys.stdout\r\n else:\r\n _file = file\r\n done = dict()\r\n results_to_print = []\r\n order = []\r\n if isinstance(obj, gof.Variable):\r\n results_to_print.append(obj)\r\n elif isinstance(obj, gof.Apply):\r\n results_to_print.extend(obj.outputs)\r\n elif isinstance(obj, Function):\r\n results_to_print.extend(obj.maker.fgraph.outputs)\r\n order = obj.maker.fgraph.toposort()\r\n elif isinstance(obj, (list, tuple)):\r\n results_to_print.extend(obj)\r\n elif isinstance(obj, gof.FunctionGraph):\r\n results_to_print.extend(obj.outputs)\r\n order = obj.toposort()\r\n elif isinstance(obj, (int, long, float, numpy.ndarray)):\r\n print obj\r\n else:\r\n raise TypeError(\"debugprint cannot print an object of this type\", obj)\r\n for r in results_to_print:\r\n debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,\r\n file=_file, order=order, ids=ids,\r\n stop_on_name=stop_on_name)\r\n if file is _file:\r\n return file\r\n elif file == 'str':\r\n return _file.getvalue()\r\n else:\r\n _file.flush()", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def test_print1(self):\n writer = StringIO()\n collatz_print(writer, 100, 200, 125)\n self.assertEqual(writer.getvalue(), \"100 200 125\\n\")", "def printOutput(self):\n pass", "def _print(self, *args):\n return _ida_hexrays.cnumber_t__print(self, *args)", "def setPrint():\n (e,d,sr,sw) = codecs.lookup('utf-8')\n unicode_to_utf8 = sw(sys.stdout)\n sys.stdout = unicode_to_utf8", "def pr(string, verbose):\n if(verbose):\n print(string)", "def print(*args, sep=\" \"):\n pass", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def print(self):\n\n print(self)", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def test_print2(self):\n writer = StringIO()\n collatz_print(writer, 201, 210, 89)\n self.assertEqual(writer.getvalue(), \"201 210 89\\n\")", "def print_pointers(self):\n\n ### FILL IN ###", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Producer:')\n print(pre+' produces:', self._produces)\n print(pre+' consumes:', self._consumes)\n print(pre+' transfer:', self._transfer)\n print(pre+' capacity:', self._capacity)", "def _print(cls, quad):\n\t\tprint(\"\\nLIGHT OUTPUT:\\n<<<<{}>>>>\".format(ast.literal_eval(str(cls.get_address_value(quad.result)))))\n\t\tprint(\"END\")\n\n\t\tvar = cls.get_address_value(quad.result)\n\t\tif isinstance(var, collections.Iterable):\n\t\t\tprint(\"DEEP COPY\")\n\t\t\tcls.print_queue.enqueue(copy.deepcopy(var))\n\t\telse:\n\t\t\tcls.print_queue.enqueue(var)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def PrettyPrint(self):\r\n 
print(self.data)\r\n return", "def print(self):\n self.print_avec_separateur(\" \")", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n param_names = [p for p in params.keys() if p is not \"cost\"]\n param_names.sort()\n\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, name in enumerate(param_names):\n value = params[name]\n if isinstance(value, float):\n this_repr = '%s=%s' % (name, str(value))\n else:\n this_repr = '%s=%s' % (name, printer(value))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n # options = np.get_printoptions()\n # np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def init_printing(pretty_print=True, order=None, use_unicode=None):\n if pretty_print:\n stringify_func = lambda arg: pretty(arg, order=order, use_unicode=use_unicode)\n else:\n stringify_func = sstrrepr\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n def result_display(self, arg):\n \"\"\"IPython's pretty-printer display hook.\n\n This function was adapted from:\n\n ipython/IPython/hooks.py:155\n\n \"\"\"\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)\n\n ip.set_hook('result_display', result_display)\n return\n except ImportError:\n pass\n\n import __builtin__, sys\n\n def displayhook(arg):\n \"\"\"Python's pretty-printer display hook.\n\n This function was adapted from:\n\n http://www.python.org/dev/peps/pep-0217/\n\n \"\"\"\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg\n\n sys.displayhook = displayhook", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _print(self, *args):\n return _ida_hexrays.cinsn_t__print(self, *args)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def sequential_print_statements():\n pass", "def print_post():\n print('| | |'),", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, 
unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def debugprint(r, prefix='', depth=-1, done=None, print_type=False,\r\n file=sys.stdout, print_destroy_map=False,\r\n print_view_map=False, order=None, ids='CHAR',\r\n stop_on_name=False, prefix_child=None):\r\n if depth == 0:\r\n return\r\n\r\n if order is None:\r\n order = []\r\n\r\n if done is None:\r\n done = dict()\r\n\r\n if print_type:\r\n type_str = ' <%s>' % r.type\r\n else:\r\n type_str = ''\r\n\r\n if prefix_child is None:\r\n prefix_child = prefix\r\n\r\n def get_id_str(obj):\r\n if obj in done:\r\n id_str = done[obj]\r\n elif ids == \"id\":\r\n id_str = \"[@%s]\" % str(id(r))\r\n elif ids == \"int\":\r\n id_str = \"[@%s]\" % str(len(done))\r\n elif ids == \"CHAR\":\r\n id_str = \"[@%s]\" % char_from_number(len(done))\r\n elif ids == \"\":\r\n id_str = \"\"\r\n done[obj] = id_str\r\n return id_str\r\n\r\n if hasattr(r.owner, 'op'):\r\n # this variable is the output of computation,\r\n # so just print out the apply\r\n a = r.owner\r\n\r\n r_name = getattr(r, 'name', '')\r\n # normally if the name isn't set, it'll be None, so\r\n # r_name is None here\r\n if r_name is None:\r\n r_name = ''\r\n\r\n if print_destroy_map:\r\n destroy_map_str = str(getattr(r.owner.op, 'destroy_map', ''))\r\n else:\r\n destroy_map_str = ''\r\n\r\n if print_view_map:\r\n view_map_str = str(getattr(r.owner.op, 'view_map', ''))\r\n else:\r\n view_map_str = ''\r\n if destroy_map_str and destroy_map_str != '{}':\r\n destroy_map_str = 'd=' + destroy_map_str\r\n if view_map_str and view_map_str != '{}':\r\n view_map_str = 'v=' + view_map_str\r\n\r\n o = ''\r\n if order:\r\n o = 
str(order.index(r.owner))\r\n already_printed = a in done # get_id_str put it in the dict\r\n id_str = get_id_str(a)\r\n\r\n if len(a.outputs) == 1:\r\n print >> file, '%s%s %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n id_str,\r\n type_str, r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n else:\r\n print >> file, '%s%s.%i %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n a.outputs.index(r),\r\n id_str, type_str,\r\n r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n if not already_printed:\r\n if (not stop_on_name or\r\n not (hasattr(r, 'name') and r.name is not None)):\r\n new_prefix = prefix_child + ' |'\r\n new_prefix_child = prefix_child + ' |'\r\n for idx, i in enumerate(a.inputs):\r\n if idx == len(a.inputs) - 1:\r\n new_prefix_child = prefix_child + ' '\r\n\r\n debugprint(i, new_prefix, depth=depth - 1, done=done,\r\n print_type=print_type, file=file, order=order,\r\n ids=ids, stop_on_name=stop_on_name,\r\n prefix_child=new_prefix_child)\r\n else:\r\n #this is an input variable\r\n id_str = get_id_str(r)\r\n print >> file, '%s%s %s%s' % (prefix, r, id_str, type_str)\r\n\r\n return file", "def bpprint(self, out=None):\n if out is None:\n out = sys.stdout\n print(self.bpformat(), file=out)", "def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n options = numpy.get_printoptions()\n numpy.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if isinstance(v, float):\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if this_line_length + len(this_repr) >= 75 or '\\n' in this_repr:\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n numpy.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)" ]
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", "0.6561717", "0.65549695", "0.6494838", "0.6473391", "0.64491546", "0.6411177", "0.6340302", "0.6339321", "0.6335031", "0.6332035", "0.6315847", "0.631272", "0.6297732", "0.62969106", "0.6283717", "0.6279154", "0.6271603", "0.62673396", "0.6265511", "0.62629336", "0.6258366", "0.6258278", "0.62501305", "0.6248315", "0.62459755", "0.6244254", "0.6242083", "0.62393075", "0.62156516", "0.6208198", "0.62068796", "0.62062824", "0.62062824", "0.6194123", "0.6189738", "0.6183852", "0.6183035", "0.61697906", "0.61614454", "0.6160741", "0.61544997", "0.61528033", "0.6150831", "0.6147288", "0.61380607", "0.613793", "0.61300766", "0.61278135", "0.6125416", "0.6114217", "0.61126333", "0.6100682", "0.60998785", "0.6096818", "0.6081694", "0.6076982", "0.6072701", "0.6060028", "0.60581726", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6043662", "0.6037599", "0.60336643", "0.6030174", "0.60290223", "0.60242903", "0.6016989", "0.6004274", "0.60005474", "0.60005474", "0.60003483", "0.599558", "0.59923434", "0.5979316", "0.59777945" ]
0.0
-1
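The docstring "For `print` and `pprint`" in the row above is literal: `print` falls back to `__repr__` when no `__str__` is defined, and `pprint` renders objects it cannot format as containers via `repr`, so wiring `__repr__` to `to_str` covers both. A demonstration with a hypothetical `Model` class:

import pprint


class Model:
    def to_dict(self):
        return {"enabled": True}

    def to_str(self):
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()


m = Model()
print(m)          # str() falls back to __repr__: {'enabled': True}
pprint.pprint(m)  # pprint also renders unknown objects via repr()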
Returns true if both objects are equal
def __eq__(self, other):
    if not isinstance(other, NetflowFilters):
        return False

    return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068274", "0.7796298", "0.7794721", "0.7784825", "0.77790844", "0.7769397", "0.77534705", "0.7746211", "0.7741107", "0.77282816", "0.7725766", "0.7719537", "0.770273", "0.7685999", "0.7677552", "0.76739407", "0.7664857", "0.76557016", "0.7655046", "0.76282835", "0.7625795", "0.76242626", "0.76237214", "0.76237214", "0.76237214", "0.7617347", "0.7600536", "0.7599156", "0.7595863", "0.75945824", "0.7594092", "0.75899327" ]
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other):
    return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Get a list of all Accounts authorized for the provided token. Get a list of Transactions pages that satisfy a time-based Transaction query.
def get_transactions(self, account_id, from_date=None, to_date=None,
                     page_size=None, type_list=None):
    endpoint = 'accounts/{0}/transactions'.format(account_id)

    params = {}

    if from_date:
        params["from"] = from_date

    if to_date:
        params["to"] = to_date

    if page_size:
        params["pageSize"] = page_size

    if type_list:
        type_list = "%2C".join(type_list)
        params["type"] = type_list

    return self._api.request(endpoint, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def list_accounts(self):\n pass", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def list_account_transactions(self,\r\n year,\r\n month=None,\r\n get_as_csv=None):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(year=year)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/admin/invoice'\r\n _query_parameters = {\r\n 'year': year,\r\n 'month': month,\r\n 'getAsCsv': get_as_csv\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 400:\r\n raise APIException('Bad request', _context)\r\n elif _context.response.status_code == 403:\r\n raise APIException('Forbidden (Access denied)', _context)\r\n elif _context.response.status_code == 500:\r\n raise APIException('Internal server error', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Transaction.from_dictionary)", "def listtransactions(self, account=None, count=10, from_=0, address=None):\n accounts = [account] if account is not None else list(self.listaccounts(as_dict=True).keys())\n return [TransactionInfo(**tx) for acc in accounts for\n tx in self.proxy.listtransactions(acc, count, from_) if\n address is None or tx[\"address\"] == address]", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def get_accounts():\n graph = facebook.GraphAPI(mytoken)\n pages = graph.get_object('me/accounts')\n pages_info=[]\n for page in pages['data']:\n pages_info.append( ( page['name'], page['access_token'] ) )\n return pages_info", "def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts", "def get_accounts(self):\n return self.accounts.all()", "def 
get_account_transactions(self, min_row=0, max_row=100):\n data = {\n 'min_row': min_row,\n 'max_row': max_row\n }\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'transactions', query_string),\n auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def transactions(self) -> List[Transaction]:\n return self.session.get_transactions(self.account_id)", "def get_account_transactions(self, account_number):\n\n logger.debug('Fetching account transactions for account %s',\n account_number)\n\n # Get javax.faces.ViewState from the last request\n last_req_hidden_inputs = self._hidden_inputs_as_dict(\n BeautifulSoup(self.last_req_body, 'html.parser'))\n\n data = {\n 'dialog-overview_showAccount': 'Submit',\n 'menuLinks_SUBMIT': 1,\n 'menuLinks:_idcl': '',\n 'menuLinks:_link_hidden_': '',\n 'javax.faces.ViewState': last_req_hidden_inputs.get(\n 'javax.faces.ViewState'),\n '_token': self.token,\n 'productId': account_number\n }\n\n path = '/im/im/csw.jsf'\n req = self.session.post(self.BASE_URL + path, data=data)\n self.last_req_body = req.content\n\n logger.debug('Transaction request response code %s', req.status_code)\n\n self._parse_tokens(req.text)\n\n # Parse transactions\n transactions = self._parse_account_transactions(req.text)\n\n # Request was ok but but no transactions were found. Try to refetch.\n # Requests seems to loose the connections sometimes with the message\n # \"Resetting dropped connection\". This should work around that\n # problem.\n if req.status_code == requests.codes.ok and not transactions:\n transactions = self.get_account_transactions(account_number)\n\n return transactions", "def get_transaction_list(self, account_id, from_date, to_date,\n type_list=None):\n endpoint = 'accounts/{0}/transactions/idrange'.format(account_id)\n\n params = {}\n\n params[\"from\"] = from_date\n params[\"to\"] = to_date\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def get_acc_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account 
Name\", \"Account Number\"])\n )", "def list_accounts(self):\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information", "def transaction_list(request, model_class=Transaction, template_name='budget/transactions/list.html'):\n transaction_list = model_class.active.order_by('-date', '-created')\n try:\n paginator = Paginator(transaction_list, getattr(settings, 'BUDGET_LIST_PER_PAGE', 50))\n page = paginator.page(request.GET.get('page', 1))\n transactions = page.object_list\n except InvalidPage:\n raise Http404('Invalid page requested.')\n return render_to_response(template_name, {\n 'transactions': transactions,\n 'paginator': paginator,\n 'page': page,\n }, context_instance=RequestContext(request))", "def fetchAllAccounts(config):\n allAccounts = []\n currentStart = 1\n currentLimit = 99\n while currentLimit > 98 :\n currentPull = fetchBatchAccounts(accountsConfig, currentStart, currentLimit)['data']\n allAccounts = allAccounts + currentPull\n currentLimit = int(len(currentPull))\n currentStart = int(currentStart) + int(currentLimit)\n return allAccounts", "def get_transactions_trc20():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions/trc20\".format(wallet) # noqa: E501\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def get_accounts(self):\n return self.accounts", "def get_accounts(self):\r\n return self._accounts", "def list(self, filter, *args, timeout=None):\n req = AccountListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata('Accounts.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.accounts:\n yield plumbing.convert_account_to_porcelain(plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts", "def list(self, filter, *args, timeout=None):\n req = AccountGrantListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata(\n 'AccountGrants.List', req),\n timeout=timeout)\n except Exception 
as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.account_grants:\n yield plumbing.convert_account_grant_to_porcelain(\n plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "async def get_open_accounts(self):\n result = []\n URL = API_HOST + \"/api/resources/header\"\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL)\n\n json_data = await response.json()\n accounts = json_data[\"data\"][\"accounts\"][\"data\"][\"data\"]\n\n for account in accounts:\n if account[\"statusCategory\"] == STATUS_CATEGORY_OPEN:\n result.append(account[\"accountNumber\"])\n\n return result", "def get_transactions(self, crypto, address, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting historical transactions. \"\n \"Or rather it has no defined 'get_transactions' method.\"\n )", "def list_tokens_from_contract(owner_address, contract_address, limit = 0):\n\n owner_address = owner_address.lower()\n contract_address = contract_address.lower()\n \n # Check all ERC721 transactions from account using FTScan API\n erc721transfers_url = \"https://api.ftmscan.com/api?module=account&action=tokennfttx&address=\" + \\\n owner_address + \"&startblock=0&endblock=999999999&sort=asc\"\n\n try:\n res = requests.get(erc721transfers_url)\n res_json = res.json()\n transfers = res_json[\"result\"]\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n # Loop through ERC721 transactions and count token sent and received\n token_counts = {}\n\n for transaction in transfers:\n if transaction[\"contractAddress\"] == contract_address:\n token_id = int(transaction[\"tokenID\"])\n if transaction[\"to\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) + 1\n if transaction[\"from\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) - 1\n\n # Tokens we still own should have a count of 1 (we could have sent them and gotten them back)\n # We should only ever have counts of -1 and +1\n token_ids = [token for token in token_counts if token_counts[token] > 0]\n if limit:\n print(f\"Limiting results to {limit} / {len(token_ids)} tokens.\")\n token_ids = token_ids[0:limit]\n\n return token_ids", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def test_list_scheduled_payments_specific_accounts(self):\n pass", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return 
self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def get(self):\n args = request.args\n page = int(args.get('page', 1))\n filters = []\n if \"filter_trade_market\" in args:\n filter_trade_market = request.args.getlist('filter_trade_market')\n filters.append(CurrencyPurchaseTransactions.stock_market_id.in_(filter_trade_market))\n if 'start_date' in request.args:\n start_date = datetime.strptime(args['start_date'], '%Y-%m-%d')\n filters.append(CurrencyPurchaseTransactions.timestamp >= start_date)\n if 'end_date' in request.args:\n end_date = datetime.strptime(args['end_date'], '%Y-%m-%d')\n end_date += timedelta(days=1)\n else:\n end_date = start_date + timedelta(days=1)\n filters.append(CurrencyPurchaseTransactions.timestamp < end_date)\n\n query_current = CurrencyPurchaseTransactions.query.filter(and_(*filters)).paginate(page=page,\n per_page=10,\n error_out=True)\n\n transactions = []\n for transaction in query_current.items:\n data = transaction.to_json()\n data.update(transaction.get_purchase_status())\n transactions.append(data)\n\n transactions.append({'number_of_pages': query_current.pages,\n \"current_page\": query_current.page,\n \"has_next_page\": query_current.has_next,\n \"has_prev_page\": query_current.has_prev})\n\n return transactions, 200", "def listSearches(self, authenticationToken):\r\n pass", "def get_account_transactions(self, StartTime, EndTime):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('ListAccountPostings', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_postings(data.get('data', {})) if data.get('data') else {}", "def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions", "def get_accounts(self, session: \"Session\") -> List[Account]:\n\n self.__get_dn(session)\n\n result = session.soapclient.get_accounts_by_owner(self.dn)\n return [Account(session, account=r) for r in result]", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def accounts(self):\n return self._accounts.values()", "def all_accounts(request):\n accounts = Account.objects.all()\n return render(request, 'app/home.html', {'accounts': accounts})", "def accounts(self):\r\n return acc.Accounts(self)", "def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)", "def list_member_accounts(nextToken=None, maxResults=None):\n pass", "def 
accounts():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n account_ids = redis_client.smembers('%s-accounts' % session['phone'])\n accounts = [kloudless.Account.retrieve(i) for i in account_ids]\n callback_url = quote_plus(my_url('auth_callback'))\n return render_template('accounts.html', accounts=accounts, app_number=APP_NUMBER,\n callback_url=callback_url, app_id=KLOUDLESS_APP_ID)", "def see_all_transfers(request):\n transfers = Transaction.objects.all().order_by('-executed_time')\n return render(request, 'app/allTransfers.html', {'transfers': transfers})", "def get_accounts(self, count: int = 100, account_type: str = None) -> list:\n all_accounts = list(\n itertools.islice(self.client.accounts.get_all_generator(), count)\n )\n if account_type is None:\n return all_accounts\n return [a for a in all_accounts if a[\"acctType\"] == account_type]", "def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, flags), None) for transaction in transactions]", "def listaccounts(self, minconf=1, as_dict=False):\n if as_dict:\n return dict(self.proxy.listaccounts(minconf))\n else:\n return list(self.proxy.listaccounts(minconf).keys())", "def _search_account_history(cyclos, account, direction, begin_date, end_date, payment_types=[]):\n current_page = 0\n account_history = []\n while True:\n search_history_data = {\n 'account': account,\n 'direction': direction,\n 'period':\n {\n 'begin': begin_date,\n 'end': end_date,\n },\n 'orderBy': 'DATE_ASC',\n 'pageSize': 1000, # maximum pageSize: 1000\n 'currentPage': current_page,\n }\n search_history_res = cyclos.post(method='account/searchAccountHistory', data=search_history_data)\n account_history.extend(search_history_res['result']['pageItems'])\n page_count = search_history_res['result']['pageCount']\n if page_count == 0 or current_page + 1 == page_count:\n break\n else:\n current_page += 1\n filtered_history = []\n for entry in account_history:\n # On filtre d'abord par type de paiement et ensuite on regarde\n # si le paiement a fait l'objet d'une opposition de paiement\n # (dans cet ordre car pour voir s'il y a une oppostion de\n # paiement, il faut faire une requête au serveur).\n # On récupère les données de la transaction et on vérifie si la\n # donnée 'chargedBackBy' est présente dans le transfert associé.\n #\n # Note : Les transactions importées lors de la migration de\n # Cyclos 3 à Cyclos 4 sont de type ImportedTransactionData et\n # n'ont pas de transfert associé. Elles ne peuvent pas être\n # annulées. 
Les transactions enregistrées depuis (les\n # transactions \"normales\" en quelque sorte), sont de type\n # PaymentData.\n if entry['type']['id'] in payment_types:\n get_data_res = cyclos.get(method='transaction/getData/{}'.format(entry['transactionId']))\n transaction_data = get_data_res['result']\n if (transaction_data['class'] ==\n 'org.cyclos.model.banking.transactions.ImportedTransactionData'\n or (transaction_data['class'] ==\n 'org.cyclos.model.banking.transactions.PaymentData'\n and'chargedBackBy' not in transaction_data['transfer'].keys())):\n filtered_history.append(entry)\n return filtered_history", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def get_all_customers(connection):\n connection.command_path = \"customers\"\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.get(url=url, headers=extra_headers, verify=verify_ssl)\n if res.status_code > 210:\n return\n body = res.content\n return customers.parse_all_customers(body)", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def list_accounts(min_conf=1):\n min_conf = str(min_conf)\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"listaccounts\", min_conf])\n accounts = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return accounts", "def accounts(self):\n if self._accounts is None:\n url = f'{self._ynab.api_url}/budgets/{self.id}/accounts'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving accounts, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n self._accounts = [Account(self, account)\n for account in response.json().get('data', {}).get('accounts', [])]\n return self._accounts", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_account_trades(self, symbol: Symbol, trade_id: Optional[int],\n limit: int = 100, receive_window: Optional[int] = None):\n api_params = {\n \"symbol\": symbol.value,\n \"limit\": 
limit,\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if trade_id is not None:\n api_params['tradeId'] = trade_id\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/trade/account', params=api_params)", "def list_account_request(request):\n account_list = Account.objects.all()\n context = {'account_list': account_list}\n return render(request, \"accounts/account_list.html\", context)", "def _get_ad_accounts() -> [adaccount.AdAccount]:\n system_user = user.User(fbid='me')\n ad_accounts = system_user.get_ad_accounts(fields=['account_id',\n 'name',\n 'created_time',\n 'timezone_offset_hours_utc'])\n return list(ad_accounts)", "def list_offering_transactions(nextToken=None):\n pass", "def display_accounts(cls):\n return cls.account_list", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def test_wallets_get_transaction_list(self):\n pass", "def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions", "def get_all_transactions(self) -> Iterator[BaseTransaction]:\n # It is necessary to retain a copy of the current scope because this method will yield\n # and the scope may undergo changes. By doing so, we ensure the usage of the scope at the\n # time of iterator creation.\n scope = self.get_allow_scope()\n for tx in self._get_all_transactions():\n if scope.is_allowed(tx):\n yield tx", "def search_accounts(request, client):\n try:\n result = client(SearchRequest(request, 10))\n return result\n except Exception as e:\n logging.warning(e)\n return []", "def accounts(self):\r\n return resources.Accounts(self)", "def test__transactions(self, mock_get):\n uri = 'https://test.com/v3/accounts/{}/transactions'.format(accountID)\n resp = responses[\"_v3_accounts_accountID_transactions\"]['response']\n text = json.dumps(resp)\n mock_get.register_uri('GET',\n uri,\n text=text)\n r = transactions.TransactionList(accountID)\n result = api.request(r)\n self.assertTrue(len(result['pages']) > 0)", "def list_transactions(\n self,\n account_ids: List[str] = None,\n payment_order_ids: List[str] = None,\n payee_ids: List[str] = None,\n direction: TransactionDirection = None,\n statuses: List[TransactionStatus] = None,\n value_timestamp_range: Dict[str, datetime] = None,\n booking_timestamp_range: Dict[str, datetime] = None,\n last_update_timestamp_range: Dict[str, datetime] = None,\n charge_amount_value_range: Dict[str, str] = None,\n order_by: List[TransactionOrderBy] = None\n ) -> TransactionsList:\n return self._list_transactions(\n account_ids,\n payment_order_ids,\n payee_ids,\n direction,\n statuses,\n value_timestamp_range,\n booking_timestamp_range,\n last_update_timestamp_range,\n charge_amount_value_range,\n order_by,\n None\n )", "def tenants_for_token(self, context):\n token_ref = self.token_api.get_token(context=context,\n token_id=context['token_id'])\n assert token_ref is not None\n\n user_ref = token_ref['user']\n tenant_refs = []\n for 
tenant_id in user_ref['tenants']:\n tenant_refs.append(self.identity_api.get_tenant(\n context=context,\n tenant_id=tenant_id))\n return self._format_tenants_for_token(tenant_refs)", "def get_transaction_history(address, page=0, page_size=1000, include_full_tx=False, tx_type='ALL',\n order='ASC', endpoint=_default_endpoint, timeout=_default_timeout\n ) -> list:\n params = [\n {\n 'address': address,\n 'pageIndex': page,\n 'pageSize': page_size,\n 'fullTx': include_full_tx,\n 'txType': tx_type,\n 'order': order\n }\n ]\n method = 'hmy_getTransactionsHistory'\n tx_history = rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)\n try:\n return tx_history['result']['transactions']\n except KeyError as e:\n raise InvalidRPCReplyError(method, endpoint) from e", "def authorize(self, token=None, persist_pages=True):\n token = token or self.token\n response = requests.get(self.auth_url, headers=self.auth_header)\n if response.status_code == 200:\n content = response.json()\n pages = content['pageOverviews']\n if persist_pages:\n for page in pages:\n # retrieve page details and persist it\n page_detail = '{}/{}'.format(self.pages_url, page['id'])\n r = requests.get(page_detail, headers=self.auth_header)\n if r.status_code == 200:\n self.pages.append(Page(self.token, data=r.json()))\n return self._response(response)", "def getAllAccounts(z, opts):\n response = z.request('GetAllAccountsRequest', opts=opts)\n names = [account['name'] for account in response['GetAllAccountsResponse']['account']]\n return sorted(names)", "def get_transactions(\n self,\n since: Optional[date] = None,\n count: int = 1000,\n offset: int = 0,\n include_pending: bool = False,\n ) -> List[Transaction]:\n return self.session.get_transactions(\n self.account_id,\n options={\n 'since': since,\n 'count': count,\n 'offset': offset,\n 'include_pending': include_pending,\n },\n )", "def accounts():", "def transactions(self, billing_period=0, \n transaction_type='recent'):\n result = defaultdict(list)\n billing_periods = pyamex.utils.to_list(billing_period)\n\n for period in billing_periods:\n options = { 'PayLoadText' : self.client.transactions_request_xml(\n card_index=0, \n billing_period=period, \n transaction_type=transaction_type)}\n\n response = requests.get(self.client.url, options) \\\n .content\n\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n for transaction in xml_tree.findall('StatementDetails/CardAccounts/CardAccount/TransactionDetails/Transaction'):\n result[period].append(Transaction(transaction))\n\n return result", "def list(request, queryset, *args, **kwargs):\r\n return object_list(\r\n request,\r\n queryset.filter(account = request.account), \r\n *args, \r\n **kwargs\r\n )", "def get(self):\n ctx = _request_ctx_stack.top\n current_user = ctx.user\n user = User.get_by_id(current_user.id)\n page = request.args.get('page', 1, type=int)\n return response_paginate_accounts(user, page)", "def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def _all_accounts(self, leaf_only=False):\n accounts = [child_account.account\n for child_account in\n realization.iter_children(self.all_root_account,\n leaf_only=leaf_only)]\n\n return accounts[1:]", "def test_get_transaction_list_request(self):\n self.trans_details.get_transaction_list(\n batch_id = 123456,\n )", "def accounts(self):\r\n return accounts.Accounts(self)", "def 
listSearches(self, authenticationToken):\r\n self.send_listSearches(authenticationToken)\r\n return self.recv_listSearches()", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "async def test_txn_list_paginated(self):\n paging = Mocks.make_paging_response(1, 4)\n self.stream.preset_response(\n head_id='d',\n paging=paging,\n transactions=Mocks.make_txns('c'))\n\n response = await self.get_assert_200('/transactions?min=1&count=1')\n controls = Mocks.make_paging_controls(1, start_index=1)\n self.stream.assert_valid_request_sent(paging=controls)\n\n self.assert_has_valid_head(response, 'd')\n self.assert_has_valid_link(response, '/transactions?head=d&min=1&count=1')\n self.assert_has_valid_paging(response, paging,\n '/transactions?head=d&min=2&count=1',\n '/transactions?head=d&min=0&count=1')\n self.assert_has_valid_data_list(response, 1)\n self.assert_txns_well_formed(response['data'], 'c')", "def accounts(request, pk):\n \n customer = get_object_or_404(Customer, id=pk)\n orders = customer.order_set.all()\n o_l = len(orders)\n\n order_filter = OrderFilter(request.GET,queryset=orders)\n orders = order_filter.qs\n\n context = {'customer':customer, 'orders':orders, 'o_l':o_l, 'order_filter':order_filter}\n return render(request,'accounts/customers.html', context)", "def get_transaction_list2(self, account_id, aid):\n endpoint = 'accounts/{0}/transactions/sinceid'.format(account_id)\n\n params = {}\n params[\"id\"] = aid\n\n return self._api.request(endpoint, params=params)", "def gameaccount_list(request, page):\n gameaccounts = GameAccount.query.filter_by(user=request.user).limit(PER_PAGE).offset(PER_PAGE * (page - 1)).all()\n pagination = AdminPagination('account/gameaccounts', page, PER_PAGE,\n GameAccount.query.filter_by(user=request.user).count())\n if not gameaccounts and page != 1:\n raise NotFound()\n\n return render_account_response('account/gameaccount_list.html', 'gameaccounts',\n gameaccounts=gameaccounts, pagination=pagination)", "def accounts(web3):\n return web3.eth.accounts", "def get_all_accounts(self, account_id=None, account_name=None, search=False):\n if search:\n re_meth = re.search\n else:\n re_meth = re.match\n if account_id and not re.match(\"\\d{12}\", account_id):\n if not account_name:\n account_name = account_id\n account_id = None\n self.log.debug('Attempting to fetch all accounts matching- account_id:' +\n str(account_id) + ' account_name:' + str(account_name))\n response = self.get_response_items('ListAccounts', {}, item_marker='accounts',\n list_marker='Accounts')\n retlist = []\n for account in response:\n if account_name is not None:\n if not search:\n account_name = \"^{0}$\".format(account_name.strip())\n if not re_meth(account_name, account['account_name']):\n continue\n if account_id is not None:\n if not search:\n account_id = \"^{0}$\".format(account_id .strip())\n if not re_meth(account['account_id'], account_id):\n continue\n retlist.append(account)\n return retlist" ]
[ "0.6856845", "0.6346517", "0.6344958", "0.6305827", "0.62168396", "0.61625844", "0.61552113", "0.6129915", "0.6112865", "0.60796094", "0.60191786", "0.5938606", "0.59356767", "0.58972865", "0.58753717", "0.58455265", "0.5837338", "0.5836202", "0.5822045", "0.5800987", "0.5792973", "0.57698256", "0.5767514", "0.57385653", "0.57313687", "0.57090664", "0.56953937", "0.56777066", "0.565382", "0.5632098", "0.56196743", "0.5619305", "0.56192684", "0.56188405", "0.5614919", "0.56109124", "0.559893", "0.5598858", "0.5595673", "0.5582762", "0.5559338", "0.5556239", "0.55497676", "0.5545224", "0.55215615", "0.5489731", "0.5488579", "0.5468316", "0.5465096", "0.54529667", "0.5417735", "0.5407586", "0.53920734", "0.5387697", "0.5367454", "0.53544605", "0.53485286", "0.5340688", "0.5335281", "0.53199136", "0.531199", "0.5307529", "0.53062487", "0.5286979", "0.52772075", "0.52761555", "0.5262831", "0.5257072", "0.5240344", "0.52245945", "0.5213771", "0.5210288", "0.5210104", "0.5201384", "0.5178572", "0.5172905", "0.5165295", "0.5161419", "0.51562107", "0.5154739", "0.5153886", "0.51520866", "0.5150459", "0.51382273", "0.5133799", "0.51302177", "0.5127336", "0.5127236", "0.5117881", "0.511771", "0.5115068", "0.51086515", "0.5106236", "0.5105269", "0.50989455", "0.50947917", "0.5082812", "0.50744855", "0.50719875", "0.50667924" ]
0.60980934
9
Get a list of all Accounts authorized for the provided token. Get the details of a single Account Transaction.
def get_transaction_details(self, account_id, transaction_id):
    endpoint = 'accounts/{0}/transactions/{1}'.format(account_id, transaction_id)
    return self._api.request(endpoint)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def get_accounts(self):\n return self.accounts.all()", "def list_accounts(self):\n pass", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def list_account_transactions(self,\r\n year,\r\n month=None,\r\n get_as_csv=None):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(year=year)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/admin/invoice'\r\n _query_parameters = {\r\n 'year': year,\r\n 'month': month,\r\n 'getAsCsv': get_as_csv\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 400:\r\n raise APIException('Bad request', _context)\r\n elif _context.response.status_code == 403:\r\n raise APIException('Forbidden (Access denied)', _context)\r\n elif _context.response.status_code == 500:\r\n raise APIException('Internal server error', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Transaction.from_dictionary)", "def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts", "def get_accounts(self):\n return self.accounts", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def 
listtransactions(self, account=None, count=10, from_=0, address=None):\n accounts = [account] if account is not None else list(self.listaccounts(as_dict=True).keys())\n return [TransactionInfo(**tx) for acc in accounts for\n tx in self.proxy.listtransactions(acc, count, from_) if\n address is None or tx[\"address\"] == address]", "def get_accounts(self, session: \"Session\") -> List[Account]:\n\n self.__get_dn(session)\n\n result = session.soapclient.get_accounts_by_owner(self.dn)\n return [Account(session, account=r) for r in result]", "def get_accounts(self):\r\n return self._accounts", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def list_accounts(self):\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def accounts(self):\n return self._accounts.values()", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def accounts(self):\n if self._accounts is None:\n url = f'{self._ynab.api_url}/budgets/{self.id}/accounts'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving accounts, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n self._accounts = [Account(self, account)\n for account in response.json().get('data', {}).get('accounts', [])]\n return self._accounts", "def transactions(self) -> List[Transaction]:\n return self.session.get_transactions(self.account_id)", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts", "def list_accounts(min_conf=1):\n min_conf = str(min_conf)\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"listaccounts\", min_conf])\n accounts = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return accounts", "def accounts(self):\r\n return acc.Accounts(self)", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for 
transaction in response.json().get('data', {}).get('transactions', [])]", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def get_account_transactions(self, min_row=0, max_row=100):\n data = {\n 'min_row': min_row,\n 'max_row': max_row\n }\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'transactions', query_string),\n auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def listaccounts(self, minconf=1, as_dict=False):\n if as_dict:\n return dict(self.proxy.listaccounts(minconf))\n else:\n return list(self.proxy.listaccounts(minconf).keys())", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def get_transactions(self, account_id, from_date=None, to_date=None,\n page_size=None, type_list=None):\n endpoint = 'accounts/{0}/transactions'.format(account_id)\n\n params = {}\n\n if from_date:\n params[\"from\"] = from_date\n\n if to_date:\n params[\"to\"] = to_date\n\n if page_size:\n params[\"pageSize\"] = page_size\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "def get_account_transactions(self, account_number):\n\n logger.debug('Fetching account transactions for account %s',\n account_number)\n\n # Get javax.faces.ViewState from the last request\n last_req_hidden_inputs = self._hidden_inputs_as_dict(\n BeautifulSoup(self.last_req_body, 'html.parser'))\n\n data = {\n 'dialog-overview_showAccount': 'Submit',\n 'menuLinks_SUBMIT': 1,\n 'menuLinks:_idcl': '',\n 'menuLinks:_link_hidden_': '',\n 'javax.faces.ViewState': last_req_hidden_inputs.get(\n 'javax.faces.ViewState'),\n '_token': self.token,\n 'productId': account_number\n }\n\n path = '/im/im/csw.jsf'\n req = self.session.post(self.BASE_URL + path, data=data)\n self.last_req_body = req.content\n\n logger.debug('Transaction request response code %s', req.status_code)\n\n self._parse_tokens(req.text)\n\n # Parse transactions\n transactions = self._parse_account_transactions(req.text)\n\n # Request was ok but but no transactions were found. Try to refetch.\n # Requests seems to loose the connections sometimes with the message\n # \"Resetting dropped connection\". 
This should work around that\n # problem.\n if req.status_code == requests.codes.ok and not transactions:\n transactions = self.get_account_transactions(account_number)\n\n return transactions", "def accounts(web3):\n return web3.eth.accounts", "def accounts(self):\r\n return resources.Accounts(self)", "def get_accounts():\n graph = facebook.GraphAPI(mytoken)\n pages = graph.get_object('me/accounts')\n pages_info=[]\n for page in pages['data']:\n pages_info.append( ( page['name'], page['access_token'] ) )\n return pages_info", "def get_acc_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def accounts(self):\r\n return accounts.Accounts(self)", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def display_accounts(cls):\n return cls.account_list", "def get_account_trades(self, symbol: Symbol, trade_id: Optional[int],\n limit: int = 100, receive_window: Optional[int] = None):\n api_params = {\n \"symbol\": symbol.value,\n \"limit\": limit,\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if trade_id is not None:\n api_params['tradeId'] = trade_id\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/trade/account', params=api_params)", "def accounts(self):\n if self.__accounts_manager is None:\n self.__accounts_manager = AccountsManager(\"/accounts\", self._client)\n return self.__accounts_manager", "def get_transaction_list(self, account_id, from_date, to_date,\n type_list=None):\n endpoint = 'accounts/{0}/transactions/idrange'.format(account_id)\n\n params = {}\n\n params[\"from\"] = from_date\n params[\"to\"] = to_date\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "async def get_open_accounts(self):\n result = []\n URL = API_HOST + \"/api/resources/header\"\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL)\n\n json_data = await response.json()\n accounts = json_data[\"data\"][\"accounts\"][\"data\"][\"data\"]\n\n for account in accounts:\n if account[\"statusCategory\"] == STATUS_CATEGORY_OPEN:\n result.append(account[\"accountNumber\"])\n\n return result", "def fetchAllAccounts(config):\n allAccounts = []\n currentStart = 1\n currentLimit = 99\n while currentLimit > 98 :\n currentPull = fetchBatchAccounts(accountsConfig, currentStart, currentLimit)['data']\n allAccounts = allAccounts + currentPull\n currentLimit = int(len(currentPull))\n currentStart = int(currentStart) + int(currentLimit)\n return allAccounts", "def get_all_accounts(self, account_id=None, account_name=None, search=False):\n if search:\n re_meth = re.search\n else:\n re_meth = re.match\n if account_id and not re.match(\"\\d{12}\", account_id):\n if not account_name:\n account_name = account_id\n account_id = None\n self.log.debug('Attempting to fetch all accounts matching- account_id:' +\n str(account_id) + ' account_name:' + str(account_name))\n response = self.get_response_items('ListAccounts', {}, item_marker='accounts',\n list_marker='Accounts')\n retlist = []\n for account in response:\n if account_name is not None:\n if not search:\n account_name = 
\"^{0}$\".format(account_name.strip())\n if not re_meth(account_name, account['account_name']):\n continue\n if account_id is not None:\n if not search:\n account_id = \"^{0}$\".format(account_id .strip())\n if not re_meth(account['account_id'], account_id):\n continue\n retlist.append(account)\n return retlist", "def accounts():", "def getCustomerAccount(self):\n self.logger.debug(\"\")\n for cust in self.getCustomerAccountData():\n accounts = len(cust['accounts'])\n self.logger.debug(\"%d accounts in %s\", accounts, cust['CustomerId'])\n ii = 1\n for acct in cust['accounts']:\n self.logger.debug(\"yield %s, %s\", cust['CustomerId'], acct['Id'])\n yield cust['CustomerId'], acct['Id'], ii, accounts\n ii += 1", "def list_accounts(self):\r\n\r\n account = self.client['Account']\r\n mask = 'cdnAccounts[%s]' % ', '.join(['id',\r\n 'createDate',\r\n 'cdnAccountName',\r\n 'cdnSolutionName',\r\n 'cdnAccountNote',\r\n 'status'])\r\n return account.getObject(mask=mask).get('cdnAccounts', [])", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_transactions_trc20():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions/trc20\".format(wallet) # noqa: E501\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def get_account():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}\".format(wallet)\n\n print(url)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def getAllAccounts(z, opts):\n response = z.request('GetAllAccountsRequest', opts=opts)\n names = [account['name'] for account in response['GetAllAccountsResponse']['account']]\n return sorted(names)", "def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def test_access_account_info_with_token(self):\n\n print(\" --------------------------- Test 6 - Access Account Information ----------------------------\")\n\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n register_user(user_id, password, currency)\n response = login_user(user_id, password)\n\n self.assertTrue(response.json()['message']['auth_token'])\n\n auth_token = response.json()['message']['auth_token']\n headers = {'Content-Type': \"application/json\", 'Authorization': auth_token}\n\n data = \"{\\\"amount\\\" : 20.0}\"\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n\n # Get the buyer account information to check if the money comes in\n response = requests.get('http://0.0.0.0:5000/account', headers=headers)\n print(json.dumps(response.json()['message'], indent=4))", "def get_customer_accounts(self,\r\n accept,\r\n customer_id,\r\n status=None):\r\n\r\n # Validate required parameters\r\n 
self.validate_parameters(accept=accept,\r\n customer_id=customer_id)\r\n\r\n # Prepare query URL\r\n _url_path = '/aggregation/v1/customers/{customerId}/accounts'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\r\n 'customerId': customer_id\r\n })\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'status': status\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'Finicity-App-Key': Configuration.finicity_app_key,\r\n 'Accept': accept\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomHeaderAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, CustomerAccounts.from_dictionary)", "def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions", "def test_companies_company_id_data_bank_accounts_account_id_transactions_get(self):\n pass", "def get_transaction_list2(self, account_id, aid):\n endpoint = 'accounts/{0}/transactions/sinceid'.format(account_id)\n\n params = {}\n params[\"id\"] = aid\n\n return self._api.request(endpoint, params=params)", "def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)", "def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return", "def tenants_for_token(self, context):\n token_ref = self.token_api.get_token(context=context,\n token_id=context['token_id'])\n assert token_ref is not None\n\n user_ref = token_ref['user']\n tenant_refs = []\n for tenant_id in user_ref['tenants']:\n tenant_refs.append(self.identity_api.get_tenant(\n context=context,\n tenant_id=tenant_id))\n return self._format_tenants_for_token(tenant_refs)", "def list_account_request(request):\n account_list = Account.objects.all()\n context = {'account_list': account_list}\n return render(request, \"accounts/account_list.html\", context)", "def get(self):\n held_accounts = User.get_held_accounts(\n get_jwt_identity(), initialize_models=True)\n\n schema = AccountsListSchema(many=True)\n response = schema.dumps(held_accounts)\n\n return jsonify_response(json.loads(response.data), 200)", "def account_info(self):\n url, params, headers = self.request(\"/account/info\", method='GET')\n\n return self.rest_client.GET(url, headers)", "def _get_arns(self):\n client = self._get_client()\n\n account_arns = set()\n\n for role in list_roles(**self.conn_details):\n account_arns.add(role['Arn'])\n\n for user in list_users(**self.conn_details):\n account_arns.add(user['Arn'])\n\n for page in client.get_paginator('list_policies').paginate(Scope='Local'):\n for policy in page['Policies']:\n account_arns.add(policy['Arn'])\n\n for page in 
client.get_paginator('list_groups').paginate():\n for group in page['Groups']:\n account_arns.add(group['Arn'])\n\n result_arns = set()\n for arn in self.arn_list:\n if arn.lower() == 'all':\n return account_arns\n\n if arn not in account_arns:\n self.current_app.logger.warn(\"Provided ARN {arn} not found in account.\".format(arn=arn))\n continue\n\n result_arns.add(arn)\n\n self.current_app.logger.debug(\"got %d arns\", len(result_arns))\n return list(result_arns)", "def get_accounts(self, count: int = 100, account_type: str = None) -> list:\n all_accounts = list(\n itertools.islice(self.client.accounts.get_all_generator(), count)\n )\n if account_type is None:\n return all_accounts\n return [a for a in all_accounts if a[\"acctType\"] == account_type]", "def test_wallets_get_transaction_list(self):\n pass", "def _get_ad_accounts() -> [adaccount.AdAccount]:\n system_user = user.User(fbid='me')\n ad_accounts = system_user.get_ad_accounts(fields=['account_id',\n 'name',\n 'created_time',\n 'timezone_offset_hours_utc'])\n return list(ad_accounts)", "def get_bank_accounts(self):\n spec = {'owner': DBRef(self.collection_name, self._id)}\n return BankAccount.collection.find(spec)", "def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)", "def accounts(self):\r\n return Accounts(self)", "def currency_account(self, currency):\r\n param = {}\r\n param['currency'] = currency\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account', param, self.timeout)", "def get_balances(self):\r\n balances = self.api.find(\"tokens\", \"balances\", query={\"account\": self.account})\r\n return balances", "def get_account():\n\n # get user\n user = g.user\n\n # response\n return jsonify({'user_account': UserAccountAdminSchema().dump(user)}), 200", "def get_account(url, token, marker=None, limit=None, prefix=None,\n end_marker=None, http_conn=None, full_listing=False,\n service_token=None, headers=None, delimiter=None):\n req_headers = {'X-Auth-Token': token, 'Accept-Encoding': 'gzip'}\n if service_token:\n req_headers['X-Service-Token'] = service_token\n if headers:\n req_headers.update(headers)\n\n close_conn = False\n if not http_conn:\n http_conn = http_connection(url)\n close_conn = True\n if full_listing:\n rv = get_account(url, token, marker, limit, prefix, end_marker,\n http_conn, headers=req_headers, delimiter=delimiter)\n listing = rv[1]\n while listing:\n marker = listing[-1]['name']\n listing = get_account(url, token, marker, limit, prefix,\n end_marker, http_conn, headers=req_headers,\n delimiter=delimiter)[1]\n if listing:\n rv[1].extend(listing)\n return rv\n parsed, conn = http_conn\n qs = 'format=json'\n if marker:\n qs += '&marker=%s' % quote(marker)\n if limit:\n qs += '&limit=%d' % limit\n if prefix:\n qs += '&prefix=%s' % quote(prefix)\n if delimiter:\n qs += '&delimiter=%s' % quote(delimiter)\n if end_marker:\n qs += '&end_marker=%s' % quote(end_marker)\n full_path = '%s?%s' % (parsed.path, qs)\n method = 'GET'\n conn.request(method, full_path, '', req_headers)\n resp = 
conn.getresponse()\n body = resp.read()\n if close_conn:\n conn.close()\n http_log((\"%s?%s\" % (url, qs), method,), {'headers': req_headers},\n resp, body)\n\n resp_headers = resp_header_dict(resp)\n if resp.status < 200 or resp.status >= 300:\n raise ClientException.from_response(resp, 'Account GET failed', body)\n if resp.status == 204:\n return resp_headers, []\n return resp_headers, parse_api_response(resp_headers, body)", "async def get_user_account(self):\n uri = \"/fapi/v1/account\"\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def _generate_accounts(self):\n accounts = []\n auth_url = 'http://{}:5000/v3/'.format(self.host)\n\n for tenant, network in self.tenants:\n account = RwcalYang.CloudAccount.from_dict({\n 'name': 'rift.auto.openstack',\n 'account_type': 'openstack',\n 'openstack': {\n 'key': self.user or self._DEFAULT_USERNAME,\n 'secret': self._DEFAULT_PASSWORD,\n 'auth_url': auth_url,\n 'tenant': tenant,\n 'mgmt_network': network}})\n\n accounts.append(account)\n\n return accounts", "def get_transactions_for_ynab_account(self, account_name):\n account = self.get_account_by_name(account_name)\n if not account:\n return []\n return [YnabServerTransaction(transaction, transaction.account)\n for transaction in account.transactions]", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "def list_tokens_from_contract(owner_address, contract_address, limit = 0):\n\n owner_address = owner_address.lower()\n contract_address = contract_address.lower()\n \n # Check all ERC721 transactions from account using FTScan API\n erc721transfers_url = \"https://api.ftmscan.com/api?module=account&action=tokennfttx&address=\" + \\\n owner_address + \"&startblock=0&endblock=999999999&sort=asc\"\n\n try:\n res = requests.get(erc721transfers_url)\n res_json = res.json()\n transfers = res_json[\"result\"]\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n # Loop through ERC721 transactions and count token sent and received\n token_counts = {}\n\n for transaction in transfers:\n if transaction[\"contractAddress\"] == contract_address:\n token_id = int(transaction[\"tokenID\"])\n if transaction[\"to\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) + 1\n if transaction[\"from\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) - 1\n\n # Tokens we still own should have a count of 1 (we could have sent them and gotten them back)\n # We should only ever have counts of -1 and +1\n token_ids = [token for token in token_counts if token_counts[token] > 0]\n if limit:\n print(f\"Limiting results to {limit} / {len(token_ids)} tokens.\")\n token_ids = token_ids[0:limit]\n\n return token_ids", "def account_get(request):\n fields = [\"email\", \"token\"]\n\n # serializes 
the quert string to a dict (neeto)\n args = request.args\n\n query_validation = validate_query_params(args, fields)\n # check that body validation succeeded\n if query_validation[1] != 200:\n return query_validation\n\n auth = azure_refresh_token(args[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n\n account_db = Database(\"accounts\")\n\n try:\n response = {\n \"access_token\": auth[0],\n \"refresh_token\": auth[1],\n \"data\": account_db.get(args[\"email\"]).to_dict(),\n }\n return jsonHttp200(\"Account returned\", response)\n except:\n return http400(\"Account not found\")", "def _all_accounts(self, leaf_only=False):\n accounts = [child_account.account\n for child_account in\n realization.iter_children(self.all_root_account,\n leaf_only=leaf_only)]\n\n return accounts[1:]", "async def get_user_account(self):\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", \"/api/v3/account\", params, auth=True)\n return success, error", "def get_account():\n\n bus = session_bus()\n\n goa_manager = bus.get_object(GOA_NAME, GOA_PATH)\n\n goa_objects = goa_manager.GetManagedObjects(dbus_interface=OBJECT_MANAGER)\n\n accounts = [\n obj for obj in goa_objects\n if obj != GOA_MANAGER_PATH\n ]\n\n if len(accounts) > 1:\n sys.exit(\"More than one account found.\")\n\n (account_path,) = accounts\n\n return bus.get_object(GOA_NAME, account_path)", "def get_transaction(self, excludes_list):\n response = client.get(self.url, \"transactions\", {\"exclude_hash\": excludes_list})\n if response.status == 200:\n print(\"Transaction successfully received\")\n return Transaction.parse(response.data)\n elif response.status == 404:\n # print(\"no request to be received\")\n return None\n else:\n print(\"Unknown error while requesting transaction\")\n return None", "def list(self, filter, *args, timeout=None):\n req = AccountGrantListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata(\n 'AccountGrants.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.account_grants:\n yield plumbing.convert_account_grant_to_porcelain(\n plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "def show_accounts(customer_id):\n customer_accounts = db_helper.get_customer_accounts(customer_id)\n if not customer_accounts:\n return api_utils.error(\"No accounts for customer with id \\\n number {} found\".format(customer_id), 404)\n else:\n return jsonify({\"accounts\": customer_accounts})", "def list_member_accounts(nextToken=None, maxResults=None):\n pass", "def list(self, filter, *args, timeout=None):\n req = AccountListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 
0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata('Accounts.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.accounts:\n yield plumbing.convert_account_to_porcelain(plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "def get_account_transactions_by_id(self, TransactionId):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('ListAccountPostingsById', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_postings(data.get('data', {})) if data.get('data') else {}", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def accounts():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n account_ids = redis_client.smembers('%s-accounts' % session['phone'])\n accounts = [kloudless.Account.retrieve(i) for i in account_ids]\n callback_url = quote_plus(my_url('auth_callback'))\n return render_template('accounts.html', accounts=accounts, app_number=APP_NUMBER,\n callback_url=callback_url, app_id=KLOUDLESS_APP_ID)", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def all_accounts(request):\n accounts = Account.objects.all()\n return render(request, 'app/home.html', {'accounts': accounts})", "def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def listreceivedbyaccount(self, minconf=1, includeempty=False):\n return [AccountInfo(**x) for x in\n self.proxy.listreceivedbyaccount(minconf, includeempty)]", "def accounts():\n pass" ]
[ "0.6953453", "0.67745745", "0.67192453", "0.6699322", "0.665223", "0.66492224", "0.64944196", "0.64375126", "0.6423205", "0.6415874", "0.64149076", "0.63608265", "0.635944", "0.63580346", "0.63404256", "0.6307913", "0.6304804", "0.62903816", "0.6289546", "0.62893355", "0.62631434", "0.62464255", "0.62396806", "0.6210221", "0.61691177", "0.61535674", "0.6127989", "0.6096354", "0.60782206", "0.60148084", "0.59851795", "0.5935817", "0.5932532", "0.5929224", "0.59143037", "0.5913332", "0.5907564", "0.5897421", "0.5875081", "0.58377177", "0.58270705", "0.5781894", "0.5772769", "0.5749407", "0.57283974", "0.57273364", "0.5701782", "0.5698314", "0.5658985", "0.5649157", "0.56478554", "0.5624134", "0.5622394", "0.55927646", "0.55752707", "0.556599", "0.55619746", "0.55553675", "0.5552746", "0.5538032", "0.55357736", "0.5523075", "0.5521066", "0.5516481", "0.55009687", "0.54826975", "0.54814446", "0.547546", "0.54669213", "0.54572165", "0.5454803", "0.5445922", "0.542809", "0.542023", "0.5406647", "0.5400732", "0.54005563", "0.53991675", "0.5393559", "0.53872997", "0.5384446", "0.53730756", "0.53730136", "0.5369354", "0.5367654", "0.53669596", "0.5364474", "0.5361888", "0.5359182", "0.53489816", "0.53480685", "0.5347369", "0.53330106", "0.53263265", "0.53231204", "0.5319553", "0.53091687", "0.5304392", "0.52996755", "0.5295581", "0.5290988" ]
0.0
-1
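Several negatives in the record above share one pattern worth isolating: cursor-paginated listing wrapped in a generator, with retry and jittered backoff on transient errors (see the `AccountGrants.List` and `Accounts.List` snippets). Below is a stripped-down, library-agnostic sketch of that pattern; every name in it is invented here for illustration and none of it comes from the dataset rows.

import time, random

def paginate(fetch_page, cursor="", max_tries=5):
    # fetch_page(cursor) -> (items, next_cursor); next_cursor == "" means done.
    # Mirrors the retry-and-yield shape of the List helpers in the snippets
    # above, without any gRPC or SDK specifics.
    tries = 0
    while True:
        try:
            items, next_cursor = fetch_page(cursor)
        except Exception:
            tries += 1
            if tries >= max_tries:
                raise
            time.sleep(random.uniform(0, 0.1 * 2 ** tries))  # jittered backoff
            continue
        tries = 0
        yield from items
        if next_cursor == "":
            break
        cursor = next_cursor

# Tiny in-memory demo: three pages of accounts keyed by cursor.
pages = {"": (["a1", "a2"], "p2"), "p2": (["a3", "a4"], "p3"), "p3": (["a5"], "")}
print(list(paginate(lambda c: pages[c])))  # ['a1', 'a2', 'a3', 'a4', 'a5']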
Get a list of all Accounts authorized for the provided token. Get a range of Transactions for an Account based on the Transaction IDs.
def get_transaction_list(self, account_id, from_date, to_date, type_list=None): endpoint = 'accounts/{0}/transactions/idrange'.format(account_id) params = {} params["from"] = from_date params["to"] = to_date if type_list: type_list = "%2C".join(type_list) params["type"] = type_list return self._api.request(endpoint, params=params)
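An illustrative usage sketch for the record above, not part of the dataset: per the query text, the `idrange` endpoint takes Transaction IDs in its `from`/`to` query parameters even though the wrapper names them `from_date`/`to_date`, so the call below passes IDs. The stub client, the account ID, and the ID values are hypothetical stand-ins for whatever `self._api` is in the source project.

class _StubAPI:
    def request(self, endpoint, params=None):
        # A real client would issue an authenticated HTTP GET here; the stub
        # just echoes what would be sent so the example stays self-contained.
        return {"endpoint": endpoint, "params": params}

class TransactionsClient:
    def __init__(self, api):
        self._api = api

    def get_transaction_list(self, account_id, from_date, to_date, type_list=None):
        # Mirrors the document above: despite the date-flavoured parameter
        # names, `from`/`to` carry Transaction IDs for the idrange endpoint.
        endpoint = 'accounts/{0}/transactions/idrange'.format(account_id)
        params = {"from": from_date, "to": to_date}
        if type_list:
            params["type"] = "%2C".join(type_list)
        return self._api.request(endpoint, params=params)

client = TransactionsClient(_StubAPI())
print(client.get_transaction_list("101-004-1234567-001", 6409, 6412,
                                  ["ORDER_FILL", "ORDER_CANCEL"]))

Note the wrapper joins the type filter with "%2C", the URL-encoded comma, rather than a plain comma; the sketch keeps that quirk so its output matches what the original code would send.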
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def list_accounts(self):\n pass", "def get_accounts(self):\n return self.accounts.all()", "def listtransactions(self, account=None, count=10, from_=0, address=None):\n accounts = [account] if account is not None else list(self.listaccounts(as_dict=True).keys())\n return [TransactionInfo(**tx) for acc in accounts for\n tx in self.proxy.listtransactions(acc, count, from_) if\n address is None or tx[\"address\"] == address]", "def get_account_transactions(self, min_row=0, max_row=100):\n data = {\n 'min_row': min_row,\n 'max_row': max_row\n }\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'transactions', query_string),\n auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def list_account_transactions(self,\r\n year,\r\n month=None,\r\n get_as_csv=None):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(year=year)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/admin/invoice'\r\n _query_parameters = {\r\n 'year': year,\r\n 'month': month,\r\n 'getAsCsv': get_as_csv\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 400:\r\n raise APIException('Bad request', _context)\r\n elif _context.response.status_code == 403:\r\n raise APIException('Forbidden (Access denied)', _context)\r\n elif _context.response.status_code == 500:\r\n raise APIException('Internal server error', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Transaction.from_dictionary)", "def fetchAllAccounts(config):\n allAccounts = []\n currentStart = 1\n currentLimit = 99\n while currentLimit > 98 :\n currentPull = fetchBatchAccounts(accountsConfig, currentStart, 
currentLimit)['data']\n allAccounts = allAccounts + currentPull\n currentLimit = int(len(currentPull))\n currentStart = int(currentStart) + int(currentLimit)\n return allAccounts", "def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts", "def get_transactions(self, account_id, from_date=None, to_date=None,\n page_size=None, type_list=None):\n endpoint = 'accounts/{0}/transactions'.format(account_id)\n\n params = {}\n\n if from_date:\n params[\"from\"] = from_date\n\n if to_date:\n params[\"to\"] = to_date\n\n if page_size:\n params[\"pageSize\"] = page_size\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "def get_accounts(self):\n return self.accounts", "def get_accounts(self, session: \"Session\") -> List[Account]:\n\n self.__get_dn(session)\n\n result = session.soapclient.get_accounts_by_owner(self.dn)\n return [Account(session, account=r) for r in result]", "def transactions(self) -> List[Transaction]:\n return self.session.get_transactions(self.account_id)", "def get_accounts(self):\r\n return self._accounts", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def accounts(self):\n return self._accounts.values()", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def accounts(self):\n if self._accounts is None:\n url = f'{self._ynab.api_url}/budgets/{self.id}/accounts'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving accounts, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n self._accounts = [Account(self, account)\n for account in response.json().get('data', {}).get('accounts', [])]\n return self._accounts", "def tenants_for_token(self, context):\n token_ref = self.token_api.get_token(context=context,\n token_id=context['token_id'])\n assert token_ref is not None\n\n user_ref = token_ref['user']\n tenant_refs = []\n for tenant_id in user_ref['tenants']:\n tenant_refs.append(self.identity_api.get_tenant(\n context=context,\n tenant_id=tenant_id))\n return self._format_tenants_for_token(tenant_refs)", "def list_accounts(self):\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information", "def list_tokens_from_contract(owner_address, contract_address, limit = 0):\n\n owner_address = owner_address.lower()\n contract_address = contract_address.lower()\n \n # Check all ERC721 transactions from account using FTScan API\n erc721transfers_url = \"https://api.ftmscan.com/api?module=account&action=tokennfttx&address=\" + \\\n owner_address + \"&startblock=0&endblock=999999999&sort=asc\"\n\n 
try:\n res = requests.get(erc721transfers_url)\n res_json = res.json()\n transfers = res_json[\"result\"]\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n # Loop through ERC721 transactions and count token sent and received\n token_counts = {}\n\n for transaction in transfers:\n if transaction[\"contractAddress\"] == contract_address:\n token_id = int(transaction[\"tokenID\"])\n if transaction[\"to\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) + 1\n if transaction[\"from\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) - 1\n\n # Tokens we still own should have a count of 1 (we could have sent them and gotten them back)\n # We should only ever have counts of -1 and +1\n token_ids = [token for token in token_counts if token_counts[token] > 0]\n if limit:\n print(f\"Limiting results to {limit} / {len(token_ids)} tokens.\")\n token_ids = token_ids[0:limit]\n\n return token_ids", "def list_accounts(min_conf=1):\n min_conf = str(min_conf)\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"listaccounts\", min_conf])\n accounts = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return accounts", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def accounts(self):\r\n return acc.Accounts(self)", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def get_account_trades(self, symbol: Symbol, trade_id: Optional[int],\n limit: int = 100, receive_window: Optional[int] = None):\n api_params = {\n \"symbol\": symbol.value,\n \"limit\": limit,\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if trade_id is not None:\n api_params['tradeId'] = trade_id\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/trade/account', params=api_params)", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": 
account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def list(self, filter, *args, timeout=None):\n req = AccountGrantListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata(\n 'AccountGrants.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.account_grants:\n yield plumbing.convert_account_grant_to_porcelain(\n plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "def list_transactions(\n self,\n account_ids: List[str] = None,\n payment_order_ids: List[str] = None,\n payee_ids: List[str] = None,\n direction: TransactionDirection = None,\n statuses: List[TransactionStatus] = None,\n value_timestamp_range: Dict[str, datetime] = None,\n booking_timestamp_range: Dict[str, datetime] = None,\n last_update_timestamp_range: Dict[str, datetime] = None,\n charge_amount_value_range: Dict[str, str] = None,\n order_by: List[TransactionOrderBy] = None\n ) -> TransactionsList:\n return self._list_transactions(\n account_ids,\n payment_order_ids,\n payee_ids,\n direction,\n statuses,\n value_timestamp_range,\n booking_timestamp_range,\n last_update_timestamp_range,\n charge_amount_value_range,\n order_by,\n None\n )", "async def get_open_accounts(self):\n result = []\n URL = API_HOST + \"/api/resources/header\"\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL)\n\n json_data = await response.json()\n accounts = json_data[\"data\"][\"accounts\"][\"data\"][\"data\"]\n\n for account in accounts:\n if account[\"statusCategory\"] == STATUS_CATEGORY_OPEN:\n result.append(account[\"accountNumber\"])\n\n return result", "def _generate_accounts(self):\n accounts = []\n auth_url = 'http://{}:5000/v3/'.format(self.host)\n\n for tenant, network in self.tenants:\n account = RwcalYang.CloudAccount.from_dict({\n 'name': 'rift.auto.openstack',\n 'account_type': 'openstack',\n 'openstack': {\n 'key': self.user or self._DEFAULT_USERNAME,\n 'secret': self._DEFAULT_PASSWORD,\n 'auth_url': auth_url,\n 'tenant': tenant,\n 'mgmt_network': network}})\n\n accounts.append(account)\n\n return accounts", "def getCustomerAccount(self):\n self.logger.debug(\"\")\n for cust in self.getCustomerAccountData():\n accounts = len(cust['accounts'])\n self.logger.debug(\"%d accounts in %s\", accounts, cust['CustomerId'])\n ii = 1\n for acct in cust['accounts']:\n self.logger.debug(\"yield %s, %s\", cust['CustomerId'], acct['Id'])\n yield cust['CustomerId'], acct['Id'], ii, accounts\n ii += 1", "def accounts(self):\r\n return resources.Accounts(self)", "def list_member_accounts(nextToken=None, maxResults=None):\n pass", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = 
self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def get_transactions_trc20():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions/trc20\".format(wallet) # noqa: E501\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def get_account_transactions(self, account_number):\n\n logger.debug('Fetching account transactions for account %s',\n account_number)\n\n # Get javax.faces.ViewState from the last request\n last_req_hidden_inputs = self._hidden_inputs_as_dict(\n BeautifulSoup(self.last_req_body, 'html.parser'))\n\n data = {\n 'dialog-overview_showAccount': 'Submit',\n 'menuLinks_SUBMIT': 1,\n 'menuLinks:_idcl': '',\n 'menuLinks:_link_hidden_': '',\n 'javax.faces.ViewState': last_req_hidden_inputs.get(\n 'javax.faces.ViewState'),\n '_token': self.token,\n 'productId': account_number\n }\n\n path = '/im/im/csw.jsf'\n req = self.session.post(self.BASE_URL + path, data=data)\n self.last_req_body = req.content\n\n logger.debug('Transaction request response code %s', req.status_code)\n\n self._parse_tokens(req.text)\n\n # Parse transactions\n transactions = self._parse_account_transactions(req.text)\n\n # Request was ok but but no transactions were found. Try to refetch.\n # Requests seems to loose the connections sometimes with the message\n # \"Resetting dropped connection\". This should work around that\n # problem.\n if req.status_code == requests.codes.ok and not transactions:\n transactions = self.get_account_transactions(account_number)\n\n return transactions", "def get_accounts():\n graph = facebook.GraphAPI(mytoken)\n pages = graph.get_object('me/accounts')\n pages_info=[]\n for page in pages['data']:\n pages_info.append( ( page['name'], page['access_token'] ) )\n return pages_info", "def listaccounts(self, minconf=1, as_dict=False):\n if as_dict:\n return dict(self.proxy.listaccounts(minconf))\n else:\n return list(self.proxy.listaccounts(minconf).keys())", "def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)", "def list(self, ids=None, parent_id=None, name=None, limit=None):\n query = {'ids': ids, 'parentId': parent_id, 'name': name, 'limit': limit}\n kwargs = {'query': dict(((k, v) for k, v in query.items() if v))}\n return self.client.find_all_accounts(**kwargs)", "def get_accounts(self, count: int = 100, account_type: str = None) -> list:\n all_accounts = list(\n itertools.islice(self.client.accounts.get_all_generator(), count)\n )\n if account_type is None:\n return all_accounts\n return [a for a in all_accounts if a[\"acctType\"] == account_type]", "def accounts(web3):\n return web3.eth.accounts", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def fetchById(accountIdList):\n accounts= []\n url = accountsConfig['domain']\n for accId in accountIdList:\n r = requests.get(url +'/'+ str(accId), headers=accountsConfig['headers']).json()\n accounts.append(r)\n return accounts", "def list(self, filter, *args, timeout=None):\n req = 
AccountListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata('Accounts.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.accounts:\n yield plumbing.convert_account_to_porcelain(plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def get_acc_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def accounts(self):\r\n return accounts.Accounts(self)", "def get_transaction_list2(self, account_id, aid):\n endpoint = 'accounts/{0}/transactions/sinceid'.format(account_id)\n\n params = {}\n params[\"id\"] = aid\n\n return self._api.request(endpoint, params=params)", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def accounts():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n account_ids = redis_client.smembers('%s-accounts' % session['phone'])\n accounts = [kloudless.Account.retrieve(i) for i in account_ids]\n callback_url = quote_plus(my_url('auth_callback'))\n return render_template('accounts.html', accounts=accounts, app_number=APP_NUMBER,\n callback_url=callback_url, app_id=KLOUDLESS_APP_ID)", "def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions", "def _all_accounts(self, leaf_only=False):\n accounts = [child_account.account\n for child_account in\n realization.iter_children(self.all_root_account,\n leaf_only=leaf_only)]\n\n return accounts[1:]", "def list_accounts(self):\r\n\r\n account = self.client['Account']\r\n mask = 'cdnAccounts[%s]' % ', '.join(['id',\r\n 'createDate',\r\n 'cdnAccountName',\r\n 'cdnSolutionName',\r\n 'cdnAccountNote',\r\n 'status'])\r\n return account.getObject(mask=mask).get('cdnAccounts', [])", "def accounts():", "def getAllAccounts(z, opts):\n response = z.request('GetAllAccountsRequest', opts=opts)\n names = [account['name'] for account in response['GetAllAccountsResponse']['account']]\n return sorted(names)", "def display_accounts(cls):\n return cls.account_list", "def list_accounts(self, **kwargs):\r\n if 'mask' 
not in kwargs:\r\n items = [\r\n 'id',\r\n 'name',\r\n 'status',\r\n 'nodes',\r\n ]\r\n kwargs['mask'] = \"mask[%s]\" % ','.join(items)\r\n\r\n return self.client['Account'].getMessageQueueAccounts(**kwargs)", "def get_customer_accounts(self,\r\n accept,\r\n customer_id,\r\n status=None):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(accept=accept,\r\n customer_id=customer_id)\r\n\r\n # Prepare query URL\r\n _url_path = '/aggregation/v1/customers/{customerId}/accounts'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\r\n 'customerId': customer_id\r\n })\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'status': status\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'Finicity-App-Key': Configuration.finicity_app_key,\r\n 'Accept': accept\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomHeaderAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, CustomerAccounts.from_dictionary)", "def test_list_scheduled_payments_specific_accounts(self):\n pass", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, flags), None) for transaction in transactions]", "def get_all_accounts(self, account_id=None, account_name=None, search=False):\n if search:\n re_meth = re.search\n else:\n re_meth = re.match\n if account_id and not re.match(\"\\d{12}\", account_id):\n if not account_name:\n account_name = account_id\n account_id = None\n self.log.debug('Attempting to fetch all accounts matching- account_id:' +\n str(account_id) + ' account_name:' + str(account_name))\n response = self.get_response_items('ListAccounts', {}, item_marker='accounts',\n list_marker='Accounts')\n retlist = []\n for account in response:\n if account_name is not None:\n if not search:\n account_name = \"^{0}$\".format(account_name.strip())\n if not re_meth(account_name, account['account_name']):\n continue\n if account_id is not None:\n if not search:\n account_id = 
\"^{0}$\".format(account_id .strip())\n if not re_meth(account['account_id'], account_id):\n continue\n retlist.append(account)\n return retlist", "def _get_ad_accounts() -> [adaccount.AdAccount]:\n system_user = user.User(fbid='me')\n ad_accounts = system_user.get_ad_accounts(fields=['account_id',\n 'name',\n 'created_time',\n 'timezone_offset_hours_utc'])\n return list(ad_accounts)", "def _get_arns(self):\n client = self._get_client()\n\n account_arns = set()\n\n for role in list_roles(**self.conn_details):\n account_arns.add(role['Arn'])\n\n for user in list_users(**self.conn_details):\n account_arns.add(user['Arn'])\n\n for page in client.get_paginator('list_policies').paginate(Scope='Local'):\n for policy in page['Policies']:\n account_arns.add(policy['Arn'])\n\n for page in client.get_paginator('list_groups').paginate():\n for group in page['Groups']:\n account_arns.add(group['Arn'])\n\n result_arns = set()\n for arn in self.arn_list:\n if arn.lower() == 'all':\n return account_arns\n\n if arn not in account_arns:\n self.current_app.logger.warn(\"Provided ARN {arn} not found in account.\".format(arn=arn))\n continue\n\n result_arns.add(arn)\n\n self.current_app.logger.debug(\"got %d arns\", len(result_arns))\n return list(result_arns)", "def fetchBatchAccounts(config, start, limit): \n config['params']['from'] = start\n config['params']['limit'] = limit\n url = config['domain']\n r = requests.get(url, headers=config['headers'], params=config['params']).json()\n print(\"Downloading From: \", config['params']['from'], ' To: ', config['params']['from'] + config['params']['limit'], '| Limit: ', config['params']['limit'])\n return r", "def get_account_ids(response):\n return [account['Id'] for account in response[0]]", "def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions", "def get_all_transactions(self) -> Iterator[BaseTransaction]:\n # It is necessary to retain a copy of the current scope because this method will yield\n # and the scope may undergo changes. By doing so, we ensure the usage of the scope at the\n # time of iterator creation.\n scope = self.get_allow_scope()\n for tx in self._get_all_transactions():\n if scope.is_allowed(tx):\n yield tx", "def accounts(self):\n if self.__accounts_manager is None:\n self.__accounts_manager = AccountsManager(\"/accounts\", self._client)\n return self.__accounts_manager", "def get_all_customer_ids():\n\n # your code", "def get_transactions(self, crypto, address, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting historical transactions. 
\"\n \"Or rather it has no defined 'get_transactions' method.\"\n )", "def filtered_accounts(\n urja_data: UrjanetData,\n ) -> List[Account]:\n return [\n account\n for account in urja_data.accounts\n if account.IntervalEnd is not None and account.IntervalStart is not None\n ]", "def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return", "def tokens(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def get_balances(self):\r\n balances = self.api.find(\"tokens\", \"balances\", query={\"account\": self.account})\r\n return balances", "def get_account_transactions(self, StartTime, EndTime):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('ListAccountPostings', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_postings(data.get('data', {})) if data.get('data') else {}", "def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def get_transfer_list(self,\n address: str,\n token_address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transfer, ...]:\n ...", "def get_all_customers(connection):\n connection.command_path = \"customers\"\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.get(url=url, headers=extra_headers, verify=verify_ssl)\n if res.status_code > 210:\n return\n body = res.content\n return customers.parse_all_customers(body)", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def _get_member_accounts(billing_account_id, options):\n assumed_credentials = _request_sts_credentials(\n billing_account_id=billing_account_id,\n options=options,\n )\n billing_account_session = boto3.Session(\n aws_access_key_id=assumed_credentials[\"AccessKeyId\"],\n aws_secret_access_key=assumed_credentials[\"SecretAccessKey\"],\n aws_session_token=assumed_credentials[\"SessionToken\"],\n )\n org_client = billing_account_session.client(\"organizations\")\n list_accounts_paginator = org_client.get_paginator(\"list_accounts\")\n accounts = []\n for page in list_accounts_paginator.paginate():\n accounts.extend(page[\"Accounts\"])\n\n # Remove any account that is not actively part of this organization yet.\n only_active_accounts = filter(lambda a: a[\"Status\"] == \"ACTIVE\", accounts)\n\n # Only return the key: value pairs that are defined in the --field option.\n only_certain_fields_of_active = list(\n map(\n lambda a: {k: v for k, v in a.items() if k in options[\"--field\"]},\n only_active_accounts,\n )\n )\n return only_certain_fields_of_active", "def access(config, region, accounts=()):\n config = validate.callback(config)\n accounts_report = []\n\n def check_access(account):\n accounts_report.append(account)\n session = get_session(account['role'], region)\n identity = 
session.client('sts').get_caller_identity()\n account['account_id'] = identity['Account']\n account.pop('groups')\n account.pop('role')\n client = session.client('iam')\n policy_arn = identity['Arn']\n if policy_arn.count('/') > 1:\n policy_arn = policy_arn.rsplit('/', 1)[0]\n if ':sts:' in policy_arn:\n policy_arn = policy_arn.replace(':sts', ':iam')\n if ':assumed-role' in policy_arn:\n policy_arn = policy_arn.replace(':assumed-role', ':role')\n evaluation = client.simulate_principal_policy(\n PolicySourceArn=policy_arn,\n ActionNames=['logs:CreateExportTask'])['EvaluationResults']\n account['access'] = evaluation[0]['EvalDecision']\n\n with ThreadPoolExecutor(max_workers=16) as w:\n futures = {}\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n futures[w.submit(check_access, account)] = None\n for f in as_completed(futures):\n pass\n accounts_report.sort(key=operator.itemgetter('access'), reverse=True)\n print(tabulate(accounts_report, headers='keys'))", "def accounts(self):\r\n return Accounts(self)", "def get_transactions(\n self,\n since: Optional[date] = None,\n count: int = 1000,\n offset: int = 0,\n include_pending: bool = False,\n ) -> List[Transaction]:\n return self.session.get_transactions(\n self.account_id,\n options={\n 'since': since,\n 'count': count,\n 'offset': offset,\n 'include_pending': include_pending,\n },\n )", "def ordered_accounts(filtered_accounts: List[Account]) -> List[Account]:\n return sorted(\n filtered_accounts, key=lambda x: (x.IntervalEnd, -x.PK), reverse=True\n )", "def list_offering_transactions(nextToken=None):\n pass", "def get_tenants(self, **kwargs):\n url = self.get_url('tenants', kwargs, ['begin', 'end'])\n return self.api_client.get(url).json()", "def get_fedcm_account_list(self):\n pass", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list" ]
[ "0.68266255", "0.6736031", "0.65336376", "0.6370594", "0.63402724", "0.6323471", "0.62967646", "0.6295912", "0.62643474", "0.6197194", "0.6178552", "0.6152341", "0.60816187", "0.6077081", "0.60660774", "0.6049993", "0.60333514", "0.6030597", "0.6026027", "0.60059094", "0.59751254", "0.5967328", "0.5957622", "0.5947605", "0.5942622", "0.58913183", "0.5890948", "0.58874077", "0.58414537", "0.5823673", "0.5818305", "0.58116484", "0.58065385", "0.57925516", "0.5788438", "0.5785288", "0.5778045", "0.57471305", "0.57441396", "0.5733609", "0.57073396", "0.57005733", "0.56989455", "0.5696062", "0.56886524", "0.567089", "0.5661176", "0.5643709", "0.56209946", "0.56097066", "0.56060386", "0.5590016", "0.55806965", "0.5577778", "0.55686575", "0.5538519", "0.5520927", "0.5515807", "0.5475994", "0.5467237", "0.5455305", "0.5441955", "0.54373944", "0.54339737", "0.5428467", "0.5427649", "0.54268926", "0.5418282", "0.54155207", "0.5411627", "0.54109955", "0.5410757", "0.54049295", "0.5398826", "0.53814703", "0.53777474", "0.5376343", "0.53704256", "0.5358272", "0.5356988", "0.5305672", "0.52970266", "0.5292699", "0.52862984", "0.5277427", "0.5264929", "0.52627856", "0.52174294", "0.5217099", "0.5212537", "0.5205266", "0.52028066", "0.52027893", "0.5202408", "0.5201752", "0.5193723", "0.51759976", "0.51692474", "0.5144547", "0.5142493" ]
0.66186446
2
Get a list of all Accounts authorized for the provided token. Get a range of Transactions for an Account starting at (but not including) a provided Transaction ID.
def get_transaction_list2(self, account_id, aid):\n endpoint = 'accounts/{0}/transactions/sinceid'.format(account_id)\n params = {}\n params["id"] = aid\n return self._api.request(endpoint, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def get_transaction_list(self, account_id, from_date, to_date,\n type_list=None):\n endpoint = 'accounts/{0}/transactions/idrange'.format(account_id)\n\n params = {}\n\n params[\"from\"] = from_date\n params[\"to\"] = to_date\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "def get_account_transactions(self, min_row=0, max_row=100):\n data = {\n 'min_row': min_row,\n 'max_row': max_row\n }\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'transactions', query_string),\n auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def listtransactions(self, account=None, count=10, from_=0, address=None):\n accounts = [account] if account is not None else list(self.listaccounts(as_dict=True).keys())\n return [TransactionInfo(**tx) for acc in accounts for\n tx in self.proxy.listtransactions(acc, count, from_) if\n address is None or tx[\"address\"] == address]", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def get_transactions(self, account_id, from_date=None, to_date=None,\n page_size=None, type_list=None):\n endpoint = 'accounts/{0}/transactions'.format(account_id)\n\n params = {}\n\n if from_date:\n params[\"from\"] = from_date\n\n if to_date:\n params[\"to\"] = to_date\n\n if page_size:\n params[\"pageSize\"] = page_size\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)", "def list_accounts(self):\n pass", "def transactions(self) -> List[Transaction]:\n return self.session.get_transactions(self.account_id)", "def list_account_transactions(self,\r\n year,\r\n month=None,\r\n get_as_csv=None):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(year=year)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/admin/invoice'\r\n _query_parameters = {\r\n 'year': year,\r\n 'month': month,\r\n 'getAsCsv': get_as_csv\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 400:\r\n raise APIException('Bad 
request', _context)\r\n elif _context.response.status_code == 403:\r\n raise APIException('Forbidden (Access denied)', _context)\r\n elif _context.response.status_code == 500:\r\n raise APIException('Internal server error', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Transaction.from_dictionary)", "def get_account_trades(self, symbol: Symbol, trade_id: Optional[int],\n limit: int = 100, receive_window: Optional[int] = None):\n api_params = {\n \"symbol\": symbol.value,\n \"limit\": limit,\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if trade_id is not None:\n api_params['tradeId'] = trade_id\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/trade/account', params=api_params)", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_accounts(self):\n return self.accounts.all()", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def fetchAllAccounts(config):\n allAccounts = []\n currentStart = 1\n currentLimit = 99\n while currentLimit > 98 :\n currentPull = fetchBatchAccounts(accountsConfig, currentStart, currentLimit)['data']\n allAccounts = allAccounts + currentPull\n currentLimit = int(len(currentPull))\n currentStart = int(currentStart) + int(currentLimit)\n return allAccounts", "def get_transactions_trc20():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions/trc20\".format(wallet) # noqa: E501\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def list_tokens_from_contract(owner_address, contract_address, limit = 0):\n\n owner_address = owner_address.lower()\n contract_address = contract_address.lower()\n \n # Check all ERC721 transactions from account using FTScan API\n erc721transfers_url = \"https://api.ftmscan.com/api?module=account&action=tokennfttx&address=\" + \\\n owner_address + \"&startblock=0&endblock=999999999&sort=asc\"\n\n try:\n res = requests.get(erc721transfers_url)\n res_json = res.json()\n transfers = res_json[\"result\"]\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n # Loop through ERC721 transactions and count token sent and received\n token_counts = {}\n\n for transaction in transfers:\n if transaction[\"contractAddress\"] == contract_address:\n token_id = int(transaction[\"tokenID\"])\n if transaction[\"to\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) + 1\n if transaction[\"from\"] == owner_address:\n token_counts[token_id] = token_counts.get(token_id, 0) - 1\n\n # Tokens we still own should have a count of 1 (we could have sent them and gotten them back)\n # We should only ever have counts of -1 and +1\n token_ids = [token for token in token_counts if token_counts[token] > 0]\n if limit:\n print(f\"Limiting results to {limit} / {len(token_ids)} tokens.\")\n token_ids = token_ids[0:limit]\n\n return token_ids", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def 
get_acc_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts", "def list_transactions(\n self,\n account_ids: List[str] = None,\n payment_order_ids: List[str] = None,\n payee_ids: List[str] = None,\n direction: TransactionDirection = None,\n statuses: List[TransactionStatus] = None,\n value_timestamp_range: Dict[str, datetime] = None,\n booking_timestamp_range: Dict[str, datetime] = None,\n last_update_timestamp_range: Dict[str, datetime] = None,\n charge_amount_value_range: Dict[str, str] = None,\n order_by: List[TransactionOrderBy] = None\n ) -> TransactionsList:\n return self._list_transactions(\n account_ids,\n payment_order_ids,\n payee_ids,\n direction,\n statuses,\n value_timestamp_range,\n booking_timestamp_range,\n last_update_timestamp_range,\n charge_amount_value_range,\n order_by,\n None\n )", "def get_accounts(self):\n return self.accounts", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def list_accounts(min_conf=1):\n min_conf = str(min_conf)\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"listaccounts\", min_conf])\n accounts = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return accounts", "def get_account_transactions(self, account_number):\n\n logger.debug('Fetching account transactions for account %s',\n account_number)\n\n # Get javax.faces.ViewState from the last request\n last_req_hidden_inputs = self._hidden_inputs_as_dict(\n BeautifulSoup(self.last_req_body, 'html.parser'))\n\n data = {\n 'dialog-overview_showAccount': 'Submit',\n 'menuLinks_SUBMIT': 1,\n 'menuLinks:_idcl': '',\n 'menuLinks:_link_hidden_': '',\n 'javax.faces.ViewState': last_req_hidden_inputs.get(\n 'javax.faces.ViewState'),\n '_token': self.token,\n 'productId': account_number\n }\n\n path = '/im/im/csw.jsf'\n req = self.session.post(self.BASE_URL + path, data=data)\n self.last_req_body = req.content\n\n logger.debug('Transaction request response code %s', req.status_code)\n\n self._parse_tokens(req.text)\n\n # Parse transactions\n transactions = self._parse_account_transactions(req.text)\n\n # Request was ok but but no transactions were found. Try to refetch.\n # Requests seems to loose the connections sometimes with the message\n # \"Resetting dropped connection\". 
This should work around that\n # problem.\n if req.status_code == requests.codes.ok and not transactions:\n transactions = self.get_account_transactions(account_number)\n\n return transactions", "def accounts(self):\n if self._accounts is None:\n url = f'{self._ynab.api_url}/budgets/{self.id}/accounts'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving accounts, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n self._accounts = [Account(self, account)\n for account in response.json().get('data', {}).get('accounts', [])]\n return self._accounts", "def get_accounts(self, session: \"Session\") -> List[Account]:\n\n self.__get_dn(session)\n\n result = session.soapclient.get_accounts_by_owner(self.dn)\n return [Account(session, account=r) for r in result]", "def get_account_transactions_by_id(self, TransactionId):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('ListAccountPostingsById', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_postings(data.get('data', {})) if data.get('data') else {}", "def get_accounts(self):\r\n return self._accounts", "def list(self, filter, *args, timeout=None):\n req = AccountGrantListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata(\n 'AccountGrants.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.account_grants:\n yield plumbing.convert_account_grant_to_porcelain(\n plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "def getCustomerAccount(self):\n self.logger.debug(\"\")\n for cust in self.getCustomerAccountData():\n accounts = len(cust['accounts'])\n self.logger.debug(\"%d accounts in %s\", accounts, cust['CustomerId'])\n ii = 1\n for acct in cust['accounts']:\n self.logger.debug(\"yield %s, %s\", cust['CustomerId'], acct['Id'])\n yield cust['CustomerId'], acct['Id'], ii, accounts\n ii += 1", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account 
Number\"])\n )", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts", "def accounts(self):\n return self._accounts.values()", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def list(self, filter, *args, timeout=None):\n req = AccountListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata('Accounts.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.accounts:\n yield plumbing.convert_account_to_porcelain(plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)", "def list_member_accounts(nextToken=None, maxResults=None):\n pass", "def accounts(self):\r\n return acc.Accounts(self)", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def get_transactions(self, crypto, address, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting historical transactions. 
\"\n \"Or rather it has no defined 'get_transactions' method.\"\n )", "def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, flags), None) for transaction in transactions]", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def accounts(web3):\n return web3.eth.accounts", "def tenants_for_token(self, context):\n token_ref = self.token_api.get_token(context=context,\n token_id=context['token_id'])\n assert token_ref is not None\n\n user_ref = token_ref['user']\n tenant_refs = []\n for tenant_id in user_ref['tenants']:\n tenant_refs.append(self.identity_api.get_tenant(\n context=context,\n tenant_id=tenant_id))\n return self._format_tenants_for_token(tenant_refs)", "def get_all_accounts(self, account_id=None, account_name=None, search=False):\n if search:\n re_meth = re.search\n else:\n re_meth = re.match\n if account_id and not re.match(\"\\d{12}\", account_id):\n if not account_name:\n account_name = account_id\n account_id = None\n self.log.debug('Attempting to fetch all accounts matching- account_id:' +\n str(account_id) + ' account_name:' + str(account_name))\n response = self.get_response_items('ListAccounts', {}, item_marker='accounts',\n list_marker='Accounts')\n retlist = []\n for account in response:\n if account_name is not None:\n if not search:\n account_name = \"^{0}$\".format(account_name.strip())\n if not re_meth(account_name, account['account_name']):\n continue\n if account_id is not None:\n if not search:\n account_id = \"^{0}$\".format(account_id .strip())\n if not re_meth(account['account_id'], account_id):\n continue\n retlist.append(account)\n return retlist", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions", "async def get_open_accounts(self):\n result = []\n URL = API_HOST + \"/api/resources/header\"\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL)\n\n json_data = await response.json()\n accounts = json_data[\"data\"][\"accounts\"][\"data\"][\"data\"]\n\n for account in accounts:\n if account[\"statusCategory\"] == STATUS_CATEGORY_OPEN:\n 
result.append(account[\"accountNumber\"])\n\n return result", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def _generate_accounts(self):\n accounts = []\n auth_url = 'http://{}:5000/v3/'.format(self.host)\n\n for tenant, network in self.tenants:\n account = RwcalYang.CloudAccount.from_dict({\n 'name': 'rift.auto.openstack',\n 'account_type': 'openstack',\n 'openstack': {\n 'key': self.user or self._DEFAULT_USERNAME,\n 'secret': self._DEFAULT_PASSWORD,\n 'auth_url': auth_url,\n 'tenant': tenant,\n 'mgmt_network': network}})\n\n accounts.append(account)\n\n return accounts", "def get_customer_accounts(self,\r\n accept,\r\n customer_id,\r\n status=None):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(accept=accept,\r\n customer_id=customer_id)\r\n\r\n # Prepare query URL\r\n _url_path = '/aggregation/v1/customers/{customerId}/accounts'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\r\n 'customerId': customer_id\r\n })\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'status': status\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'Finicity-App-Key': Configuration.finicity_app_key,\r\n 'Accept': accept\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomHeaderAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, CustomerAccounts.from_dictionary)", "def accounts(self):\r\n return resources.Accounts(self)", "def list_accounts(self):\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information", "def list(self, ids=None, parent_id=None, name=None, limit=None):\n query = {'ids': ids, 'parentId': parent_id, 'name': name, 'limit': limit}\n kwargs = {'query': dict(((k, v) for k, v in query.items() if v))}\n return self.client.find_all_accounts(**kwargs)", "def show_accounts(customer_id):\n customer_accounts = db_helper.get_customer_accounts(customer_id)\n if not customer_accounts:\n return api_utils.error(\"No accounts for customer with id \\\n number {} found\".format(customer_id), 404)\n else:\n return jsonify({\"accounts\": customer_accounts})", "def test_companies_company_id_data_bank_accounts_account_id_transactions_get(self):\n pass", "def list_offering_transactions(nextToken=None):\n pass", "def get_all_transactions(self) -> Iterator[BaseTransaction]:\n # It is necessary to retain a copy of the current scope because this method will yield\n # and the scope may undergo changes. 
By doing so, we ensure the usage of the scope at the\n # time of iterator creation.\n scope = self.get_allow_scope()\n for tx in self._get_all_transactions():\n if scope.is_allowed(tx):\n yield tx", "def test_wallets_get_transaction_list(self):\n pass", "def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)", "def get_accounts(self, count: int = 100, account_type: str = None) -> list:\n all_accounts = list(\n itertools.islice(self.client.accounts.get_all_generator(), count)\n )\n if account_type is None:\n return all_accounts\n return [a for a in all_accounts if a[\"acctType\"] == account_type]", "def fetchById(accountIdList):\n accounts= []\n url = accountsConfig['domain']\n for accId in accountIdList:\n r = requests.get(url +'/'+ str(accId), headers=accountsConfig['headers']).json()\n accounts.append(r)\n return accounts", "def get_accounts():\n graph = facebook.GraphAPI(mytoken)\n pages = graph.get_object('me/accounts')\n pages_info=[]\n for page in pages['data']:\n pages_info.append( ( page['name'], page['access_token'] ) )\n return pages_info", "def test_list_scheduled_payments_specific_accounts(self):\n pass", "def get_transfer_list(self,\n address: str,\n token_address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transfer, ...]:\n ...", "def listaccounts(self, minconf=1, as_dict=False):\n if as_dict:\n return dict(self.proxy.listaccounts(minconf))\n else:\n return list(self.proxy.listaccounts(minconf).keys())", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def test_get_transaction_list_request(self):\n self.trans_details.get_transaction_list(\n batch_id = 123456,\n )", "def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def get_all_customer_ids():\n\n # your code", "def _get_member_accounts(billing_account_id, options):\n assumed_credentials = _request_sts_credentials(\n billing_account_id=billing_account_id,\n options=options,\n )\n billing_account_session = boto3.Session(\n aws_access_key_id=assumed_credentials[\"AccessKeyId\"],\n aws_secret_access_key=assumed_credentials[\"SecretAccessKey\"],\n aws_session_token=assumed_credentials[\"SessionToken\"],\n )\n org_client = billing_account_session.client(\"organizations\")\n list_accounts_paginator = org_client.get_paginator(\"list_accounts\")\n accounts = []\n for page in list_accounts_paginator.paginate():\n accounts.extend(page[\"Accounts\"])\n\n # Remove any account that is not actively part of this organization yet.\n only_active_accounts = filter(lambda a: a[\"Status\"] == \"ACTIVE\", accounts)\n\n # Only return the key: value pairs that are defined in the --field option.\n only_certain_fields_of_active = list(\n map(\n lambda a: {k: v for k, v in a.items() if k in options[\"--field\"]},\n only_active_accounts,\n )\n )\n return only_certain_fields_of_active", "def get_transaction_list(self,\n address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transaction, ...]:\n ...", "def filtered_accounts(\n urja_data: 
UrjanetData,\n ) -> List[Account]:\n return [\n account\n for account in urja_data.accounts\n if account.IntervalEnd is not None and account.IntervalStart is not None\n ]", "def accounts(self):\r\n return accounts.Accounts(self)", "def fetchBatchAccounts(config, start, limit): \n config['params']['from'] = start\n config['params']['limit'] = limit\n url = config['domain']\n r = requests.get(url, headers=config['headers'], params=config['params']).json()\n print(\"Downloading From: \", config['params']['from'], ' To: ', config['params']['from'] + config['params']['limit'], '| Limit: ', config['params']['limit'])\n return r", "def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions", "def get_account_transactions(self, StartTime, EndTime):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('ListAccountPostings', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_postings(data.get('data', {})) if data.get('data') else {}", "def get_account_withdraw_quota(self, currency: 'str') -> list:\n check_should_not_none(currency, \"currency\")\n\n params = {\n \"currency\": currency,\n }\n\n from huobi.service.wallet.get_account_withdraw_quota import GetAccountWithdrawQuotaService\n return GetAccountWithdrawQuotaService(params).request(**self.__kwargs)", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def accounts():", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def get_transactions(\n self,\n since: Optional[date] = None,\n count: int = 1000,\n offset: int = 0,\n include_pending: bool = False,\n ) -> List[Transaction]:\n return self.session.get_transactions(\n self.account_id,\n options={\n 'since': since,\n 'count': count,\n 'offset': offset,\n 'include_pending': include_pending,\n },\n )", "def get_account_ids(response):\n return [account['Id'] for account in response[0]]", "def _get_ad_accounts() -> [adaccount.AdAccount]:\n system_user = user.User(fbid='me')\n ad_accounts = system_user.get_ad_accounts(fields=['account_id',\n 'name',\n 'created_time',\n 'timezone_offset_hours_utc'])\n return list(ad_accounts)", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def accounts():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n account_ids = 
redis_client.smembers('%s-accounts' % session['phone'])\n accounts = [kloudless.Account.retrieve(i) for i in account_ids]\n callback_url = quote_plus(my_url('auth_callback'))\n return render_template('accounts.html', accounts=accounts, app_number=APP_NUMBER,\n callback_url=callback_url, app_id=KLOUDLESS_APP_ID)", "def list_accounts(self):\r\n\r\n account = self.client['Account']\r\n mask = 'cdnAccounts[%s]' % ', '.join(['id',\r\n 'createDate',\r\n 'cdnAccountName',\r\n 'cdnSolutionName',\r\n 'cdnAccountNote',\r\n 'status'])\r\n return account.getObject(mask=mask).get('cdnAccounts', [])", "def display_accounts(cls):\n return cls.account_list", "def _fund(src_acc, accounts, amount, shard_index):\n if not accounts:\n return []\n hashes = []\n for account in accounts:\n from_address = cli.get_address(src_acc)\n to_address = cli.get_address(account)\n passphrase = get_passphrase(src_acc)\n h = send_transaction(from_address, to_address, shard_index, shard_index, amount,\n passphrase=passphrase, retry=True, wait=True)\n if h is None:\n raise RuntimeError(f\"Failed to send tx from {from_address} to {to_address}\")\n hashes.append(h)\n return hashes", "def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)", "def get_wallet_trades(self, walletId, filters={}):\n return", "def get_transactions_for_ynab_account(self, account_name):\n account = self.get_account_by_name(account_name)\n if not account:\n return []\n return [YnabServerTransaction(transaction, transaction.account)\n for transaction in account.transactions]", "def access(config, region, accounts=()):\n config = validate.callback(config)\n accounts_report = []\n\n def check_access(account):\n accounts_report.append(account)\n session = get_session(account['role'], region)\n identity = session.client('sts').get_caller_identity()\n account['account_id'] = identity['Account']\n account.pop('groups')\n account.pop('role')\n client = session.client('iam')\n policy_arn = identity['Arn']\n if policy_arn.count('/') > 1:\n policy_arn = policy_arn.rsplit('/', 1)[0]\n if ':sts:' in policy_arn:\n policy_arn = policy_arn.replace(':sts', ':iam')\n if ':assumed-role' in policy_arn:\n policy_arn = policy_arn.replace(':assumed-role', ':role')\n evaluation = client.simulate_principal_policy(\n PolicySourceArn=policy_arn,\n ActionNames=['logs:CreateExportTask'])['EvaluationResults']\n account['access'] = 
evaluation[0]['EvalDecision']\n\n with ThreadPoolExecutor(max_workers=16) as w:\n futures = {}\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n futures[w.submit(check_access, account)] = None\n for f in as_completed(futures):\n pass\n accounts_report.sort(key=operator.itemgetter('access'), reverse=True)\n print(tabulate(accounts_report, headers='keys'))", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)" ]
[ "0.6701486", "0.6636998", "0.6507858", "0.64012176", "0.63822055", "0.6338222", "0.6296368", "0.6234683", "0.6104485", "0.6088276", "0.6046946", "0.6040939", "0.60122126", "0.6005696", "0.5965876", "0.59531087", "0.59082997", "0.5885692", "0.5884602", "0.5837892", "0.58305675", "0.5803219", "0.57996356", "0.57853174", "0.576111", "0.5754231", "0.5743111", "0.573307", "0.5723423", "0.569237", "0.56891644", "0.5662928", "0.5659932", "0.5640312", "0.5629132", "0.5621146", "0.56048733", "0.55871373", "0.5562844", "0.5561122", "0.55423087", "0.55336773", "0.5504395", "0.54964024", "0.5491384", "0.5490605", "0.5475358", "0.5474084", "0.5465003", "0.5463779", "0.5456663", "0.5445342", "0.53738284", "0.5368851", "0.5367616", "0.53616667", "0.53489375", "0.5341963", "0.5339534", "0.5336507", "0.53339434", "0.5318109", "0.5303393", "0.5301109", "0.5300052", "0.52934355", "0.52926797", "0.52896476", "0.5286583", "0.52730095", "0.52690595", "0.52589697", "0.52434623", "0.5238514", "0.52287114", "0.5221397", "0.5216884", "0.5211563", "0.520964", "0.520905", "0.5205822", "0.51943064", "0.5184733", "0.51706296", "0.5165159", "0.516325", "0.51597136", "0.51552093", "0.51539594", "0.51474077", "0.51429904", "0.5133437", "0.51220876", "0.5118758", "0.51084167", "0.51058203", "0.509971", "0.5099288", "0.50989383", "0.50982136" ]
0.5752467
26
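
Illustration for the record above (fetching the transactions for an account after a given transaction ID). A minimal, runnable sketch of how the `get_transaction_list2` document snippet could be wired up; the `SimpleApi` wrapper, base URL, token, and IDs below are assumptions for demonstration, not part of the dataset:

import requests

class SimpleApi:
    # Assumed stand-in for the snippet's `self._api`: GETs an endpoint with query params.
    def __init__(self, base_url, token):
        self.base_url = base_url.rstrip('/')
        self.headers = {'Authorization': 'Bearer {0}'.format(token)}

    def request(self, endpoint, params=None):
        resp = requests.get('{0}/{1}'.format(self.base_url, endpoint),
                            headers=self.headers, params=params or {})
        resp.raise_for_status()
        return resp.json()

class TransactionClient:
    def __init__(self, api):
        self._api = api

    def get_transaction_list2(self, account_id, aid):
        # Same shape as the document field: transactions after (but not including) `aid`.
        endpoint = 'accounts/{0}/transactions/sinceid'.format(account_id)
        params = {'id': aid}
        return self._api.request(endpoint, params=params)

if __name__ == '__main__':
    client = TransactionClient(SimpleApi('https://api.example.com/v1', 'API_TOKEN'))
    print(client.get_transaction_list2('ACCOUNT-001', 175823))
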
To add parents to database
def add_parent(session, df):\n try:\n for _, row in df.iterrows():\n parent = Parent()\n parent.name = row['parent_name']\n parent.family = row['family']\n session.add(parent)\n except Exception as ex:\n session.rollback()\n raise ex\n else:\n session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents", "def add_parent(sender, instance, **kwargs):\n if not kwargs['created']:\n return\n for att in ['term', 'term_secondary', 'context']:\n if getattr(instance, att) is None:\n continue\n parent = getattr(instance, att).item_id\n child = instance.item_id\n ItemRelation.objects.get_or_create(\n parent_id=parent,\n child_id=child,\n visible=True,\n active=instance.active,\n )", "def set_parents(self):\n route53 = self.pcf_field.get_particles(flavor=\"route53_record\")\n route53_record_pcf_name = route53.get(\"pcf_name\", self.name)\n ec2_particles = self.pcf_field.get_particles(flavor=\"ec2_instance\")\n\n self.pcf_field.particles[\"route53_record\"][route53_record_pcf_name].parents.update(list(ec2_particles.values()))\n self.pcf_field.link_particles(self.pcf_field.particles)", "def set_parents_table(self) -> None:\n self.parents[\"A\"] = \"start\"\n self.parents[\"B\"] = \"start\"\n self.parents[\"fin\"] = None", "def update_parents(self):\n for a_parent in self.parents:\n for child in self.children:\n for a_dest in self.children[child]:\n if (a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]) not in a_parent.children[child]:\n a_parent.children[child].append((a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]))\n a_parent.update_parents()", "def parents(self, path):\n pass", "def add_parent(self, node):\n self.parents.append(node)\n self.parent_depencencies_left += 1", "def parents(rectype, source, include):\n click.echo('Migrating {}s...'.format(rectype))\n with commit():\n import_parents_from_file(source, rectype=rectype, include=include)", "def insert(self, parent, name):\n pid = self.db.insert_returning_id('simple', dict(parent=parent, name=name))\n return pid", "def insert(self, parent, name):\n if parent is None:\n #pid = self.db.execute('''\n # INSERT INTO pathenum (path, name) VALUES (\n # '',\n # :name\n # ) RETURNING id\n # ''', dict(parent=parent, name=name)\n #).list()[0][0]\n pid = self.db.insert_returning_id('pathenum', dict(path='', name=name))\n else:\n #pid = self.db.execute('''\n # INSERT INTO pathenum (path, name) VALUES (\n # (SELECT path || id || '.' FROM pathenum WHERE id = :parent),\n # :name\n # ) RETURNING id\n # ''', dict(parent=parent, name=name)\n #).list()[0][0]\n if self.db.is_dialect('mysql'):\n # FIXME: bardzo brzydki kod\n path = self.db.execute('''SELECT concat(path, id, '.') AS path FROM pathenum WHERE id = %s''' % parent).fetch_single('path')\n #print path\n pid = self.db.insert_returning_id('pathenum', dict(name=name, path=path))\n #pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n # path=\"(SELECT concat(path, id, '.') FROM pathenum WHERE id = %s)\" % parent)\n #)\n elif self.db.is_dialect('sqlserver'):\n pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n path=\"(SELECT path + CONVERT(varchar(10), id) + '.' FROM pathenum WHERE id = %s)\" % parent)\n )\n else:\n pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n path=\"(SELECT path || id || '.' 
FROM pathenum WHERE id = %s)\" % parent)\n )\n return pid", "def add_parent(self, child, parent):\r\n setp = self._parents.setdefault(child, set())\r\n setp.add(parent)", "def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def insert(self, parent, name):\n if parent is None:\n pid = self.db.execute('''\n INSERT INTO ltreetab (path, name) VALUES (\n text2ltree('' || currval('ltreetab_id_seq')),\n :name\n ) RETURNING id\n ''',\n dict(parent=parent, name=name)\n ).fetch_single()\n else:\n pid = self.db.execute('''\n INSERT INTO ltreetab (path, name) VALUES (\n (SELECT path FROM ltreetab WHERE id = :parent) ||\n ('' ||currval('ltreetab_id_seq')),\n :name\n ) RETURNING id\n ''',\n dict(parent=parent, name=name)\n ).fetch_single()\n \n return pid", "def register_parent(self, **fields):\n if 'parent_key' not in fields.keys():\n raise KeyError('Primary key is missing')\n existing_fields = [i.name for i in self._db.get_columns('parents')]\n needed_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n needed_fields[key] = value\n if 'UID' not in needed_fields.keys():\n needed_fields['UID'] = needed_fields['parent_key']\n check = Parents.get_or_none(parent_key=needed_fields['parent_key'])\n if check is not None:\n return check\n new_parent = Parents.get_or_create(**needed_fields)\n return new_parent", "def set_parent(self, parent):\n if self not in parent.children:\n parent.children.append(self)\n self.parent = parent", "def add_parent(\n self, parent: \"Vertex\", loop: bool = False, first: bool = False\n ) -> None:\n if (loop or self != parent) and (parent not in self.parents):\n if first:\n self.parents.insert(0, parent)\n else:\n self.parents.append(parent)", "def add_parents(self, nodes):\n # Check that nodes is a list/tuple of BaseNode objects\n if (isinstance(nodes, (list, tuple)) and\n all([isinstance(node, BaseNode) for node in nodes])):\n for node in nodes:\n self.add_parent(node)\n else:\n raise TypeError('add_parents() is expecting an iterable of '\n 'Job and/or Dagman objects')\n\n return self", "def init_db(self, parent_type, child_type):\n self.parent = Node(self.handler, parent_type)\n self.children = [ Node(self.handler, child_type) for x in range(0, self.SIZE) ]\n for node in self.children:\n Link(self.handler, self.parent.node, node.node, child_type.upper())", "def insert(self, pid, pname, pparent, pobj, ptype):\r\n self.pids.append(pid)\r\n self.pnames.append(pname)\r\n self.pparents.append(pparent)\r\n self.ptypes.append(ptype)\r\n self.pobjs.append(pobj)", "def initialize_parents(self, filename=None):\n\t\tpass", "def include_parents():\n suffix = uuid4().hex\n\n click.secho('*** Creating Genres for Movie...', fg='green')\n _horror = _make_document('genre', name='Horror - %s' % suffix)\n click.secho(json.dumps(_horror, indent=2, sort_keys=True), fg='yellow')\n\n _monster = _make_document('genre', name='Monster - %s' % suffix, parent=_horror['_id'])\n click.secho(json.dumps(_monster, indent=2, sort_keys=True), fg='yellow')\n\n _vampire = _make_document('genre', name='Vampire - %s' % suffix, 
parent=_monster['_id'])\n click.secho(json.dumps(_vampire, indent=2, sort_keys=True), fg='yellow')\n\n _werewolf = _make_document('genre', name='Werewolf - %s' % suffix, parent=_monster['_id'])\n click.secho(json.dumps(_werewolf, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating Movie with genres `Werewolf` and `Vampire`, parent genres should be auto-filled...', fg='green')\n twilight = _make_document('movie', title='Twilight', genres=[_vampire['_id'], _werewolf['_id']])\n click.secho(json.dumps(twilight, indent=2, sort_keys=True), fg='yellow')", "def set_parent_table(self, table):\n self.__parent_table = table", "def parent_id(self, new_id: str) -> None:\n self._db_data.parent_id = new_id", "def set_parent(self, index):\n self.add_parent(self[index])", "def parents(self, p):\n raise NotImplementedError('must be implemented by subclass')", "def add_parent(self, parent, *args, **kwargs):\n return parent.add_child(self, **kwargs)", "def add_parent_groups(ctx):\n asyncio.run(add_parent_groups_impl(ctx.obj[\"config\"]))", "def show_available_parents(self):\n self.categoryParent.clear()\n\n parents = self.orm.fetch_parents()\n self.categoryParent.addItems([p.name for p in parents])\n\n self.categoryParent.addItem('')\n self.categoryParent.setCurrentText('')", "def add_children(self,node):\n\n node.parent_id = self.id\n node.level = self.level + 1\n node.path = node._create_path()\n node.save()", "def parents(cls):\n return db.relationship(ext.role_model, secondary='role_links',\n primaryjoin=f\"RoleLink.role_id==%s.{ext.role_pk}\" % cls.__name__,\n secondaryjoin=f\"RoleLink.parent_id==%s.{ext.role_pk}\" % cls.__name__,\n backref=\"children\")", "def add_child(session, df):\n try:\n for _, row in df.iterrows():\n child = Child()\n child.name = row['child_name']\n child.residence = row['Residence']\n father_obj = session.query(Parent).filter_by(name=row['father_name']).first()\n child.parents.append(father_obj)\n mother_obj = session.query(Parent).filter_by(name=row['mother_name']).first()\n child.parents.append(mother_obj)\n session.add(child)\n except Exception as ex:\n session.rollback()\n raise ex\n else:\n session.commit()", "def AddParent(self, parent=None):\n if isinstance(parent, Tree):\n parent.AddNode(self)", "def add_parent(self, chromosome):\n self.mating_pool.append(to_chromosome(chromosome))", "def getParents(obj):", "def populate(data, model: models.Model) -> list:\n\n children = []\n\n for record in data:\n # Check for children\n if 'children' in record:\n children_data = record.pop('children', [])\n obj = model.objects.create(**record)\n obj_children = populate(children_data, model)\n for child in obj_children:\n child.parent = obj\n child.save()\n # If not children, just create\n else:\n obj = model.objects.create(**record)\n # Adding record\n children.append(obj)\n\n return children", "def _connect_parents(self, s, opened):\n parents = self.parent_memory.get_parents(s)\n for parent in parents:\n if parent not in self.connected:\n self._connect_state(parent, opened)", "def test_add_parent(self):\n _, _, groupa, groupb = create_objects()\n groupa.add_parent(groupb)\n assert groupb in groupa.parents\n assert groupa in groupb.children\n return (groupa, groupb)", "def relationships(self):", "def add_parents(self, sample, fitness, max_parent_per_capita=1.0):\n\n assert isinstance(max_parent_per_capita, Number) and 0 <= max_parent_per_capita <= 1.0\n self.size = len(sample)\n max_parent_size = int(max_parent_per_capita * self.size)\n\n probabilities = 
np.cos(fitness) ** 2\n r = np.random.random(size=self.size)\n parents = sample[r < probabilities]\n\n parent_size = min(parents.shape[0], max_parent_size)\n split = parent_size // 2\n\n self.father = parents[:split]\n self.mother = parents[split: parent_size]", "def copy_to_parent(self, node_embed_names=..., edge_embed_names=...):\n ...", "def test_related_add_editions_to_parent(app, testdata):\n doc1 = Document.get_record_by_pid(testdata[\"documents\"][0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(testdata[\"documents\"][1][\"document_pid\"])\n child2 = Series.get_record_by_pid(testdata[\"series\"][0][\"series_pid\"])\n\n doc1.related.add_edition(doc2)\n parent_editions = doc1.related.editions\n child_editions = doc2.related.editions\n assert len(parent_editions) == 1\n assert len(child_editions) == 1\n assert parent_editions[0] == doc2\n assert child_editions[0] == doc1\n\n doc1.related.add_edition(child2)\n parent_editions = doc1.related.editions\n child_editions = child2.related.editions\n assert len(parent_editions) == 2\n assert len(child_editions) == 2\n assert parent_editions[0] == doc2\n assert parent_editions[1] == child2\n assert child_editions[0] == doc1\n assert child_editions[1] == doc2", "def get_parents(self):\n return self.parents", "def num_parents(self, num_parents):\n self._num_parents = num_parents", "def add_relatives(self, child2parent, idx2word):\n for child, parent in child2parent.items():\n if parent not in (0, -1):\n parent_word = idx2word[parent]\n parent_word.add_child(child)\n child.parent = parent_word", "def add_parent_attributes(self):\n if len(self.parent_attributes) == 0:\n return\n dest = self.parent.attributes\n source = self.parent_attributes\n changes = {}\n self.merge_attribute_defs(dest, source, changes)\n for aid, value in changes.iteritems():\n# self.parent.h5node.attrs[aid] = value\n # may need modifying for MATLAB\n #- if self.path not in self.file.file_pointer:\n if self.file.get_node(self.path, abort=False) is None:\n # create parent node since it does not exist\n print \"trying to set parent attributes on non-registered parent node:\"\n print \"Non-registered parent node is: '%s'\", self.path\n traceback.print_stack()\n sys.exit(1)\n #- self.file.file_pointer[self.path].attrs[aid] = value\n self.file.set_attribute(self.path, aid, value)", "def make_parents(self):\r\n self.parents = []\r\n \r\n for loopindex in range(0, int(self.population_size * 0.6)):\r\n while True:\r\n if loopindex < int(self.population_size * 6 / 15):\r\n parent = random.choice(self.best_districts)\r\n else:\r\n parent = random.choice(self.worst_districts)\r\n \r\n if parent not in self.parents:\r\n self.parents.append(parent)\r\n break", "async def add_parent_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n stmt = select(Group).filter(Group.parent_id is None).options(selectinload(Group.parent))\n result = await dbsession.execute(stmt)\n stmt = select(func.count(Group.id)).filter(Group.parent_id is None)\n result_count = await dbsession.execute(stmt)\n with click.progressbar(\n result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n ) as progress:\n for group in progress:\n if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n categories = apply_aat(group.value, merge=False)\n if categories:\n for category_list in categories:\n mapped = False\n for category in category_list:\n stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(stmt)\n parent_group = 
result.scalars().first()\n if not parent_group:\n parent_group = Group(\n value=category, label=category[0].upper() + category[1:], split=\"parent\"\n )\n dbsession.add(group)\n group.parent = parent_group\n mapped = True\n group = parent_group # noqa: PLW2901\n if group.parent_id:\n break\n if mapped:\n break\n else:\n mapped = False\n for category in apply_nlp(group.value):\n stmt = select(Group).filter(\n or_(Group.value == category, Group.value == inflection.pluralize(category))\n )\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if parent_group:\n group.parent = parent_group\n await dbsession.commit()\n mapped = True\n break\n if not mapped:\n if group.value not in [\"styles and periods\"]:\n for category in apply_nlp(group.value):\n hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()", "def fetchMore(self, parent=None):\n parent = QModelIndex() if parent is None else parent\n if parent.column() > 0:\n return\n\n parent_item = self.treeItem(parent)\n if parent_item.children_fetched:\n return\n\n tree_items = self._fetchObjectChildren(parent_item.obj,\n parent_item.obj_path)\n\n self.beginInsertRows(parent, 0, len(tree_items) - 1)\n for tree_item in tree_items:\n parent_item.append_child(tree_item)\n\n parent_item.children_fetched = True\n self.endInsertRows()", "def set_parent(self, parent):\n self.parent = parent", "def set_parent(self, parent):\n self.parent = parent", "def add_children_to_parents(self, mutated_pop_dict, mating_pop_dict):\n\n print('Combining parent and child generations')\n\n merged_networks_dict = OrderedDict()\n\n for id, G in mutated_pop_dict.items():\n new_id = ''.join(\n [random.choice(string.ascii_letters + string.digits)\n for i in range(10)]\n )\n merged_networks_dict[new_id] = copy.deepcopy(G)\n for id, G in mating_pop_dict.items():\n merged_networks_dict[id] = copy.deepcopy(G)\n\n return merged_networks_dict", "def add_relationship(self, relationship):\n self.relationships[relationship.parent].append(relationship)", "def _setup_parents(self, parents=None):\n from trac import config\n self.parents = (parents or [])\n for filename in self.get('inherit', 'file').split(','):\n filename = Section._normalize_path(filename.strip(), self.env)\n self.parents.append(config.Configuration(filename))", "def find_parents(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].parents = []\r\n for i in range(len(self.vertices)):\r\n for child in self.vertices[i].children:\r\n if i not in self.vertices[child].parents:\r\n self.vertices[child].parents.append(i)", "def insert_menus(self, parent, first, last):\n for i in range(first, last + 1):\n index = self._model.index(i, 0, parent)\n flattened = [index]\n flattened.extend(self._flatten_hierarchy(self._model, index))\n for newi in flattened:\n self.create_menu_for_index(newi)", "def on_parent_changed(self):\n pass", "def test_related_add_multiple_children(app, testdata):\n # Test language\n docs = testdata[\"documents\"]\n 
series = testdata[\"series\"]\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 0\n assert len(doc2.related.editions) == 0\n assert len(ser3.related.editions) == 0\n\n doc1.related.add_edition(doc2)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 1\n assert len(doc2.related.editions) == 1\n assert len(ser3.related.editions) == 0\n\n doc1.related.add_edition(ser3)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 2\n assert len(doc2.related.editions) == 2\n assert len(ser3.related.editions) == 2", "def parent_connect(self, node):\n if self.parent.get() >= self.data:\n self.parent.set_left(node)\n if node and node.left is not None:\n node.set_parent(self.parent)\n else:\n self.parent.set_right(node)\n if node and node.left is not None:\n node.set_parent(self.parent)", "def link(self, hps):\n for hp in self._parent_hps:\n parent = hps[hp]\n self.parents.add(parent)\n parent.children.add(self)", "def find_parents(self) -> None:\n self.referers: Dict[str, List[Har2Tree]] = defaultdict(list)\n for hartree in self.hartrees:\n if hartree.root_referer:\n self.referers[hartree.root_referer].append(hartree)", "def set_parent(self, new_parent):\n node = BinaryNode.or_none(new_parent)\n self.parent = node", "def get_parents(self):\r\n\r\n raise NotImplementedError()", "def link_parent_objects(self, pages):\n pages = list(pages)\n page_dict = {}\n\n for p in pages:\n page_dict[p.id] = p\n\n for p in pages:\n if p.parent_id is None:\n p.parent = None\n else:\n p.parent = page_dict[p.parent_id]\n \n p._ancestors_retrieved = True\n \n return pages", "def get_parents(self, node):\n pass", "def get_parents_list(self):\n return []", "def set_parent(self, parent_model, parent_pk):\n\n self.parent_pk = parent_pk\n self.parent_model = parent_model\n\n for field in self.model._meta.get_fields():\n if isinstance(field, models.ForeignKey):\n if issubclass(parent_model, field.related_model):\n self.parent_field = field\n break", "def test_create_parent(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())", "def add_module_parents(self):\n for module in self.modules:\n parents = get_module_parents(module)\n missing_parents = [p for p in parents if p not in self.modules]\n self.modules.extend(missing_parents)", "def parent(self, nid):\n self._parent = nid", "def addParent(self, parentNode):\n if (type(parentNode)==list or isIterable(parentNode)):\n for p in parentNode:\n if (self.isAncestorOf(p)):\n raise CycleException, \"%s is a decendent of %s, cannot add as 
parent.\" % (parentNode, self)\n if (p not in self.parents()):\n p._children.append(self)\n self._parents.append(p)\n else:\n if (self.isAncestorOf(parentNode)):\n raise CycleException, \"%s is a decendent of %s, cannot add as parent.\" % (parentNode, self)\n if (parentNode not in self.parents()):\n parentNode._children.append(self)\n self._parents.append(parentNode)", "def plates_to_parent(self, index, plates):\n raise NotImplementedError()", "def _set_parent(self, parent):\n self.__parent = parent", "def create_hierarchy(parent_path, child_path):\n if parent_path:\n parent_skill_id = database_controller.get_skill(parent_path).id\n child_skill_id = database_controller.get_skill(child_path).id\n new_hierarchy = Hierarchy(parent_skill_id=parent_skill_id, child_skill_id=child_skill_id)\n db.session.add(new_hierarchy)\n db.session.commit()\n else:\n child_skill_id = database_controller.get_skill(child_path).id\n new_hierarchy = Hierarchy(child_skill_id=child_skill_id)\n db.session.add(new_hierarchy)\n db.session.commit()\n ''' \n new_hierarchy = Hierarchy()\n if parent_path:\n parent_skill = database_controller.get_skill(parent_path)\n new_hierarchy.parent_skill_assoc = parent_skill\n child_skill = database_controller.get_skill(child_path)\n new_hierarchy.child_skill_assoc = child_skill\n db.session.add(new_hierarchy)\n db.session.commit()\n '''", "def parent(self, parent: AbstractPaths):\r\n self._parent = parent", "def _setparent(self, parent):\n\t\tself.parent = parent\n\t\tif self.parent is not None:\n\t\t\tself.parent.components.add(self)", "def reparent(self, dim, child, new_parent):\n cls = dim.closure_table\n\n # Detach child\n self.execute(\n 'DELETE FROM %s '\n 'WHERE child IN (SELECT child FROM %s where parent = ?) '\n 'AND parent NOT IN (SELECT child FROM %s WHERE parent = ?)' % (\n cls, cls, cls\n ),\n (child, child)\n )\n\n # Set new parent\n self.execute(\n 'SELECT supertree.parent, subtree.child, '\n 'supertree.depth + subtree.depth + 1 '\n 'FROM %s AS supertree JOIN %s AS subtree '\n 'WHERE subtree.parent = ? '\n 'AND supertree.child = ?' 
% (cls, cls),\n (child, new_parent)\n )\n values = list(self.cursor)\n self.cursor.executemany(\n 'INSERT INTO %s (parent, child, depth) values (?, ?, ?)' % cls,\n values\n )", "def allocate_nodes(parents):\n tree = {i: Node(i) for i, j in enumerate(parents)}\n root_node = None\n\n for i, parent in enumerate(parents):\n if parent >= 0:\n # Set the parent for that node.\n tree[i].parent = tree[parent]\n # Add the current node as a child of the parent.\n tree[parent].add_child(tree[i])\n else:\n root_node = tree[i]\n\n return tree, root_node", "def _add_children(self, parent, parent_iter):\n parent_gadget = Gadget.from_widget(parent)\n if parent_gadget:\n children = parent_gadget.get_children()\n else:\n children = get_all_children(parent)\n\n for child in children:\n gadget = Gadget.from_widget(child)\n if not gadget:\n if isinstance(child, gtk.Container):\n self._add_children(child, parent_iter)\n else:\n continue\n\n elif not self._find_iter(parent_iter, gadget):\n child_iter = self._model.append(parent_iter, (gadget,))\n self._add_children(child, child_iter)", "def _add_owner(parent_id, child_id):\n db.session.add(\n pam.BivAccess(\n source_biv_id=parent_id,\n target_biv_id=child_id\n )\n )", "def __init__(self, name, parents, cpt):\n\t\tself.parents = parents\n\t\tself.name = name\n\t\tself.cpt = cpt", "def save(self, *args, **kwargs):\n\n if self.parent:\n self.level = self.parent.level + 1\n self.blog_category = self.parent.blog_category\n\n super(Category, self).save(*args, **kwargs)", "def set_parent(self, new_parent):\n self.__parent = new_parent", "def attach_node(self, node_id, new_parent_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n # todo: add check parent/node exist\n # todo: add check node is root of a tree\n\n paths_super_tree = self.paths.alias()\n paths_sub_tree = self.paths.alias()\n\n connection.execute(\n self.paths.insert().from_select(names=[\n 'ancestor', 'descendant', 'depth'\n ],\n select=select([\n paths_super_tree.c.ancestor,\n paths_sub_tree.c.descendant,\n (paths_super_tree.c.depth + paths_sub_tree.c.depth + 1)\n ]).where(\n paths_super_tree.c.descendant == new_parent_id\n ).where(\n paths_sub_tree.c.ancestor == node_id\n )\n )\n )", "def setparent(self, parent):\n\t\tself._setparent(parent)", "def map_to_parent_eid(self, eid):\n ...", "def set_categories(post, old, new):\n c = None\n if new:\n c = Category(name=new.pop())\n parent = c\n son = None\n while new:\n son = parent\n parent = Category(name=new.pop())\n son.parent = parent\n db.session.add(parent)\n if old:\n old_c = Category.query.get(old)\n parent.parent = old_c\n elif old:\n c = Category.query.get(old)\n if c is not None:\n db.session.add(c)\n post.category = c", "def _init_parent(self,path):\n parent_path=self.__class__._parent_path(path)\n self.parent=self.__class__.parent(parent_path)\n log.info(\"Getting parent %r:%r\"%(self.__class__.parent,parent_path))\n self.parent._init_child(path,self)", "def __handle_parents__(self, output, **kwargs):\n parent_map = kwargs.pop(\"parent_map\")\n subject = kwargs.pop('subject')\n predicate = kwargs.pop('predicate')\n parent_objects = self.execute(\n self.triple_maps[str(parent_map)],\n output,\n **kwargs)\n for parent_obj in parent_objects:\n if isinstance(parent_obj, BaseRdfDataType):\n parent_obj = parent_obj.rdflib\n if parent_obj == subject:\n continue\n output.add((\n subject,\n predicate,\n parent_obj))", "def add_node(self, node, parent):\n if node not in self.map.edges:\n self.map.edges[node] = []\n if parent not in 
self.map.edges:\n self.map.edges[parent] = [node]\n else:\n self.map.edges[parent].append(node)", "def upgrade():\n op.execute(\n \"\"\"\n insert ignore into relationships (\n modified_by_id,\n created_at,\n updated_at,\n source_id,\n source_type,\n destination_id,\n destination_type\n )\n select\n modified_by_id,\n created_at,\n updated_at,\n parent_id as source_id,\n parent_type as source_type,\n child_id as destination_id,\n child_type as destination_type\n from snapshots\n \"\"\"\n )", "def copy_from_parent(self, node_embed_names=..., edge_embed_names=..., ctx=...):\n ...", "def _add_graph_level(graph, level, parent_ids, names, scores):\n for i, parent_id in enumerate(parent_ids):\n new_node = (level, i)\n parent_node = (level - 1, parent_id)\n graph.add_node(new_node)\n graph.node[new_node][\"name\"] = names[i]\n graph.node[new_node][\"score\"] = str(scores[i])\n graph.node[new_node][\"size\"] = 100\n # Add an edge to the parent\n graph.add_edge(parent_node, new_node)", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def fm_all_parents(self):\n return self._relation_lst[self.PARENT].copy()", "def _addEntity(self, pid, chunks):\n parent = chunks[pid]\n sub = None\n # Find subject\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n if child.func in SubDict:\n sub = child\n if child.func == \"では\":\n if child.negative != 0 or any([val.negative != 0 for key, val in self.G.successors(child.main)]):\n pass\n else:\n sub = None\n if sub:\n self._addNode(parent, sub=sub.main)\n self._addEdge(sub.main, parent.main, label=\"陳述\", etype=\"stat\")\n else:\n self._addNode(parent)\n \n # Lopp through all children\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # If child is noun\n if child.func in SubDict:\n if child.func == \"では\":\n if child.negative != 0 or any([val.negative != 0 for key, val in self.G.successors(child.main)]):\n pass\n else:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=child.func, etype=\"attr\")\n elif child.type == 0 and child.func in [\"と\", \"などと\"] and child.id + 1 == parent.id and preprocessText(chunks[parent.parent].main) not in [\"交代\", \"交換\"]:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=\"並列\", etype=\"para\")\n self._addEdge(parent.main, child.main, label=\"並列\", etype=\"para\")\n self.para.append([child.main, parent.main])\n elif child.type == 0 and child.func in ParallelDict and child.id + 1 == parent.id:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=\"並列\", etype=\"para\")\n self._addEdge(parent.main, child.main, label=\"並列\", etype=\"para\")\n self.para.append([child.main, parent.main])\n else:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=child.func, etype=\"attr\")", "def setParent(self, parent):\n self.parent = parent", "def _distribute_parent_info(moduleInfo: List[ModuleInfo]):\n\n for i in moduleInfo:\n parentInfo = _find_parent(i, moduleInfo)\n if parentInfo:\n i.set_parent(parentInfo.get_name())", "def remember_parent(self, parent, child):\n if child not in self.child_parents_map:\n self.child_parents_map[child] = set()\n self.child_parents_map[child].add(parent)", "def id_parentesco(self, id_parentesco):\n self._id_parentesco = id_parentesco" ]
[ "0.6654415", "0.65198356", "0.6462206", "0.64579284", "0.6353594", "0.62729967", "0.6186695", "0.61660165", "0.6154463", "0.61290205", "0.61266243", "0.6095059", "0.6068027", "0.6065559", "0.6063551", "0.6041484", "0.6020467", "0.60039234", "0.60022867", "0.59916437", "0.5974864", "0.59564966", "0.59110826", "0.5887255", "0.5886367", "0.588044", "0.5866037", "0.5846542", "0.58446354", "0.5843909", "0.58347076", "0.58229935", "0.58005226", "0.57831013", "0.5760664", "0.57467633", "0.57214695", "0.57196933", "0.57196265", "0.57081383", "0.56985706", "0.56923884", "0.5686767", "0.5671292", "0.5670978", "0.5667246", "0.56652206", "0.56361735", "0.562788", "0.56170356", "0.56170356", "0.56022716", "0.56001717", "0.55867577", "0.55817807", "0.5560837", "0.55591846", "0.555808", "0.5554888", "0.5547736", "0.55375737", "0.55116993", "0.5505882", "0.5497329", "0.5484898", "0.5480117", "0.5477565", "0.54678094", "0.54614615", "0.5460656", "0.54594874", "0.5455278", "0.5451485", "0.5423692", "0.5423065", "0.5421", "0.5418487", "0.54167753", "0.5416314", "0.5414901", "0.5414527", "0.54038954", "0.54036933", "0.54021037", "0.5400666", "0.53979224", "0.53961074", "0.53869224", "0.5382086", "0.53790975", "0.53774315", "0.53718084", "0.53708065", "0.5364089", "0.5360334", "0.5343925", "0.53399765", "0.53390616", "0.5336304", "0.53291047" ]
0.6953736
0
To add child to database
def add_child(session, df): try: for _, row in df.iterrows(): child = Child() child.name = row['child_name'] child.residence = row['Residence'] father_obj = session.query(Parent).filter_by(name=row['father_name']).first() child.parents.append(father_obj) mother_obj = session.query(Parent).filter_by(name=row['mother_name']).first() child.parents.append(mother_obj) session.add(child) except Exception as ex: session.rollback() raise ex else: session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_child(self, child):\n name = name_displayer.display(child)\n birth = get_birth_or_fallback(self.dbstate.db, child)\n birth_date, birth_sort, birth_place = self.get_date_place(birth)\n death = get_death_or_fallback(self.dbstate.db, child)\n death_date, death_sort, death_place = self.get_date_place(death)\n self.model.add((child.get_handle(),\n name,\n birth_date,\n birth_sort,\n death_date,\n death_sort))", "def add_child(self, child):\r\n self.children.append(child)", "def add_child(self, child, spouse):\n name = name_displayer.display(child)\n if spouse:\n spouse = name_displayer.display(spouse)\n spouse = spouse or ''\n birth = get_birth_or_fallback(self.dbstate.db, child)\n birth_date, birth_sort, birth_place = self.get_date_place(birth)\n death = get_death_or_fallback(self.dbstate.db, child)\n death_date, death_sort, death_place = self.get_date_place(death)\n self.model.add((child.get_handle(),\n name,\n birth_date,\n birth_sort,\n death_date,\n death_sort,\n spouse))", "def addChild(self, child):\n #assert child not in self.children\n #if child not in self.children:\n child.parents.append(self)\n self.children.append(child)", "def add_child(self, child):\r\n self.children.append(child)", "def add_child(self, child):\n self.childs.append(child)", "def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)", "def create_child(self, **kw):\n m = self.model_class.create(**kw)\n self.add(m)\n return m", "def addChild( self, child ):\n\n self.childs.append( child )", "def add_child(self, child):\n self.children.append(child)", "def add_child(self, child):\n self.children.append(child)", "def add_child(self, obj):\n obj.parent = self\n if obj not in self.children:\n self.children.add(obj)\n # TODO(andi): This assumes that the node is already child of a root\n # Database node which makes it impossible to create a sub-tree that\n # should be added to the real root later. 
For example:\n # db = Database()\n # node = Node()\n # db.add_child(node)\n # sub = Node()\n # sub.add_child(Node()) # <-- fails\n # node.add_child(db)\n self.db._oid_idx[obj.oid] = obj\n return obj", "def addChildObject(self, child):\n \n currChild = self.getChild(child.getName())\n if currChild:\n index = self.getIndex(currChild)\n if index != -1:\n self._children[index] = child\n child.setParent(self)\n # Unset the existing child's parent\n currChild.setParent(None)\n del currChild\n \n self.__setChildDict(child)\n else:\n child.setParent(self) \n self._children.append(child)\n self.__setChildDict(child)", "def insertChild(self, index, child):\n self.__initChild()\n self.__child.insert(index, child)", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def insert(self, parent, name):\n pid = self.db.insert_returning_id('simple', dict(parent=parent, name=name))\n return pid", "def add_child(self, child):\n name = child.name\n self._children[name] = child\n self._name_dict[name.split('-')[0]] += 1", "def add_child(self, descendant, **kwargs):\n kwargs.update({'parent': self, 'child': descendant})\n disable_check = kwargs.pop('disable_circular_check', False)\n\n if self.sequence_manager and self.sequence_manager.get_node_sequence_field():\n sequencename = self.sequence_manager.sequence_field_name\n sequence = kwargs.pop(sequencename, None)\n if sequence:\n setattr(descendant, sequencename, sequence)\n descendant.save()\n cls = self.children.through(**kwargs)\n return cls.save(disable_circular_check=disable_check)", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def add_child(self, pid):\n self._children_ids.append(pid)\n self._sort_children()", "def add_child(self, child, label):\n self.children[label] = child\n child.parents.append(self)", "def addChild(node):", "def create_child(self):\n raise NotImplementedError", "def addChild(self, edge):\n self.child_edges[edge.getId()] = edge", "def appendChild(self, child):\n self.__initChild()\n self.__child.append(child)", "def add_child(self, nodo):\n if nodo not in self.children:\n self.children.append(nodo)", "def _add_child(self, node):\n self.children.update({\n node.name: node\n })\n node.path = self._sep.join([self.path, node.name])\n node.parent = self", "def add_child(self, child_id):\n self._children.append(child_id)", "def test_do_insert_child(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_insert(revision_id=1, parent_id=1)\r\n\r\n assert _error_code == 0\r\n assert _msg == (\r\n \"RAMSTK SUCCESS: Adding one or more items to the RAMSTK Program \"\r\n \"database.\")\r\n assert DUT.last_id == 4\r\n\r\n DUT.do_delete(DUT.last_id)", "def add_children(self,node):\n\n node.parent_id = self.id\n node.level = self.level + 1\n node.path = node._create_path()\n node.save()", "def add(self, child):\r\n# child = Node()\r\n# child._id = Kinetic.Global.id_counter\r\n# Kinetic.Global.id_counter += 1\r\n child.index = len(self.children)\r\n child.parent = self\r\n self.children.append(child)\r\n stage = child.get_stage()\r\n\r\n if not stage:\r\n Kinetic.Global._add_temp_node(child)\r\n else:\r\n stage._add_id(child)\r\n stage._add_name(child)\r\n\r\n go = Kinetic.Global\r\n go._pull_nodes(stage)\r\n\r\n if hasattr(self, '_add'):\r\n 
self._add(child)\r\n\r\n return '%s.add(%s);' %(self.name, child.name)", "def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)", "def add_child(self, ldraw_model):\n self.children.append(ldraw_model)", "def append_child(self, child):\n\t\tself._children.append(child)", "def Insert(self, child, index):\r\n \r\n self._children.insert(index, child)", "def insert(self, parent, name):\n if parent is None:\n pid = self.db.execute('''\n INSERT INTO ltreetab (path, name) VALUES (\n text2ltree('' || currval('ltreetab_id_seq')),\n :name\n ) RETURNING id\n ''',\n dict(parent=parent, name=name)\n ).fetch_single()\n else:\n pid = self.db.execute('''\n INSERT INTO ltreetab (path, name) VALUES (\n (SELECT path FROM ltreetab WHERE id = :parent) ||\n ('' ||currval('ltreetab_id_seq')),\n :name\n ) RETURNING id\n ''',\n dict(parent=parent, name=name)\n ).fetch_single()\n \n return pid", "def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)", "def add_parent(session, df):\n try:\n for _, row in df.iterrows():\n parent = Parent()\n parent.name = row['parent_name']\n parent.family = row['family']\n session.add(parent)\n except Exception as ex:\n session.rollback()\n raise ex\n else:\n session.commit()", "def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents", "def add_child(self, node):\n self.children.append(node)", "def add_child(self, node):\n self.children.append(node)", "def add_child(self, node):\n self.children.append(node)", "def addChild(self, name, force=False):\n \n if type(name) != str:\n raise ValueError, 'Argument should be a string!'\n \n child = self.getChild(name)\n if child:\n # print 'Child %s present!' % name\n # Replace it if force==True\n if force:\n index = self.getIndex(child)\n if index != -1:\n child = self.__class__(name)\n self._children[index] = child\n child.setParent(self)\n \n self.__setChildDict(child)\n return child\n else:\n child = self.__class__(name)\n child.setParent(self)\n \n self._children.append(child)\n self.__setChildDict(child)\n\n return child", "def fm_append_member(cls, parent, child):\n parent.fm_append(child, cls.CHILD)\n child.fm_append(parent, cls.PARENT)", "def init_db(self, parent_type, child_type):\n self.parent = Node(self.handler, parent_type)\n self.children = [ Node(self.handler, child_type) for x in range(0, self.SIZE) ]\n for node in self.children:\n Link(self.handler, self.parent.node, node.node, child_type.upper())", "def link_child(self, parent, child):\n nodelist = self.get_nodes()\n parent_index = nodelist.index(parent)\n child_index = nodelist.index(child)\n\n self.__nodes[parent_index].append(child_index)", "def add_child(self, child, probe_id=None):\n node = None\n matching_nodes = [x for x in self.children if x.name == child.name] # see if the added node has already in its children list\n # print(\"[*] add children with the name {}.. 
matching_nodes: {}\".format(child.name, matching_nodes))\n if len(matching_nodes) > 0:\n node = matching_nodes[0]\n if probe_id is not None:\n node.probes = probe_id\n # print(\"\\t[*] current node: {}\".format(node.name))\n if node is None:\n if probe_id is not None:\n child.probes = probe_id\n self.children.append(child)\n node = child\n # print(\"\\t[*] node {} is appended to {} child list\".format(node.name, self.name))\n return node", "def save(self):\r\n db.session.add(self)\r\n db.session.commit()", "def insert(self, parent, name):\n if parent is None:\n #pid = self.db.execute('''\n # INSERT INTO pathenum (path, name) VALUES (\n # '',\n # :name\n # ) RETURNING id\n # ''', dict(parent=parent, name=name)\n #).list()[0][0]\n pid = self.db.insert_returning_id('pathenum', dict(path='', name=name))\n else:\n #pid = self.db.execute('''\n # INSERT INTO pathenum (path, name) VALUES (\n # (SELECT path || id || '.' FROM pathenum WHERE id = :parent),\n # :name\n # ) RETURNING id\n # ''', dict(parent=parent, name=name)\n #).list()[0][0]\n if self.db.is_dialect('mysql'):\n # FIXME: bardzo brzydki kod\n path = self.db.execute('''SELECT concat(path, id, '.') AS path FROM pathenum WHERE id = %s''' % parent).fetch_single('path')\n #print path\n pid = self.db.insert_returning_id('pathenum', dict(name=name, path=path))\n #pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n # path=\"(SELECT concat(path, id, '.') FROM pathenum WHERE id = %s)\" % parent)\n #)\n elif self.db.is_dialect('sqlserver'):\n pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n path=\"(SELECT path + CONVERT(varchar(10), id) + '.' FROM pathenum WHERE id = %s)\" % parent)\n )\n else:\n pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n path=\"(SELECT path || id || '.' FROM pathenum WHERE id = %s)\" % parent)\n )\n return pid", "def add(self, model):\n assert isinstance(model, self.model_class) # it's a homogeneous collection\n m_id = str(model.get_id())\n assert m_id != None # needs a real id or cid\n # If the models have already been loaded, verify the model being added is\n # not already in the set. 
This allows for create_child to be used before a potential\n # lazy load has happened, which might load the newly created child from the DB again.\n if self._loaded:\n assert m_id not in self._models # collision\n model._set_parent(self)\n self._models[m_id] = model\n return model", "def add_child(self, child):\n\n self._children.add(child)", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def put(self, node, child):\n node.add_child(child)", "def insert(self, pid, pname, pparent, pobj, ptype):\r\n self.pids.append(pid)\r\n self.pnames.append(pname)\r\n self.pparents.append(pparent)\r\n self.ptypes.append(ptype)\r\n self.pobjs.append(pobj)", "def add_child(self, parent, child):\n wanted_parent = self.__find(parent, self.root)\n\n # check if there isn't a child with that value already\n child_exists = self.__find(child, self.root)\n if child_exists:\n raise Exception('A child with value {} already exists!'.format(child))\n\n wanted_parent.add_child(Node(child, parent=wanted_parent))\n self.nodes += 1", "def addchild(self, child, index=None, move=True):\n owners = child.owners()\n if len(owners) > 0 and not move:\n return # keep the original owner\n\n if self.guid in owners:\n return # already add\n\n # add to this group (avoid 'child' being remove from project when no one refer to it)\n pbxhelper.pbxobj_add_pbxlist_value(self, u'pbx_children', child, \\\n self.is_valid_child, index=index)\n\n for owner in owners.values():\n child.remove_referrer(owner) # remove from other groups", "def insert_new(self, marble: Marble) -> None:\n first = self.child\n if not first:\n raise ValueError(\"Marble has no child.\")\n second = first.child\n if not second:\n raise ValueError(\"Marble has not child.\")\n first.child = marble\n marble.parent = first\n marble.child = second\n second.parent = marble", "def create(self):\n db.session.add(self)\n db.session.commit()", "def add_child(self, value):\n assert type(value) != TreeNode\n self.children.append(TreeNode(value, self))", "def add_child(self, node):\n if self is node:\n parent_id = \"\"\n _nodeid=\"N_\"+str(0)\n else:\n if not issubclass(node.__class__, Node):\n raise TypeError(\"{}.add_child: arg «node»=«{}», type {} not valid.\".format(self.__class__.__name__, node, type(node)))\n self.childs.append(node)\n node.parent = self\n parent_id = self.TV.selection()[0]\n _nodeid=\"N_\"+str(self.node_count)\n # parent = self.rootnode.get_node_by_id(parent_id)\n # if parent is None:\n # return None\n\n # print(\"self.TV.insert node._nodeid\", node._nodeid)\n # print(\"self.TV.insert node.data\", node.data)\n \n self.TV.insert(parent_id, 'end', _nodeid, text=node.name)\n\n # parent_id = self.TreeView.selection()[0]\n # node_name = askstring(\"New Child\", prompt=\"Enter the node name\", initialvalue=\"\")\n # if not node_name:\n # node_name = \"no-name-node\"\n # # self.TV.insert(item, 'end', 'LC_'+str(self.TVleafref), \n # # text='Load case '+str(self.TVleafref))\n # #self.node_count += 1\n \n # self.TreeView.insert(parent_id, 'end', self._nodeid, text=self.name)\n\n return node", "def add_child(self, child_account):\r\n self._children.append(child_account)", "def _add_owner(parent_id, child_id):\n db.session.add(\n pam.BivAccess(\n source_biv_id=parent_id,\n target_biv_id=child_id\n )\n )", "def add_child(self, child):\n assert 
isinstance(child, (Node, str))\n self.children.append(child)\n return child", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def add_child(self, node):\n if node not in self.children: #If the node isn't already a child of Node,\n self.children.append(node) #Add it to the end of the list of children", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def new_child(self, parent, *args, **kwargs):\n child = self.new_element(*args, **kwargs)\n parent.append(child)\n return child", "def _extend_dommodel(self, child):\n self._dommodel.childNodes[0].appendChild(child)", "def add_child(self, element, parent):\n parent_node = self._validate(parent)\n child_node = self._Node(element,parent_node)\n parent_node._children.append(child_node)\n self._size += 1", "def add_child(self, child: \"AbstractNode\", index: int = None) -> None:\n if index is None:\n self._children.append(child)\n else:\n self._children.insert(index, child)", "def add_child(self, name: str, command: Command) -> None:\n self._children[name] = command", "def add_child(self, chromosome):\n self.next_population.append(to_chromosome(chromosome))", "def add_relationship(self, relationship):\n self.relationships[relationship.parent].append(relationship)", "def save(self):\n \n db.session.add(self)\n db.session.commit()", "def add_child(self, child, **kwargs):\n if child.is_root:\n return\n if 'after' in kwargs:\n if kwargs['after'] is not None:\n try:\n self.children.insert(\n self.children.index(kwargs['after']), child)\n return\n except ValueError:\n self.children.append(child)\n else:\n self.children.insert(0, child)\n self.children.append(child)\n if child.parent is not self and child.parent is not None:\n child.parent.remove_child(child)\n # pylint: disable=protected-access\n child.__parent = self", "def add_child(self, child):\n\t\t\n\t\tif child.__class__ != FileObject:\n\t\t\traise InvalidParameterError('child', 'should be an instance of FileObject')\n\t\t\n\t\tself.__childs.append(child)\n\t\treturn True", "def addNode(self, appendIt=False, nodeId=None, childId=None, sublist=None, label=''):\n node = super().addNode(appendIt=appendIt, nodeId=nodeId, childId=childId,\n label=label)\n self.save(node)\n if self.getSublist().atHead():\n # Update childID (sublist head) if this node is first in the sublist\n self.setChildId(node.nodeId)\n self.save(self.cursor)\n return node", "def create_new_child(self,instance):\n\t\tnew_child = self.tree.makeTree(self.tree.root, self.tree)\n\t\tnew_child.utility.increment_counts(instance)\n\t\tself.tree.children.append(new_child)", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n # try:\n # db.session.add(self)\n # db.session.commit()\n # 
except exc.IntegrityError:\n # db.session.rollback()", "def add_child(self, child):\n\n child_parent_scope = child.parent_scope\n child_parent_value = child.parent_value\n\n if all([\n child_parent_scope == self.scope,\n child_parent_value == self.value,\n self.user in (child.user, ANY),\n ]):\n self.children.append(child)\n child.parent = self\n return True\n\n else:\n return any([node.add_child(child) for node in self.children])", "def insert(self, child, key):\n childNode = BTNode(key)\n if child == \"left\":\n self.left = childNode\n elif child == \"right\":\n self.right = childNode", "def save(self):\n\t\tdb.session.add(self)\n\t\tdb.session.commit()", "def append(self, dpr):\r\n self.childlist.append(dpr)" ]
[ "0.7669851", "0.716879", "0.7081737", "0.7025343", "0.6958452", "0.69013804", "0.68937933", "0.6841492", "0.68182874", "0.6781795", "0.6781795", "0.6744913", "0.6703005", "0.65205777", "0.65191615", "0.65191615", "0.6515472", "0.64840895", "0.6477812", "0.6470086", "0.6470086", "0.6470086", "0.6422715", "0.6420365", "0.64047784", "0.6400218", "0.63935673", "0.6386487", "0.6367783", "0.63628215", "0.6356732", "0.6353468", "0.63307273", "0.63268185", "0.62639874", "0.6248534", "0.623223", "0.62235475", "0.6220142", "0.6206499", "0.6192518", "0.6144748", "0.6122102", "0.6122102", "0.6122102", "0.6120446", "0.61200565", "0.6096548", "0.6091709", "0.60818017", "0.6080182", "0.6079637", "0.60554373", "0.6052147", "0.60395694", "0.60395694", "0.60395694", "0.60395694", "0.6034775", "0.6017422", "0.6010903", "0.60075635", "0.60054106", "0.6000733", "0.5996554", "0.5980504", "0.5975921", "0.5972757", "0.5969391", "0.59678036", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.5963325", "0.59617394", "0.5957234", "0.5956416", "0.5942649", "0.593424", "0.59320354", "0.5909298", "0.58910406", "0.5874765", "0.5867254", "0.5842119", "0.5840392", "0.5833517", "0.58334994", "0.5833076", "0.58166075", "0.58112067", "0.5806914", "0.5787352" ]
0.69629735
4
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs): path = '/v1/global/root_outcome_group' url = request_ctx.base_api_url + path.format() response = client.get(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(account_id=account_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def redirect_to_root_outcome_group_for_context_courses(request_ctx, course_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(course_id=course_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def redirect_associated_mood():\n\n # grab the mood_id from the form\n user_mood_id = request.form.get(\"mood\")\n\n # set the mood_id to id grabbed from the form\n mood_id = user_mood_id\n\n return redirect(\"/moods/{}/entries\".format(mood_id))", "def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)", "def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect", "def root(request):\n\ttemplate = 'bfbot/main'\n\treturn redirect(template)", "def redirect(self, location):\n self.redirect_see_other(location)", "def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. 
This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def view__model_admin_root(context, request):\n return HTTPFound(request.resource_url(context.__parent__))", "def catch_all(path):\n return redirect('/', code=302)", "def redirect_to(self, route_name, *args, **kwargs):\n self.redirect(self.uri_for(route_name, *args, **kwargs))", "def redirect_source():\n return redirect(url_for(\"base_blueprint.source\"), code=301)", "def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def redirect_found(self, location):\n self.status = 302\n self.set_header('Location', location)", "def redirect_to_question():\n # responses variable will go on to store all of the user's answers to the questions\n session[ANSWERS_KEY] = []\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")", "def eastgardens(event, context):\n\n request = event['Records'][0]['cf']['request']\n path = request['uri']\n query = request['querystring']\n\n # prepend a ? if there is a query\n if query != '':\n query = '?' 
+ query\n\n # Path+query based custom redirects get checked first\n if path + query in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path + query])\n\n # Now check path only custom redirects\n if path in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path])\n\n return handle_fallthrough(event, path, query)", "def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location", "def index_file():\n return redirect(\"/\")", "def redir_index():\n return redirect(url_for(\"index\"), code=301)", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def intermediate_redirect(cls, form_path):\r\n from r2.lib.template_helpers import add_sr\r\n dest = cls.format_output_url(request.fullpath)\r\n path = add_sr(form_path + query_string({\"dest\": dest}))\r\n return cls.redirect(path)", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def index():\n return redirect(url_for('second_page'))", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))", "def redirect_to(self, to, *args, **kwargs):\n\n response = redirect(to, *args, **kwargs)\n\n # By default, raise a redirect, which will cause the view\n # processing to stop and return this redirect.\n if kwargs.pop(\"raise\", True):\n raise response\n else:\n return response", "def redirect_version():\n return redirect(url_for(\"base_blueprint.version\"), code=301)", "def toLanding():\n return redirect(url_for('landingurl'))", "def get_success_url(self):\n is_same_user = self.get_object().userid == self.request.user\n return reverse(\"certhelper:shiftleader\") if not is_same_user else \"/\"", "def index():\n return redirect(auth_flow.get_authorization_url())", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def redirect(self) -> WerkzeugResponse:\n\n _dict = self.unpack_redirect()\n return self.perform_logout(_dict, BINDING_HTTP_REDIRECT)", "def redirect_to_course_position(course_module):\r\n urlargs = {'course_id': course_module.id.to_deprecated_string()}\r\n chapter = get_current_child(course_module)\r\n if chapter is None:\r\n # oops. 
Something bad has happened.\r\n raise Http404(\"No chapter found when loading current position in course\")\r\n\r\n urlargs['chapter'] = chapter.url_name\r\n if course_module.position is not None:\r\n return redirect(reverse('courseware_chapter', kwargs=urlargs))\r\n\r\n # Relying on default of returning first child\r\n section = get_current_child(chapter)\r\n if section is None:\r\n raise Http404(\"No section found when loading current position in course\")\r\n\r\n urlargs['section'] = section.url_name\r\n return redirect(reverse('courseware_section', kwargs=urlargs))", "def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)", "def second_page():\n return redirect(url_for('index'))", "def get_outcome_service_url(self, service_name=\"grade_handler\"):\r\n return self.runtime.handler_url(self, service_name, thirdparty=True).rstrip('/?')", "def selectGroup(request):\n if 'next' in request.GET:\n n = request.GET['next']\n else:\n n = '/database/' # TODO: make me non absolute\n # TODO: write a test for me!\n if request.method == \"POST\":\n form = LabGroupSelectionForm(request.user, data=request.POST)\n if form.is_valid():\n request.session['labgroup_id'] = form.cleaned_data['labGroup'].id\n return redirect(n)\n else:\n return render(request, 'select_group.html', {'form': form, 'next': n})\n else:\n form = LabGroupSelectionForm(request.user)\n return render(request, 'select_group.html', {'form': form, 'next': n})", "def index() -> str:\n return redirect('/students')", "def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login',\n params=dict(came_from=came_from, __logins=login_counter))\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n\n # Do not use tg.redirect with tg.url as it will add the mountpoint\n # of the application twice.\n return HTTPFound(location=came_from)", "def getRootURL():", "def move_to_root(self, group_id, name):\n return self._client.post(\n url=self._client.get_full_url(\n self.get_path('collection', realm=self._realm_name)\n ),\n data=json.dumps({\n 'id': group_id,\n 'name': name\n })\n )", "def root_create(request): # pylint: disable=W0613\r\n root = get_or_create_root()\r\n return redirect('wiki:get', path=root.path)", "def redirect(self, url):\n self.setResponseCode(responsecode.FOUND)\n self.setHeader(\"location\", url)", "def redirect(url):", "def index():\n return redirect(url_for(\"home\"))", "def _go_to_root_menu(self):\n log.debug(\"Returning to root menu...\")\n # Issue an enter or two off the bat to get out of any display screens\n # and confirm command mode\n try:\n response = self._do_cmd_resp(Command.BLANK, expected_prompt=Prompt.CMD_PROMPT)\n while not str(response).lstrip().endswith(Prompt.CMD_PROMPT):\n response = self._do_cmd_resp(Command.BLANK,\n expected_prompt=Prompt.CMD_PROMPT)\n time.sleep(1)\n except InstrumentTimeoutException:\n raise InstrumentProtocolException(\"Not able to get valid command prompt. 
Is instrument in command mode?\")\n \n # When you get a --> prompt, do 9's until you get back to the root\n response = self._do_cmd_resp(Command.BACK_MENU,\n expected_prompt=MENU_PROMPTS)\n while not str(response).lstrip().endswith(Prompt.MAIN_MENU):\n response = self._do_cmd_resp(Command.BACK_MENU,\n expected_prompt=MENU_PROMPTS)", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def get(self):\n cont = self.request.get('continue', default_value='/')\n\n # Check whether redirecting to an absolute or relative url\n netloc = urlparse.urlsplit(cont).netloc\n if netloc:\n # Disallow absolute urls to prevent arbitrary open redirects\n raise custom_exceptions.InvalidRedirectURLError(\n \"Redirecting to an absolute url is not allowed.\")\n\n conversion_names = self.request.get_all('conversion_name')\n\n if len(conversion_names):\n bingo(conversion_names)\n\n self.redirect(_iri_to_uri(cont))", "def redirect_see_other(self, location):\n self.status = 303\n self.set_header('Location', location)", "def url_root():\n return \"OK\"", "def goto_url(self):\n msg = self._cw._('you have been logged out')\n return self._cw.base_url()", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))", "def default(self, tg_errors=None):\n raise redirect('list')", "def idx(_request):\n return HttpResponseRedirect('/home')", "def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))", "def redirectReferer():\n referer = request.headers.get('referer')\n if not referer:\n referer = url_for('catelog')\n return redirect(referer)", "def get(self):\n session.clear()\n return redirect(url_for(REDIRECT_URL))", "def redirect_dest(fallback):\n dest = request.args.get('next')\n try:\n if dest.startswith('/') or dest.startswith(request.host_url):\n return redirect(dest)\n dest_url = url_for(dest)\n except:\n return redirect(fallback)\n return redirect(dest_url)", "def do(self, action_context, view, *args, **kwargs):\n\n # We will use this action to check if we have a valid login session\n # if not then redirect to the login\n a = CheckUserAuthenticatedAlreadyAction()\n action_context = a.do(action_context, view, *args, **kwargs)\n # if not authenticated then break teh chain and return to login\n if not action_context.extra_context.get('user_authenticated'):\n action_context.break_chain = True\n url = reverse(\"auth:login\")\n redirect_param = settings.REDIRECT_URL_VALID_PARAMS[0]\n url = \"{0}?{1}={2}\".format(\n url,\n redirect_param,\n action_context.request.path\n )\n 
action_context.response = HttpResponseRedirect(url)\n\n return super().do(action_context, view, *args, **kwargs)", "def login():\n next_url = request.form.get(\"next\", None)\n\n if current_app.config[\"USE_SAML\"]:\n if next_url:\n return redirect(url_for(\"auth.saml\", sso2=next_url))\n return redirect(url_for(\"auth.saml\", sso=None))\n\n elif current_app.config[\"USE_LDAP\"]:\n return redirect(url_for(\"auth.ldap_login\", next=next_url))\n\n elif current_app.config[\"USE_LOCAL_AUTH\"]:\n return redirect(url_for(\"auth.local_login\", nex=next_url))\n\n return abort(404)", "def index():\n\n return redirect(api)", "def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url", "def homepage():\n return redirect('index.html')", "def send302(start_response, location):\n start_response('302 Found', [('Location', location)])\n return [YZ_MOVED_TO + location]", "def get(self, request, *args, **kwargs):\n return redirect(reverse_lazy(\n 'nominations-application'\n ) + \"?id=\" + self.kwargs['pk'])", "def path(self, group):\n return", "def kick_from_group(request):\n\treturn render(request,\"404.html\",{})", "def root():\t\t\n\t\tif websetup.check_twitter_auth() is False:\n\t\t\tif websetup.make_twitter_auth() is False:\n\t\t\t\treturn \"OAuth failure.\"\n\t\treturn bottle.template('newroot')", "def test_to_other_url(self):\n user = User(username='test', is_staff=True, is_superuser=True,\n is_active=True)\n user.set_password('test')\n user.full_clean()\n user.save()\n request = RequestFactory().get('/')\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n admin_instance = get_modeladmin(Iframe)\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302, obj=user)\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def return_to_source(origin,parent_object_id,target_username):\n\tif origin in ('home','home_reply'):\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"home_loc_pk\",pk=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'history':\n\t\tif target_username:\n\t\t\treturn redirect(\"user_activity\",slug=target_username)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'public':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"public_group\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'search':\n\t\treturn redirect(\"search_username\")\n\telif origin == 'profile':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"user_profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'profile_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'best_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"best_photo_loc_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'photo_comments':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"comment\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'fresh_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"see_photo_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"photo\")\n\telse:\n\t\treturn redirect(\"home\")", "def 
test_get_track_list_root_directory_returns_default_directory(self, example_group):\n example_group.directory = None\n example_track_list = example_group.track_lists[0]\n example_track_list.directory = None\n directory = utils.get_track_list_root_directory(\n group=example_group, track_list=example_track_list, default_dir=\"global/dir\"\n )\n assert directory == \"global/dir\"", "def root(request, fallback_view=None, **kwargs):\n if request.method == 'POST':\n if request.META['CONTENT_TYPE'] == 'text/xml':\n log.debug('XMLRPC request received')\n return parse_xmlrpc_request(request)\n log.debug('Distutils request received')\n parse_distutils_request(request)\n action = request.POST.get(':action','')\n else:\n action = request.GET.get(':action','')\n\n if not action:\n log.debug('No action in root view')\n if fallback_view is None:\n fallback_view = settings.DJANGOPYPI_FALLBACK_VIEW\n return fallback_view(request, **kwargs)\n\n if not action in settings.DJANGOPYPI_ACTION_VIEWS:\n log.error('Invalid action encountered: %s' % (action,))\n return HttpResponseNotAllowed(settings.DJANGOPYPI_ACTION_VIEW.keys())\n\n log.debug('Applying configured action view for %s' % (action,))\n return settings.DJANGOPYPI_ACTION_VIEWS[action](request, **kwargs)", "def test_login_openid_handle_redirection(self):\r\n response = self._send_bad_redirection_login()\r\n self.assertEquals(response.status_code, 302)", "def source():\n return redirect(get_last_menus_url())", "def entry_page():\n return redirect(url_for('index'))", "def test_index_redirect(self):\n response = self.app.get(\"/\")\n self.assertEqual(response.status_code, 302,\n \"/ did not redirect to login when user is not logged in\")\n self.assertTrue(\n response.location.endswith(\"/accounts/login/\"),\n \"Redirect location did not end with /accounts/login/\"\n )", "def test_get_track_list_root_directory_returns_group_directory(self, example_group):\n example_group.directory = \"group/dir\"\n example_track_list = example_group.track_lists[0]\n example_track_list.directory = None\n directory = utils.get_track_list_root_directory(\n group=example_group, track_list=example_track_list, default_dir=\"global/dir\"\n )\n assert directory == \"group/dir\"", "def redirect(self, path):\n self.get_controller().redirect(path)", "def redirect(self):\r\n mdict = self.matchdict\r\n hash_id = mdict.get('hash_id', None)\r\n username = mdict.get('username', None)\r\n\r\n hashed = Hashed.query.get(hash_id)\r\n\r\n if not hashed:\r\n # for some reason bad link, 404\r\n return HTTPNotFound()\r\n\r\n hashed.clicks = hashed.clicks + 1\r\n\r\n if username is not None:\r\n bookmark = Bmark.query.\\\r\n filter(Bmark.hash_id == hash_id).\\\r\n filter(Bmark.username == username).one()\r\n bookmark.clicks = bookmark.clicks + 1\r\n\r\n return HTTPFound(location=hashed.url)", "def openid_redirect(request):\n request.session['next'] = _get_next(request)\n request.session['openid_provider'] = request.GET.get('openid_provider')\n request.session['socialregistration_connect_object'] = get_object(request.GET)\n\n client = OpenID(\n request,\n 'http%s://%s%s' % (\n _https(),\n Site.objects.get_current().domain,\n reverse('openid_callback')\n ),\n request.GET.get('openid_provider')\n )\n try:\n return client.get_redirect()\n except DiscoveryFailure:\n request.session['openid_error'] = True\n return HttpResponseRedirect(settings.LOGIN_URL)", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, 
mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def url(self):\n return url_for('/admin/groups/{}'.format(self.key))", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def get(self):\n self.logout()\n self.redirect('/')", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def redirect_to(self, response):\n redirect_url = response.xpath(self.filters[0]).extract_first()\n yield scrapy.Request(redirect_url, self.first_submit)", "def error_redirect(err):\n error = err\n return render_template('error.html',\n title='That doesn\\'t exist!',\n error=error)", "def demo():\n site = request.values['site']\n user = request.args.get('user_id', '')\n group = request.args.get('group_id', '')\n if group == '@list':\n group = request.values['list']\n\n activity_id = search_query = ''\n if group == source.SEARCH:\n search_query = request.args.get('search_query', '')\n elif group != source.BLOCKS:\n activity_id = request.args.get('activity_id', '')\n\n # pass query params through\n params = dict(request.args.items())\n params.update({\n 'plaintext': 'true',\n 'cache': 'false',\n 'search_query': search_query,\n })\n\n path = '/'.join(urllib.parse.quote_plus(part, safe='@')\n for part in (site, user, group, '@app', activity_id))\n return redirect(f'/{path}?{urllib.parse.urlencode(params)}')", "def _redirect(self):\n \n # Redirect URL is held in 'r' URL arg of this request\n b64encReturnTo = str(request.params.get('r', ''))\n\n if b64encReturnTo:\n # Decode the return to address\n try:\n b64decReturnTo = base64.urlsafe_b64decode(b64encReturnTo)\n except Exception, e:\n log.error(\"logout - decoding return URL: %s\" % e) \n c.xml = \"Error carrying out browser redirect following logout\"\n response.status_code = 400\n return render('ndg.security.kid', 'ndg.security.error')\n \n # Check for 'getCredentials' - avoid in case username/password\n # contained in the URL!\n getCredentialsIdx = b64decReturnTo.rfind('/getCredentials')\n if getCredentialsIdx != -1:\n log.debug(\"Reverting request URL from getCredentials to \"\n \"login...\")\n b64decReturnTo = b64decReturnTo[:getCredentialsIdx] + '/login'\n \n # Add flag indicating to caller that logout succeeded. The caller\n # can use this to remove any security cookie present in their\n # domain - See:\n # ndg.security.client.ssoclient.ssoclient.lib.base.BaseController\n if '?' 
in b64decReturnTo:\n b64decReturnTo += '&logout=1'\n else:\n b64decReturnTo += '?logout=1'\n\n # and now go back to whence we had come\n log.debug(\"LogoutController._redirect: redirect to %s\" %\n b64decReturnTo)\n h.redirect_to(b64decReturnTo)\n else:\n log.debug(\"LogoutController._redirect: no redirect URL set.\")\n response.status_code = 400\n c.errorPageHeading = \"Log out\"\n if getattr(c, \"loggedIn\", False):\n c.xml = \"Logged out\"\n else:\n c.xml = (\"An error occurred logging out. Please report the \"\n \"problem to your site administrator\") \n \n return render('ndg.security.kid', 'ndg.security.error')", "def __call__(self):\n if isXmlRpc(self.REQUEST):\n return self\n else:\n newpath = INewPath(self)\n newpath = getUtility(IVirtualRoot).ensure_virtual_root(newpath)\n self.REQUEST.response.redirect(newpath)", "def home_redirect(request):\n if request.user.is_authenticated() and request.user.is_staff:\n return redirect(\"volunteers\")\n elif request.user.is_authenticated() and not request.user.is_superuser:\n related_volunteer = get_object_or_404(Volunteer, user_id=request.user.pk)\n return redirect(\"edit-volunteer-profile\", volunteer_id=related_volunteer.pk)\n else:\n return redirect(\"new-volunteer\")", "def to_exam_root(self):\n self._bottom_tab(4)\n self._goto(\"exam_icon\")" ]
[ "0.7103075", "0.67741513", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0.45062536", "0.45030215", "0.45024154", "0.45024154", "0.44957826", "0.44948602", "0.44899046", "0.44889957", "0.44869763", "0.448599", "0.44630018", "0.44624335", "0.4444673", "0.44391045", "0.4407454", "0.44068038", "0.4404437", "0.43974093", "0.4391359", "0.4383362", "0.43788657", "0.43764284", "0.43678367", "0.4357152", "0.43323603", "0.43322605", "0.4329011", "0.4325541", "0.43224767", "0.43101028", "0.43091914", "0.43083736", "0.4305456", "0.4299628", "0.42955056", "0.42902592", "0.42720035", "0.42720035", "0.42720035", "0.4266655", "0.4263173", "0.42624322", "0.42537558", "0.4250566", "0.4246781", "0.42454904", "0.42319196", "0.4223124", "0.42221075", "0.4221986", "0.42171907", "0.42112315", "0.42052758", "0.42052475", "0.41966388", "0.41920617", "0.41893536", "0.41880608", "0.41860765", "0.41832078", "0.41791", "0.4174417", "0.4172573", "0.41708988", "0.4169707", "0.41651154", "0.4158805", "0.41580254", "0.41571978", "0.41561946", "0.41538662", "0.41396224", "0.41360778", "0.41355315", "0.4135148", "0.4134425", "0.41338757", "0.41303748", "0.4126399", "0.4125886" ]
0.76904535
0
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs):
    path = '/v1/accounts/{account_id}/root_outcome_group'
    url = request_ctx.base_api_url + path.format(account_id=account_id)
    response = client.get(request_ctx, url, **request_kwargs)
    return response
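A minimal usage sketch for the wrapper above. It assumes request_ctx is an already-configured context object exposing base_api_url (with whatever auth the underlying client module applies), that the client returns a requests-style Response, and that the account id is a placeholder — none of this comes from the dataset row itself.

# Sketch only: request_ctx setup and the account id are assumptions.
response = redirect_to_root_outcome_group_for_context_accounts(
    request_ctx,
    account_id=1,  # hypothetical account id
)
root_group = response.json()  # assumes a requests-style Response after the 302 is followed
print(root_group['id'], root_group['title'])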
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def redirect_to_root_outcome_group_for_context_courses(request_ctx, course_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(course_id=course_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def redirect_associated_mood():\n\n # grab the mood_id from the form\n user_mood_id = request.form.get(\"mood\")\n\n # set the mood_id to id grabbed from the form\n mood_id = user_mood_id\n\n return redirect(\"/moods/{}/entries\".format(mood_id))", "def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)", "def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect", "def root(request):\n\ttemplate = 'bfbot/main'\n\treturn redirect(template)", "def redirect(self, location):\n self.redirect_see_other(location)", "def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def view__model_admin_root(context, request):\n return HTTPFound(request.resource_url(context.__parent__))", "def catch_all(path):\n return redirect('/', code=302)", "def redirect_to(self, route_name, *args, **kwargs):\n self.redirect(self.uri_for(route_name, *args, **kwargs))", "def redirect_source():\n return redirect(url_for(\"base_blueprint.source\"), code=301)", "def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def redirect_found(self, location):\n self.status = 302\n self.set_header('Location', location)", "def redirect_to_question():\n # responses variable will go on to store all of the user's answers to the questions\n session[ANSWERS_KEY] = []\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")", "def eastgardens(event, context):\n\n request = event['Records'][0]['cf']['request']\n 
path = request['uri']\n query = request['querystring']\n\n # prepend a ? if there is a query\n if query != '':\n query = '?' + query\n\n # Path+query based custom redirects get checked first\n if path + query in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path + query])\n\n # Now check path only custom redirects\n if path in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path])\n\n return handle_fallthrough(event, path, query)", "def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location", "def index_file():\n return redirect(\"/\")", "def redir_index():\n return redirect(url_for(\"index\"), code=301)", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def intermediate_redirect(cls, form_path):\r\n from r2.lib.template_helpers import add_sr\r\n dest = cls.format_output_url(request.fullpath)\r\n path = add_sr(form_path + query_string({\"dest\": dest}))\r\n return cls.redirect(path)", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def index():\n return redirect(url_for('second_page'))", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))", "def redirect_to(self, to, *args, **kwargs):\n\n response = redirect(to, *args, **kwargs)\n\n # By default, raise a redirect, which will cause the view\n # processing to stop and return this redirect.\n if kwargs.pop(\"raise\", True):\n raise response\n else:\n return response", "def redirect_version():\n return redirect(url_for(\"base_blueprint.version\"), code=301)", "def toLanding():\n return redirect(url_for('landingurl'))", "def get_success_url(self):\n is_same_user = self.get_object().userid == self.request.user\n return reverse(\"certhelper:shiftleader\") if not is_same_user else \"/\"", "def index():\n return redirect(auth_flow.get_authorization_url())", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def redirect(self) -> WerkzeugResponse:\n\n _dict = self.unpack_redirect()\n return self.perform_logout(_dict, BINDING_HTTP_REDIRECT)", "def redirect_to_course_position(course_module):\r\n urlargs = {'course_id': course_module.id.to_deprecated_string()}\r\n chapter = get_current_child(course_module)\r\n if chapter is None:\r\n # oops. 
Something bad has happened.\r\n raise Http404(\"No chapter found when loading current position in course\")\r\n\r\n urlargs['chapter'] = chapter.url_name\r\n if course_module.position is not None:\r\n return redirect(reverse('courseware_chapter', kwargs=urlargs))\r\n\r\n # Relying on default of returning first child\r\n section = get_current_child(chapter)\r\n if section is None:\r\n raise Http404(\"No section found when loading current position in course\")\r\n\r\n urlargs['section'] = section.url_name\r\n return redirect(reverse('courseware_section', kwargs=urlargs))", "def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)", "def second_page():\n return redirect(url_for('index'))", "def get_outcome_service_url(self, service_name=\"grade_handler\"):\r\n return self.runtime.handler_url(self, service_name, thirdparty=True).rstrip('/?')", "def selectGroup(request):\n if 'next' in request.GET:\n n = request.GET['next']\n else:\n n = '/database/' # TODO: make me non absolute\n # TODO: write a test for me!\n if request.method == \"POST\":\n form = LabGroupSelectionForm(request.user, data=request.POST)\n if form.is_valid():\n request.session['labgroup_id'] = form.cleaned_data['labGroup'].id\n return redirect(n)\n else:\n return render(request, 'select_group.html', {'form': form, 'next': n})\n else:\n form = LabGroupSelectionForm(request.user)\n return render(request, 'select_group.html', {'form': form, 'next': n})", "def index() -> str:\n return redirect('/students')", "def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login',\n params=dict(came_from=came_from, __logins=login_counter))\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n\n # Do not use tg.redirect with tg.url as it will add the mountpoint\n # of the application twice.\n return HTTPFound(location=came_from)", "def getRootURL():", "def move_to_root(self, group_id, name):\n return self._client.post(\n url=self._client.get_full_url(\n self.get_path('collection', realm=self._realm_name)\n ),\n data=json.dumps({\n 'id': group_id,\n 'name': name\n })\n )", "def root_create(request): # pylint: disable=W0613\r\n root = get_or_create_root()\r\n return redirect('wiki:get', path=root.path)", "def redirect(self, url):\n self.setResponseCode(responsecode.FOUND)\n self.setHeader(\"location\", url)", "def redirect(url):", "def index():\n return redirect(url_for(\"home\"))", "def _go_to_root_menu(self):\n log.debug(\"Returning to root menu...\")\n # Issue an enter or two off the bat to get out of any display screens\n # and confirm command mode\n try:\n response = self._do_cmd_resp(Command.BLANK, expected_prompt=Prompt.CMD_PROMPT)\n while not str(response).lstrip().endswith(Prompt.CMD_PROMPT):\n response = self._do_cmd_resp(Command.BLANK,\n expected_prompt=Prompt.CMD_PROMPT)\n time.sleep(1)\n except InstrumentTimeoutException:\n raise InstrumentProtocolException(\"Not able to get valid command prompt. 
Is instrument in command mode?\")\n \n # When you get a --> prompt, do 9's until you get back to the root\n response = self._do_cmd_resp(Command.BACK_MENU,\n expected_prompt=MENU_PROMPTS)\n while not str(response).lstrip().endswith(Prompt.MAIN_MENU):\n response = self._do_cmd_resp(Command.BACK_MENU,\n expected_prompt=MENU_PROMPTS)", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def get(self):\n cont = self.request.get('continue', default_value='/')\n\n # Check whether redirecting to an absolute or relative url\n netloc = urlparse.urlsplit(cont).netloc\n if netloc:\n # Disallow absolute urls to prevent arbitrary open redirects\n raise custom_exceptions.InvalidRedirectURLError(\n \"Redirecting to an absolute url is not allowed.\")\n\n conversion_names = self.request.get_all('conversion_name')\n\n if len(conversion_names):\n bingo(conversion_names)\n\n self.redirect(_iri_to_uri(cont))", "def redirect_see_other(self, location):\n self.status = 303\n self.set_header('Location', location)", "def url_root():\n return \"OK\"", "def goto_url(self):\n msg = self._cw._('you have been logged out')\n return self._cw.base_url()", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))", "def default(self, tg_errors=None):\n raise redirect('list')", "def idx(_request):\n return HttpResponseRedirect('/home')", "def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))", "def redirectReferer():\n referer = request.headers.get('referer')\n if not referer:\n referer = url_for('catelog')\n return redirect(referer)", "def get(self):\n session.clear()\n return redirect(url_for(REDIRECT_URL))", "def redirect_dest(fallback):\n dest = request.args.get('next')\n try:\n if dest.startswith('/') or dest.startswith(request.host_url):\n return redirect(dest)\n dest_url = url_for(dest)\n except:\n return redirect(fallback)\n return redirect(dest_url)", "def do(self, action_context, view, *args, **kwargs):\n\n # We will use this action to check if we have a valid login session\n # if not then redirect to the login\n a = CheckUserAuthenticatedAlreadyAction()\n action_context = a.do(action_context, view, *args, **kwargs)\n # if not authenticated then break teh chain and return to login\n if not action_context.extra_context.get('user_authenticated'):\n action_context.break_chain = True\n url = reverse(\"auth:login\")\n redirect_param = settings.REDIRECT_URL_VALID_PARAMS[0]\n url = \"{0}?{1}={2}\".format(\n url,\n redirect_param,\n action_context.request.path\n )\n 
action_context.response = HttpResponseRedirect(url)\n\n return super().do(action_context, view, *args, **kwargs)", "def login():\n next_url = request.form.get(\"next\", None)\n\n if current_app.config[\"USE_SAML\"]:\n if next_url:\n return redirect(url_for(\"auth.saml\", sso2=next_url))\n return redirect(url_for(\"auth.saml\", sso=None))\n\n elif current_app.config[\"USE_LDAP\"]:\n return redirect(url_for(\"auth.ldap_login\", next=next_url))\n\n elif current_app.config[\"USE_LOCAL_AUTH\"]:\n return redirect(url_for(\"auth.local_login\", nex=next_url))\n\n return abort(404)", "def index():\n\n return redirect(api)", "def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url", "def homepage():\n return redirect('index.html')", "def send302(start_response, location):\n start_response('302 Found', [('Location', location)])\n return [YZ_MOVED_TO + location]", "def get(self, request, *args, **kwargs):\n return redirect(reverse_lazy(\n 'nominations-application'\n ) + \"?id=\" + self.kwargs['pk'])", "def path(self, group):\n return", "def kick_from_group(request):\n\treturn render(request,\"404.html\",{})", "def root():\t\t\n\t\tif websetup.check_twitter_auth() is False:\n\t\t\tif websetup.make_twitter_auth() is False:\n\t\t\t\treturn \"OAuth failure.\"\n\t\treturn bottle.template('newroot')", "def test_to_other_url(self):\n user = User(username='test', is_staff=True, is_superuser=True,\n is_active=True)\n user.set_password('test')\n user.full_clean()\n user.save()\n request = RequestFactory().get('/')\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n admin_instance = get_modeladmin(Iframe)\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302, obj=user)\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def return_to_source(origin,parent_object_id,target_username):\n\tif origin in ('home','home_reply'):\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"home_loc_pk\",pk=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'history':\n\t\tif target_username:\n\t\t\treturn redirect(\"user_activity\",slug=target_username)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'public':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"public_group\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'search':\n\t\treturn redirect(\"search_username\")\n\telif origin == 'profile':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"user_profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'profile_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'best_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"best_photo_loc_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'photo_comments':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"comment\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'fresh_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"see_photo_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"photo\")\n\telse:\n\t\treturn redirect(\"home\")", "def 
test_get_track_list_root_directory_returns_default_directory(self, example_group):\n example_group.directory = None\n example_track_list = example_group.track_lists[0]\n example_track_list.directory = None\n directory = utils.get_track_list_root_directory(\n group=example_group, track_list=example_track_list, default_dir=\"global/dir\"\n )\n assert directory == \"global/dir\"", "def root(request, fallback_view=None, **kwargs):\n if request.method == 'POST':\n if request.META['CONTENT_TYPE'] == 'text/xml':\n log.debug('XMLRPC request received')\n return parse_xmlrpc_request(request)\n log.debug('Distutils request received')\n parse_distutils_request(request)\n action = request.POST.get(':action','')\n else:\n action = request.GET.get(':action','')\n\n if not action:\n log.debug('No action in root view')\n if fallback_view is None:\n fallback_view = settings.DJANGOPYPI_FALLBACK_VIEW\n return fallback_view(request, **kwargs)\n\n if not action in settings.DJANGOPYPI_ACTION_VIEWS:\n log.error('Invalid action encountered: %s' % (action,))\n return HttpResponseNotAllowed(settings.DJANGOPYPI_ACTION_VIEW.keys())\n\n log.debug('Applying configured action view for %s' % (action,))\n return settings.DJANGOPYPI_ACTION_VIEWS[action](request, **kwargs)", "def test_login_openid_handle_redirection(self):\r\n response = self._send_bad_redirection_login()\r\n self.assertEquals(response.status_code, 302)", "def source():\n return redirect(get_last_menus_url())", "def entry_page():\n return redirect(url_for('index'))", "def test_index_redirect(self):\n response = self.app.get(\"/\")\n self.assertEqual(response.status_code, 302,\n \"/ did not redirect to login when user is not logged in\")\n self.assertTrue(\n response.location.endswith(\"/accounts/login/\"),\n \"Redirect location did not end with /accounts/login/\"\n )", "def test_get_track_list_root_directory_returns_group_directory(self, example_group):\n example_group.directory = \"group/dir\"\n example_track_list = example_group.track_lists[0]\n example_track_list.directory = None\n directory = utils.get_track_list_root_directory(\n group=example_group, track_list=example_track_list, default_dir=\"global/dir\"\n )\n assert directory == \"group/dir\"", "def redirect(self, path):\n self.get_controller().redirect(path)", "def redirect(self):\r\n mdict = self.matchdict\r\n hash_id = mdict.get('hash_id', None)\r\n username = mdict.get('username', None)\r\n\r\n hashed = Hashed.query.get(hash_id)\r\n\r\n if not hashed:\r\n # for some reason bad link, 404\r\n return HTTPNotFound()\r\n\r\n hashed.clicks = hashed.clicks + 1\r\n\r\n if username is not None:\r\n bookmark = Bmark.query.\\\r\n filter(Bmark.hash_id == hash_id).\\\r\n filter(Bmark.username == username).one()\r\n bookmark.clicks = bookmark.clicks + 1\r\n\r\n return HTTPFound(location=hashed.url)", "def openid_redirect(request):\n request.session['next'] = _get_next(request)\n request.session['openid_provider'] = request.GET.get('openid_provider')\n request.session['socialregistration_connect_object'] = get_object(request.GET)\n\n client = OpenID(\n request,\n 'http%s://%s%s' % (\n _https(),\n Site.objects.get_current().domain,\n reverse('openid_callback')\n ),\n request.GET.get('openid_provider')\n )\n try:\n return client.get_redirect()\n except DiscoveryFailure:\n request.session['openid_error'] = True\n return HttpResponseRedirect(settings.LOGIN_URL)", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, 
mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def url(self):\n return url_for('/admin/groups/{}'.format(self.key))", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def get(self):\n self.logout()\n self.redirect('/')", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def redirect_to(self, response):\n redirect_url = response.xpath(self.filters[0]).extract_first()\n yield scrapy.Request(redirect_url, self.first_submit)", "def error_redirect(err):\n error = err\n return render_template('error.html',\n title='That doesn\\'t exist!',\n error=error)", "def demo():\n site = request.values['site']\n user = request.args.get('user_id', '')\n group = request.args.get('group_id', '')\n if group == '@list':\n group = request.values['list']\n\n activity_id = search_query = ''\n if group == source.SEARCH:\n search_query = request.args.get('search_query', '')\n elif group != source.BLOCKS:\n activity_id = request.args.get('activity_id', '')\n\n # pass query params through\n params = dict(request.args.items())\n params.update({\n 'plaintext': 'true',\n 'cache': 'false',\n 'search_query': search_query,\n })\n\n path = '/'.join(urllib.parse.quote_plus(part, safe='@')\n for part in (site, user, group, '@app', activity_id))\n return redirect(f'/{path}?{urllib.parse.urlencode(params)}')", "def _redirect(self):\n \n # Redirect URL is held in 'r' URL arg of this request\n b64encReturnTo = str(request.params.get('r', ''))\n\n if b64encReturnTo:\n # Decode the return to address\n try:\n b64decReturnTo = base64.urlsafe_b64decode(b64encReturnTo)\n except Exception, e:\n log.error(\"logout - decoding return URL: %s\" % e) \n c.xml = \"Error carrying out browser redirect following logout\"\n response.status_code = 400\n return render('ndg.security.kid', 'ndg.security.error')\n \n # Check for 'getCredentials' - avoid in case username/password\n # contained in the URL!\n getCredentialsIdx = b64decReturnTo.rfind('/getCredentials')\n if getCredentialsIdx != -1:\n log.debug(\"Reverting request URL from getCredentials to \"\n \"login...\")\n b64decReturnTo = b64decReturnTo[:getCredentialsIdx] + '/login'\n \n # Add flag indicating to caller that logout succeeded. The caller\n # can use this to remove any security cookie present in their\n # domain - See:\n # ndg.security.client.ssoclient.ssoclient.lib.base.BaseController\n if '?' 
in b64decReturnTo:\n b64decReturnTo += '&logout=1'\n else:\n b64decReturnTo += '?logout=1'\n\n # and now go back to whence we had come\n log.debug(\"LogoutController._redirect: redirect to %s\" %\n b64decReturnTo)\n h.redirect_to(b64decReturnTo)\n else:\n log.debug(\"LogoutController._redirect: no redirect URL set.\")\n response.status_code = 400\n c.errorPageHeading = \"Log out\"\n if getattr(c, \"loggedIn\", False):\n c.xml = \"Logged out\"\n else:\n c.xml = (\"An error occurred logging out. Please report the \"\n \"problem to your site administrator\") \n \n return render('ndg.security.kid', 'ndg.security.error')", "def __call__(self):\n if isXmlRpc(self.REQUEST):\n return self\n else:\n newpath = INewPath(self)\n newpath = getUtility(IVirtualRoot).ensure_virtual_root(newpath)\n self.REQUEST.response.redirect(newpath)", "def home_redirect(request):\n if request.user.is_authenticated() and request.user.is_staff:\n return redirect(\"volunteers\")\n elif request.user.is_authenticated() and not request.user.is_superuser:\n related_volunteer = get_object_or_404(Volunteer, user_id=request.user.pk)\n return redirect(\"edit-volunteer-profile\", volunteer_id=related_volunteer.pk)\n else:\n return redirect(\"new-volunteer\")", "def to_exam_root(self):\n self._bottom_tab(4)\n self._goto(\"exam_icon\")" ]
[ "0.76904535", "0.67741513", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0.45062536", "0.45030215", "0.45024154", "0.45024154", "0.44957826", "0.44948602", "0.44899046", "0.44889957", "0.44869763", "0.448599", "0.44630018", "0.44624335", "0.4444673", "0.44391045", "0.4407454", "0.44068038", "0.4404437", "0.43974093", "0.4391359", "0.4383362", "0.43788657", "0.43764284", "0.43678367", "0.4357152", "0.43323603", "0.43322605", "0.4329011", "0.4325541", "0.43224767", "0.43101028", "0.43091914", "0.43083736", "0.4305456", "0.4299628", "0.42955056", "0.42902592", "0.42720035", "0.42720035", "0.42720035", "0.4266655", "0.4263173", "0.42624322", "0.42537558", "0.4250566", "0.4246781", "0.42454904", "0.42319196", "0.4223124", "0.42221075", "0.4221986", "0.42171907", "0.42112315", "0.42052758", "0.42052475", "0.41966388", "0.41920617", "0.41893536", "0.41880608", "0.41860765", "0.41832078", "0.41791", "0.4174417", "0.4172573", "0.41708988", "0.4169707", "0.41651154", "0.4158805", "0.41580254", "0.41571978", "0.41561946", "0.41538662", "0.41396224", "0.41360778", "0.41355315", "0.4135148", "0.4134425", "0.41338757", "0.41303748", "0.4126399", "0.4125886" ]
0.7103075
1
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_courses(request_ctx, course_id, **request_kwargs):
    path = '/v1/courses/{course_id}/root_outcome_group'
    url = request_ctx.base_api_url + path.format(course_id=course_id)
    response = client.get(request_ctx, url, **request_kwargs)
    return response
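For comparison, a sketch of the same call made without the wrapper, hitting the endpoint directly. The host, course id, and token are placeholders, and the bearer-token header is the standard Canvas API auth scheme — an assumption about how these wrappers are normally deployed, not something stated in the row.

import requests

base_api_url = 'https://canvas.example.edu/api'  # placeholder host
resp = requests.get(
    base_api_url + '/v1/courses/42/root_outcome_group',  # hypothetical course 42
    headers={'Authorization': 'Bearer <token>'},          # placeholder token
)
resp.raise_for_status()
print(resp.json()['title'])  # requests follows the 302 to the root outcome group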
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(account_id=account_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def redirect_associated_mood():\n\n # grab the mood_id from the form\n user_mood_id = request.form.get(\"mood\")\n\n # set the mood_id to id grabbed from the form\n mood_id = user_mood_id\n\n return redirect(\"/moods/{}/entries\".format(mood_id))", "def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)", "def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect", "def root(request):\n\ttemplate = 'bfbot/main'\n\treturn redirect(template)", "def redirect(self, location):\n self.redirect_see_other(location)", "def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def view__model_admin_root(context, request):\n return HTTPFound(request.resource_url(context.__parent__))", "def catch_all(path):\n return redirect('/', code=302)", "def redirect_to(self, route_name, *args, **kwargs):\n self.redirect(self.uri_for(route_name, *args, **kwargs))", "def redirect_source():\n return redirect(url_for(\"base_blueprint.source\"), code=301)", "def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def redirect_found(self, location):\n self.status = 302\n self.set_header('Location', location)", "def redirect_to_question():\n # responses variable will go on to store all of the user's answers to the questions\n session[ANSWERS_KEY] = []\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")", "def eastgardens(event, context):\n\n request = 
event['Records'][0]['cf']['request']\n path = request['uri']\n query = request['querystring']\n\n # prepend a ? if there is a query\n if query != '':\n query = '?' + query\n\n # Path+query based custom redirects get checked first\n if path + query in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path + query])\n\n # Now check path only custom redirects\n if path in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path])\n\n return handle_fallthrough(event, path, query)", "def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location", "def index_file():\n return redirect(\"/\")", "def redir_index():\n return redirect(url_for(\"index\"), code=301)", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def intermediate_redirect(cls, form_path):\r\n from r2.lib.template_helpers import add_sr\r\n dest = cls.format_output_url(request.fullpath)\r\n path = add_sr(form_path + query_string({\"dest\": dest}))\r\n return cls.redirect(path)", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def index():\n return redirect(url_for('second_page'))", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))", "def redirect_to(self, to, *args, **kwargs):\n\n response = redirect(to, *args, **kwargs)\n\n # By default, raise a redirect, which will cause the view\n # processing to stop and return this redirect.\n if kwargs.pop(\"raise\", True):\n raise response\n else:\n return response", "def redirect_version():\n return redirect(url_for(\"base_blueprint.version\"), code=301)", "def toLanding():\n return redirect(url_for('landingurl'))", "def get_success_url(self):\n is_same_user = self.get_object().userid == self.request.user\n return reverse(\"certhelper:shiftleader\") if not is_same_user else \"/\"", "def index():\n return redirect(auth_flow.get_authorization_url())", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def redirect(self) -> WerkzeugResponse:\n\n _dict = self.unpack_redirect()\n return self.perform_logout(_dict, BINDING_HTTP_REDIRECT)", "def redirect_to_course_position(course_module):\r\n urlargs = {'course_id': course_module.id.to_deprecated_string()}\r\n chapter = get_current_child(course_module)\r\n if chapter is None:\r\n # oops. 
Something bad has happened.\r\n raise Http404(\"No chapter found when loading current position in course\")\r\n\r\n urlargs['chapter'] = chapter.url_name\r\n if course_module.position is not None:\r\n return redirect(reverse('courseware_chapter', kwargs=urlargs))\r\n\r\n # Relying on default of returning first child\r\n section = get_current_child(chapter)\r\n if section is None:\r\n raise Http404(\"No section found when loading current position in course\")\r\n\r\n urlargs['section'] = section.url_name\r\n return redirect(reverse('courseware_section', kwargs=urlargs))", "def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)", "def second_page():\n return redirect(url_for('index'))", "def get_outcome_service_url(self, service_name=\"grade_handler\"):\r\n return self.runtime.handler_url(self, service_name, thirdparty=True).rstrip('/?')", "def selectGroup(request):\n if 'next' in request.GET:\n n = request.GET['next']\n else:\n n = '/database/' # TODO: make me non absolute\n # TODO: write a test for me!\n if request.method == \"POST\":\n form = LabGroupSelectionForm(request.user, data=request.POST)\n if form.is_valid():\n request.session['labgroup_id'] = form.cleaned_data['labGroup'].id\n return redirect(n)\n else:\n return render(request, 'select_group.html', {'form': form, 'next': n})\n else:\n form = LabGroupSelectionForm(request.user)\n return render(request, 'select_group.html', {'form': form, 'next': n})", "def index() -> str:\n return redirect('/students')", "def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login',\n params=dict(came_from=came_from, __logins=login_counter))\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n\n # Do not use tg.redirect with tg.url as it will add the mountpoint\n # of the application twice.\n return HTTPFound(location=came_from)", "def getRootURL():", "def move_to_root(self, group_id, name):\n return self._client.post(\n url=self._client.get_full_url(\n self.get_path('collection', realm=self._realm_name)\n ),\n data=json.dumps({\n 'id': group_id,\n 'name': name\n })\n )", "def root_create(request): # pylint: disable=W0613\r\n root = get_or_create_root()\r\n return redirect('wiki:get', path=root.path)", "def redirect(self, url):\n self.setResponseCode(responsecode.FOUND)\n self.setHeader(\"location\", url)", "def redirect(url):", "def index():\n return redirect(url_for(\"home\"))", "def _go_to_root_menu(self):\n log.debug(\"Returning to root menu...\")\n # Issue an enter or two off the bat to get out of any display screens\n # and confirm command mode\n try:\n response = self._do_cmd_resp(Command.BLANK, expected_prompt=Prompt.CMD_PROMPT)\n while not str(response).lstrip().endswith(Prompt.CMD_PROMPT):\n response = self._do_cmd_resp(Command.BLANK,\n expected_prompt=Prompt.CMD_PROMPT)\n time.sleep(1)\n except InstrumentTimeoutException:\n raise InstrumentProtocolException(\"Not able to get valid command prompt. 
Is instrument in command mode?\")\n \n # When you get a --> prompt, do 9's until you get back to the root\n response = self._do_cmd_resp(Command.BACK_MENU,\n expected_prompt=MENU_PROMPTS)\n while not str(response).lstrip().endswith(Prompt.MAIN_MENU):\n response = self._do_cmd_resp(Command.BACK_MENU,\n expected_prompt=MENU_PROMPTS)", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def get(self):\n cont = self.request.get('continue', default_value='/')\n\n # Check whether redirecting to an absolute or relative url\n netloc = urlparse.urlsplit(cont).netloc\n if netloc:\n # Disallow absolute urls to prevent arbitrary open redirects\n raise custom_exceptions.InvalidRedirectURLError(\n \"Redirecting to an absolute url is not allowed.\")\n\n conversion_names = self.request.get_all('conversion_name')\n\n if len(conversion_names):\n bingo(conversion_names)\n\n self.redirect(_iri_to_uri(cont))", "def redirect_see_other(self, location):\n self.status = 303\n self.set_header('Location', location)", "def url_root():\n return \"OK\"", "def goto_url(self):\n msg = self._cw._('you have been logged out')\n return self._cw.base_url()", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))", "def default(self, tg_errors=None):\n raise redirect('list')", "def idx(_request):\n return HttpResponseRedirect('/home')", "def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))", "def redirectReferer():\n referer = request.headers.get('referer')\n if not referer:\n referer = url_for('catelog')\n return redirect(referer)", "def get(self):\n session.clear()\n return redirect(url_for(REDIRECT_URL))", "def redirect_dest(fallback):\n dest = request.args.get('next')\n try:\n if dest.startswith('/') or dest.startswith(request.host_url):\n return redirect(dest)\n dest_url = url_for(dest)\n except:\n return redirect(fallback)\n return redirect(dest_url)", "def do(self, action_context, view, *args, **kwargs):\n\n # We will use this action to check if we have a valid login session\n # if not then redirect to the login\n a = CheckUserAuthenticatedAlreadyAction()\n action_context = a.do(action_context, view, *args, **kwargs)\n # if not authenticated then break teh chain and return to login\n if not action_context.extra_context.get('user_authenticated'):\n action_context.break_chain = True\n url = reverse(\"auth:login\")\n redirect_param = settings.REDIRECT_URL_VALID_PARAMS[0]\n url = \"{0}?{1}={2}\".format(\n url,\n redirect_param,\n action_context.request.path\n )\n 
action_context.response = HttpResponseRedirect(url)\n\n return super().do(action_context, view, *args, **kwargs)", "def login():\n next_url = request.form.get(\"next\", None)\n\n if current_app.config[\"USE_SAML\"]:\n if next_url:\n return redirect(url_for(\"auth.saml\", sso2=next_url))\n return redirect(url_for(\"auth.saml\", sso=None))\n\n elif current_app.config[\"USE_LDAP\"]:\n return redirect(url_for(\"auth.ldap_login\", next=next_url))\n\n elif current_app.config[\"USE_LOCAL_AUTH\"]:\n return redirect(url_for(\"auth.local_login\", nex=next_url))\n\n return abort(404)", "def index():\n\n return redirect(api)", "def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url", "def homepage():\n return redirect('index.html')", "def send302(start_response, location):\n start_response('302 Found', [('Location', location)])\n return [YZ_MOVED_TO + location]", "def get(self, request, *args, **kwargs):\n return redirect(reverse_lazy(\n 'nominations-application'\n ) + \"?id=\" + self.kwargs['pk'])", "def path(self, group):\n return", "def kick_from_group(request):\n\treturn render(request,\"404.html\",{})", "def root():\t\t\n\t\tif websetup.check_twitter_auth() is False:\n\t\t\tif websetup.make_twitter_auth() is False:\n\t\t\t\treturn \"OAuth failure.\"\n\t\treturn bottle.template('newroot')", "def test_to_other_url(self):\n user = User(username='test', is_staff=True, is_superuser=True,\n is_active=True)\n user.set_password('test')\n user.full_clean()\n user.save()\n request = RequestFactory().get('/')\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n admin_instance = get_modeladmin(Iframe)\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302, obj=user)\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def return_to_source(origin,parent_object_id,target_username):\n\tif origin in ('home','home_reply'):\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"home_loc_pk\",pk=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'history':\n\t\tif target_username:\n\t\t\treturn redirect(\"user_activity\",slug=target_username)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'public':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"public_group\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'search':\n\t\treturn redirect(\"search_username\")\n\telif origin == 'profile':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"user_profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'profile_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'best_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"best_photo_loc_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'photo_comments':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"comment\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'fresh_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"see_photo_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"photo\")\n\telse:\n\t\treturn redirect(\"home\")", "def 
test_get_track_list_root_directory_returns_default_directory(self, example_group):\n example_group.directory = None\n example_track_list = example_group.track_lists[0]\n example_track_list.directory = None\n directory = utils.get_track_list_root_directory(\n group=example_group, track_list=example_track_list, default_dir=\"global/dir\"\n )\n assert directory == \"global/dir\"", "def root(request, fallback_view=None, **kwargs):\n if request.method == 'POST':\n if request.META['CONTENT_TYPE'] == 'text/xml':\n log.debug('XMLRPC request received')\n return parse_xmlrpc_request(request)\n log.debug('Distutils request received')\n parse_distutils_request(request)\n action = request.POST.get(':action','')\n else:\n action = request.GET.get(':action','')\n\n if not action:\n log.debug('No action in root view')\n if fallback_view is None:\n fallback_view = settings.DJANGOPYPI_FALLBACK_VIEW\n return fallback_view(request, **kwargs)\n\n if not action in settings.DJANGOPYPI_ACTION_VIEWS:\n log.error('Invalid action encountered: %s' % (action,))\n return HttpResponseNotAllowed(settings.DJANGOPYPI_ACTION_VIEW.keys())\n\n log.debug('Applying configured action view for %s' % (action,))\n return settings.DJANGOPYPI_ACTION_VIEWS[action](request, **kwargs)", "def test_login_openid_handle_redirection(self):\r\n response = self._send_bad_redirection_login()\r\n self.assertEquals(response.status_code, 302)", "def source():\n return redirect(get_last_menus_url())", "def entry_page():\n return redirect(url_for('index'))", "def test_index_redirect(self):\n response = self.app.get(\"/\")\n self.assertEqual(response.status_code, 302,\n \"/ did not redirect to login when user is not logged in\")\n self.assertTrue(\n response.location.endswith(\"/accounts/login/\"),\n \"Redirect location did not end with /accounts/login/\"\n )", "def test_get_track_list_root_directory_returns_group_directory(self, example_group):\n example_group.directory = \"group/dir\"\n example_track_list = example_group.track_lists[0]\n example_track_list.directory = None\n directory = utils.get_track_list_root_directory(\n group=example_group, track_list=example_track_list, default_dir=\"global/dir\"\n )\n assert directory == \"group/dir\"", "def redirect(self, path):\n self.get_controller().redirect(path)", "def redirect(self):\r\n mdict = self.matchdict\r\n hash_id = mdict.get('hash_id', None)\r\n username = mdict.get('username', None)\r\n\r\n hashed = Hashed.query.get(hash_id)\r\n\r\n if not hashed:\r\n # for some reason bad link, 404\r\n return HTTPNotFound()\r\n\r\n hashed.clicks = hashed.clicks + 1\r\n\r\n if username is not None:\r\n bookmark = Bmark.query.\\\r\n filter(Bmark.hash_id == hash_id).\\\r\n filter(Bmark.username == username).one()\r\n bookmark.clicks = bookmark.clicks + 1\r\n\r\n return HTTPFound(location=hashed.url)", "def openid_redirect(request):\n request.session['next'] = _get_next(request)\n request.session['openid_provider'] = request.GET.get('openid_provider')\n request.session['socialregistration_connect_object'] = get_object(request.GET)\n\n client = OpenID(\n request,\n 'http%s://%s%s' % (\n _https(),\n Site.objects.get_current().domain,\n reverse('openid_callback')\n ),\n request.GET.get('openid_provider')\n )\n try:\n return client.get_redirect()\n except DiscoveryFailure:\n request.session['openid_error'] = True\n return HttpResponseRedirect(settings.LOGIN_URL)", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, 
mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def url(self):\n return url_for('/admin/groups/{}'.format(self.key))", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def get(self):\n self.logout()\n self.redirect('/')", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def redirect_to(self, response):\n redirect_url = response.xpath(self.filters[0]).extract_first()\n yield scrapy.Request(redirect_url, self.first_submit)", "def error_redirect(err):\n error = err\n return render_template('error.html',\n title='That doesn\\'t exist!',\n error=error)", "def demo():\n site = request.values['site']\n user = request.args.get('user_id', '')\n group = request.args.get('group_id', '')\n if group == '@list':\n group = request.values['list']\n\n activity_id = search_query = ''\n if group == source.SEARCH:\n search_query = request.args.get('search_query', '')\n elif group != source.BLOCKS:\n activity_id = request.args.get('activity_id', '')\n\n # pass query params through\n params = dict(request.args.items())\n params.update({\n 'plaintext': 'true',\n 'cache': 'false',\n 'search_query': search_query,\n })\n\n path = '/'.join(urllib.parse.quote_plus(part, safe='@')\n for part in (site, user, group, '@app', activity_id))\n return redirect(f'/{path}?{urllib.parse.urlencode(params)}')", "def _redirect(self):\n \n # Redirect URL is held in 'r' URL arg of this request\n b64encReturnTo = str(request.params.get('r', ''))\n\n if b64encReturnTo:\n # Decode the return to address\n try:\n b64decReturnTo = base64.urlsafe_b64decode(b64encReturnTo)\n except Exception, e:\n log.error(\"logout - decoding return URL: %s\" % e) \n c.xml = \"Error carrying out browser redirect following logout\"\n response.status_code = 400\n return render('ndg.security.kid', 'ndg.security.error')\n \n # Check for 'getCredentials' - avoid in case username/password\n # contained in the URL!\n getCredentialsIdx = b64decReturnTo.rfind('/getCredentials')\n if getCredentialsIdx != -1:\n log.debug(\"Reverting request URL from getCredentials to \"\n \"login...\")\n b64decReturnTo = b64decReturnTo[:getCredentialsIdx] + '/login'\n \n # Add flag indicating to caller that logout succeeded. The caller\n # can use this to remove any security cookie present in their\n # domain - See:\n # ndg.security.client.ssoclient.ssoclient.lib.base.BaseController\n if '?' 
in b64decReturnTo:\n b64decReturnTo += '&logout=1'\n else:\n b64decReturnTo += '?logout=1'\n\n # and now go back to whence we had come\n log.debug(\"LogoutController._redirect: redirect to %s\" %\n b64decReturnTo)\n h.redirect_to(b64decReturnTo)\n else:\n log.debug(\"LogoutController._redirect: no redirect URL set.\")\n response.status_code = 400\n c.errorPageHeading = \"Log out\"\n if getattr(c, \"loggedIn\", False):\n c.xml = \"Logged out\"\n else:\n c.xml = (\"An error occurred logging out. Please report the \"\n \"problem to your site administrator\") \n \n return render('ndg.security.kid', 'ndg.security.error')", "def __call__(self):\n if isXmlRpc(self.REQUEST):\n return self\n else:\n newpath = INewPath(self)\n newpath = getUtility(IVirtualRoot).ensure_virtual_root(newpath)\n self.REQUEST.response.redirect(newpath)", "def home_redirect(request):\n if request.user.is_authenticated() and request.user.is_staff:\n return redirect(\"volunteers\")\n elif request.user.is_authenticated() and not request.user.is_superuser:\n related_volunteer = get_object_or_404(Volunteer, user_id=request.user.pk)\n return redirect(\"edit-volunteer-profile\", volunteer_id=related_volunteer.pk)\n else:\n return redirect(\"new-volunteer\")", "def to_exam_root(self):\n self._bottom_tab(4)\n self._goto(\"exam_icon\")" ]
[ "0.76904535", "0.7103075", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0.45062536", "0.45030215", "0.45024154", "0.45024154", "0.44957826", "0.44948602", "0.44899046", "0.44889957", "0.44869763", "0.448599", "0.44630018", "0.44624335", "0.4444673", "0.44391045", "0.4407454", "0.44068038", "0.4404437", "0.43974093", "0.4391359", "0.4383362", "0.43788657", "0.43764284", "0.43678367", "0.4357152", "0.43323603", "0.43322605", "0.4329011", "0.4325541", "0.43224767", "0.43101028", "0.43091914", "0.43083736", "0.4305456", "0.4299628", "0.42955056", "0.42902592", "0.42720035", "0.42720035", "0.42720035", "0.4266655", "0.4263173", "0.42624322", "0.42537558", "0.4250566", "0.4246781", "0.42454904", "0.42319196", "0.4223124", "0.42221075", "0.4221986", "0.42171907", "0.42112315", "0.42052758", "0.42052475", "0.41966388", "0.41920617", "0.41893536", "0.41880608", "0.41860765", "0.41832078", "0.41791", "0.4174417", "0.4172573", "0.41708988", "0.4169707", "0.41651154", "0.4158805", "0.41580254", "0.41571978", "0.41561946", "0.41538662", "0.41396224", "0.41360778", "0.41355315", "0.4135148", "0.4134425", "0.41338757", "0.41303748", "0.4126399", "0.4125886" ]
0.67741513
2
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):
    path = '/v1/global/outcome_groups/{id}'
    payload = {
        'title' : title,
        'description' : description,
        'vendor_guid' : vendor_guid,
        'parent_outcome_group_id' : parent_outcome_group_id,
    }
    url = request_ctx.base_api_url + path.format(id=id)
    response = client.put(request_ctx, url, payload=payload, **request_kwargs)
    return response
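A hedged sketch of calling the update wrapper above, illustrating the rules from the query text: fields not provided are left as-is server-side (here they default to None, which the client is assumed to pass through for the server to ignore), and parent_outcome_group_id must name a same-context group that is not a descendant of the group being moved. Both ids are placeholders.

# Rename global outcome group 7 and reparent it under group 3 (both ids hypothetical).
response = update_outcome_group_global(
    request_ctx,
    id=7,
    title='Quantitative Reasoning',
    parent_outcome_group_id=3,  # must be same-context and not a descendant of group 7
    # description / vendor_guid omitted -> left unchanged on the server
)
updated_group = response.json()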
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def modify_resource_group(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_resource_group_with_options(request, runtime)", "def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)", "def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def 
ModifyGroup(self, group, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def edit_group_command(self):\n self.switch_frame(\"Edit Group\")\n id = self.parent.get_frame_id(\"Edit Group\")\n self.parent.frames[id].display_group(self.user.active_group)", "def test_update_group(self):\n pass", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')", "def _mod_group(self, command, group_id, group_type, buckets=None):\n self.datapath.send_msg(\n self.parser.OFPGroupMod(\n datapath=self.datapath,\n command=command,\n group_id=group_id,\n type_=group_type,\n buckets=buckets,\n )\n )", "def reset_group(node, suffix=\"_grp\"):\n # create transform group\n name = \"{}_{}\".format(node.rsplit(\"_\", 1)[0], suffix)\n reset_grp = cmds.createNode(\"transform\", name=name)\n cmds.parent(reset_grp, node)\n cmds.makeIdentity(reset_grp, translate=True, rotate=True, scale=True)\n\n # reparent under parent if any, else world\n parent = (cmds.listRelatives(node, parent=True) or [None])[0]\n if parent:\n cmds.parent(reset_grp, parent)\n else:\n cmds.parent(reset_grp, world=True)\n cmds.parent(node, reset_grp)\n\n # for joints, reset rotates and jointOrients\n if cmds.nodeType(node) == \"joint\":\n cmds.makeIdentity(node, jointOrient=True, rotate=True, apply=True)\n\n cmds.select(clear=True)\n\n return reset_grp", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def axial_correction_group(obj,\n to_parents_origin=False,\n name_prefix=\"\",\n name_postfix=\"_ACGroup#\"):\n obj = get_valid_dag_node(obj)\n\n if name_postfix == \"\":\n name_postfix = \"_ACGroup#\"\n\n ac_group = pm.group(\n em=True,\n n=(name_prefix + obj.name() + name_postfix)\n )\n\n ac_group = pm.parent(ac_group, obj)[0]\n\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n 
pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n parent = pm.listRelatives(obj, p=True)\n if len(parent) != 0:\n pm.parent(ac_group, parent[0], a=True)\n else:\n pm.parent(ac_group, w=True)\n\n if to_parents_origin:\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n pm.parent(obj, ac_group, a=True)\n\n # for joints also set the joint orient to zero\n if isinstance(obj, pm.nodetypes.Joint):\n # set the joint rotation and joint orient to zero\n obj.setAttr('r', (0, 0, 0))\n obj.setAttr('jo', (0, 0, 0))\n\n return ac_group", "def _group_modify_id(group, id_modifier):\n\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))\n\n return group", "def process_object(self, new, old=None):\n new = super().process_object(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.modified_field, self.model.permissions_field)\n validate_from_bucket_schema_or_400(\n new,\n resource_name=\"group\",\n request=self.request,\n ignore_fields=internal_fields,\n id_field=self.model.id_field,\n )\n\n return new", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in 
duplicate_group.members:\n if member_id not in original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def test_update_team_user_group(client):\n group = client.update_team_user_group(TEAM_ID, GROUP_ID, {\n \"name\": \"Updated Python group\",\n \"is_reviewer\": False,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert group.name == \"Updated Python group\"\n assert group.permissions['is_admin']\n assert not group.permissions['is_reviewer']", "def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response", "def group(ctx, project, group): # pylint:disable=redefined-outer-name\n ctx.obj = ctx.obj or {}\n ctx.obj['project'] = project\n ctx.obj['group'] = group", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "def test_patch_project_move_root(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n new_owner = self.make_user('new_owner')\n 
self.make_assignment(new_category, new_owner, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': ''}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 200, msg=response.content)", "def __upgrade_group(self, group_old: Group, group_new: str) -> None:\n def upgrade_permissions(permissions_list_1: list, permissions_list_2: list, action) -> list:\n permissions_to_change = [\n permission_change\n for permission_change in permissions_list_1\n if permission_change not in permissions_list_2\n ]\n return self.__upgrade_group_permissions(group_old, permissions_to_change, action)\n\n messages = [f'Group {group_new} permission changes']\n\n permissions_from_db = [p.codename for p in group_old.permissions.all()]\n permissions_from_file = main_app_groups[group_new]\n\n # in db but not in file -> remove\n messages += upgrade_permissions(permissions_from_db, permissions_from_file, REMOVE)\n # in file but not in db -> add\n messages += upgrade_permissions(permissions_from_file, permissions_from_db, ADD)\n\n if len(messages) > 1:\n self.__print_messages(messages)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def test_groups_group_id_state_put(self):\n pass", "def test_update_resource_group(self):\n pass", "def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. 
cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def update(ctx, name, description, tags):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n update_dict = {}\n\n if name:\n update_dict['name'] = name\n\n if description:\n update_dict['description'] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict['tags'] = tags\n\n if not update_dict:\n Printer.print_warning('No argument was provided to update the experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.update_experiment_group(\n user, project_name, _group, update_dict)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not update experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n Printer.print_success(\"Experiment group updated.\")\n get_group_details(response)", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def mutate(self, child):\n return child", "def group(self, val):\n self.set_property(\"Group\", val)", "def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_nodegroup(self.cluster_id, self.uuid, updates)\n\n self.obj_reset_changes()", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def modify_resource_group_with_options(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyResourceGroup',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyResourceGroupResponse(),\n self.call_api(params, req, runtime)\n )", "def test_editGroup(self):\n\t\tuser = 
User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tdata = {\n\t\t\t'name' : 'anotherTestGroup'\n\t\t}\n\n\t\tresponse = self.client.patch(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"name\"], 'anotherTestGroup')\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.patch(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def groupFormation(self):\n self.updatedPartnerNames()\n self.updatedPotentialPartnerNames()\n self.updatePartnerIdices()\n self.makeGraphDictionary()\n self.assignGroupIDs()", "def rename_group(self, old: str, new: str) -> Tuple[Optional['LedGroup'], str]:\n old_name: str = LedGroup.get_name(old)\n group: Optional['LedGroup'] = self.get_group_by_name(old_name)\n if group is None:\n return None, 'no_group'\n if new == \"\":\n return None, 'empty_group'\n unique: bool = not (new in self.get_group_list())\n if not unique:\n return None, \"group_exists\"\n group.Name = new\n check: Optional[LedGroup] = LedGroup.verify_led_group(group)\n if check is None:\n group.Name = old_name\n return None, \"wrong_group_name\"\n for seq in self.Sequencers:\n if seq.Group.lower() == old_name.lower():\n seq.Group = new\n return group, \"\"", "def test_patch_project_move(self):\n self.assertEqual(\n self.project.full_title,\n self.category.title + ' / ' + self.project.title,\n )\n\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n self.make_assignment(new_category, self.user_owner_cat, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n self.assertEqual(model_dict['parent'], new_category.pk)\n owners = [a.user for a in self.project.get_owners()]\n self.assertIn(self.user_owner_cat, owners)\n self.assertIn(self.user_owner, owners)\n\n # Assert child project full title update\n self.assertEqual(\n self.project.full_title,\n new_category.title + ' / ' + self.project.title,\n )\n self.assertEqual(\n json.loads(response.content)['parent'], str(new_category.sodar_uuid)\n )", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def _switch_group_right(group_id, action, value, workspace, request_user):\n group = group_api.get_group_by_id(group_id)\n\n if action == workspace_constants.ACTION_READ:\n if value:\n workspace_api.add_group_read_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_read_access_to_workspace(workspace, group, request_user)\n elif action == workspace_constants.ACTION_WRITE:\n if value:\n workspace_api.add_group_write_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_write_access_to_workspace(workspace, group, request_user)", "def _update_field(self, tag_id, value):\n if tag_id in self._group_fields:\n # start a new level, an individual group doesn't\n # exist since we haven't read any information in yet.\n self._level_stack.append({\n 'tag_id': tag_id,\n 'list': [],\n 'group': 
None,\n })\n\n elif len(self._level_stack) == 0:\n # We are at the top of the message\n if isinstance(value, bytes):\n self._message[tag_id] = value.decode()\n else:\n self._message[tag_id] = value\n\n elif tag_id in self._group_fields[self._level_stack[-1]['tag_id']]:\n # We are within a group and the field is in the list of tags\n # for this group\n level = self._level_stack[-1]\n group = level['group']\n if group is None or tag_id in group:\n # Create a new group if there is no current group\n # or if this key already exists within the group\n group = collections.OrderedDict()\n level['list'].append(group)\n level['group'] = group\n group[tag_id] = value\n\n else:\n # we are in a grouping, but we have a tag_id that doesn't\n # belong, so need to pop the stack off\n level = self._level_stack.pop() # this is the current level\n\n while len(self._level_stack) > 0:\n # Add the current group to it's parent grouping\n parent_level = self._level_stack[-1]\n parent_level['group'][level['tag_id']] = level['list']\n\n level = parent_level\n if tag_id in self._group_fields[level['tag_id']]:\n break\n self._level_stack.pop()\n\n if len(self._level_stack) == 0:\n self._message[level['tag_id']] = level['list']\n\n self._update_field(tag_id, value)", "def patch_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def patch(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('patch',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })", "def test_replace_group(self):\n pass", "def test_patch_resource_group(self):\n pass", "def add_to_group(self, group):\n\n if not self.in_group(group):\n 
self.secondary_groups.append(group)\n return self", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def upsert_group(self,\n group, # type: Group\n *options, # type: UpsertGroupOptions\n **kwargs # type: Any\n ):\n # This endpoint accepts application/x-www-form-urlencoded and requires the data be sent as form data.\n # The name/id should not be included in the form data.\n # Roles should be a comma separated list of strings.\n # If, only if, the role contains a bucket name then the rolename should be suffixed\n # with[<bucket_name>] e.g. bucket_full_access[default],security_admin.\n\n final_args = forward_args(kwargs, *options)\n final_args.update({k: v for k, v in group.as_dict.items() if k in {\n 'roles', 'description', 'ldap_group_reference'}})\n self._admin_bucket.group_upsert(group.name, **final_args)", "def _set_grouping(self, change) -> None:\n grouping = self._grouping_full\n self.options = self._flat_groupings(grouping)\n self.set_trait(\n \"_grouping_labels\",\n tuple(\n [\n (header, tuple([_[0] for _ in options]))\n for header, options in grouping\n ]\n ),\n )\n if not self._initializing_traits_:\n for index, option in enumerate(self._flat_groupings()):\n if (\n option not in self.disabled_options\n and option not in self._group_headers\n ):\n if self.index == index:\n self._notify_trait(\"index\", index, index)\n else:\n self.index = index\n break\n else:\n self.index = None", "def test_delete_group_reparent_vars(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_vars=True)\n assert 'management_bridge' in inventoryloader.groups['glance_all'].vars", "def add_to_group(self, org, contact, group):\n pass", "async def handle_set_group(self, match: Match[str], payload: str) -> None:\n groupid = match.group(1)\n\n try:\n group = self._bridge.groups[groupid]\n state = GroupSetState(**json.loads(payload))\n LOGGER.info(f\"Updating group {group.name}\")\n await group.set_action(**state.dict())\n except IndexError:\n LOGGER.warning(f\"Unknown group id: {groupid}\")\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")", "def UpdateGroupMembership(self, newMembers):\r\n globals.groupMembers[newMembers.targetGuid] = True #remove the target Sticky\r\n\r\n for guid in newMembers.guids[0]:\r\n globals.groupMembers[guid]=True\r\n\r\n group = Group()\r\n globals._groupNumber = globals._groupNumber+1\r\n group.groupID = globals._groupName + str(globals._groupNumber)\r\n group.targetSticky[\"guid\"] = newMembers.targetGuid\r\n group.targetSticky[\"desc\"] = newMembers.targetDesc\r\n group.targetSticky[\"head\"] = newMembers.targetHead #lplp1313 new value\r\n\r\n guidSims = tuple(zip(newMembers.guids[0], newMembers.descriptions[0], newMembers.headers[0], list(newMembers.cos_sims[0]))) #lplp1313 new value \r\n for g, d, h, c in guidSims:\r\n gs = GroupSticky()\r\n gs.guid=g\r\n gs.desc=d\r\n gs.head=h #lplp1313 new value\r\n gs.cosineVal=c\r\n group.groupStickies.append(gs)\r\n\r\n globals._jsonReply._groups.append(group)", "def 
set_parent(self, new_parent):\n self.__parent = new_parent", "def partial_update(self, request, *args, **kwargs):\n instance = Group.objects.get(pk=kwargs['pk'])\n\n if instance.owner_id != request.user.id and not request.user.is_superuser:\n return not_allowed_to_do()\n\n write_serializer = GroupWriteSerializer(\n partial=True,\n instance=instance,\n data=request.data,\n context={\"request\": request}\n )\n if write_serializer.is_valid():\n instance = write_serializer.update(instance, write_serializer.validated_data)\n read_serializer = GroupReadSerializer(instance)\n return Response(read_serializer.data, status.HTTP_200_OK)\n else:\n return Response(write_serializer.errors, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def setKind(self, *args):\n return _libsbml.Group_setKind(self, *args)", "def update(self):\r\n return self.connection._update_group('UpdateAutoScalingGroup', self)", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "def fusion_api_update_group_role_assignment(self, body, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.update(body, api, headers)", "def replaceChild(self, *args):\n return _libsbml.ASTNode_replaceChild(self, *args)", "def test_groups_group_ref_put(self):\n pass", "def update(self, consistencygroup, **kwargs):\n if not kwargs:\n return\n\n body = {\"consistencygroup\": kwargs}\n\n return self._update(\"/consistencygroups/%s\" %\n base.getid(consistencygroup), body)", "def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)", "def update_group(self, group_id, update_group_details, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")", "def adjust_parent(self, parent_adjustor: ParentRoleAdjuster):\n self.adjust_parent_aggregate(parent_adjustor=parent_adjustor,\n get_summed_field=lambda: 
getattr(parent_adjustor.child_logic_row.row, self._child_summed_field),\n get_old_summed_field=lambda: getattr(parent_adjustor.child_logic_row.old_row, self._child_summed_field)\n )", "def update_targetgroup(self, group_id, **kwargs):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).update(**kwargs)\r\n self._db.commit()\r\n return result", "def test_remove_self_as_parent(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n groupb.del_parent(groupb)", "def test_05_self_can_downgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. 
Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))", "def MutateAdGroupLabels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def replace_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = 
{n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def update_group(\n self,\n group,\n validate_only=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"update_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"update_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.update_group,\n default_retry=self._method_configs[\"UpdateGroup\"].retry,\n default_timeout=self._method_configs[\"UpdateGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.UpdateGroupRequest(\n group=group, validate_only=validate_only,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"group.name\", group.name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"update_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def reposition(self, x, y):\n self.groupx = x\n self.groupy = y\n self.call('reposition', x, y)", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def edit_group(request, group_id):\n group = None\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n return utility.edit_instance(request, models.UserGroup, forms.GroupEditForm,\n 'admin/edit_group',\n urlresolvers.reverse('views.admin.list_groups'),\n group_id, group=group)", "def put(self, id):\n adm = Administration()\n lg = 
LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def test_replace_groups(self):\n pass", "def replaceChild(self, *args):\n return _libsbml.ASTBasePlugin_replaceChild(self, *args)", "async def add_parent_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n stmt = select(Group).filter(Group.parent_id is None).options(selectinload(Group.parent))\n result = await dbsession.execute(stmt)\n stmt = select(func.count(Group.id)).filter(Group.parent_id is None)\n result_count = await dbsession.execute(stmt)\n with click.progressbar(\n result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n ) as progress:\n for group in progress:\n if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n categories = apply_aat(group.value, merge=False)\n if categories:\n for category_list in categories:\n mapped = False\n for category in category_list:\n stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if not parent_group:\n parent_group = Group(\n value=category, label=category[0].upper() + category[1:], split=\"parent\"\n )\n dbsession.add(group)\n group.parent = parent_group\n mapped = True\n group = parent_group # noqa: PLW2901\n if group.parent_id:\n break\n if mapped:\n break\n else:\n mapped = False\n for category in apply_nlp(group.value):\n stmt = select(Group).filter(\n or_(Group.value == category, Group.value == inflection.pluralize(category))\n )\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if parent_group:\n group.parent = parent_group\n await dbsession.commit()\n mapped = True\n break\n if not mapped:\n if group.value not in [\"styles and periods\"]:\n for category in apply_nlp(group.value):\n hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()", "def clone(self):\n return _libsbml.Group_clone(self)", "def capacitygroup_update(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_update(cmd_ctx, cpc, capacitygroup, options))", "def edit_group(request, name=None):\n if not request.user.is_superuser:\n raise PopupException(_(\"You must be a superuser to add or edit a group.\"), error_code=401)\n\n if name is not None:\n instance = Group.objects.get(name=name)\n else:\n instance = None\n\n if request.method == 'POST':\n form = GroupEditForm(request.POST, instance=instance)\n if form.is_valid():\n form.save()\n request.info(_('Group information updated'))\n return list_groups(request)\n\n else:\n form = GroupEditForm(instance=instance)\n\n return render('edit_group.mako', request, dict(form=form, action=request.path, name=name))", "def put_group(\n group_id: BSONObjectId,\n data: PutGroupIn,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n grp: Group = Group.objects.get(pk=group_id)\n if not (\n tkn.owner == grp.owner or 
has_clearance(tkn.owner, \"sni.update_group\")\n ):\n raise PermissionError\n logging.debug(\"Updating group %s (%s)\", grp.group_name, group_id)\n if data.add_members is not None:\n grp.members += [\n User.objects.get(character_name=member_name)\n for member_name in set(data.add_members)\n ]\n if data.authorized_to_login is not None:\n assert_has_clearance(tkn.owner, \"sni.set_authorized_to_login\")\n grp.authorized_to_login = data.authorized_to_login\n if data.description is not None:\n grp.description = data.description\n if data.members is not None:\n grp.members = [\n User.objects.get(character_name=member_name)\n for member_name in set(data.members)\n ]\n if data.owner is not None:\n grp.owner = User.objects.get(character_name=data.owner)\n if data.remove_members is not None:\n grp.members = [\n member\n for member in grp.members\n if member.character_name not in data.remove_members\n ]\n grp.members = list(set(grp.members + [grp.owner]))\n grp.save()\n return GetGroupOut.from_record(grp)", "def post_security_group_update(self, resource_id, resource_dict):\n pass" ]
[ "0.61605686", "0.61406356", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454543", "0.5235111", "0.5230351", "0.52265704", "0.5214914", "0.5211578", "0.5167291", "0.51348954", "0.51078886", "0.50968206", "0.508044", "0.50553834", "0.5047006", "0.5045846", "0.50361997", "0.50161546", "0.49979255", "0.49879062", "0.49850982", "0.49767378", "0.49551222", "0.49455002", "0.49402133", "0.49337864", "0.4927972", "0.49264747", "0.49236476", "0.4909741", "0.49097082", "0.4900177", "0.4900177", "0.4894824", "0.4884672", "0.48715976", "0.48469827", "0.48394856", "0.48353392", "0.4829131", "0.4821512", "0.48193535", "0.48189867", "0.48012626", "0.47981516", "0.4789781", "0.47839117", "0.4780934", "0.47805288", "0.4779988", "0.47796774", "0.47796145", "0.47776666", "0.47758597", "0.477075", "0.47692114", "0.47674754", "0.47673678", "0.47637057", "0.47603077", "0.4758207", "0.47574228", "0.47559536", "0.47313794", "0.4729889", "0.4727195", "0.47129786", "0.4707538", "0.46987382", "0.46972087", "0.468537", "0.46821442", "0.46779305", "0.46745872", "0.4674417", "0.46665904", "0.4665863", "0.46649307", "0.46626148", "0.4660908", "0.4660435", "0.4655853" ]
0.67553115
0
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n    # Account-scoped outcome group endpoint\n    path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n    # Fields left as None are left as is by the API; unrecognized fields are ignored\n    payload = {\n        'title' : title,\n        'description' : description,\n        'vendor_guid' : vendor_guid,\n        'parent_outcome_group_id' : parent_outcome_group_id,\n    }\n    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n    response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def modify_resource_group(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_resource_group_with_options(request, runtime)", "def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)", "def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def ModifyGroup(self, group, reason=None, **kwargs):\n query = []\n 
_AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def edit_group_command(self):\n self.switch_frame(\"Edit Group\")\n id = self.parent.get_frame_id(\"Edit Group\")\n self.parent.frames[id].display_group(self.user.active_group)", "def test_update_group(self):\n pass", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')", "def _mod_group(self, command, group_id, group_type, buckets=None):\n self.datapath.send_msg(\n self.parser.OFPGroupMod(\n datapath=self.datapath,\n command=command,\n group_id=group_id,\n type_=group_type,\n buckets=buckets,\n )\n )", "def reset_group(node, suffix=\"_grp\"):\n # create transform group\n name = \"{}_{}\".format(node.rsplit(\"_\", 1)[0], suffix)\n reset_grp = cmds.createNode(\"transform\", name=name)\n cmds.parent(reset_grp, node)\n cmds.makeIdentity(reset_grp, translate=True, rotate=True, scale=True)\n\n # reparent under parent if any, else world\n parent = (cmds.listRelatives(node, parent=True) or [None])[0]\n if parent:\n cmds.parent(reset_grp, parent)\n else:\n cmds.parent(reset_grp, world=True)\n cmds.parent(node, reset_grp)\n\n # for joints, reset rotates and jointOrients\n if cmds.nodeType(node) == \"joint\":\n cmds.makeIdentity(node, jointOrient=True, rotate=True, apply=True)\n\n cmds.select(clear=True)\n\n return reset_grp", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def axial_correction_group(obj,\n to_parents_origin=False,\n name_prefix=\"\",\n name_postfix=\"_ACGroup#\"):\n obj = get_valid_dag_node(obj)\n\n if name_postfix == \"\":\n name_postfix = \"_ACGroup#\"\n\n ac_group = pm.group(\n em=True,\n n=(name_prefix + obj.name() + name_postfix)\n )\n\n ac_group = pm.parent(ac_group, obj)[0]\n\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 
1, 1])\n\n parent = pm.listRelatives(obj, p=True)\n if len(parent) != 0:\n pm.parent(ac_group, parent[0], a=True)\n else:\n pm.parent(ac_group, w=True)\n\n if to_parents_origin:\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n pm.parent(obj, ac_group, a=True)\n\n # for joints also set the joint orient to zero\n if isinstance(obj, pm.nodetypes.Joint):\n # set the joint rotation and joint orient to zero\n obj.setAttr('r', (0, 0, 0))\n obj.setAttr('jo', (0, 0, 0))\n\n return ac_group", "def _group_modify_id(group, id_modifier):\n\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))\n\n return group", "def process_object(self, new, old=None):\n new = super().process_object(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.modified_field, self.model.permissions_field)\n validate_from_bucket_schema_or_400(\n new,\n resource_name=\"group\",\n request=self.request,\n ignore_fields=internal_fields,\n id_field=self.model.id_field,\n )\n\n return new", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in original_group.members:\n # update 
the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def test_update_team_user_group(client):\n group = client.update_team_user_group(TEAM_ID, GROUP_ID, {\n \"name\": \"Updated Python group\",\n \"is_reviewer\": False,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert group.name == \"Updated Python group\"\n assert group.permissions['is_admin']\n assert not group.permissions['is_reviewer']", "def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response", "def group(ctx, project, group): # pylint:disable=redefined-outer-name\n ctx.obj = ctx.obj or {}\n ctx.obj['project'] = project\n ctx.obj['group'] = group", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "def test_patch_project_move_root(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n new_owner = self.make_user('new_owner')\n self.make_assignment(new_category, new_owner, self.role_owner)\n url = reverse(\n 
'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': ''}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 200, msg=response.content)", "def __upgrade_group(self, group_old: Group, group_new: str) -> None:\n def upgrade_permissions(permissions_list_1: list, permissions_list_2: list, action) -> list:\n permissions_to_change = [\n permission_change\n for permission_change in permissions_list_1\n if permission_change not in permissions_list_2\n ]\n return self.__upgrade_group_permissions(group_old, permissions_to_change, action)\n\n messages = [f'Group {group_new} permission changes']\n\n permissions_from_db = [p.codename for p in group_old.permissions.all()]\n permissions_from_file = main_app_groups[group_new]\n\n # in db but not in file -> remove\n messages += upgrade_permissions(permissions_from_db, permissions_from_file, REMOVE)\n # in file but not in db -> add\n messages += upgrade_permissions(permissions_from_file, permissions_from_db, ADD)\n\n if len(messages) > 1:\n self.__print_messages(messages)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def test_groups_group_id_state_put(self):\n pass", "def test_update_resource_group(self):\n pass", "def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. 
cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def update(ctx, name, description, tags):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n update_dict = {}\n\n if name:\n update_dict['name'] = name\n\n if description:\n update_dict['description'] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict['tags'] = tags\n\n if not update_dict:\n Printer.print_warning('No argument was provided to update the experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.update_experiment_group(\n user, project_name, _group, update_dict)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not update experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n Printer.print_success(\"Experiment group updated.\")\n get_group_details(response)", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def mutate(self, child):\n return child", "def group(self, val):\n self.set_property(\"Group\", val)", "def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_nodegroup(self.cluster_id, self.uuid, updates)\n\n self.obj_reset_changes()", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def modify_resource_group_with_options(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyResourceGroup',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyResourceGroupResponse(),\n self.call_api(params, req, runtime)\n )", "def test_editGroup(self):\n\t\tuser = 
User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tdata = {\n\t\t\t'name' : 'anotherTestGroup'\n\t\t}\n\n\t\tresponse = self.client.patch(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"name\"], 'anotherTestGroup')\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.patch(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def groupFormation(self):\n self.updatedPartnerNames()\n self.updatedPotentialPartnerNames()\n self.updatePartnerIdices()\n self.makeGraphDictionary()\n self.assignGroupIDs()", "def rename_group(self, old: str, new: str) -> Tuple[Optional['LedGroup'], str]:\n old_name: str = LedGroup.get_name(old)\n group: Optional['LedGroup'] = self.get_group_by_name(old_name)\n if group is None:\n return None, 'no_group'\n if new == \"\":\n return None, 'empty_group'\n unique: bool = not (new in self.get_group_list())\n if not unique:\n return None, \"group_exists\"\n group.Name = new\n check: Optional[LedGroup] = LedGroup.verify_led_group(group)\n if check is None:\n group.Name = old_name\n return None, \"wrong_group_name\"\n for seq in self.Sequencers:\n if seq.Group.lower() == old_name.lower():\n seq.Group = new\n return group, \"\"", "def test_patch_project_move(self):\n self.assertEqual(\n self.project.full_title,\n self.category.title + ' / ' + self.project.title,\n )\n\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n self.make_assignment(new_category, self.user_owner_cat, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n self.assertEqual(model_dict['parent'], new_category.pk)\n owners = [a.user for a in self.project.get_owners()]\n self.assertIn(self.user_owner_cat, owners)\n self.assertIn(self.user_owner, owners)\n\n # Assert child project full title update\n self.assertEqual(\n self.project.full_title,\n new_category.title + ' / ' + self.project.title,\n )\n self.assertEqual(\n json.loads(response.content)['parent'], str(new_category.sodar_uuid)\n )", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def _switch_group_right(group_id, action, value, workspace, request_user):\n group = group_api.get_group_by_id(group_id)\n\n if action == workspace_constants.ACTION_READ:\n if value:\n workspace_api.add_group_read_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_read_access_to_workspace(workspace, group, request_user)\n elif action == workspace_constants.ACTION_WRITE:\n if value:\n workspace_api.add_group_write_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_write_access_to_workspace(workspace, group, request_user)", "def _update_field(self, tag_id, value):\n if tag_id in self._group_fields:\n # start a new level, an individual group doesn't\n # exist since we haven't read any information in yet.\n self._level_stack.append({\n 'tag_id': tag_id,\n 'list': [],\n 'group': 
None,\n })\n\n elif len(self._level_stack) == 0:\n # We are at the top of the message\n if isinstance(value, bytes):\n self._message[tag_id] = value.decode()\n else:\n self._message[tag_id] = value\n\n elif tag_id in self._group_fields[self._level_stack[-1]['tag_id']]:\n # We are within a group and the field is in the list of tags\n # for this group\n level = self._level_stack[-1]\n group = level['group']\n if group is None or tag_id in group:\n # Create a new group if there is no current group\n # or if this key already exists within the group\n group = collections.OrderedDict()\n level['list'].append(group)\n level['group'] = group\n group[tag_id] = value\n\n else:\n # we are in a grouping, but we have a tag_id that doesn't\n # belong, so need to pop the stack off\n level = self._level_stack.pop() # this is the current level\n\n while len(self._level_stack) > 0:\n # Add the current group to it's parent grouping\n parent_level = self._level_stack[-1]\n parent_level['group'][level['tag_id']] = level['list']\n\n level = parent_level\n if tag_id in self._group_fields[level['tag_id']]:\n break\n self._level_stack.pop()\n\n if len(self._level_stack) == 0:\n self._message[level['tag_id']] = level['list']\n\n self._update_field(tag_id, value)", "def patch_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def patch(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('patch',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })", "def test_replace_group(self):\n pass", "def test_patch_resource_group(self):\n pass", "def add_to_group(self, group):\n\n if not self.in_group(group):\n 
self.secondary_groups.append(group)\n return self", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def upsert_group(self,\n group, # type: Group\n *options, # type: UpsertGroupOptions\n **kwargs # type: Any\n ):\n # This endpoint accepts application/x-www-form-urlencoded and requires the data be sent as form data.\n # The name/id should not be included in the form data.\n # Roles should be a comma separated list of strings.\n # If, only if, the role contains a bucket name then the rolename should be suffixed\n # with[<bucket_name>] e.g. bucket_full_access[default],security_admin.\n\n final_args = forward_args(kwargs, *options)\n final_args.update({k: v for k, v in group.as_dict.items() if k in {\n 'roles', 'description', 'ldap_group_reference'}})\n self._admin_bucket.group_upsert(group.name, **final_args)", "def _set_grouping(self, change) -> None:\n grouping = self._grouping_full\n self.options = self._flat_groupings(grouping)\n self.set_trait(\n \"_grouping_labels\",\n tuple(\n [\n (header, tuple([_[0] for _ in options]))\n for header, options in grouping\n ]\n ),\n )\n if not self._initializing_traits_:\n for index, option in enumerate(self._flat_groupings()):\n if (\n option not in self.disabled_options\n and option not in self._group_headers\n ):\n if self.index == index:\n self._notify_trait(\"index\", index, index)\n else:\n self.index = index\n break\n else:\n self.index = None", "def test_delete_group_reparent_vars(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_vars=True)\n assert 'management_bridge' in inventoryloader.groups['glance_all'].vars", "def add_to_group(self, org, contact, group):\n pass", "async def handle_set_group(self, match: Match[str], payload: str) -> None:\n groupid = match.group(1)\n\n try:\n group = self._bridge.groups[groupid]\n state = GroupSetState(**json.loads(payload))\n LOGGER.info(f\"Updating group {group.name}\")\n await group.set_action(**state.dict())\n except IndexError:\n LOGGER.warning(f\"Unknown group id: {groupid}\")\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")", "def UpdateGroupMembership(self, newMembers):\r\n globals.groupMembers[newMembers.targetGuid] = True #remove the target Sticky\r\n\r\n for guid in newMembers.guids[0]:\r\n globals.groupMembers[guid]=True\r\n\r\n group = Group()\r\n globals._groupNumber = globals._groupNumber+1\r\n group.groupID = globals._groupName + str(globals._groupNumber)\r\n group.targetSticky[\"guid\"] = newMembers.targetGuid\r\n group.targetSticky[\"desc\"] = newMembers.targetDesc\r\n group.targetSticky[\"head\"] = newMembers.targetHead #lplp1313 new value\r\n\r\n guidSims = tuple(zip(newMembers.guids[0], newMembers.descriptions[0], newMembers.headers[0], list(newMembers.cos_sims[0]))) #lplp1313 new value \r\n for g, d, h, c in guidSims:\r\n gs = GroupSticky()\r\n gs.guid=g\r\n gs.desc=d\r\n gs.head=h #lplp1313 new value\r\n gs.cosineVal=c\r\n group.groupStickies.append(gs)\r\n\r\n globals._jsonReply._groups.append(group)", "def 
set_parent(self, new_parent):\n self.__parent = new_parent", "def partial_update(self, request, *args, **kwargs):\n instance = Group.objects.get(pk=kwargs['pk'])\n\n if instance.owner_id != request.user.id and not request.user.is_superuser:\n return not_allowed_to_do()\n\n write_serializer = GroupWriteSerializer(\n partial=True,\n instance=instance,\n data=request.data,\n context={\"request\": request}\n )\n if write_serializer.is_valid():\n instance = write_serializer.update(instance, write_serializer.validated_data)\n read_serializer = GroupReadSerializer(instance)\n return Response(read_serializer.data, status.HTTP_200_OK)\n else:\n return Response(write_serializer.errors, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def setKind(self, *args):\n return _libsbml.Group_setKind(self, *args)", "def update(self):\r\n return self.connection._update_group('UpdateAutoScalingGroup', self)", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "def fusion_api_update_group_role_assignment(self, body, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.update(body, api, headers)", "def replaceChild(self, *args):\n return _libsbml.ASTNode_replaceChild(self, *args)", "def test_groups_group_ref_put(self):\n pass", "def update(self, consistencygroup, **kwargs):\n if not kwargs:\n return\n\n body = {\"consistencygroup\": kwargs}\n\n return self._update(\"/consistencygroups/%s\" %\n base.getid(consistencygroup), body)", "def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)", "def update_group(self, group_id, update_group_details, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")", "def adjust_parent(self, parent_adjustor: ParentRoleAdjuster):\n self.adjust_parent_aggregate(parent_adjustor=parent_adjustor,\n get_summed_field=lambda: 
getattr(parent_adjustor.child_logic_row.row, self._child_summed_field),\n get_old_summed_field=lambda: getattr(parent_adjustor.child_logic_row.old_row, self._child_summed_field)\n )", "def update_targetgroup(self, group_id, **kwargs):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).update(**kwargs)\r\n self._db.commit()\r\n return result", "def test_remove_self_as_parent(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n groupb.del_parent(groupb)", "def test_05_self_can_downgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. 
Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))", "def MutateAdGroupLabels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def replace_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = 
{n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def update_group(\n self,\n group,\n validate_only=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"update_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"update_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.update_group,\n default_retry=self._method_configs[\"UpdateGroup\"].retry,\n default_timeout=self._method_configs[\"UpdateGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.UpdateGroupRequest(\n group=group, validate_only=validate_only,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"group.name\", group.name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"update_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def reposition(self, x, y):\n self.groupx = x\n self.groupy = y\n self.call('reposition', x, y)", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def edit_group(request, group_id):\n group = None\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n return utility.edit_instance(request, models.UserGroup, forms.GroupEditForm,\n 'admin/edit_group',\n urlresolvers.reverse('views.admin.list_groups'),\n group_id, group=group)", "def put(self, id):\n adm = Administration()\n lg = 
LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def test_replace_groups(self):\n pass", "def replaceChild(self, *args):\n return _libsbml.ASTBasePlugin_replaceChild(self, *args)", "async def add_parent_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n stmt = select(Group).filter(Group.parent_id is None).options(selectinload(Group.parent))\n result = await dbsession.execute(stmt)\n stmt = select(func.count(Group.id)).filter(Group.parent_id is None)\n result_count = await dbsession.execute(stmt)\n with click.progressbar(\n result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n ) as progress:\n for group in progress:\n if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n categories = apply_aat(group.value, merge=False)\n if categories:\n for category_list in categories:\n mapped = False\n for category in category_list:\n stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if not parent_group:\n parent_group = Group(\n value=category, label=category[0].upper() + category[1:], split=\"parent\"\n )\n dbsession.add(group)\n group.parent = parent_group\n mapped = True\n group = parent_group # noqa: PLW2901\n if group.parent_id:\n break\n if mapped:\n break\n else:\n mapped = False\n for category in apply_nlp(group.value):\n stmt = select(Group).filter(\n or_(Group.value == category, Group.value == inflection.pluralize(category))\n )\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if parent_group:\n group.parent = parent_group\n await dbsession.commit()\n mapped = True\n break\n if not mapped:\n if group.value not in [\"styles and periods\"]:\n for category in apply_nlp(group.value):\n hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()", "def clone(self):\n return _libsbml.Group_clone(self)", "def capacitygroup_update(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_update(cmd_ctx, cpc, capacitygroup, options))", "def edit_group(request, name=None):\n if not request.user.is_superuser:\n raise PopupException(_(\"You must be a superuser to add or edit a group.\"), error_code=401)\n\n if name is not None:\n instance = Group.objects.get(name=name)\n else:\n instance = None\n\n if request.method == 'POST':\n form = GroupEditForm(request.POST, instance=instance)\n if form.is_valid():\n form.save()\n request.info(_('Group information updated'))\n return list_groups(request)\n\n else:\n form = GroupEditForm(instance=instance)\n\n return render('edit_group.mako', request, dict(form=form, action=request.path, name=name))", "def put_group(\n group_id: BSONObjectId,\n data: PutGroupIn,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n grp: Group = Group.objects.get(pk=group_id)\n if not (\n tkn.owner == grp.owner or 
has_clearance(tkn.owner, \"sni.update_group\")\n ):\n raise PermissionError\n logging.debug(\"Updating group %s (%s)\", grp.group_name, group_id)\n if data.add_members is not None:\n grp.members += [\n User.objects.get(character_name=member_name)\n for member_name in set(data.add_members)\n ]\n if data.authorized_to_login is not None:\n assert_has_clearance(tkn.owner, \"sni.set_authorized_to_login\")\n grp.authorized_to_login = data.authorized_to_login\n if data.description is not None:\n grp.description = data.description\n if data.members is not None:\n grp.members = [\n User.objects.get(character_name=member_name)\n for member_name in set(data.members)\n ]\n if data.owner is not None:\n grp.owner = User.objects.get(character_name=data.owner)\n if data.remove_members is not None:\n grp.members = [\n member\n for member in grp.members\n if member.character_name not in data.remove_members\n ]\n grp.members = list(set(grp.members + [grp.owner]))\n grp.save()\n return GetGroupOut.from_record(grp)", "def post_security_group_update(self, resource_id, resource_dict):\n pass" ]
[ "0.67553115", "0.61406356", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454543", "0.5235111", "0.5230351", "0.52265704", "0.5214914", "0.5211578", "0.5167291", "0.51348954", "0.51078886", "0.50968206", "0.508044", "0.50553834", "0.5047006", "0.5045846", "0.50361997", "0.50161546", "0.49979255", "0.49879062", "0.49850982", "0.49767378", "0.49551222", "0.49455002", "0.49402133", "0.49337864", "0.4927972", "0.49264747", "0.49236476", "0.4909741", "0.49097082", "0.4900177", "0.4900177", "0.4894824", "0.4884672", "0.48715976", "0.48469827", "0.48394856", "0.48353392", "0.4829131", "0.4821512", "0.48193535", "0.48189867", "0.48012626", "0.47981516", "0.4789781", "0.47839117", "0.4780934", "0.47805288", "0.4779988", "0.47796774", "0.47796145", "0.47776666", "0.47758597", "0.477075", "0.47692114", "0.47674754", "0.47673678", "0.47637057", "0.47603077", "0.4758207", "0.47574228", "0.47559536", "0.47313794", "0.4729889", "0.4727195", "0.47129786", "0.4707538", "0.46987382", "0.46972087", "0.468537", "0.46821442", "0.46779305", "0.46745872", "0.4674417", "0.46665904", "0.4665863", "0.46649307", "0.46626148", "0.4660908", "0.4660435", "0.4655853" ]
0.61605686
1
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):

    path = '/v1/courses/{course_id}/outcome_groups/{id}'
    payload = {
        'title' : title,
        'description' : description,
        'vendor_guid' : vendor_guid,
        'parent_outcome_group_id' : parent_outcome_group_id,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    response = client.put(request_ctx, url, payload=payload, **request_kwargs)

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def modify_resource_group(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_resource_group_with_options(request, runtime)", "def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)", "def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def ModifyGroup(self, group, reason=None, **kwargs):\n query = 
[]\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def edit_group_command(self):\n self.switch_frame(\"Edit Group\")\n id = self.parent.get_frame_id(\"Edit Group\")\n self.parent.frames[id].display_group(self.user.active_group)", "def test_update_group(self):\n pass", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')", "def _mod_group(self, command, group_id, group_type, buckets=None):\n self.datapath.send_msg(\n self.parser.OFPGroupMod(\n datapath=self.datapath,\n command=command,\n group_id=group_id,\n type_=group_type,\n buckets=buckets,\n )\n )", "def reset_group(node, suffix=\"_grp\"):\n # create transform group\n name = \"{}_{}\".format(node.rsplit(\"_\", 1)[0], suffix)\n reset_grp = cmds.createNode(\"transform\", name=name)\n cmds.parent(reset_grp, node)\n cmds.makeIdentity(reset_grp, translate=True, rotate=True, scale=True)\n\n # reparent under parent if any, else world\n parent = (cmds.listRelatives(node, parent=True) or [None])[0]\n if parent:\n cmds.parent(reset_grp, parent)\n else:\n cmds.parent(reset_grp, world=True)\n cmds.parent(node, reset_grp)\n\n # for joints, reset rotates and jointOrients\n if cmds.nodeType(node) == \"joint\":\n cmds.makeIdentity(node, jointOrient=True, rotate=True, apply=True)\n\n cmds.select(clear=True)\n\n return reset_grp", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def axial_correction_group(obj,\n to_parents_origin=False,\n name_prefix=\"\",\n name_postfix=\"_ACGroup#\"):\n obj = get_valid_dag_node(obj)\n\n if name_postfix == \"\":\n name_postfix = \"_ACGroup#\"\n\n ac_group = pm.group(\n em=True,\n n=(name_prefix + obj.name() + name_postfix)\n )\n\n ac_group = pm.parent(ac_group, obj)[0]\n\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + 
\".s\", [1, 1, 1])\n\n parent = pm.listRelatives(obj, p=True)\n if len(parent) != 0:\n pm.parent(ac_group, parent[0], a=True)\n else:\n pm.parent(ac_group, w=True)\n\n if to_parents_origin:\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n pm.parent(obj, ac_group, a=True)\n\n # for joints also set the joint orient to zero\n if isinstance(obj, pm.nodetypes.Joint):\n # set the joint rotation and joint orient to zero\n obj.setAttr('r', (0, 0, 0))\n obj.setAttr('jo', (0, 0, 0))\n\n return ac_group", "def _group_modify_id(group, id_modifier):\n\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))\n\n return group", "def process_object(self, new, old=None):\n new = super().process_object(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.modified_field, self.model.permissions_field)\n validate_from_bucket_schema_or_400(\n new,\n resource_name=\"group\",\n request=self.request,\n ignore_fields=internal_fields,\n id_field=self.model.id_field,\n )\n\n return new", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in 
original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def test_update_team_user_group(client):\n group = client.update_team_user_group(TEAM_ID, GROUP_ID, {\n \"name\": \"Updated Python group\",\n \"is_reviewer\": False,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert group.name == \"Updated Python group\"\n assert group.permissions['is_admin']\n assert not group.permissions['is_reviewer']", "def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response", "def group(ctx, project, group): # pylint:disable=redefined-outer-name\n ctx.obj = ctx.obj or {}\n ctx.obj['project'] = project\n ctx.obj['group'] = group", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "def test_patch_project_move_root(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n new_owner = self.make_user('new_owner')\n self.make_assignment(new_category, new_owner, 
self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': ''}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 200, msg=response.content)", "def __upgrade_group(self, group_old: Group, group_new: str) -> None:\n def upgrade_permissions(permissions_list_1: list, permissions_list_2: list, action) -> list:\n permissions_to_change = [\n permission_change\n for permission_change in permissions_list_1\n if permission_change not in permissions_list_2\n ]\n return self.__upgrade_group_permissions(group_old, permissions_to_change, action)\n\n messages = [f'Group {group_new} permission changes']\n\n permissions_from_db = [p.codename for p in group_old.permissions.all()]\n permissions_from_file = main_app_groups[group_new]\n\n # in db but not in file -> remove\n messages += upgrade_permissions(permissions_from_db, permissions_from_file, REMOVE)\n # in file but not in db -> add\n messages += upgrade_permissions(permissions_from_file, permissions_from_db, ADD)\n\n if len(messages) > 1:\n self.__print_messages(messages)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def test_groups_group_id_state_put(self):\n pass", "def test_update_resource_group(self):\n pass", "def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. 
cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def update(ctx, name, description, tags):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n update_dict = {}\n\n if name:\n update_dict['name'] = name\n\n if description:\n update_dict['description'] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict['tags'] = tags\n\n if not update_dict:\n Printer.print_warning('No argument was provided to update the experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.update_experiment_group(\n user, project_name, _group, update_dict)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not update experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n Printer.print_success(\"Experiment group updated.\")\n get_group_details(response)", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def mutate(self, child):\n return child", "def group(self, val):\n self.set_property(\"Group\", val)", "def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_nodegroup(self.cluster_id, self.uuid, updates)\n\n self.obj_reset_changes()", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def modify_resource_group_with_options(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyResourceGroup',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyResourceGroupResponse(),\n self.call_api(params, req, runtime)\n )", "def test_editGroup(self):\n\t\tuser = 
User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tdata = {\n\t\t\t'name' : 'anotherTestGroup'\n\t\t}\n\n\t\tresponse = self.client.patch(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"name\"], 'anotherTestGroup')\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.patch(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def groupFormation(self):\n self.updatedPartnerNames()\n self.updatedPotentialPartnerNames()\n self.updatePartnerIdices()\n self.makeGraphDictionary()\n self.assignGroupIDs()", "def rename_group(self, old: str, new: str) -> Tuple[Optional['LedGroup'], str]:\n old_name: str = LedGroup.get_name(old)\n group: Optional['LedGroup'] = self.get_group_by_name(old_name)\n if group is None:\n return None, 'no_group'\n if new == \"\":\n return None, 'empty_group'\n unique: bool = not (new in self.get_group_list())\n if not unique:\n return None, \"group_exists\"\n group.Name = new\n check: Optional[LedGroup] = LedGroup.verify_led_group(group)\n if check is None:\n group.Name = old_name\n return None, \"wrong_group_name\"\n for seq in self.Sequencers:\n if seq.Group.lower() == old_name.lower():\n seq.Group = new\n return group, \"\"", "def test_patch_project_move(self):\n self.assertEqual(\n self.project.full_title,\n self.category.title + ' / ' + self.project.title,\n )\n\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n self.make_assignment(new_category, self.user_owner_cat, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n self.assertEqual(model_dict['parent'], new_category.pk)\n owners = [a.user for a in self.project.get_owners()]\n self.assertIn(self.user_owner_cat, owners)\n self.assertIn(self.user_owner, owners)\n\n # Assert child project full title update\n self.assertEqual(\n self.project.full_title,\n new_category.title + ' / ' + self.project.title,\n )\n self.assertEqual(\n json.loads(response.content)['parent'], str(new_category.sodar_uuid)\n )", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def _switch_group_right(group_id, action, value, workspace, request_user):\n group = group_api.get_group_by_id(group_id)\n\n if action == workspace_constants.ACTION_READ:\n if value:\n workspace_api.add_group_read_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_read_access_to_workspace(workspace, group, request_user)\n elif action == workspace_constants.ACTION_WRITE:\n if value:\n workspace_api.add_group_write_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_write_access_to_workspace(workspace, group, request_user)", "def _update_field(self, tag_id, value):\n if tag_id in self._group_fields:\n # start a new level, an individual group doesn't\n # exist since we haven't read any information in yet.\n self._level_stack.append({\n 'tag_id': tag_id,\n 'list': [],\n 'group': 
None,\n })\n\n elif len(self._level_stack) == 0:\n # We are at the top of the message\n if isinstance(value, bytes):\n self._message[tag_id] = value.decode()\n else:\n self._message[tag_id] = value\n\n elif tag_id in self._group_fields[self._level_stack[-1]['tag_id']]:\n # We are within a group and the field is in the list of tags\n # for this group\n level = self._level_stack[-1]\n group = level['group']\n if group is None or tag_id in group:\n # Create a new group if there is no current group\n # or if this key already exists within the group\n group = collections.OrderedDict()\n level['list'].append(group)\n level['group'] = group\n group[tag_id] = value\n\n else:\n # we are in a grouping, but we have a tag_id that doesn't\n # belong, so need to pop the stack off\n level = self._level_stack.pop() # this is the current level\n\n while len(self._level_stack) > 0:\n # Add the current group to it's parent grouping\n parent_level = self._level_stack[-1]\n parent_level['group'][level['tag_id']] = level['list']\n\n level = parent_level\n if tag_id in self._group_fields[level['tag_id']]:\n break\n self._level_stack.pop()\n\n if len(self._level_stack) == 0:\n self._message[level['tag_id']] = level['list']\n\n self._update_field(tag_id, value)", "def patch_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def patch(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('patch',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })", "def test_replace_group(self):\n pass", "def test_patch_resource_group(self):\n pass", "def add_to_group(self, group):\n\n if not self.in_group(group):\n 
self.secondary_groups.append(group)\n return self", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def upsert_group(self,\n group, # type: Group\n *options, # type: UpsertGroupOptions\n **kwargs # type: Any\n ):\n # This endpoint accepts application/x-www-form-urlencoded and requires the data be sent as form data.\n # The name/id should not be included in the form data.\n # Roles should be a comma separated list of strings.\n # If, only if, the role contains a bucket name then the rolename should be suffixed\n # with[<bucket_name>] e.g. bucket_full_access[default],security_admin.\n\n final_args = forward_args(kwargs, *options)\n final_args.update({k: v for k, v in group.as_dict.items() if k in {\n 'roles', 'description', 'ldap_group_reference'}})\n self._admin_bucket.group_upsert(group.name, **final_args)", "def _set_grouping(self, change) -> None:\n grouping = self._grouping_full\n self.options = self._flat_groupings(grouping)\n self.set_trait(\n \"_grouping_labels\",\n tuple(\n [\n (header, tuple([_[0] for _ in options]))\n for header, options in grouping\n ]\n ),\n )\n if not self._initializing_traits_:\n for index, option in enumerate(self._flat_groupings()):\n if (\n option not in self.disabled_options\n and option not in self._group_headers\n ):\n if self.index == index:\n self._notify_trait(\"index\", index, index)\n else:\n self.index = index\n break\n else:\n self.index = None", "def test_delete_group_reparent_vars(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_vars=True)\n assert 'management_bridge' in inventoryloader.groups['glance_all'].vars", "def add_to_group(self, org, contact, group):\n pass", "async def handle_set_group(self, match: Match[str], payload: str) -> None:\n groupid = match.group(1)\n\n try:\n group = self._bridge.groups[groupid]\n state = GroupSetState(**json.loads(payload))\n LOGGER.info(f\"Updating group {group.name}\")\n await group.set_action(**state.dict())\n except IndexError:\n LOGGER.warning(f\"Unknown group id: {groupid}\")\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")", "def UpdateGroupMembership(self, newMembers):\r\n globals.groupMembers[newMembers.targetGuid] = True #remove the target Sticky\r\n\r\n for guid in newMembers.guids[0]:\r\n globals.groupMembers[guid]=True\r\n\r\n group = Group()\r\n globals._groupNumber = globals._groupNumber+1\r\n group.groupID = globals._groupName + str(globals._groupNumber)\r\n group.targetSticky[\"guid\"] = newMembers.targetGuid\r\n group.targetSticky[\"desc\"] = newMembers.targetDesc\r\n group.targetSticky[\"head\"] = newMembers.targetHead #lplp1313 new value\r\n\r\n guidSims = tuple(zip(newMembers.guids[0], newMembers.descriptions[0], newMembers.headers[0], list(newMembers.cos_sims[0]))) #lplp1313 new value \r\n for g, d, h, c in guidSims:\r\n gs = GroupSticky()\r\n gs.guid=g\r\n gs.desc=d\r\n gs.head=h #lplp1313 new value\r\n gs.cosineVal=c\r\n group.groupStickies.append(gs)\r\n\r\n globals._jsonReply._groups.append(group)", "def 
set_parent(self, new_parent):\n self.__parent = new_parent", "def partial_update(self, request, *args, **kwargs):\n instance = Group.objects.get(pk=kwargs['pk'])\n\n if instance.owner_id != request.user.id and not request.user.is_superuser:\n return not_allowed_to_do()\n\n write_serializer = GroupWriteSerializer(\n partial=True,\n instance=instance,\n data=request.data,\n context={\"request\": request}\n )\n if write_serializer.is_valid():\n instance = write_serializer.update(instance, write_serializer.validated_data)\n read_serializer = GroupReadSerializer(instance)\n return Response(read_serializer.data, status.HTTP_200_OK)\n else:\n return Response(write_serializer.errors, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def setKind(self, *args):\n return _libsbml.Group_setKind(self, *args)", "def update(self):\r\n return self.connection._update_group('UpdateAutoScalingGroup', self)", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "def fusion_api_update_group_role_assignment(self, body, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.update(body, api, headers)", "def replaceChild(self, *args):\n return _libsbml.ASTNode_replaceChild(self, *args)", "def test_groups_group_ref_put(self):\n pass", "def update(self, consistencygroup, **kwargs):\n if not kwargs:\n return\n\n body = {\"consistencygroup\": kwargs}\n\n return self._update(\"/consistencygroups/%s\" %\n base.getid(consistencygroup), body)", "def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)", "def update_group(self, group_id, update_group_details, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")", "def adjust_parent(self, parent_adjustor: ParentRoleAdjuster):\n self.adjust_parent_aggregate(parent_adjustor=parent_adjustor,\n get_summed_field=lambda: 
getattr(parent_adjustor.child_logic_row.row, self._child_summed_field),\n get_old_summed_field=lambda: getattr(parent_adjustor.child_logic_row.old_row, self._child_summed_field)\n )", "def update_targetgroup(self, group_id, **kwargs):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).update(**kwargs)\r\n self._db.commit()\r\n return result", "def test_remove_self_as_parent(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n groupb.del_parent(groupb)", "def test_05_self_can_downgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. 
Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))", "def MutateAdGroupLabels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def replace_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = 
{n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def update_group(\n self,\n group,\n validate_only=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"update_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"update_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.update_group,\n default_retry=self._method_configs[\"UpdateGroup\"].retry,\n default_timeout=self._method_configs[\"UpdateGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.UpdateGroupRequest(\n group=group, validate_only=validate_only,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"group.name\", group.name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"update_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def reposition(self, x, y):\n self.groupx = x\n self.groupy = y\n self.call('reposition', x, y)", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def edit_group(request, group_id):\n group = None\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n return utility.edit_instance(request, models.UserGroup, forms.GroupEditForm,\n 'admin/edit_group',\n urlresolvers.reverse('views.admin.list_groups'),\n group_id, group=group)", "def put(self, id):\n adm = Administration()\n lg = 
LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def test_replace_groups(self):\n pass", "def replaceChild(self, *args):\n return _libsbml.ASTBasePlugin_replaceChild(self, *args)", "async def add_parent_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n stmt = select(Group).filter(Group.parent_id is None).options(selectinload(Group.parent))\n result = await dbsession.execute(stmt)\n stmt = select(func.count(Group.id)).filter(Group.parent_id is None)\n result_count = await dbsession.execute(stmt)\n with click.progressbar(\n result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n ) as progress:\n for group in progress:\n if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n categories = apply_aat(group.value, merge=False)\n if categories:\n for category_list in categories:\n mapped = False\n for category in category_list:\n stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if not parent_group:\n parent_group = Group(\n value=category, label=category[0].upper() + category[1:], split=\"parent\"\n )\n dbsession.add(group)\n group.parent = parent_group\n mapped = True\n group = parent_group # noqa: PLW2901\n if group.parent_id:\n break\n if mapped:\n break\n else:\n mapped = False\n for category in apply_nlp(group.value):\n stmt = select(Group).filter(\n or_(Group.value == category, Group.value == inflection.pluralize(category))\n )\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if parent_group:\n group.parent = parent_group\n await dbsession.commit()\n mapped = True\n break\n if not mapped:\n if group.value not in [\"styles and periods\"]:\n for category in apply_nlp(group.value):\n hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()", "def clone(self):\n return _libsbml.Group_clone(self)", "def capacitygroup_update(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_update(cmd_ctx, cpc, capacitygroup, options))", "def edit_group(request, name=None):\n if not request.user.is_superuser:\n raise PopupException(_(\"You must be a superuser to add or edit a group.\"), error_code=401)\n\n if name is not None:\n instance = Group.objects.get(name=name)\n else:\n instance = None\n\n if request.method == 'POST':\n form = GroupEditForm(request.POST, instance=instance)\n if form.is_valid():\n form.save()\n request.info(_('Group information updated'))\n return list_groups(request)\n\n else:\n form = GroupEditForm(instance=instance)\n\n return render('edit_group.mako', request, dict(form=form, action=request.path, name=name))", "def put_group(\n group_id: BSONObjectId,\n data: PutGroupIn,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n grp: Group = Group.objects.get(pk=group_id)\n if not (\n tkn.owner == grp.owner or 
has_clearance(tkn.owner, \"sni.update_group\")\n ):\n raise PermissionError\n logging.debug(\"Updating group %s (%s)\", grp.group_name, group_id)\n if data.add_members is not None:\n grp.members += [\n User.objects.get(character_name=member_name)\n for member_name in set(data.add_members)\n ]\n if data.authorized_to_login is not None:\n assert_has_clearance(tkn.owner, \"sni.set_authorized_to_login\")\n grp.authorized_to_login = data.authorized_to_login\n if data.description is not None:\n grp.description = data.description\n if data.members is not None:\n grp.members = [\n User.objects.get(character_name=member_name)\n for member_name in set(data.members)\n ]\n if data.owner is not None:\n grp.owner = User.objects.get(character_name=data.owner)\n if data.remove_members is not None:\n grp.members = [\n member\n for member in grp.members\n if member.character_name not in data.remove_members\n ]\n grp.members = list(set(grp.members + [grp.owner]))\n grp.save()\n return GetGroupOut.from_record(grp)", "def post_security_group_update(self, resource_id, resource_dict):\n pass" ]
[ "0.67553115", "0.61605686", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454543", "0.5235111", "0.5230351", "0.52265704", "0.5214914", "0.5211578", "0.5167291", "0.51348954", "0.51078886", "0.50968206", "0.508044", "0.50553834", "0.5047006", "0.5045846", "0.50361997", "0.50161546", "0.49979255", "0.49879062", "0.49850982", "0.49767378", "0.49551222", "0.49455002", "0.49402133", "0.49337864", "0.4927972", "0.49264747", "0.49236476", "0.4909741", "0.49097082", "0.4900177", "0.4900177", "0.4894824", "0.4884672", "0.48715976", "0.48469827", "0.48394856", "0.48353392", "0.4829131", "0.4821512", "0.48193535", "0.48189867", "0.48012626", "0.47981516", "0.4789781", "0.47839117", "0.4780934", "0.47805288", "0.4779988", "0.47796774", "0.47796145", "0.47776666", "0.47758597", "0.477075", "0.47692114", "0.47674754", "0.47673678", "0.47637057", "0.47603077", "0.4758207", "0.47574228", "0.47559536", "0.47313794", "0.4729889", "0.4727195", "0.47129786", "0.4707538", "0.46987382", "0.46972087", "0.468537", "0.46821442", "0.46779305", "0.46745872", "0.4674417", "0.46665904", "0.4665863", "0.46649307", "0.46626148", "0.4660908", "0.4660435", "0.4655853" ]
0.61406356
2
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_global(request_ctx, id, **request_kwargs):\n\n    path = '/v1/global/outcome_groups/{id}'\n    url = request_ctx.base_api_url + path.format(id=id)\n    response = client.delete(request_ctx, url, **request_kwargs)\n\n    return response
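A minimal usage sketch for the helper above, assuming (not confirmed by the source) a requests-style `client` wrapper and a `request_ctx` object carrying `base_api_url` plus auth, as in the sibling account/course helpers among the negatives below:

    # Hypothetical call: delete global outcome group 42.
    # Per the query text, deletion cascades to descendant groups and links,
    # and fails if an aligned outcome would lose its last remaining link.
    response = delete_outcome_group_global(request_ctx, 42)
    print(response.status_code)  # 200 on success, assuming a requests-like response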
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def test_groups_group_ref_delete(self):\n pass", "def delete_outcome_group_courses(request_ctx, course_id, id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def delete_target_groups(ctx):\n 
self.delete_target_groups()\n ctx.info('Deleted target groups for the load balancer {}:'.format(self.get_balancer_name()))", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_groups(self):\n pass", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def do_del_group(dbsync, group):\n pass", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n 
assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch", "def delete_group(dispatcher, log, trans_id, group, force):\n\n def check_and_delete(_group, state):\n if state.desired == 0:\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n return d.addCallback(lambda _: state)\n else:\n raise GroupNotEmptyError(group.tenant_id, group.uuid)\n\n if tenant_is_enabled(group.tenant_id, config_value):\n if force:\n # We don't care about servers in the group. So trigger deletion\n # since it will take precedence over other status\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n else:\n # Delete only if desired is 0 which must be done with a lock to\n # ensure desired is not getting modified by another thread/node\n # when executing policy\n d = group.modify_state(\n check_and_delete,\n modify_state_reason='delete_group')\n else:\n if force:\n d = empty_group(log, trans_id, group)\n d.addCallback(lambda _: group.delete_group())\n else:\n d = group.delete_group()\n return d", "def test_delete_link_no_resources(self):\n g = groups.get_by_name(\"fifth group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you want to remove group {0} (id={1})\".format(g.name, g.id), alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def test_api_v1_groups_id_delete(self):\n pass", "def test_delete_membership_works_for_group_admins(self):\n get_response = lambda: self.client.delete(self.url)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n self.assert_group_admin_rights_required(get_response)\n\n # bob is group admin, but he cannot delete himself:\n self.login_as(\"bob\")\n with self.assertNumQueries(3):\n self.assert_not_authorized(get_response())\n\n self.assertEqual(Membership.objects.filter(\n community_id=self.GROUP_ID, user_id=self.USER_ID).count(), 1)\n\n # bob is group admin, he can delete regular group members:\n USER = \"alice\"\n USER_ID = self.USERS[USER][\"id\"]\n url = reverse(\n 'communities:membership-detail',\n kwargs={\n 'community_id': self.GROUP_ID,\n 'user_id': USER_ID,\n }\n )\n with self.assertNumQueries(5):\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(response.data, None)\n\n self.assertEqual(Membership.objects.filter(\n community_id=self.GROUP_ID, user_id=USER_ID).count(), 0)", "def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)", "def test_products_ref_groups_delete(self):\n pass", "def delete_group(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.delete_group\")\n grp: Group = Group.objects.get(pk=group_id)\n logging.debug(\"Deleting group %s (%s)\", grp.group_name, group_id)\n grp.delete()", "def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n 
try:\n instance.participants_group.delete(using=using)\n except ObjectDoesNotExist:\n pass", "def delete_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n\n for short_name in target_groups_config.keys():\n if not self.target_group_exists(short_name):\n self.logger.info('Target group {} does not exists, nothing to delete.'.format(\n self.get_target_group_name(short_name)\n ))\n continue\n\n response = self.client.delete_target_group(\n TargetGroupArn=self.get_target_group_arn(short_name)\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n self.logger.info('Target group {} deleted.'.format(self.get_target_group_name(short_name)))", "def test_delete_link_resources(self):\n g = groups.get_by_name(\"First Group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n self.assertEquals('Delete Group', self.wd.title)\n \n self.submit_form(\"delete_form\")\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you wish to permanently delete this group and specified resources?\", alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def delete_group(self, group_o):\n class_query = ClassQuery('fvTenant')\n class_query.propFilter = 'eq(fvTenant.name, \"' + group_o.name + '\")'\n tenant_list = self.moDir.query(class_query)\n if len(tenant_list) > 0:\n tenant_list[0].delete()\n self.commit(tenant_list[0])", "def test_delete_collection_group(self):\n pass", "def test_delete_team_user_group(client):\n resp = client.delete_team_user_group(TEAM_ID, NEW_GROUP_ID)\n assert resp['team_id'] == TEAM_ID\n assert resp['group_deleted']", "def delete(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('DELETE', url)", "def delete_agent(self, group_name, id, quite=True):\n self._out.append(('_simulation', 0.5, (group_name, id, quite)))", "def test_delete_resource_group(self):\n pass", "def delete_all_groups(self):\n DELETED = 204\n for group in self.get_list_groups():\n codes = [\n self.delete_all_group_member(group[\"id\"]).status_code,\n self.delete_group(group[\"id\"]).status_code\n ]\n\n res = filter(lambda a: a != DELETED, codes)\n if res:\n return res[0]\n\n return DELETED", "def disconnect_whole_group(self, id_group:int) -> bool:\n try:\n self.cursor.execute(f\"DELETE FROM {table_groups} WHERE id={id_group};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We found problems with deletion of the whole group from the {table_groups} in database. 
Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def delete_group(self, group_name):\r\n params = {'GroupName' : group_name}\r\n return self.get_response('DeleteGroup', params)", "def delete_placement_group(self, name):\r\n params = {'GroupName':name}\r\n return self.get_status('DeletePlacementGroup', params, verb='POST')", "def test_delete_entry_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_groups(id, group_id, topic_id)", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def delete_participant_groups_by_participant_group_ids(\n self,\n participant_group_ids: List[str] = None,\n ) -> None:\n if not participant_group_ids:\n return None\n\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n\n participant_group_ids_block = \"\"\n if participant_group_ids:\n task_run_ids_str = \",\".join([f'\"{pgi}\"' for pgi in participant_group_ids])\n participant_group_ids_block = (\n f\"AND prolific_participant_group_id IN ({task_run_ids_str})\"\n )\n\n c.execute(\n f\"\"\"\n DELETE FROM participant_groups\n WHERE {participant_group_ids_block};\n \"\"\"\n )\n return None", "def test_TC_44383_DELETE_Groups_Id(self, context):\n # Define a test step\n with pytest.allure.step(\"\"\"First create group using request POST /groups.\"\"\"):\n # Test case configuration\n edgeDeviceGroupDetails = context.sc.EdgeDeviceGroupDetails(\n configAdminCanEdit=True,\n configurations=[],\n deliveryLoadBalancePolicy='PROXIMITY_MATCHES',\n dnsName='10.1.25.46',\n edgeDeviceRoles=['EDGE', 'ORIGIN', 'DISTRIBUTION'],\n id='GroupD1',\n members=[{\n 'id': 'POST_veDevices_AllConfigAdminMulticastTrue'\n }],\n name='GroupD1',\n originLoadBalancePolicy='DNS_NAME',\n provisioningPolicy='ALL_MEMBERS',\n proximityDetails=None,\n visibleInAllConfigurations=True)\n\n # createEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n response = check(\n context.cl.Groups.createEntity(\n body=edgeDeviceGroupDetails\n )\n )\n\n\n # Define a test step\n with pytest.allure.step(\"\"\"Now verify that user is able to delete the group on providing 'Id' parameter using request DELETE /groups{id}.\"\"\"):\n\n # deleteEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n check(\n context.cl.Groups.deleteEntity(\n id='GroupD1'\n )\n )", "def test_delete_group_reparent_hosts(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_hosts=True)\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.hosts['localhost'].has_group('glance_all')", "def delete(ctx):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n\n if not click.confirm(\"Are sure you want to delete experiment group `{}`\".format(_group)):\n click.echo('Existing without deleting experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.delete_experiment_group(\n user, project_name, _group)\n # Purge caching\n GroupManager.purge()\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not delete experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.status_code == 
204:\n Printer.print_success(\"Experiment group `{}` was delete successfully\".format(_group))", "def on_groups_deleted(event):\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_objects:\n group = change[\"old\"]\n bucket_id = event.payload[\"bucket_id\"]\n group_uri = utils.instance_uri(event.request, \"group\", bucket_id=bucket_id, id=group[\"id\"])\n\n permission_backend.remove_principal(group_uri)", "async def delete_group(ctx, group_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to delete that group. Nerd.\")\n else:\n owner = ctx.message.author.name\n\n if bg_bot.manager.remove_group(owner, group_name):\n response = f'{group_name} successfully removed from {owner} groups!'\n else:\n response = f'Error in removing {group_name} from {owner} groups!'\n \n await ctx.send(response)", "def test_groups_group_users_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_delete_group_reparent_vars(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_vars=True)\n assert 'management_bridge' in inventoryloader.groups['glance_all'].vars", "def test_user_group_controller_delete(self):\n pass", "def drop_groups(self, group_ids=None):\n return self.groups.delete(group_ids)", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. 
But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def delete_TestGroup(test_case, override_group_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n headers = override_headers if override_headers is not null else test_case.json_headers\n cookies = override_cookies if override_cookies is not null else test_case.cookies\n groups = TestSetup.get_RegisteredGroupsList(test_case, override_headers=headers, override_cookies=cookies)\n group_name = override_group_name if override_group_name is not null else test_case.test_group_name\n # delete as required, skip if non-existing\n if group_name in groups:\n path = \"/groups/{grp}\".format(grp=group_name)\n resp = test_request(app_or_url, \"DELETE\", path, headers=headers, cookies=cookies)\n check_response_basic_info(resp, 200, expected_method=\"DELETE\")\n TestSetup.check_NonExistingTestGroup(test_case, override_group_name=group_name,\n override_headers=headers, override_cookies=cookies)", "def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers)", "def test_delete_group_by_id(self):\n # Create a user with 2 groups\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Delete one of those groups\n resp = self.app.delete('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 200\n\n # Verify that the group is gone\n resp = self.app.get('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 404\n\n # Verify that the user's groups don't have that group listed\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_group1_groupid not in data['groups']", "def test_delete_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_groups(group_id, topic_id)", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def test_delete_community_works_for_group_admins(self):\n get_response = lambda: self.client.delete(self.url)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n 
self.assert_group_admin_rights_required(get_response)\n\n        self.assertEqual(Community.objects.filter(name=self.GROUP).count(), 1)\n\n        # bob is group admin, he can delete the group:\n        self.login_as(\"bob\")\n        with self.assertNumQueries(9):\n            # (5) select quizzes (6) del members (7) del chat (8) del tournaments (9) del com \n            response = get_response()\n        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n        self.assertEqual(response.data, None)\n\n        self.assertEqual(Community.objects.filter(name=self.GROUP).count(), 0)", "def test_ipam_vlan_groups_delete(self):\n        pass", "def fusion_api_del_role_from_group(self, domain=None, group=None, api=None, headers=None):\n        return self.roles.del_role_from_group(domain, group, api=api, headers=headers)", "def test_remove_parent(self):\n        groupa, groupb = self.test_add_parent()\n        groupa.del_parent(groupb)\n        assert groupb not in groupa.parents\n        assert groupa not in groupb.children", "def remove_inv_group(**kwargs):\n    proxy = kwargs['proxy']\n    sessiontoken = kwargs['sessiontoken']\n    gw = kwargs['gateway']\n    group_id = kwargs['objectname']\n    json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n    if json_response_status_code == 200:\n        print(\"The group \" + group_id + \" has been deleted\")\n    else:\n        print(\"Something went wrong - please check your syntax and try again.\")", "def delete_group(id, createdby):\n    query = \"DELETE FROM groups WHERE group_id = {} AND createdby ='{}'\".format(id, createdby)\n    cur.execute(query)", "def product_group_delete(obj, name):\n    client = get_client(obj)\n\n    with Action('Deleting product_group: {}'.format(name), nl=True):\n        pgs = client.product_group_list(name)\n\n        client.product_group_delete(pgs[0]['uri'])", "def delete(self,\n               provider_id,\n               group_id,\n               ):\n        return self._invoke('delete',\n                            {\n                            'provider_id': provider_id,\n                            'group_id': group_id,\n                            })", "def deleteGroup(request):\n    \n    if request.method == 'POST':\n        \n        form = DeleteGroupForm(request.POST)\n        \n        if form.is_valid():\n            \n            cd = form.cleaned_data\n            \n            try:\n                \n                #Delete records from m2m of Users & Groups for selected groups\n                for eachGroup in cd['group_id']:\n                    Group_User.objects.filter(group = eachGroup.id).delete()\n                \n                #Delete Group(s)\n                for eachGroup in cd['group_id']:\n                    Group.objects.filter(id = eachGroup.id).delete()\n            \n            except:\n                \n                error = 'Unable to Delete Groups!'\n                return render_to_response('deletegroup.html', \n                                          {'form': form, 'error': error},\n                                          context_instance=RequestContext(request))\n            \n            return HttpResponseRedirect('/deletegroup/success/')\n        \n        else:\n            \n            return render_to_response('deletegroup.html',\n                                      {'form': form}, \n                                      context_instance=RequestContext(request)) \n    \n    else:\n        \n        form = DeleteGroupForm()\n        \n        return render_to_response('deletegroup.html', \n                                  {'form': form}, \n                                  context_instance=RequestContext(request))", "def after_delete(self, record):\n        debug = logging.getLogger(__name__).debug\n        debug('deleted group %r (%r)', record['name'], record['group_id'])\n        audit('delete group', record['name'])", "def delete_all_group_member(self, group_id):\n        url = self.groups_url + \"/%s/members\" % group_id\n        return requests.delete(url, headers=self.headers)", "def allowed_group_access_delete(user, group):\n    try:\n        up = user.get_profile()\n    except AttributeError:\n        return False\n\n    return (user.has_perm(\"vnswww.group_delete_any\")\n            or (user.has_perm(\"vnswww.group_delete_org\")\n                and group.org == up.org))", "def test_delete(self):\n        self.assertTrue(self.run_function(\"group.add\", [self._group]))\n\n        # correct functionality\n        self.assertTrue(self.run_function(\"group.delete\", [self._group]))\n\n        # group does not exist\n        self.assertFalse(self.run_function(\"group.delete\", [self._no_group]))", "def customer_group_delete(group_id):\n  result = {\"success\" : 1, \"message\" : \"Customer can not be Deleted\"}\n  db_conn = DB_Conn()\n  db = db_conn.db_connect()\n  \n  #clean up the user id\n  group_id = db.escape_string(group_id)\n  \n  query = \"\"\"\n    DELETE FROM `groups`\n    WHERE `groups`.`group_id` = \"%s\"\n  \"\"\" %(group_id)\n  cursor = db.cursor()\n  try:\n    if (cursor.execute(query)) != 0:\n      db.commit()\n      result = {\"success\" : 0, \"message\" : \"Customer Group Deleted Successfully\"}\n  except Exception as customer_exp:\n    result = {\"success\" : 1, \"message\" : \"Customer Group can not be Deleted \" + str(e)}\n  finally:\n    cursor.close()\n    db.close()\n  return result", "def remove_from_targetgroup(self, target_id, group_id):\r\n        target_row = self._db(self._db.target.id==target_id).select().first()\r\n        group_row = self._db(self._db.targetgroup.id==group_id\r\n                             ).select().first()\r\n        result = False\r\n        if target_row is not None and group_row is not None:\r\n            result = True\r\n            targets_j = group_row.targets\r\n\r\n            if not targets_j:\r\n                targets_j = json.dumps([target_id])\r\n            else:\r\n                tmp = json.loads(targets_j)\r\n                tmp.remove(target_id)\r\n                targets_j = json.dumps(tmp)\r\n\r\n            self._db(self._db.targetgroup.id==group_id\r\n                     ).update(targets=targets_j)\r\n            self._db.commit()\r\n        return result", "def test_groups_group_users_user_delete(self):\n        pass", "def test_groups_group_users_user_delete(self):\n        pass", "def delete_namespaced_group(self, body, name, **kwargs):\n\n        all_params = ['body', 'name', 'pretty']\n        all_params.append('callback')\n\n        params = locals()\n        for key, val in iteritems(params['kwargs']):\n            if key not in all_params:\n                raise TypeError(\n                    \"Got an unexpected keyword argument '%s'\"\n                    \" to method delete_namespaced_group\" % key\n                )\n            params[key] = val\n        del params['kwargs']\n\n        # verify the required parameter 'body' is set\n        if ('body' not in params) or (params['body'] is None):\n            raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_group`\")\n        # verify the required parameter 'name' is set\n        if ('name' not in params) or (params['name'] is None):\n            raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_group`\")\n\n        resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n        path_params = {}\n        if 'name' in params:\n            path_params['name'] = params['name']\n\n        query_params = {}\n        if 'pretty' in params:\n            query_params['pretty'] = params['pretty']\n\n        header_params = {}\n\n        form_params = []\n        local_var_files = {}\n\n        body_params = None\n        if 'body' in params:\n            body_params = params['body']\n\n        # HTTP header `Accept`\n        header_params['Accept'] = self.api_client.\\\n            select_header_accept(['application/json', 'application/yaml'])\n        if not header_params['Accept']:\n            del header_params['Accept']\n\n        # HTTP header `Content-Type`\n        header_params['Content-Type'] = self.api_client.\\\n            select_header_content_type(['*/*'])\n\n        # Authentication setting\n        auth_settings = []\n\n        response = self.api_client.call_api(resource_path, 'DELETE',\n                                            path_params,\n                                            query_params,\n                                            header_params,\n                                            body=body_params,\n                                            post_params=form_params,\n                                            files=local_var_files,\n                                            response_type='UnversionedStatus',\n                                            auth_settings=auth_settings,\n                                            callback=params.get('callback'))\n        return response", "def remove_group(self, resolvable):\n        group = self._resolve_group(resolvable)\n\n        for membership in self.group_memberships:\n            if membership.group.href == group.href:\n                membership.delete()\n                return\n\n        raise StormpathError({\n            'developerMessage': 'This user is not part of Group %s.' % group.name,\n        })", "def test_050_delete_user_from_group(self):\n\n        testflow.step(\n            \"Removing user %s from group %s\", TEST_USER1, TEST_GROUP1\n        )\n        assert MANAGE_CLI.run(\n            'userdel',\n            TEST_GROUP1,\n            user=TEST_USER1\n        )[0], \"Failed to remove user from group '%s'\" % TEST_GROUP1\n\n        testflow.step(RMV_GRP_MSG, TEST_GROUP1)\n        assert not MANAGE_CLI.run(\n            'userdel',\n            TEST_GROUP1,\n            user='nonsense'\n        )[0], \"Possible to remove nonexisting user from group\"\n\n        testflow.step(\"Removing user %s from nonexistent group\", TEST_GROUP1)\n        assert not MANAGE_CLI.run(\n            'userdel',\n            'nonsense',\n            user=TEST_USER1\n        )[0], \"Possible to remove user from nonexisting group\"", "def capacitygroup_delete(cmd_ctx, cpc, capacitygroup):\n    cmd_ctx.execute_cmd(\n        lambda: cmd_capacitygroup_delete(cmd_ctx, cpc, capacitygroup))", "def remove_from_group(self, group):\n\n        if self.in_group(group):\n            self.secondary_groups.remove(group)\n            return self", "def remove_from_group(self, org, contact, group):\n        pass", "def test_remove_learner_group_specific_for_coach_pt1(self):\n        self.assertTrue(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[0]))", "def delete_group(self, group_id, **kwargs):\n        resource_path = \"/groups/{groupId}\"\n        method = \"DELETE\"\n\n        # Don't accept unknown kwargs\n        expected_kwargs = [\n            \"retry_strategy\",\n            \"if_match\"\n        ]\n        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n        if extra_kwargs:\n            raise ValueError(\n                \"delete_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n        path_params = {\n            \"groupId\": group_id\n        }\n\n        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n        for (k, v) in six.iteritems(path_params):\n            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n        header_params = {\n            \"accept\": \"application/json\",\n            \"content-type\": \"application/json\",\n            \"if-match\": kwargs.get(\"if_match\", missing)\n        }\n        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n        retry_strategy = self.retry_strategy\n        if kwargs.get('retry_strategy'):\n            retry_strategy = kwargs.get('retry_strategy')\n\n        if retry_strategy:\n            return retry_strategy.make_retrying_call(\n                self.base_client.call_api,\n                resource_path=resource_path,\n                method=method,\n                path_params=path_params,\n                header_params=header_params)\n        else:\n            return self.base_client.call_api(\n                resource_path=resource_path,\n                method=method,\n                path_params=path_params,\n                header_params=header_params)", "def delete_salary_group(db:Session):\n    pass", "def delete_vm_group(session, cluster, vm_group):\n    client_factory = session.vim.client.factory\n    group_spec = client_factory.create('ns0:ClusterGroupSpec')\n    groups = []\n\n    group_spec.info = vm_group\n    group_spec.operation = \"remove\"\n    group_spec.removeKey = vm_group.name\n    groups.append(group_spec)\n\n    config_spec = client_factory.create('ns0:ClusterConfigSpecEx')\n    config_spec.groupSpec = groups\n    reconfigure_cluster(session, cluster, config_spec)", "def test_remove_learner_group_specific_for_coach_pt2(self):\n        self.assertFalse(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[1]))" ]
[ "0.66492325", "0.6541948", "0.6461525", "0.6348538", "0.633461", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541", "0.588854", "0.58635855", "0.5855789", "0.58181447", "0.5795915", "0.57424855", "0.5739038", "0.57293683", "0.57169527", "0.57094395", "0.5706176", "0.5673936", "0.5642542", "0.5629306", "0.5621884", "0.5619029", "0.56167084", "0.5609237", "0.5596568", "0.55946803", "0.5547348", "0.5515882", "0.55131626", "0.55027825", "0.5501512", "0.54915875", "0.54880553", "0.5485858", "0.547825", "0.5473315", "0.5452238", "0.5419276", "0.53967416", "0.53943926", "0.53861785", "0.53841305", "0.5383583", "0.53597087", "0.53597087", "0.53470075", "0.53394866", "0.5334645", "0.53235066", "0.53165925", "0.5310999", "0.5300821", "0.53008074", "0.529749", "0.5295983", "0.52796894", "0.5277352", "0.5267231", "0.5267039", "0.5263943", "0.5247316", "0.5230748", "0.52220625", "0.520562", "0.5202912", "0.5199656", "0.51958704", "0.5194566", "0.51864624", "0.5185525", "0.5179963", "0.51632285", "0.51458204", "0.51458204", "0.5137142", "0.5129982", "0.5122124", "0.5117913", "0.51152146", "0.5114596", "0.5108533", "0.5105518", "0.5100354", "0.5096249", "0.5089743" ]
0.662239
1
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):\n\n    path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n    response = client.delete(request_ctx, url, **request_kwargs)\n\n    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_global(request_ctx, id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def test_groups_group_ref_delete(self):\n pass", "def delete_outcome_group_courses(request_ctx, course_id, id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the load 
balancer {}:'.format(self.get_balancer_name()))", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_groups(self):\n pass", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def do_del_group(dbsync, group):\n pass", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n 
assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch", "def delete_group(dispatcher, log, trans_id, group, force):\n\n def check_and_delete(_group, state):\n if state.desired == 0:\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n return d.addCallback(lambda _: state)\n else:\n raise GroupNotEmptyError(group.tenant_id, group.uuid)\n\n if tenant_is_enabled(group.tenant_id, config_value):\n if force:\n # We don't care about servers in the group. So trigger deletion\n # since it will take precedence over other status\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n else:\n # Delete only if desired is 0 which must be done with a lock to\n # ensure desired is not getting modified by another thread/node\n # when executing policy\n d = group.modify_state(\n check_and_delete,\n modify_state_reason='delete_group')\n else:\n if force:\n d = empty_group(log, trans_id, group)\n d.addCallback(lambda _: group.delete_group())\n else:\n d = group.delete_group()\n return d", "def test_delete_link_no_resources(self):\n g = groups.get_by_name(\"fifth group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you want to remove group {0} (id={1})\".format(g.name, g.id), alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def test_api_v1_groups_id_delete(self):\n pass", "def test_delete_membership_works_for_group_admins(self):\n get_response = lambda: self.client.delete(self.url)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n self.assert_group_admin_rights_required(get_response)\n\n # bob is group admin, but he cannot delete himself:\n self.login_as(\"bob\")\n with self.assertNumQueries(3):\n self.assert_not_authorized(get_response())\n\n self.assertEqual(Membership.objects.filter(\n community_id=self.GROUP_ID, user_id=self.USER_ID).count(), 1)\n\n # bob is group admin, he can delete regular group members:\n USER = \"alice\"\n USER_ID = self.USERS[USER][\"id\"]\n url = reverse(\n 'communities:membership-detail',\n kwargs={\n 'community_id': self.GROUP_ID,\n 'user_id': USER_ID,\n }\n )\n with self.assertNumQueries(5):\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(response.data, None)\n\n self.assertEqual(Membership.objects.filter(\n community_id=self.GROUP_ID, user_id=USER_ID).count(), 0)", "def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)", "def test_products_ref_groups_delete(self):\n pass", "def delete_group(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.delete_group\")\n grp: Group = Group.objects.get(pk=group_id)\n logging.debug(\"Deleting group %s (%s)\", grp.group_name, group_id)\n grp.delete()", "def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.participants_group.delete(using=using)\n except 
ObjectDoesNotExist:\n pass", "def delete_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n\n for short_name in target_groups_config.keys():\n if not self.target_group_exists(short_name):\n self.logger.info('Target group {} does not exists, nothing to delete.'.format(\n self.get_target_group_name(short_name)\n ))\n continue\n\n response = self.client.delete_target_group(\n TargetGroupArn=self.get_target_group_arn(short_name)\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n self.logger.info('Target group {} deleted.'.format(self.get_target_group_name(short_name)))", "def test_delete_link_resources(self):\n g = groups.get_by_name(\"First Group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n self.assertEquals('Delete Group', self.wd.title)\n \n self.submit_form(\"delete_form\")\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you wish to permanently delete this group and specified resources?\", alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def delete_group(self, group_o):\n class_query = ClassQuery('fvTenant')\n class_query.propFilter = 'eq(fvTenant.name, \"' + group_o.name + '\")'\n tenant_list = self.moDir.query(class_query)\n if len(tenant_list) > 0:\n tenant_list[0].delete()\n self.commit(tenant_list[0])", "def test_delete_collection_group(self):\n pass", "def test_delete_team_user_group(client):\n resp = client.delete_team_user_group(TEAM_ID, NEW_GROUP_ID)\n assert resp['team_id'] == TEAM_ID\n assert resp['group_deleted']", "def delete(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('DELETE', url)", "def delete_agent(self, group_name, id, quite=True):\n self._out.append(('_simulation', 0.5, (group_name, id, quite)))", "def test_delete_resource_group(self):\n pass", "def delete_all_groups(self):\n DELETED = 204\n for group in self.get_list_groups():\n codes = [\n self.delete_all_group_member(group[\"id\"]).status_code,\n self.delete_group(group[\"id\"]).status_code\n ]\n\n res = filter(lambda a: a != DELETED, codes)\n if res:\n return res[0]\n\n return DELETED", "def disconnect_whole_group(self, id_group:int) -> bool:\n try:\n self.cursor.execute(f\"DELETE FROM {table_groups} WHERE id={id_group};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We found problems with deletion of the whole group from the {table_groups} in database. 
Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def delete_group(self, group_name):\r\n params = {'GroupName' : group_name}\r\n return self.get_response('DeleteGroup', params)", "def delete_placement_group(self, name):\r\n params = {'GroupName':name}\r\n return self.get_status('DeletePlacementGroup', params, verb='POST')", "def test_delete_entry_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_groups(id, group_id, topic_id)", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def delete_participant_groups_by_participant_group_ids(\n self,\n participant_group_ids: List[str] = None,\n ) -> None:\n if not participant_group_ids:\n return None\n\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n\n participant_group_ids_block = \"\"\n if participant_group_ids:\n task_run_ids_str = \",\".join([f'\"{pgi}\"' for pgi in participant_group_ids])\n participant_group_ids_block = (\n f\"AND prolific_participant_group_id IN ({task_run_ids_str})\"\n )\n\n c.execute(\n f\"\"\"\n DELETE FROM participant_groups\n WHERE {participant_group_ids_block};\n \"\"\"\n )\n return None", "def test_TC_44383_DELETE_Groups_Id(self, context):\n # Define a test step\n with pytest.allure.step(\"\"\"First create group using request POST /groups.\"\"\"):\n # Test case configuration\n edgeDeviceGroupDetails = context.sc.EdgeDeviceGroupDetails(\n configAdminCanEdit=True,\n configurations=[],\n deliveryLoadBalancePolicy='PROXIMITY_MATCHES',\n dnsName='10.1.25.46',\n edgeDeviceRoles=['EDGE', 'ORIGIN', 'DISTRIBUTION'],\n id='GroupD1',\n members=[{\n 'id': 'POST_veDevices_AllConfigAdminMulticastTrue'\n }],\n name='GroupD1',\n originLoadBalancePolicy='DNS_NAME',\n provisioningPolicy='ALL_MEMBERS',\n proximityDetails=None,\n visibleInAllConfigurations=True)\n\n # createEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n response = check(\n context.cl.Groups.createEntity(\n body=edgeDeviceGroupDetails\n )\n )\n\n\n # Define a test step\n with pytest.allure.step(\"\"\"Now verify that user is able to delete the group on providing 'Id' parameter using request DELETE /groups{id}.\"\"\"):\n\n # deleteEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n check(\n context.cl.Groups.deleteEntity(\n id='GroupD1'\n )\n )", "def test_delete_group_reparent_hosts(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_hosts=True)\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.hosts['localhost'].has_group('glance_all')", "def delete(ctx):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n\n if not click.confirm(\"Are sure you want to delete experiment group `{}`\".format(_group)):\n click.echo('Existing without deleting experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.delete_experiment_group(\n user, project_name, _group)\n # Purge caching\n GroupManager.purge()\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not delete experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.status_code == 
204:\n Printer.print_success(\"Experiment group `{}` was delete successfully\".format(_group))", "def on_groups_deleted(event):\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_objects:\n group = change[\"old\"]\n bucket_id = event.payload[\"bucket_id\"]\n group_uri = utils.instance_uri(event.request, \"group\", bucket_id=bucket_id, id=group[\"id\"])\n\n permission_backend.remove_principal(group_uri)", "async def delete_group(ctx, group_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to delete that group. Nerd.\")\n else:\n owner = ctx.message.author.name\n\n if bg_bot.manager.remove_group(owner, group_name):\n response = f'{group_name} successfully removed from {owner} groups!'\n else:\n response = f'Error in removing {group_name} from {owner} groups!'\n \n await ctx.send(response)", "def test_groups_group_users_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_delete_group_reparent_vars(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_vars=True)\n assert 'management_bridge' in inventoryloader.groups['glance_all'].vars", "def test_user_group_controller_delete(self):\n pass", "def drop_groups(self, group_ids=None):\n return self.groups.delete(group_ids)", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. 
But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def delete_TestGroup(test_case, override_group_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n headers = override_headers if override_headers is not null else test_case.json_headers\n cookies = override_cookies if override_cookies is not null else test_case.cookies\n groups = TestSetup.get_RegisteredGroupsList(test_case, override_headers=headers, override_cookies=cookies)\n group_name = override_group_name if override_group_name is not null else test_case.test_group_name\n # delete as required, skip if non-existing\n if group_name in groups:\n path = \"/groups/{grp}\".format(grp=group_name)\n resp = test_request(app_or_url, \"DELETE\", path, headers=headers, cookies=cookies)\n check_response_basic_info(resp, 200, expected_method=\"DELETE\")\n TestSetup.check_NonExistingTestGroup(test_case, override_group_name=group_name,\n override_headers=headers, override_cookies=cookies)", "def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers)", "def test_delete_group_by_id(self):\n # Create a user with 2 groups\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Delete one of those groups\n resp = self.app.delete('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 200\n\n # Verify that the group is gone\n resp = self.app.get('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 404\n\n # Verify that the user's groups don't have that group listed\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_group1_groupid not in data['groups']", "def test_delete_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_groups(group_id, topic_id)", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def test_delete_community_works_for_group_admins(self):\n get_response = lambda: self.client.delete(self.url)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n 
self.assert_group_admin_rights_required(get_response)\n\n self.assertEqual(Community.objects.filter(name=self.GROUP).count(), 1)\n\n # bob is group admin, he can delete the group:\n self.login_as(\"bob\")\n with self.assertNumQueries(9):\n # (5) select quizzes (6) del members (7) del chat (8) del tournaments (9) del com \n response = get_response()\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(response.data, None)\n\n self.assertEqual(Community.objects.filter(name=self.GROUP).count(), 0)", "def test_ipam_vlan_groups_delete(self):\n pass", "def fusion_api_del_role_from_group(self, domain=None, group=None, api=None, headers=None):\n return self.roles.del_role_from_group(domain, group, api=api, headers=headers)", "def test_remove_parent(self):\n groupa, groupb = self.test_add_parent()\n groupa.del_parent(groupb)\n assert groupb not in groupa.parents\n assert groupa not in groupb.children", "def remove_inv_group(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n gw = kwargs['gateway']\n group_id = kwargs['objectname']\n json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n if json_response_status_code == 200:\n print(\"The group \" + group_id + \" has been deleted\")\n else:\n print(\"Something went wrong - please check your syntax and try again.\")", "def delete_group(id, createdby):\n query = \"DELETE FROM groups WHERE group_id = {} AND createdby ='{}'\".format(id, createdby)\n cur.execute(query)", "def product_group_delete(obj, name):\n client = get_client(obj)\n\n with Action('Deleting product_group: {}'.format(name), nl=True):\n pgs = client.product_group_list(name)\n\n client.product_group_delete(pgs[0]['uri'])", "def delete(self,\n provider_id,\n group_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n })", "def deleteGroup(request):\n \n if request.method == 'POST':\n \n form = DeleteGroupForm(request.POST)\n \n if form.is_valid():\n \n cd = form.cleaned_data\n \n try:\n \n #Delete records from m2m of Users & Groups for selected groups\n for eachGroup in cd['group_id']:\n Group_User.objects.filter(group = eachGroup.id).delete()\n \n #Delete Group(s)\n for eachGroup in cd['group_id']:\n Group.objects.filter(id = eachGroup.id).delete()\n \n except:\n \n error = 'Unable to Delete Groups!'\n return render_to_response('deletegroup.html', \n {'form': form, 'error': error},\n context_instance=RequestContext(request))\n \n return HttpResponseRedirect('/deletegroup/success/')\n \n else:\n \n return render_to_response('deletegroup.html',\n {'form': form}, \n context_instance=RequestContext(request)) \n \n else:\n \n form = DeleteGroupForm()\n \n return render_to_response('deletegroup.html', \n {'form': form}, \n context_instance=RequestContext(request))", "def after_delete(self, record):\n debug = logging.getLogger(__name__).debug\n debug('deleted group %r (%r)', record['name'], record['group_id'])\n audit('delete group', record['name'])", "def delete_all_group_member(self, group_id):\n url = self.groups_url + \"/%s/members\" % group_id\n return requests.delete(url, headers=self.headers)", "def allowed_group_access_delete(user, group):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return (user.has_perm(\"vnswww.group_delete_any\")\n or (user.has_perm(\"vnswww.group_delete_org\")\n and group.org == up.org))", "def test_delete(self):\n self.assertTrue(self.run_function(\"group.add\", 
[self._group]))\n\n # correct functionality\n self.assertTrue(self.run_function(\"group.delete\", [self._group]))\n\n # group does not exist\n self.assertFalse(self.run_function(\"group.delete\", [self._no_group]))", "def customer_group_delete(group_id):\n result = {\"success\" : 1, \"message\" : \"Customer can not be Deleted\"}\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n \n #clean up the user id\n group_id = db.escape_string(group_id)\n \n query = \"\"\"\n DELETE FROM `groups`\n WHERE `groups`.`group_id` = \"%s\"\n \"\"\" %(group_id)\n cursor = db.cursor()\n try:\n if (cursor.execute(query)) != 0:\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Group Deleted Successfully\"}\n except Exception as customer_exp:\n result = {\"success\" : 1, \"message\" : \"Customer Group can not be Deleted \" + str(e)}\n finally:\n cursor.close()\n db.close()\n return result", "def remove_from_targetgroup(self, target_id, group_id):\r\n target_row = self._db(self._db.target.id==target_id).select().first()\r\n group_row = self._db(self._db.targetgroup.id==group_id\r\n ).select().first()\r\n result = False\r\n if target_row is not None and group_row is not None:\r\n result = True\r\n targets_j = group_row.targets\r\n\r\n if not targets_j:\r\n targets_j = json.dumps([target_id])\r\n else:\r\n tmp = json.loads(targets_j)\r\n tmp.remove(target_id)\r\n targets_j = json.dumps(tmp)\r\n\r\n self._db(self._db.targetgroup.id==group_id\r\n ).update(targets=targets_j)\r\n self._db.commit()\r\n return result", "def test_groups_group_users_user_delete(self):\n pass", "def test_groups_group_users_user_delete(self):\n pass", "def delete_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def remove_group(self, 
resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' % group.name,\n })", "def test_050_delete_user_from_group(self):\n\n testflow.step(\n \"Removing user %s from group %s\", TEST_USER1, TEST_GROUP1\n )\n assert MANAGE_CLI.run(\n 'userdel',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to remove user from group '%s'\" % TEST_GROUP1\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'userdel',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to remove nonexisting user from group\"\n\n testflow.step(\"Removing user %s from nonexistent group\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'userdel',\n 'nonsense',\n user=TEST_USER1\n )[0], \"Possible to remove user from nonexisting group\"", "def capacitygroup_delete(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_delete(cmd_ctx, cpc, capacitygroup))", "def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n return self", "def remove_from_group(self, org, contact, group):\n pass", "def test_remove_learner_group_specific_for_coach_pt1(self):\n self.assertTrue(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[0]))", "def delete_group(self, group_id, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def delete_salary_group(db:Session):\n pass", "def delete_vm_group(session, cluster, vm_group):\n client_factory = session.vim.client.factory\n group_spec = client_factory.create('ns0:ClusterGroupSpec')\n groups = []\n\n group_spec.info = vm_group\n group_spec.operation = \"remove\"\n group_spec.removeKey = vm_group.name\n groups.append(group_spec)\n\n config_spec = client_factory.create('ns0:ClusterConfigSpecEx')\n config_spec.groupSpec = groups\n reconfigure_cluster(session, cluster, config_spec)", "def test_remove_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_learner_group', 
self.learner_groups[1]))" ]
[ "0.662239", "0.6541948", "0.6461525", "0.6348538", "0.633461", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541", "0.588854", "0.58635855", "0.5855789", "0.58181447", "0.5795915", "0.57424855", "0.5739038", "0.57293683", "0.57169527", "0.57094395", "0.5706176", "0.5673936", "0.5642542", "0.5629306", "0.5621884", "0.5619029", "0.56167084", "0.5609237", "0.5596568", "0.55946803", "0.5547348", "0.5515882", "0.55131626", "0.55027825", "0.5501512", "0.54915875", "0.54880553", "0.5485858", "0.547825", "0.5473315", "0.5452238", "0.5419276", "0.53967416", "0.53943926", "0.53861785", "0.53841305", "0.5383583", "0.53597087", "0.53597087", "0.53470075", "0.53394866", "0.5334645", "0.53235066", "0.53165925", "0.5310999", "0.5300821", "0.53008074", "0.529749", "0.5295983", "0.52796894", "0.5277352", "0.5267231", "0.5267039", "0.5263943", "0.5247316", "0.5230748", "0.52220625", "0.520562", "0.5202912", "0.5199656", "0.51958704", "0.5194566", "0.51864624", "0.5185525", "0.5179963", "0.51632285", "0.51458204", "0.51458204", "0.5137142", "0.5129982", "0.5122124", "0.5117913", "0.51152146", "0.5114596", "0.5108533", "0.5105518", "0.5100354", "0.5096249", "0.5089743" ]
0.66492325
0
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_courses(request_ctx, course_id, id, **request_kwargs):\n\n    path = '/v1/courses/{course_id}/outcome_groups/{id}'\n    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n    response = client.delete(request_ctx, url, **request_kwargs)\n\n    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def delete_outcome_group_global(request_ctx, id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def test_groups_group_ref_delete(self):\n pass", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the 
load balancer {}:'.format(self.get_balancer_name()))", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_groups(self):\n pass", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def do_del_group(dbsync, group):\n pass", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n 
assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch", "def delete_group(dispatcher, log, trans_id, group, force):\n\n def check_and_delete(_group, state):\n if state.desired == 0:\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n return d.addCallback(lambda _: state)\n else:\n raise GroupNotEmptyError(group.tenant_id, group.uuid)\n\n if tenant_is_enabled(group.tenant_id, config_value):\n if force:\n # We don't care about servers in the group. So trigger deletion\n # since it will take precedence over other status\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n else:\n # Delete only if desired is 0 which must be done with a lock to\n # ensure desired is not getting modified by another thread/node\n # when executing policy\n d = group.modify_state(\n check_and_delete,\n modify_state_reason='delete_group')\n else:\n if force:\n d = empty_group(log, trans_id, group)\n d.addCallback(lambda _: group.delete_group())\n else:\n d = group.delete_group()\n return d", "def test_delete_link_no_resources(self):\n g = groups.get_by_name(\"fifth group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you want to remove group {0} (id={1})\".format(g.name, g.id), alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def test_api_v1_groups_id_delete(self):\n pass", "def test_delete_membership_works_for_group_admins(self):\n get_response = lambda: self.client.delete(self.url)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n self.assert_group_admin_rights_required(get_response)\n\n # bob is group admin, but he cannot delete himself:\n self.login_as(\"bob\")\n with self.assertNumQueries(3):\n self.assert_not_authorized(get_response())\n\n self.assertEqual(Membership.objects.filter(\n community_id=self.GROUP_ID, user_id=self.USER_ID).count(), 1)\n\n # bob is group admin, he can delete regular group members:\n USER = \"alice\"\n USER_ID = self.USERS[USER][\"id\"]\n url = reverse(\n 'communities:membership-detail',\n kwargs={\n 'community_id': self.GROUP_ID,\n 'user_id': USER_ID,\n }\n )\n with self.assertNumQueries(5):\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(response.data, None)\n\n self.assertEqual(Membership.objects.filter(\n community_id=self.GROUP_ID, user_id=USER_ID).count(), 0)", "def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)", "def test_products_ref_groups_delete(self):\n pass", "def delete_group(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.delete_group\")\n grp: Group = Group.objects.get(pk=group_id)\n logging.debug(\"Deleting group %s (%s)\", grp.group_name, group_id)\n grp.delete()", "def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.participants_group.delete(using=using)\n except 
ObjectDoesNotExist:\n pass", "def delete_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n\n for short_name in target_groups_config.keys():\n if not self.target_group_exists(short_name):\n self.logger.info('Target group {} does not exists, nothing to delete.'.format(\n self.get_target_group_name(short_name)\n ))\n continue\n\n response = self.client.delete_target_group(\n TargetGroupArn=self.get_target_group_arn(short_name)\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n self.logger.info('Target group {} deleted.'.format(self.get_target_group_name(short_name)))", "def test_delete_link_resources(self):\n g = groups.get_by_name(\"First Group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n self.assertEquals('Delete Group', self.wd.title)\n \n self.submit_form(\"delete_form\")\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you wish to permanently delete this group and specified resources?\", alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def delete_group(self, group_o):\n class_query = ClassQuery('fvTenant')\n class_query.propFilter = 'eq(fvTenant.name, \"' + group_o.name + '\")'\n tenant_list = self.moDir.query(class_query)\n if len(tenant_list) > 0:\n tenant_list[0].delete()\n self.commit(tenant_list[0])", "def test_delete_collection_group(self):\n pass", "def test_delete_team_user_group(client):\n resp = client.delete_team_user_group(TEAM_ID, NEW_GROUP_ID)\n assert resp['team_id'] == TEAM_ID\n assert resp['group_deleted']", "def delete(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('DELETE', url)", "def delete_agent(self, group_name, id, quite=True):\n self._out.append(('_simulation', 0.5, (group_name, id, quite)))", "def test_delete_resource_group(self):\n pass", "def delete_all_groups(self):\n DELETED = 204\n for group in self.get_list_groups():\n codes = [\n self.delete_all_group_member(group[\"id\"]).status_code,\n self.delete_group(group[\"id\"]).status_code\n ]\n\n res = filter(lambda a: a != DELETED, codes)\n if res:\n return res[0]\n\n return DELETED", "def disconnect_whole_group(self, id_group:int) -> bool:\n try:\n self.cursor.execute(f\"DELETE FROM {table_groups} WHERE id={id_group};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We found problems with deletion of the whole group from the {table_groups} in database. 
Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def delete_group(self, group_name):\r\n params = {'GroupName' : group_name}\r\n return self.get_response('DeleteGroup', params)", "def delete_placement_group(self, name):\r\n params = {'GroupName':name}\r\n return self.get_status('DeletePlacementGroup', params, verb='POST')", "def test_delete_entry_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_groups(id, group_id, topic_id)", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def delete_participant_groups_by_participant_group_ids(\n self,\n participant_group_ids: List[str] = None,\n ) -> None:\n if not participant_group_ids:\n return None\n\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n\n participant_group_ids_block = \"\"\n if participant_group_ids:\n task_run_ids_str = \",\".join([f'\"{pgi}\"' for pgi in participant_group_ids])\n participant_group_ids_block = (\n f\"AND prolific_participant_group_id IN ({task_run_ids_str})\"\n )\n\n c.execute(\n f\"\"\"\n DELETE FROM participant_groups\n WHERE {participant_group_ids_block};\n \"\"\"\n )\n return None", "def test_TC_44383_DELETE_Groups_Id(self, context):\n # Define a test step\n with pytest.allure.step(\"\"\"First create group using request POST /groups.\"\"\"):\n # Test case configuration\n edgeDeviceGroupDetails = context.sc.EdgeDeviceGroupDetails(\n configAdminCanEdit=True,\n configurations=[],\n deliveryLoadBalancePolicy='PROXIMITY_MATCHES',\n dnsName='10.1.25.46',\n edgeDeviceRoles=['EDGE', 'ORIGIN', 'DISTRIBUTION'],\n id='GroupD1',\n members=[{\n 'id': 'POST_veDevices_AllConfigAdminMulticastTrue'\n }],\n name='GroupD1',\n originLoadBalancePolicy='DNS_NAME',\n provisioningPolicy='ALL_MEMBERS',\n proximityDetails=None,\n visibleInAllConfigurations=True)\n\n # createEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n response = check(\n context.cl.Groups.createEntity(\n body=edgeDeviceGroupDetails\n )\n )\n\n\n # Define a test step\n with pytest.allure.step(\"\"\"Now verify that user is able to delete the group on providing 'Id' parameter using request DELETE /groups{id}.\"\"\"):\n\n # deleteEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n check(\n context.cl.Groups.deleteEntity(\n id='GroupD1'\n )\n )", "def test_delete_group_reparent_hosts(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_hosts=True)\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.hosts['localhost'].has_group('glance_all')", "def delete(ctx):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n\n if not click.confirm(\"Are sure you want to delete experiment group `{}`\".format(_group)):\n click.echo('Existing without deleting experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.delete_experiment_group(\n user, project_name, _group)\n # Purge caching\n GroupManager.purge()\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not delete experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.status_code == 
204:\n Printer.print_success(\"Experiment group `{}` was delete successfully\".format(_group))", "def on_groups_deleted(event):\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_objects:\n group = change[\"old\"]\n bucket_id = event.payload[\"bucket_id\"]\n group_uri = utils.instance_uri(event.request, \"group\", bucket_id=bucket_id, id=group[\"id\"])\n\n permission_backend.remove_principal(group_uri)", "async def delete_group(ctx, group_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to delete that group. Nerd.\")\n else:\n owner = ctx.message.author.name\n\n if bg_bot.manager.remove_group(owner, group_name):\n response = f'{group_name} successfully removed from {owner} groups!'\n else:\n response = f'Error in removing {group_name} from {owner} groups!'\n \n await ctx.send(response)", "def test_groups_group_users_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_delete_group_reparent_vars(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_vars=True)\n assert 'management_bridge' in inventoryloader.groups['glance_all'].vars", "def test_user_group_controller_delete(self):\n pass", "def drop_groups(self, group_ids=None):\n return self.groups.delete(group_ids)", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. 
But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def delete_TestGroup(test_case, override_group_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n headers = override_headers if override_headers is not null else test_case.json_headers\n cookies = override_cookies if override_cookies is not null else test_case.cookies\n groups = TestSetup.get_RegisteredGroupsList(test_case, override_headers=headers, override_cookies=cookies)\n group_name = override_group_name if override_group_name is not null else test_case.test_group_name\n # delete as required, skip if non-existing\n if group_name in groups:\n path = \"/groups/{grp}\".format(grp=group_name)\n resp = test_request(app_or_url, \"DELETE\", path, headers=headers, cookies=cookies)\n check_response_basic_info(resp, 200, expected_method=\"DELETE\")\n TestSetup.check_NonExistingTestGroup(test_case, override_group_name=group_name,\n override_headers=headers, override_cookies=cookies)", "def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers)", "def test_delete_group_by_id(self):\n # Create a user with 2 groups\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Delete one of those groups\n resp = self.app.delete('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 200\n\n # Verify that the group is gone\n resp = self.app.get('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 404\n\n # Verify that the user's groups don't have that group listed\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_group1_groupid not in data['groups']", "def test_delete_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_groups(group_id, topic_id)", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def test_delete_community_works_for_group_admins(self):\n get_response = lambda: self.client.delete(self.url)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n 
self.assert_group_admin_rights_required(get_response)\n\n self.assertEqual(Community.objects.filter(name=self.GROUP).count(), 1)\n\n # bob is group admin, he can delete the group:\n self.login_as(\"bob\")\n with self.assertNumQueries(9):\n # (5) select quizzes (6) del members (7) del chat (8) del tournaments (9) del com \n response = get_response()\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(response.data, None)\n\n self.assertEqual(Community.objects.filter(name=self.GROUP).count(), 0)", "def test_ipam_vlan_groups_delete(self):\n pass", "def fusion_api_del_role_from_group(self, domain=None, group=None, api=None, headers=None):\n return self.roles.del_role_from_group(domain, group, api=api, headers=headers)", "def test_remove_parent(self):\n groupa, groupb = self.test_add_parent()\n groupa.del_parent(groupb)\n assert groupb not in groupa.parents\n assert groupa not in groupb.children", "def remove_inv_group(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n gw = kwargs['gateway']\n group_id = kwargs['objectname']\n json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n if json_response_status_code == 200:\n print(\"The group \" + group_id + \" has been deleted\")\n else:\n print(\"Something went wrong - please check your syntax and try again.\")", "def delete_group(id, createdby):\n query = \"DELETE FROM groups WHERE group_id = {} AND createdby ='{}'\".format(id, createdby)\n cur.execute(query)", "def product_group_delete(obj, name):\n client = get_client(obj)\n\n with Action('Deleting product_group: {}'.format(name), nl=True):\n pgs = client.product_group_list(name)\n\n client.product_group_delete(pgs[0]['uri'])", "def delete(self,\n provider_id,\n group_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n })", "def deleteGroup(request):\n \n if request.method == 'POST':\n \n form = DeleteGroupForm(request.POST)\n \n if form.is_valid():\n \n cd = form.cleaned_data\n \n try:\n \n #Delete records from m2m of Users & Groups for selected groups\n for eachGroup in cd['group_id']:\n Group_User.objects.filter(group = eachGroup.id).delete()\n \n #Delete Group(s)\n for eachGroup in cd['group_id']:\n Group.objects.filter(id = eachGroup.id).delete()\n \n except:\n \n error = 'Unable to Delete Groups!'\n return render_to_response('deletegroup.html', \n {'form': form, 'error': error},\n context_instance=RequestContext(request))\n \n return HttpResponseRedirect('/deletegroup/success/')\n \n else:\n \n return render_to_response('deletegroup.html',\n {'form': form}, \n context_instance=RequestContext(request)) \n \n else:\n \n form = DeleteGroupForm()\n \n return render_to_response('deletegroup.html', \n {'form': form}, \n context_instance=RequestContext(request))", "def after_delete(self, record):\n debug = logging.getLogger(__name__).debug\n debug('deleted group %r (%r)', record['name'], record['group_id'])\n audit('delete group', record['name'])", "def delete_all_group_member(self, group_id):\n url = self.groups_url + \"/%s/members\" % group_id\n return requests.delete(url, headers=self.headers)", "def allowed_group_access_delete(user, group):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return (user.has_perm(\"vnswww.group_delete_any\")\n or (user.has_perm(\"vnswww.group_delete_org\")\n and group.org == up.org))", "def test_delete(self):\n self.assertTrue(self.run_function(\"group.add\", 
[self._group]))\n\n # correct functionality\n self.assertTrue(self.run_function(\"group.delete\", [self._group]))\n\n # group does not exist\n self.assertFalse(self.run_function(\"group.delete\", [self._no_group]))", "def customer_group_delete(group_id):\n result = {\"success\" : 1, \"message\" : \"Customer can not be Deleted\"}\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n \n #clean up the user id\n group_id = db.escape_string(group_id)\n \n query = \"\"\"\n DELETE FROM `groups`\n WHERE `groups`.`group_id` = \"%s\"\n \"\"\" %(group_id)\n cursor = db.cursor()\n try:\n if (cursor.execute(query)) != 0:\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Group Deleted Successfully\"}\n except Exception as customer_exp:\n result = {\"success\" : 1, \"message\" : \"Customer Group can not be Deleted \" + str(e)}\n finally:\n cursor.close()\n db.close()\n return result", "def remove_from_targetgroup(self, target_id, group_id):\r\n target_row = self._db(self._db.target.id==target_id).select().first()\r\n group_row = self._db(self._db.targetgroup.id==group_id\r\n ).select().first()\r\n result = False\r\n if target_row is not None and group_row is not None:\r\n result = True\r\n targets_j = group_row.targets\r\n\r\n if not targets_j:\r\n targets_j = json.dumps([target_id])\r\n else:\r\n tmp = json.loads(targets_j)\r\n tmp.remove(target_id)\r\n targets_j = json.dumps(tmp)\r\n\r\n self._db(self._db.targetgroup.id==group_id\r\n ).update(targets=targets_j)\r\n self._db.commit()\r\n return result", "def test_groups_group_users_user_delete(self):\n pass", "def test_groups_group_users_user_delete(self):\n pass", "def delete_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def remove_group(self, 
resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' % group.name,\n })", "def test_050_delete_user_from_group(self):\n\n testflow.step(\n \"Removing user %s from group %s\", TEST_USER1, TEST_GROUP1\n )\n assert MANAGE_CLI.run(\n 'userdel',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to remove user from group '%s'\" % TEST_GROUP1\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'userdel',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to remove nonexisting user from group\"\n\n testflow.step(\"Removing user %s from nonexistent group\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'userdel',\n 'nonsense',\n user=TEST_USER1\n )[0], \"Possible to remove user from nonexisting group\"", "def capacitygroup_delete(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_delete(cmd_ctx, cpc, capacitygroup))", "def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n return self", "def remove_from_group(self, org, contact, group):\n pass", "def test_remove_learner_group_specific_for_coach_pt1(self):\n self.assertTrue(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[0]))", "def delete_group(self, group_id, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def delete_salary_group(db:Session):\n pass", "def delete_vm_group(session, cluster, vm_group):\n client_factory = session.vim.client.factory\n group_spec = client_factory.create('ns0:ClusterGroupSpec')\n groups = []\n\n group_spec.info = vm_group\n group_spec.operation = \"remove\"\n group_spec.removeKey = vm_group.name\n groups.append(group_spec)\n\n config_spec = client_factory.create('ns0:ClusterConfigSpecEx')\n config_spec.groupSpec = groups\n reconfigure_cluster(session, cluster, config_spec)", "def test_remove_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_learner_group', 
self.learner_groups[1]))" ]
[ "0.66492325", "0.662239", "0.6541948", "0.6461525", "0.6348538", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541", "0.588854", "0.58635855", "0.5855789", "0.58181447", "0.5795915", "0.57424855", "0.5739038", "0.57293683", "0.57169527", "0.57094395", "0.5706176", "0.5673936", "0.5642542", "0.5629306", "0.5621884", "0.5619029", "0.56167084", "0.5609237", "0.5596568", "0.55946803", "0.5547348", "0.5515882", "0.55131626", "0.55027825", "0.5501512", "0.54915875", "0.54880553", "0.5485858", "0.547825", "0.5473315", "0.5452238", "0.5419276", "0.53967416", "0.53943926", "0.53861785", "0.53841305", "0.5383583", "0.53597087", "0.53597087", "0.53470075", "0.53394866", "0.5334645", "0.53235066", "0.53165925", "0.5310999", "0.5300821", "0.53008074", "0.529749", "0.5295983", "0.52796894", "0.5277352", "0.5267231", "0.5267039", "0.5263943", "0.5247316", "0.5230748", "0.52220625", "0.520562", "0.5202912", "0.5199656", "0.51958704", "0.5194566", "0.51864624", "0.5185525", "0.5179963", "0.51632285", "0.51458204", "0.51458204", "0.5137142", "0.5129982", "0.5122124", "0.5117913", "0.51152146", "0.5114596", "0.5108533", "0.5105518", "0.5100354", "0.5096249", "0.5089743" ]
0.633461
5
List the immediate OutcomeLink children of the outcome group. Paginated.
def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):
    if per_page is None:
        per_page = request_ctx.per_page
    path = '/v1/global/outcome_groups/{id}/outcomes'
    payload = {
        'per_page': per_page,
    }
    url = request_ctx.base_api_url + path.format(id=id)
    response = client.get(request_ctx, url, payload=payload, **request_kwargs)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self): # noqa: ANN201", "def get_children(self):\n\n pass", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def get_children(self):\r\n return self.children", "def GetChildren(self, *args, **kwargs):\n pass", "def get_children(self):\n return []", "def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n raise NotImplementedError()", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def get_children(self):\n return self.items", "def test_get_all_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def get_children(self):\r\n return self._children", "def children(self):\r\n return self.location_set.filter(hidden=False)", "def children(self):\n return self._children", "def children(self):\n return self._children", "def get_organization_group_children_url(og_id):\n\n return '{organization_group_api_path}/{organization_group_id}/children'. \\\n format(organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH, organization_group_id=og_id)", "def children(self):\n \n return self._children", "def descendants(self):\n for a in self._related(set(), 'children'):\n yield a", "def children(self) -> Iterable[Heirarchical]:\n return []", "def test_get_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def getChildren(self):\n return []", "def get_children(self):\n return self._children", "def fm_all_children(self):\n return self._relation_lst[self.CHILD].copy()", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def outcomes(self):\n return self._get_child_page_of_type(LearningOutcomesPage)", "def children(self):\n return list(self._children)", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def children(self):\n address = self.address\n if address:\n address += \"/\"\n\n # Escape the address for re matching\n addres = re.escape(address)\n regex = \"^\" + address + \"[^/]+$\"\n children = Page.objects.filter(address__regex=regex).order_by(\"address\")\n return list(children)", "def getChildren():", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent", "def get_childs(self):\n\t\treturn self.__childs", "def GetChildren(self):\r\n\r\n return self._children", "def getChildren(self):\n \n return self._children", "def get_next(self):\n return self.childs", "def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)", "def 
descendants(self):\n for child in self.children:\n yield child\n if isinstance(child, LoggedAction):\n for descendant in child.descendants():\n yield descendant", "def child_views(self):\n return self.children", "def getChildren(self):\n return self.child_edges", "def print_children(self, offset=None, outputfile=None):\n if offset is None:\n offset = ''\n if outputfile is None:\n print(offset + self.name)\n else:\n outputfile.write(offset + self.name + '\\n')\n for i in range(len(self.children)):\n self.children[i].print_children(offset=offset + ' ', outputfile=outputfile)", "def children(self) -> List[str]:\n return self._children", "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url", "def children(self):\n return {x[1] for x in self.outgoing}", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n return []", "def get_children_elements(self):\n\n pass", "def get_all_children(self):\n return tuple(self.children)", "def get_children(self):\n return self._routes.values()", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def children(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in sorted(self._children_ids, key=self._sort_by_birth)]", "def __iter__(self):\n\n for i in self._children:\n yield i", "def get_children_queryset(self):\n pass", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def all_children(self):\n\n for child in self.children:\n yield child\n for sub_child in child.all_children():\n yield sub_child", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def get_children(self):\n return [node for node in self._children.values()]", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def __iter__(self):\n return iter(self.__children)", "def __next__(self):\n for child in 
self.children:\n yield child", "def getChildren(self):\n return self.directories.values()", "def __iter__(self):\n for child in self.children:\n yield child", "def get_children(self, cont: typing.Union[str, Type(None)]) -> List[str]:\n s = requests.Session()\n\n url = \"https://en.wikipedia.org/w/api.php\"\n\n if cont is None:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": \"links\",\n \"pllimit\": \"max\"\n }\n else:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": \"links\",\n \"pllimit\": \"max\",\n \"plcontinue\": cont\n }\n\n titles_so_far = []\n\n r = s.get(url=url, params=params)\n data = r.json()\n\n pages = data[\"query\"][\"pages\"]\n\n for k, v in pages.items():\n if \"links\" not in v:\n return []\n\n for l in v[\"links\"]:\n titles_so_far.append(l[\"title\"])\n\n if \"batchcomplete\" in data:\n return titles_so_far\n else:\n contHolder = data[\"continue\"][\"plcontinue\"]\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n # return [Article(child, self.target, self.title) for child in titles_so_far]", "def __iter__(self):\n return iter(self._children)", "def get_children(self, context: ResourceCommandContext, obj_ref: str, child_type: str) -> list:\n return self.handler.get_children(obj_ref, child_type)", "def _generate_children(self) -> list:\n if self.debug: print(f\"StateNode._generate_children()\")\n return [self.transition(x) for x in self.actions()]", "def get_children(target, concept_map):\n child_inds = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n target_row = concept_map[ADJ_MAT_STR][target_index]\n for ind in range(len(target_row)): # for each ind in row of adj mat\n val = target_row[ind]\n if(val>0 and ind != target_index): # don't care concept is child of itself\n child_inds.append(ind)\n return list(map(lambda ind: concept_map[CONCEPTS_STR][ind], child_inds))", "def getExpandedLinks():", "def get_children(self, item, level):\n return item.children", "def children():\n return {\n \"charges\": Charge,\n \"codes\": Code,\n \"comments\": Comment,\n \"links\": Link,\n \"parties\": Party,\n \"rates\": Rate,\n \"references\": Reference,\n }", "def Children(self) -> _n_1_t_2:", "def get_child_resource_nodes(self):\n raise errors.Unimplemented()", "def _children(self):\n for codeobj in self.body:\n if isinstance(codeobj, CodeEntity):\n yield codeobj", "def children(self):\n startkey, endkey = self._key_bounds\n depth = len(self.path) + 2 # 1 for domain, 1 for next location level\n q = self.view('locations/hierarchy', startkey=startkey, endkey=endkey, group_level=depth)\n keys = [e['key'] for e in q if len(e['key']) == depth]\n return self.view('locations/hierarchy', keys=keys, reduce=False, include_docs=True).all()", "def children(self):\n return self.leaves", "def get_children(self, parent, child_type=None):\n\n parent_ref = parent if type(parent) is str else parent.ref\n if child_type:\n if child_type.endswith('List'):\n child_ref = parent_ref + '/' + child_type\n return [child_ref + '/' + str(o.objectID) for o in self.connection.httpGet(child_ref)]\n else:\n return [parent_ref + '/' + child_type]\n else:\n links = self.cget(parent, 'links')\n if links:\n return [parent_ref + '/' + link.jsonOptions['rel'] for link in links]", "def test_homechild_listobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n children01 = 
yield home01.listChildren()\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n self.assertEqual(home.id(), home01.id())\n children = yield home.listChildren()\n self.assertEqual(set(children), set(children01))\n yield self.commitTransaction(1)", "def get_children_link_indexes(self, link_idx):\n indexes = []\n for i in range(len(info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION)):\n if link_idx == info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION[\n i][0]:\n indexes.append(\n self.get_link_index(\n info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION[i][1]))\n return indexes", "def print_all_descendants(self):\n #self._print_all_descendants_rec(self, 0) # stdout version \n string = \"|---\" + str(self) + \"\\n\"\n return self._return_string_all_descendants_rec(self, string, 0)", "def list_object_children_paged(self, object_ref: str,\n next_token: Optional[str] = None,\n per_page: Optional[int] = None, **kwargs) -> Tuple[dict, Optional[str]]:\n kwargs.update(\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n MaxResults=min(per_page, self._page_limit) if per_page else self._page_limit,\n )\n if next_token:\n kwargs['NextToken'] = next_token\n result = cd_client.list_object_children(**kwargs)\n return result['Children'], result.get(\"NextToken\")", "def get_b_children(self, b_obj):\n return [child for child in Blender.Object.Get()\n if child.parent == b_obj]", "def get_citation_child_list(self):\n return self.address_list", "def children(self) -> List[Region]:\n return []", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def traverse_ancestors(self):\n self.event.wait()\n for a in super(FalseObject, self).traverse_ancestors():\n yield a", "def children_ids(self):\n return self._children_ids", "def get_children(self):\r\n\r\n # FIXME: Expose iteration from CIndex, PR6125.\r\n def visitor(child, parent, children):\r\n # FIXME: Document this assertion in API.\r\n # FIXME: There should just be an isNull method.\r\n assert child != conf.lib.clang_getNullCursor()\r\n\r\n # Create reference to TU so it isn't GC'd before Cursor.\r\n child._tu = self._tu\r\n children.append(child)\r\n return 1 # continue\r\n children = []\r\n conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),\r\n children)\r\n return iter(children)", "def render_children(self, block, view_name=None, context=None):\n results = []\n for child_id in block.children:\n child = self.get_block(child_id)\n result = self.render_child(child, view_name, context)\n results.append(result)\n return results", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def list_object_children(self, object_ref: str, **kwargs) -> Iterator[Tuple[str, str]]:\n resp = cd_client.list_object_children(DirectoryArn=self._dir_arn,\n 
ObjectReference={'Selector': object_ref},\n MaxResults=self._page_limit,\n **kwargs)\n while True:\n for name, ref in resp['Children'].items():\n yield name, '$' + ref\n next_token = resp.get('NextToken')\n if next_token:\n resp = cd_client.list_object_children(DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n NextToken=next_token,\n MaxResults=self._page_limit)\n else:\n break", "def siblings(self):\n return type(self)._default_manager.filter(category=self.category)", "def children(self) -> List[Region]:\n return self._children", "def _get_children(env, s):\n return [env.predict(s, a) for a in env.get_actions(s)]", "def children(self):\n return self.hashring_watch.get_children()", "def children(node):\n\n return snd(node)", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ..." ]
[ "0.62007177", "0.60129935", "0.59958863", "0.58787924", "0.5869366", "0.58592945", "0.5843929", "0.57969826", "0.5790005", "0.5790005", "0.5790005", "0.5784237", "0.5758441", "0.5725369", "0.5718923", "0.5705861", "0.56908256", "0.56826895", "0.56826895", "0.56807274", "0.5678357", "0.5673407", "0.5663532", "0.56499904", "0.5630875", "0.56280816", "0.56279117", "0.5624265", "0.5623903", "0.5591998", "0.5586219", "0.5586219", "0.5559264", "0.554359", "0.55417335", "0.5529376", "0.5519995", "0.55043954", "0.5500609", "0.5449192", "0.5415584", "0.54124033", "0.5411345", "0.5406772", "0.5387032", "0.5386948", "0.532538", "0.53241414", "0.5321131", "0.5320732", "0.53137034", "0.53100693", "0.52991486", "0.52881837", "0.52839124", "0.5280865", "0.5274147", "0.526855", "0.525717", "0.5255392", "0.52501935", "0.52495563", "0.5248211", "0.5238346", "0.5229398", "0.52258676", "0.522404", "0.5214709", "0.52120084", "0.52075726", "0.5206681", "0.51985246", "0.519424", "0.5192437", "0.5186004", "0.5130755", "0.5123423", "0.51104444", "0.51091784", "0.5108976", "0.5107544", "0.51049465", "0.5094659", "0.50924873", "0.508125", "0.50811666", "0.5078817", "0.50759506", "0.50701004", "0.5069498", "0.5066931", "0.5066709", "0.5057887", "0.5046958", "0.50432384", "0.5040004", "0.503265", "0.5023119", "0.5017467", "0.50141364" ]
0.5629413
25
List the immediate OutcomeLink children of the outcome group. Paginated.
def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):
    if per_page is None:
        per_page = request_ctx.per_page
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'
    payload = {
        'per_page': per_page,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    response = client.get(request_ctx, url, payload=payload, **request_kwargs)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self): # noqa: ANN201", "def get_children(self):\n\n pass", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def get_children(self):\r\n return self.children", "def GetChildren(self, *args, **kwargs):\n pass", "def get_children(self):\n return []", "def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n raise NotImplementedError()", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def get_children(self):\n return self.items", "def test_get_all_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def get_children(self):\r\n return self._children", "def children(self):\r\n return self.location_set.filter(hidden=False)", "def children(self):\n return self._children", "def children(self):\n return self._children", "def get_organization_group_children_url(og_id):\n\n return '{organization_group_api_path}/{organization_group_id}/children'. \\\n format(organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH, organization_group_id=og_id)", "def children(self):\n \n return self._children", "def descendants(self):\n for a in self._related(set(), 'children'):\n yield a", "def children(self) -> Iterable[Heirarchical]:\n return []", "def test_get_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def getChildren(self):\n return []", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def get_children(self):\n return self._children", "def fm_all_children(self):\n return self._relation_lst[self.CHILD].copy()", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def outcomes(self):\n return self._get_child_page_of_type(LearningOutcomesPage)", "def children(self):\n return list(self._children)", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def children(self):\n address = self.address\n if address:\n address += \"/\"\n\n # Escape the address for re matching\n addres = re.escape(address)\n regex = \"^\" + address + \"[^/]+$\"\n children = Page.objects.filter(address__regex=regex).order_by(\"address\")\n return list(children)", "def getChildren():", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in 
ent.visgroup_ids:\n yield ent", "def get_childs(self):\n\t\treturn self.__childs", "def GetChildren(self):\r\n\r\n return self._children", "def getChildren(self):\n \n return self._children", "def get_next(self):\n return self.childs", "def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)", "def descendants(self):\n for child in self.children:\n yield child\n if isinstance(child, LoggedAction):\n for descendant in child.descendants():\n yield descendant", "def child_views(self):\n return self.children", "def getChildren(self):\n return self.child_edges", "def print_children(self, offset=None, outputfile=None):\n if offset is None:\n offset = ''\n if outputfile is None:\n print(offset + self.name)\n else:\n outputfile.write(offset + self.name + '\\n')\n for i in range(len(self.children)):\n self.children[i].print_children(offset=offset + ' ', outputfile=outputfile)", "def children(self) -> List[str]:\n return self._children", "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url", "def children(self):\n return {x[1] for x in self.outgoing}", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n return []", "def get_children_elements(self):\n\n pass", "def get_all_children(self):\n return tuple(self.children)", "def get_children(self):\n return self._routes.values()", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def children(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in sorted(self._children_ids, key=self._sort_by_birth)]", "def __iter__(self):\n\n for i in self._children:\n yield i", "def get_children_queryset(self):\n pass", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def all_children(self):\n\n for child in self.children:\n yield child\n for sub_child in child.all_children():\n yield sub_child", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def get_children(self):\n return [node for node in self._children.values()]", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def __iter__(self):\n return iter(self.__children)", "def __next__(self):\n for child in self.children:\n yield child", "def getChildren(self):\n return 
self.directories.values()", "def __iter__(self):\n for child in self.children:\n yield child", "def get_children(self, cont: typing.Union[str, Type(None)]) -> List[str]:\n s = requests.Session()\n\n url = \"https://en.wikipedia.org/w/api.php\"\n\n if cont is None:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": \"links\",\n \"pllimit\": \"max\"\n }\n else:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": \"links\",\n \"pllimit\": \"max\",\n \"plcontinue\": cont\n }\n\n titles_so_far = []\n\n r = s.get(url=url, params=params)\n data = r.json()\n\n pages = data[\"query\"][\"pages\"]\n\n for k, v in pages.items():\n if \"links\" not in v:\n return []\n\n for l in v[\"links\"]:\n titles_so_far.append(l[\"title\"])\n\n if \"batchcomplete\" in data:\n return titles_so_far\n else:\n contHolder = data[\"continue\"][\"plcontinue\"]\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n # return [Article(child, self.target, self.title) for child in titles_so_far]", "def __iter__(self):\n return iter(self._children)", "def get_children(self, context: ResourceCommandContext, obj_ref: str, child_type: str) -> list:\n return self.handler.get_children(obj_ref, child_type)", "def _generate_children(self) -> list:\n if self.debug: print(f\"StateNode._generate_children()\")\n return [self.transition(x) for x in self.actions()]", "def get_children(target, concept_map):\n child_inds = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n target_row = concept_map[ADJ_MAT_STR][target_index]\n for ind in range(len(target_row)): # for each ind in row of adj mat\n val = target_row[ind]\n if(val>0 and ind != target_index): # don't care concept is child of itself\n child_inds.append(ind)\n return list(map(lambda ind: concept_map[CONCEPTS_STR][ind], child_inds))", "def getExpandedLinks():", "def get_children(self, item, level):\n return item.children", "def children():\n return {\n \"charges\": Charge,\n \"codes\": Code,\n \"comments\": Comment,\n \"links\": Link,\n \"parties\": Party,\n \"rates\": Rate,\n \"references\": Reference,\n }", "def Children(self) -> _n_1_t_2:", "def get_child_resource_nodes(self):\n raise errors.Unimplemented()", "def _children(self):\n for codeobj in self.body:\n if isinstance(codeobj, CodeEntity):\n yield codeobj", "def children(self):\n startkey, endkey = self._key_bounds\n depth = len(self.path) + 2 # 1 for domain, 1 for next location level\n q = self.view('locations/hierarchy', startkey=startkey, endkey=endkey, group_level=depth)\n keys = [e['key'] for e in q if len(e['key']) == depth]\n return self.view('locations/hierarchy', keys=keys, reduce=False, include_docs=True).all()", "def children(self):\n return self.leaves", "def get_children(self, parent, child_type=None):\n\n parent_ref = parent if type(parent) is str else parent.ref\n if child_type:\n if child_type.endswith('List'):\n child_ref = parent_ref + '/' + child_type\n return [child_ref + '/' + str(o.objectID) for o in self.connection.httpGet(child_ref)]\n else:\n return [parent_ref + '/' + child_type]\n else:\n links = self.cget(parent, 'links')\n if links:\n return [parent_ref + '/' + link.jsonOptions['rel'] for link in links]", "def test_homechild_listobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n children01 = yield home01.listChildren()\n yield self.commitTransaction(0)\n\n 
home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n self.assertEqual(home.id(), home01.id())\n children = yield home.listChildren()\n self.assertEqual(set(children), set(children01))\n yield self.commitTransaction(1)", "def get_children_link_indexes(self, link_idx):\n indexes = []\n for i in range(len(info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION)):\n if link_idx == info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION[\n i][0]:\n indexes.append(\n self.get_link_index(\n info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION[i][1]))\n return indexes", "def print_all_descendants(self):\n #self._print_all_descendants_rec(self, 0) # stdout version \n string = \"|---\" + str(self) + \"\\n\"\n return self._return_string_all_descendants_rec(self, string, 0)", "def list_object_children_paged(self, object_ref: str,\n next_token: Optional[str] = None,\n per_page: Optional[int] = None, **kwargs) -> Tuple[dict, Optional[str]]:\n kwargs.update(\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n MaxResults=min(per_page, self._page_limit) if per_page else self._page_limit,\n )\n if next_token:\n kwargs['NextToken'] = next_token\n result = cd_client.list_object_children(**kwargs)\n return result['Children'], result.get(\"NextToken\")", "def get_b_children(self, b_obj):\n return [child for child in Blender.Object.Get()\n if child.parent == b_obj]", "def get_citation_child_list(self):\n return self.address_list", "def children(self) -> List[Region]:\n return []", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def traverse_ancestors(self):\n self.event.wait()\n for a in super(FalseObject, self).traverse_ancestors():\n yield a", "def children_ids(self):\n return self._children_ids", "def get_children(self):\r\n\r\n # FIXME: Expose iteration from CIndex, PR6125.\r\n def visitor(child, parent, children):\r\n # FIXME: Document this assertion in API.\r\n # FIXME: There should just be an isNull method.\r\n assert child != conf.lib.clang_getNullCursor()\r\n\r\n # Create reference to TU so it isn't GC'd before Cursor.\r\n child._tu = self._tu\r\n children.append(child)\r\n return 1 # continue\r\n children = []\r\n conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),\r\n children)\r\n return iter(children)", "def render_children(self, block, view_name=None, context=None):\n results = []\n for child_id in block.children:\n child = self.get_block(child_id)\n result = self.render_child(child, view_name, context)\n results.append(result)\n return results", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def list_object_children(self, object_ref: str, **kwargs) -> Iterator[Tuple[str, str]]:\n resp = cd_client.list_object_children(DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n MaxResults=self._page_limit,\n **kwargs)\n 
while True:\n for name, ref in resp['Children'].items():\n yield name, '$' + ref\n next_token = resp.get('NextToken')\n if next_token:\n resp = cd_client.list_object_children(DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n NextToken=next_token,\n MaxResults=self._page_limit)\n else:\n break", "def siblings(self):\n return type(self)._default_manager.filter(category=self.category)", "def children(self) -> List[Region]:\n return self._children", "def _get_children(env, s):\n return [env.predict(s, a) for a in env.get_actions(s)]", "def children(self):\n return self.hashring_watch.get_children()", "def children(node):\n\n return snd(node)", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ..." ]
[ "0.62007177", "0.60129935", "0.59958863", "0.58787924", "0.5869366", "0.58592945", "0.5843929", "0.57969826", "0.5790005", "0.5790005", "0.5790005", "0.5784237", "0.5758441", "0.5725369", "0.5718923", "0.5705861", "0.56908256", "0.56826895", "0.56826895", "0.56807274", "0.5678357", "0.5673407", "0.5663532", "0.56499904", "0.5630875", "0.5629413", "0.56280816", "0.56279117", "0.5624265", "0.5623903", "0.5591998", "0.5586219", "0.5586219", "0.5559264", "0.554359", "0.55417335", "0.5529376", "0.5519995", "0.55043954", "0.5500609", "0.5449192", "0.5415584", "0.54124033", "0.5411345", "0.5406772", "0.5387032", "0.5386948", "0.532538", "0.53241414", "0.5321131", "0.5320732", "0.53137034", "0.53100693", "0.52991486", "0.52881837", "0.52839124", "0.5280865", "0.5274147", "0.526855", "0.5255392", "0.52501935", "0.52495563", "0.5248211", "0.5238346", "0.5229398", "0.52258676", "0.522404", "0.5214709", "0.52120084", "0.52075726", "0.5206681", "0.51985246", "0.519424", "0.5192437", "0.5186004", "0.5130755", "0.5123423", "0.51104444", "0.51091784", "0.5108976", "0.5107544", "0.51049465", "0.5094659", "0.50924873", "0.508125", "0.50811666", "0.5078817", "0.50759506", "0.50701004", "0.5069498", "0.5066931", "0.5066709", "0.5057887", "0.5046958", "0.50432384", "0.5040004", "0.503265", "0.5023119", "0.5017467", "0.50141364" ]
0.525717
59
List the immediate OutcomeLink children of the outcome group. Paginated.
def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):
    if per_page is None:
        per_page = request_ctx.per_page
    path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'
    payload = {
        'per_page': per_page,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    response = client.get(request_ctx, url, payload=payload, **request_kwargs)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self): # noqa: ANN201", "def get_children(self):\n\n pass", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def get_children(self):\r\n return self.children", "def GetChildren(self, *args, **kwargs):\n pass", "def get_children(self):\n return []", "def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n raise NotImplementedError()", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def get_children(self):\n return self.items", "def test_get_all_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def get_children(self):\r\n return self._children", "def children(self):\r\n return self.location_set.filter(hidden=False)", "def children(self):\n return self._children", "def children(self):\n return self._children", "def get_organization_group_children_url(og_id):\n\n return '{organization_group_api_path}/{organization_group_id}/children'. \\\n format(organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH, organization_group_id=og_id)", "def children(self):\n \n return self._children", "def descendants(self):\n for a in self._related(set(), 'children'):\n yield a", "def children(self) -> Iterable[Heirarchical]:\n return []", "def test_get_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def getChildren(self):\n return []", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def get_children(self):\n return self._children", "def fm_all_children(self):\n return self._relation_lst[self.CHILD].copy()", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def outcomes(self):\n return self._get_child_page_of_type(LearningOutcomesPage)", "def children(self):\n return list(self._children)", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def children(self):\n address = self.address\n if address:\n address += \"/\"\n\n # Escape the address for re matching\n addres = re.escape(address)\n regex = \"^\" + address + \"[^/]+$\"\n children = Page.objects.filter(address__regex=regex).order_by(\"address\")\n return list(children)", "def getChildren():", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in 
ent.visgroup_ids:\n yield ent", "def get_childs(self):\n\t\treturn self.__childs", "def GetChildren(self):\r\n\r\n return self._children", "def getChildren(self):\n \n return self._children", "def get_next(self):\n return self.childs", "def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)", "def descendants(self):\n for child in self.children:\n yield child\n if isinstance(child, LoggedAction):\n for descendant in child.descendants():\n yield descendant", "def child_views(self):\n return self.children", "def getChildren(self):\n return self.child_edges", "def print_children(self, offset=None, outputfile=None):\n if offset is None:\n offset = ''\n if outputfile is None:\n print(offset + self.name)\n else:\n outputfile.write(offset + self.name + '\\n')\n for i in range(len(self.children)):\n self.children[i].print_children(offset=offset + ' ', outputfile=outputfile)", "def children(self) -> List[str]:\n return self._children", "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url", "def children(self):\n return {x[1] for x in self.outgoing}", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n return []", "def get_children_elements(self):\n\n pass", "def get_all_children(self):\n return tuple(self.children)", "def get_children(self):\n return self._routes.values()", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def children(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in sorted(self._children_ids, key=self._sort_by_birth)]", "def __iter__(self):\n\n for i in self._children:\n yield i", "def get_children_queryset(self):\n pass", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def all_children(self):\n\n for child in self.children:\n yield child\n for sub_child in child.all_children():\n yield sub_child", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def 
get_children(self):\n return [node for node in self._children.values()]", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def __iter__(self):\n return iter(self.__children)", "def __next__(self):\n for child in self.children:\n yield child", "def getChildren(self):\n return self.directories.values()", "def __iter__(self):\n for child in self.children:\n yield child", "def get_children(self, cont: typing.Union[str, Type(None)]) -> List[str]:\n s = requests.Session()\n\n url = \"https://en.wikipedia.org/w/api.php\"\n\n if cont is None:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": \"links\",\n \"pllimit\": \"max\"\n }\n else:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": \"links\",\n \"pllimit\": \"max\",\n \"plcontinue\": cont\n }\n\n titles_so_far = []\n\n r = s.get(url=url, params=params)\n data = r.json()\n\n pages = data[\"query\"][\"pages\"]\n\n for k, v in pages.items():\n if \"links\" not in v:\n return []\n\n for l in v[\"links\"]:\n titles_so_far.append(l[\"title\"])\n\n if \"batchcomplete\" in data:\n return titles_so_far\n else:\n contHolder = data[\"continue\"][\"plcontinue\"]\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n # return [Article(child, self.target, self.title) for child in titles_so_far]", "def __iter__(self):\n return iter(self._children)", "def get_children(self, context: ResourceCommandContext, obj_ref: str, child_type: str) -> list:\n return self.handler.get_children(obj_ref, child_type)", "def _generate_children(self) -> list:\n if self.debug: print(f\"StateNode._generate_children()\")\n return [self.transition(x) for x in self.actions()]", "def get_children(target, concept_map):\n child_inds = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n target_row = concept_map[ADJ_MAT_STR][target_index]\n for ind in range(len(target_row)): # for each ind in row of adj mat\n val = target_row[ind]\n if(val>0 and ind != target_index): # don't care concept is child of itself\n child_inds.append(ind)\n return list(map(lambda ind: concept_map[CONCEPTS_STR][ind], child_inds))", "def getExpandedLinks():", "def get_children(self, item, level):\n return item.children", "def children():\n return {\n \"charges\": Charge,\n \"codes\": Code,\n \"comments\": Comment,\n \"links\": Link,\n \"parties\": Party,\n \"rates\": Rate,\n \"references\": Reference,\n }", "def Children(self) -> _n_1_t_2:", "def get_child_resource_nodes(self):\n raise errors.Unimplemented()", "def _children(self):\n for codeobj in self.body:\n if isinstance(codeobj, CodeEntity):\n yield codeobj", "def children(self):\n startkey, endkey = self._key_bounds\n depth = len(self.path) + 2 # 1 for domain, 1 for next location level\n q = self.view('locations/hierarchy', startkey=startkey, endkey=endkey, group_level=depth)\n keys = [e['key'] for e in q if len(e['key']) == depth]\n return self.view('locations/hierarchy', keys=keys, reduce=False, include_docs=True).all()", "def children(self):\n return self.leaves", "def get_children(self, parent, child_type=None):\n\n parent_ref = parent if type(parent) is str else parent.ref\n if child_type:\n if child_type.endswith('List'):\n child_ref = parent_ref + '/' + child_type\n return [child_ref + '/' + str(o.objectID) for o in self.connection.httpGet(child_ref)]\n 
else:\n return [parent_ref + '/' + child_type]\n else:\n links = self.cget(parent, 'links')\n if links:\n return [parent_ref + '/' + link.jsonOptions['rel'] for link in links]", "def test_homechild_listobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n children01 = yield home01.listChildren()\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n self.assertEqual(home.id(), home01.id())\n children = yield home.listChildren()\n self.assertEqual(set(children), set(children01))\n yield self.commitTransaction(1)", "def get_children_link_indexes(self, link_idx):\n indexes = []\n for i in range(len(info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION)):\n if link_idx == info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION[\n i][0]:\n indexes.append(\n self.get_link_index(\n info_populator.InfoPopulator.LINK_PARENT_CHILD_RELATION[i][1]))\n return indexes", "def print_all_descendants(self):\n #self._print_all_descendants_rec(self, 0) # stdout version \n string = \"|---\" + str(self) + \"\\n\"\n return self._return_string_all_descendants_rec(self, string, 0)", "def list_object_children_paged(self, object_ref: str,\n next_token: Optional[str] = None,\n per_page: Optional[int] = None, **kwargs) -> Tuple[dict, Optional[str]]:\n kwargs.update(\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n MaxResults=min(per_page, self._page_limit) if per_page else self._page_limit,\n )\n if next_token:\n kwargs['NextToken'] = next_token\n result = cd_client.list_object_children(**kwargs)\n return result['Children'], result.get(\"NextToken\")", "def get_b_children(self, b_obj):\n return [child for child in Blender.Object.Get()\n if child.parent == b_obj]", "def get_citation_child_list(self):\n return self.address_list", "def children(self) -> List[Region]:\n return []", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def traverse_ancestors(self):\n self.event.wait()\n for a in super(FalseObject, self).traverse_ancestors():\n yield a", "def children_ids(self):\n return self._children_ids", "def get_children(self):\r\n\r\n # FIXME: Expose iteration from CIndex, PR6125.\r\n def visitor(child, parent, children):\r\n # FIXME: Document this assertion in API.\r\n # FIXME: There should just be an isNull method.\r\n assert child != conf.lib.clang_getNullCursor()\r\n\r\n # Create reference to TU so it isn't GC'd before Cursor.\r\n child._tu = self._tu\r\n children.append(child)\r\n return 1 # continue\r\n children = []\r\n conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),\r\n children)\r\n return iter(children)", "def render_children(self, block, view_name=None, context=None):\n results = []\n for child_id in block.children:\n child = self.get_block(child_id)\n result = self.render_child(child, view_name, context)\n results.append(result)\n return results", "def list_object_children(self, object_ref: str, **kwargs) -> Iterator[Tuple[str, str]]:\n resp = cd_client.list_object_children(DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n MaxResults=self._page_limit,\n 
**kwargs)\n while True:\n for name, ref in resp['Children'].items():\n yield name, '$' + ref\n next_token = resp.get('NextToken')\n if next_token:\n resp = cd_client.list_object_children(DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n NextToken=next_token,\n MaxResults=self._page_limit)\n else:\n break", "def siblings(self):\n return type(self)._default_manager.filter(category=self.category)", "def children(self) -> List[Region]:\n return self._children", "def _get_children(env, s):\n return [env.predict(s, a) for a in env.get_actions(s)]", "def children(self):\n return self.hashring_watch.get_children()", "def children(node):\n\n return snd(node)", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ..." ]
[ "0.62007177", "0.60129935", "0.59958863", "0.58787924", "0.5869366", "0.58592945", "0.5843929", "0.57969826", "0.5790005", "0.5790005", "0.5790005", "0.5784237", "0.5758441", "0.5725369", "0.5718923", "0.5705861", "0.56908256", "0.56826895", "0.56826895", "0.56807274", "0.5678357", "0.5673407", "0.5663532", "0.56499904", "0.5630875", "0.5629413", "0.56280816", "0.56279117", "0.5624265", "0.5623903", "0.5591998", "0.5586219", "0.5586219", "0.5559264", "0.554359", "0.55417335", "0.5529376", "0.5519995", "0.55043954", "0.5500609", "0.5449192", "0.5415584", "0.54124033", "0.5411345", "0.5406772", "0.5387032", "0.5386948", "0.532538", "0.53241414", "0.5321131", "0.5320732", "0.53137034", "0.53100693", "0.52991486", "0.52881837", "0.52839124", "0.5280865", "0.5274147", "0.526855", "0.525717", "0.5255392", "0.52501935", "0.52495563", "0.5248211", "0.5238346", "0.5229398", "0.52258676", "0.522404", "0.5214709", "0.52120084", "0.52075726", "0.5206681", "0.51985246", "0.519424", "0.5192437", "0.5186004", "0.5130755", "0.5123423", "0.51104444", "0.51091784", "0.5108976", "0.5107544", "0.51049465", "0.5094659", "0.50924873", "0.508125", "0.50811666", "0.5078817", "0.50759506", "0.50701004", "0.5069498", "0.5066931", "0.5066709", "0.5046958", "0.50432384", "0.5040004", "0.503265", "0.5023119", "0.5017467", "0.50141364" ]
0.5057887
93
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection.

If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored.

If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group.

If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs): path = '/v1/global/outcome_groups/{id}/outcomes' payload = { 'outcome_id' : outcome_id, 'title' : title, 'display_name' : display_name, 'description' : description, 'vendor_guid' : vendor_guid, 'mastery_points' : mastery_points, 'ratings[description]' : ratings_description, 'ratings[points]' : ratings_points, } url = request_ctx.base_api_url + path.format(id=id) response = client.post(request_ctx, url, payload=payload, **request_kwargs) return response
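For context, a minimal usage sketch of the document function above. The `request_ctx` object, the ids, and the requests-style response are assumptions inferred from the sibling helpers in the negatives, not confirmed API details:

# Illustrative sketch only -- not part of the dataset row. Assumes `request_ctx`
# is an already-authenticated request context exposing `base_api_url`, as the
# helper above requires; the ids used here are invented.

# Link an existing global outcome into global outcome group 7.
# With outcome_id present, the remaining keyword arguments are ignored.
resp = create_link_outcome_global(request_ctx, id=7, outcome_id=42)

# Create a brand-new outcome in the group's context and link it.
# Only the title is required; all other fields are optional.
resp = create_link_outcome_global(
    request_ctx,
    id=7,
    title='Problem Solving',
    description='Student can decompose and solve novel problems.',
)
print(resp.status_code)  # assuming client.post returns a requests-style Response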
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 
'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in 
self.model.vocabulary[meaning]:\n self.model.vocabulary[meaning][word] = [self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, 
outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n 
AssignmentDurationInSeconds=final_duration,\r\n )\r\n additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s", "def addLink(self, name=None, **kwargs):\n if isinstance(name, rigmech):\n self.sym_prefix = name.sym_prefix + \"_\"\n self.addLink(\n name=name.name,\n mass=name.global_syms[\"mass\"],\n inertia=name.global_syms[\"Mq\"],\n origin_xyz=name.global_syms[\"xyz_com\"],\n )\n else:\n kwargs[\"name\"] = name\n LinkArgs = rigmech._check_field_inputs(\n \"addLink\", self._DefaultLinkFields, kwargs\n )\n self.Links[LinkArgs[\"name\"]] = LinkArgs", "def add_link(self, target, rel, title=None, title_star=None,\n anchor=None, hreflang=None, type_hint=None):\n\n # PERF(kgriffs): Heuristic to detect possiblity of an extension\n # relation type, in which case it will be a URL that may contain\n # reserved characters. 
Otherwise, don't waste time running the\n # string through uri.encode\n #\n # Example values for rel:\n #\n # \"next\"\n # \"http://example.com/ext-type\"\n # \"https://example.com/ext-type\"\n # \"alternate http://example.com/ext-type\"\n # \"http://example.com/ext-type alternate\"\n #\n if '//' in rel:\n if ' ' in rel:\n rel = ('\"' +\n ' '.join([uri.encode(r) for r in rel.split()]) +\n '\"')\n else:\n rel = '\"' + uri.encode(rel) + '\"'\n\n value = '<' + uri.encode(target) + '>; rel=' + rel\n\n if title is not None:\n value += '; title=\"' + title + '\"'\n\n if title_star is not None:\n value += (\"; title*=UTF-8'\" + title_star[0] + \"'\" +\n uri.encode_value(title_star[1]))\n\n if type_hint is not None:\n value += '; type=\"' + type_hint + '\"'\n\n if hreflang is not None:\n if isinstance(hreflang, six.string_types):\n value += '; hreflang=' + hreflang\n else:\n value += '; '\n value += '; '.join(['hreflang=' + lang for lang in hreflang])\n\n if anchor is not None:\n value += '; anchor=\"' + uri.encode(anchor) + '\"'\n\n _headers = self._headers\n if 'link' in _headers:\n _headers['link'] += ', ' + value\n else:\n _headers['link'] = value", "def reward(self, history_id, reward):\n pass", "def add(self, destination, kind):\n if destination in self.__links:\n raise SarasvatiException(\"Link to specified thought already exist\")\n if kind not in self.__correct_kinds:\n raise SarasvatiException(\"Link kind is not correct: \" + kind)\n if self.__source is destination:\n raise SarasvatiException(\"Unable link thought to itself\")\n link = Link(self.__source, destination, kind)\n return self.add_link(link)", "def add_outcome(self, node, cost=0, weight=1, classifier=None):\n\n if classifier is None:\n self.outcomes.append((Edge(self, node, cost=cost), weight))\n else:\n self.outcomes.append((Edge(self, node, cost=cost), classifier))", "def relate(a, b, **kwargs):\n return lib.relate(a, b, **kwargs)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def relate_object(self, obj):\n suffix = self._get_api_suffix(obj.__class__)\n endpoint = self._get_api_endpoint() + '/' + suffix\n obj_id = obj._id()\n results = self.tq.post(endpoint, data={'id': obj_id})\n\n results = results.get('data')\n if not results or 'pivot' not in results[0]:\n raise ActionFailedError('Relate indicators')", "def createPooledReward(self, name, rewardPoolId, product_key_name, instructions=None):\n param = {\"name\": name, product_key_name: 'pooled:%s' % rewardPoolId}\n if instructions:\n param[\"instructions\"] = instructions\n self.post_json('/reward', param)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = 
request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def get_reward(self, state, action, next_state, absorbing):\n raise NotImplementedError", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def append(self, state, action, reward, next_state=None, next_action=None,\n is_state_terminal=False):\n raise NotImplementedError", "def add_hyperlink(paragraph, url, text, color, underline):\r\n\r\n # This gets access to the document.xml.rels file and gets a new relation id value\r\n part = paragraph.part\r\n r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)\r\n\r\n # Create the w:hyperlink tag and add needed values\r\n hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')\r\n hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )\r\n\r\n # Create a w:r element\r\n new_run = docx.oxml.shared.OxmlElement('w:r')\r\n\r\n # Create a new w:rPr element\r\n rPr = docx.oxml.shared.OxmlElement('w:rPr')\r\n\r\n # Add color if it is given\r\n if not color is None:\r\n c = docx.oxml.shared.OxmlElement('w:color')\r\n c.set(docx.oxml.shared.qn('w:val'), color)\r\n rPr.append(c)\r\n\r\n # Remove underlining if it is requested\r\n if not underline:\r\n u = docx.oxml.shared.OxmlElement('w:u')\r\n u.set(docx.oxml.shared.qn('w:val'), 'none')\r\n rPr.append(u)\r\n\r\n # Join all the xml elements together add add the required text to the w:r element\r\n new_run.append(rPr)\r\n new_run.text = text\r\n hyperlink.append(new_run)\r\n\r\n paragraph._p.append(hyperlink)\r\n\r\n return hyperlink", "def add_link(self, link):\n raise NotImplementedError", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def link(self, s_id):\r\n\r\n # Take the link entires from TOML file\r\n schedules = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n if 'id' in self.schedules[-1]:\r\n payload['schedule'] = self.schedules[-1].get('id')\r\n if 'id' in self.workouts[-1]:\r\n payload['workout'] = self.workouts[-1].get('id')\r\n return self.add_post(payload, API.url_link, self.links)", "def post(self, category_id, name, description, weight, category_ref, sponsor_id):\n\t\tproperties = {\"id\": category_id, \"name\": name, \"description\": description, \"weight\": weight, \"categoryRef\": category_ref, \"sponsorId\": sponsor_id}\n\t\treturn self.service.post(self.base_uri, 
json.dumps(properties))", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"", "def reward(self, history_id, reward):\n reward_action = self._historystorage.unrewarded_histories[history_id].action\n reward_action_idx = self._actions.index(reward_action)\n context = self._historystorage.unrewarded_histories[history_id].context[reward_action_idx]\n context = np.matrix(context)\n\n # Update the model\n matrix_a = self._modelstorage.get_model()['matrix_a']\n matrix_ainv = self._modelstorage.get_model()['matrix_ainv']\n b = self._modelstorage.get_model()['b']\n theta = self._modelstorage.get_model()['theta']\n matrix_a[reward_action] += np.dot(context.T, context)\n matrix_ainv[reward_action] = np.linalg.solve(matrix_a[reward_action], np.identity(self.d))\n b[reward_action] += reward * context.T\n theta[reward_action] = np.dot(matrix_ainv[reward_action], b[reward_action])\n self._modelstorage.save_model({'matrix_a': matrix_a, 'matrix_ainv': matrix_ainv, 'b': b, 'theta': theta})\n\n # Update the history\n self._historystorage.add_reward(history_id, reward)", "def link_room(self, room_to_link, direction):\n self.linked_rooms[direction] = room_to_link\n # print(self.name + \" linked rooms :\" + repr(self.linked_rooms) )", "def add_new_event(self,\n event_type: str,\n event_datetime: str,\n covid_status: str = \"U\",\n death: int = 0,\n critical_care_admission: int = 0,\n component: str or None = None,\n source: str or None = None,\n source_type: str or None = None,\n wimd: int or None = None,\n **kwargs):\n # Parse datetime and check validity (None for date if invalid)\n event_datetime = parse_datetime(event_datetime)\n if event_datetime.get(\"date\") is None:\n err = f\"Datetime parsed when trying to generate a new outcome event for {self.patientId} was invalid!\"\n 
self._config.write_to_log(err)\n raise ValueError(err)\n # Create outcome document\n new_outcome = Event(patientId=self.patientId,\n eventType=event_type.strip(),\n eventDate=event_datetime.get(\"date\"),\n covidStatus=covid_status,\n death=death,\n criticalCareAdmission=critical_care_admission,\n **kwargs)\n # Populate with optional parameters if given\n new_outcome = _add_if_value(new_outcome, [(\"component\", component),\n (\"source\", source),\n (\"sourceType\", source_type),\n (\"wimd\", wimd),\n (\"eventTime\", event_datetime.get(\"time\"))])\n new_outcome = new_outcome.save()\n self.outcomeEvents.append(new_outcome)\n self.save()\n self._config.write_to_log(f\"Outcome event {new_outcome.id} for patient {self.patientId}\")", "def make_link(first, second):\n manager = Actions()\n manager.make_link(first, second)", "def add_sample(self, img, action, reward, terminal):\n self.imgs[:, :, self.top] = img\n self.actions[self.top] = action\n self.rewards[self.top] = reward\n self.terminal[self.top] = terminal\n\n if self.size == self.max_steps:\n self.bottom = (self.bottom + 1) % self.max_steps\n else:\n self.size += 1\n\n self.top = (self.top + 1) % self.max_steps", "async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def create_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN create_data_link\n duid, sna, update = _create_data_link_params(params)\n as_admin, user = _get_admin_request_from_object(params, 'as_admin', 'as_user')\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.FULL,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'create_data_link', ctx.log_info, as_user=user, skip_check=not as_admin)\n link = self._samples.create_data_link(\n user if user else _UserID(ctx[_CTX_USER]),\n duid,\n sna,\n update,\n as_admin=as_admin)\n results = {'new_link': _links_to_dicts([link])[0]}\n #END create_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method create_data_link return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]", "def related_url(self) -> pulumi.Output[Sequence['outputs.RelatedUrlResponse']]:\n return pulumi.get(self, \"related_url\")", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def get_outcome(self):\n if not self.is_paid:\n raise ValueError(\"There isn't an outcome.\")\n return self.team_a if self.outcome else self.team_b", "def PostReward(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
_publish_reward_topic(self, reward, steps, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)\n self.reward_list.append(reward)\n self.episode_list.append(episode_number)\n self.step_list.append(steps)\n list = str(reward) + \";\" + str(episode_number) + \";\" + str(steps) + \"\\n\"\n\n with open(self.csv_name + '.csv', 'a') as csv:\n csv.write(str(list))", "def add_link(\n self,\n url: str,\n label: Optional[str] = None,\n ) -> None:\n if not label:\n label = url\n self._client.add_element(\n Markdown(\n f\"[{label}]({url})\",\n on_tap_link=lambda e: self._client.page.launch_url(e.data),\n )\n )", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)", "def add(self, context, action, reward):\n\n if self.intercept:\n c = np.array(context[:])\n c = np.append(c, 1.0).reshape((1, self.context_dim + 1))\n else:\n c = np.array(context[:]).reshape((1, self.context_dim))\n\n if self.contexts is None:\n self.contexts = c\n else:\n self.contexts = np.vstack((self.contexts, c))\n\n r = np.zeros((1, self.num_actions))\n r[0, action] = reward\n if self.rewards is None:\n self.rewards = r\n else:\n self.rewards = np.vstack((self.rewards, r))\n\n self.actions.append(action)", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n board.empty_cell(action['action']['at'])\n reward += 1\n else:\n if action['action_type'] == YoteActionType.ADD:\n state.boring_moves += 1\n state.in_hand[player] -= 1\n board.fill_cell(action['action']['to'], Color(player))\n elif action['action_type'] == YoteActionType.MOVE:\n at = action['action']['at']\n to = action['action']['to']\n\n def distance(cell_1, cell_2):\n import math\n return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n board.empty_cell(at)\n board.fill_cell(to, Color(player))\n if int(distance(at, to)) == 1:\n state.boring_moves += 1\n 
elif int(distance(at, to)) > 1:\n state.boring_moves = 0\n next_is_reward = True\n board.fill_cell(to, Color(player))\n if at[0] == to[0] and at[1] < to[1]:\n board.empty_cell((at[0], at[1] + 1))\n captured = (at[0], at[1] + 1)\n elif at[0] == to[0] and at[1] > to[1]:\n board.empty_cell((at[0], at[1] - 1))\n captured = (at[0], at[1] - 1)\n elif at[1] == to[1] and at[0] < to[0]:\n board.empty_cell((at[0] + 1, at[1]))\n captured = (at[0] + 1, at[1])\n elif at[1] == to[1] and at[0] > to[0]:\n board.empty_cell((at[0] - 1, at[1]))\n captured = (at[0] - 1, at[1])\n reward += 1\n\n state.set_board(board)\n state.score[player] += reward\n state.captured = captured\n state.rewarding_move = next_is_reward\n state.previous_is_reward = previous_is_reward\n state.set_latest_player(player)\n state.set_latest_move(json_action)\n if next_is_reward:\n state.set_next_player(player)\n else:\n state.set_next_player(player * -1)\n\n done = YoteRules.is_end_game(state)\n return state, done, next_is_reward", "def link_to(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph)\n else:\n target_node = criterion_or_node\n return self.send(target_node, 'accept_link',\n originating_node=self.id)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, 
done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def strategy(self,\r\n opponent:Player,\r\n message=torch.zeros(10))->Action:\r\n #need to update name for posterity's sake\r\n self.finished_opponent = opponent.name\r\n #Regardless of intent for first few turns, do the base action.\r\n if len(self.history) == 0: return self.action_base\r\n \r\n #get overall reward\r\n self.reward = self.find_reward(opponent) \r\n\r\n # assess perceived intent message in opponent.sent_message\r\n self.intent_received_prev = self.intent_received\r\n self.intent_received = opponent.intent_sent\r\n self.assessment_prev = self.assessment\r\n self.assessment = self.assess_received_intent(opponent)#this is the estimate of what the opponent is doing\r\n \r\n # store for testing later\r\n self.list_reward.append(self.reward)\r\n self.list_intent_received.append(self.intent_received_prev)\r\n self.list_intent_sent.append(self.intent_sent_prev)\r\n self.list_intent_assessment.append(self.assessment_prev)\r\n self.list_intent_true.append(opponent.history[-1])\r\n \r\n # receive assessment and decide to stay with self.base_Action\r\n # OR change it to the other action. 
\r\n self.old_decision = self.decision\r\n self.decision = self.decide_based_on_new_intel(opponent) # what the opponent actually did last turn\r\n self.list_decision.append(self.old_decision)\r\n \r\n return self.decision", "def addReagentTargetedGene(\n self,\n reagent_id,\n gene_id,\n targeted_gene_id=None,\n targeted_gene_label=None,\n description=None,\n reagent_category=None\n ):\n\n # akin to a variant locus\n # is this some sort of pseudo bnode?\n if targeted_gene_id is None:\n targeted_gene_id = '_' + gene_id + '-' + reagent_id\n targeted_gene_id = targeted_gene_id.replace(\":\", \"\")\n self.model.addIndividualToGraph(\n targeted_gene_id,\n targeted_gene_label,\n self.globaltt['reagent_targeted_gene'],\n description,\n ind_category=reagent_category\n )\n\n if gene_id is not None:\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_expression_variant_of'], gene_id\n )\n\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_targeted_by'], reagent_id\n )", "def __init__(__self__, *,\n individual_outcome: Optional[pulumi.Input[Sequence[pulumi.Input['IndividualOutcomeArgs']]]] = None,\n roll_up: Optional[pulumi.Input['PrimaryStepRollUp']] = None):\n if individual_outcome is not None:\n pulumi.set(__self__, \"individual_outcome\", individual_outcome)\n if roll_up is not None:\n pulumi.set(__self__, \"roll_up\", roll_up)", "def _send_lti2_outcome(self):\r\n payload = textwrap.dedent(\"\"\"\r\n {{\r\n \"@context\" : \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\" : \"Result\",\r\n \"resultScore\" : {score},\r\n \"comment\" : \"This is awesome.\"\r\n }}\r\n \"\"\")\r\n data = payload.format(score=0.8)\r\n return self._send_lti2(data)", "def join(self, rewards_s3_path, obs_time_window=None, ratio=0.8, wait=True):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n if obs_time_window is None:\n logger.warning(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with all the observation data\"\n )\n obs_end_time = None\n obs_start_time = None\n else:\n logger.info(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with observation \"\n f\"data in the past {obs_time_window} hours\"\n )\n obs_end_time = datetime.utcnow()\n obs_start_time = obs_end_time - timedelta(hours=obs_time_window)\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n input_obs_data_s3_path = (\n f\"s3://{self.resource_manager.firehose_bucket}/{self.experiment_id}\"\n )\n input_obs_data_s3_path = f\"{input_obs_data_s3_path}/inference_data\"\n # init joining job, update join table\n logger.info(\"Creating resource for joining job...\")\n\n try:\n self.next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=input_obs_data_s3_path,\n obs_start_time=obs_start_time,\n obs_end_time=obs_end_time,\n input_reward_data_s3_path=rewards_s3_path,\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started joining job...\")\n self.next_join_job.start_join(ratio=ratio, wait=wait)\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n succeeded_state = (\n 
self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check Athena queries logs \"\n \"for more information.\"\n )", "def test__put_two_way_link_into():\n for input_value, defaults, expected_output in (\n (False, False, {}),\n (False, True, {'two_way_link': False}),\n (True, False, {'two_way_link': True}),\n ):\n data = put_two_way_link_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def _act_impl(self, observation, reward,\n done):\n if done:\n raise core.EpisodeDoneError(\"Called act on a done episode.\")\n\n if not self.observation_space.contains(observation):\n raise core.InvalidObservationError(\"Invalid ovservation: %s\" %\n observation)\n if self.params.observation_adjustment_fn:\n observation = self.params.observation_adjustment_fn(\n self.rng, self.beliefs, observation)\n\n features = self.feature_selection_fn(observation)\n self.beliefs = self._update_beliefs(features, self.beliefs)\n action = self._allocate(self._n_resource, self.beliefs)\n\n if not self.action_space.contains(action):\n raise gym.error.InvalidAction(\"Invalid action: %s\" % action)\n\n return action", "async def link(self, ctx: Context) -> None:\r\n try:\r\n params: List[str] = get_cmd_params(ctx)\r\n\r\n if len(params) < 1 or not params[0].isdigit() or int(params[0]) > len(self.yt_result.ids):\r\n await ctx.send(\"Please enter a valid video number from 0 to 5\")\r\n return\r\n\r\n self.yt_link = await self.message.edit(content=self.yt_result.get_link(int(params[0])))\r\n await ctx.message.delete()\r\n except Exception as e:\r\n await self.channels.log_error(e, \"ytl\")", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = 
self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def scoring_opportunities(self, scoring_opportunities):\n\n self._scoring_opportunities = scoring_opportunities", "def link_sample(self, other):\n with other.entry.nxfile:\n if 'sample' in self.entry:\n if 'sample' in other.entry:\n del other.entry['sample']\n other.entry.makelink(self.entry['sample'])", "async def cmd_galaddlinkuwl(self, ctx):\n\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('`Useage: [p]galaddlinkuwl <startoflink>, [Bot Owner] Adds a link from gallery link whitelist.`')\n \n # ===== ADD THE NEW LINKS TO THE WHITELIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) + set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await 
ctx.channel.send(content=\"{}\\n are already in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been added to the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def add_sample(self, state, action, state_new, reward, endstate, episode):\r\n new_sample = np.array([state, action, state_new, reward, endstate])\r\n if self.step_counter == 0 and episode == 0:\r\n self.experience_batch = new_sample\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # first sample twice in the batch to be able to index over the rows\r\n elif len(self.experience_batch) < self.experience_batch_size:\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # add new sample to batch when it is not full\r\n else:\r\n self.experience_batch[self.step_counter % self.experience_batch_size, :] = new_sample # override the components of the batch when it is full\r", "def actor_add_relation():\r\n\r\n data = get_request_data()\r\n if 'id' in data.keys():\r\n try:\r\n row_id = int(data['id'])\r\n relation_id = int(data['relation_id'])\r\n except:\r\n err = 'Id must be integer'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n obj = Movie.query.filter_by(id=relation_id).first()\r\n try:\r\n actor = Actor.add_relation(row_id, obj)\r\n rel_actor = {k: v for k, v in actor.__dict__.items() if k in ACTOR_FIELDS}\r\n rel_actor['filmography'] = str(actor.filmography)\r\n except:\r\n err = 'Record with such id does not exist'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n return make_response(jsonify(rel_actor), 200)\r\n\r\n else:\r\n err = 'No id specified'\r\n return make_response(jsonify(error=err), 400)", "def add_reward(self, choice, count=1):\n self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, \"%s:rewards\" % choice, count)\n self._choices = None", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority", "def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):\n\n # Keep track of total reward\n self.episode_reward += next_reward\n if self.verbose > 0:\n logger.debug(\n f\"Agent acknowledges receiving a reward of {next_reward}, episode reward so far {self.episode_reward}\"\n )\n\n # Update MCTS tree\n if not done:\n self.mcts_head = self.mcts_head.children[action]\n self.mcts_head.prune() # This updates the node.path\n\n # Train\n if self.training:\n return self._train(kwargs[\"log_prob\"])\n else:\n return 0.0", "def test_shows_issues_from_grouplink(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n 
GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def append(self, state, action, reward, done):\n assert state.shape == self._state_shape, \\\n 'Invalid state shape (required: %s, got: %s)' % (self._state_shape, state.shape)\n\n self._states[self._pos] = state\n self._actions[self._pos] = action\n self._rewards[self._pos] = reward\n self._terminals[self._pos] = done\n\n self._count = max(self._count, self._pos + 1)\n self._pos = (self._pos + 1) % self._max_size" ]
[ "0.717459", "0.7163847", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296", "0.42303044", "0.42303044", "0.4199248", "0.41974282", "0.41635618", "0.41283748", "0.4120942", "0.41087726", "0.41035667", "0.40950674", "0.408618", "0.40834093", "0.40813103", "0.40780374", "0.40770057", "0.4075737", "0.40686032", "0.40659428", "0.40503526", "0.40333", "0.40275708", "0.4023863", "0.4007657", "0.40069127", "0.40017104", "0.39986208", "0.39897925", "0.39840496", "0.39749613", "0.39745083", "0.3972324", "0.39586034", "0.39508075", "0.3946143", "0.39457676", "0.3932967", "0.39325184", "0.39297178", "0.39295676", "0.39276245", "0.39270565", "0.39229208", "0.39168912", "0.39096275", "0.3907056", "0.39025453", "0.39004102", "0.38994065", "0.38956124", "0.38943377", "0.38892362", "0.38891497", "0.38870266", "0.38820302", "0.38809907", "0.38804775", "0.38761795", "0.38738146", "0.3869238", "0.38686046", "0.38684267", "0.38673568", "0.38626942", "0.3862313", "0.3860057", "0.38582462", "0.38560754", "0.38555625", "0.38539255", "0.3853633" ]
0.7362988
0
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
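A minimal sketch of the rating-default rules described above, written as a hypothetical helper (not part of the API; the field names simply mirror the ratings and mastery_points parameters):

    # Hypothetical illustration of the documented defaults: a rating missing a
    # description gets "No description", a rating missing points gets 0, and an
    # unspecified mastery_points falls back to the highest rating's points.
    def apply_rating_defaults(ratings, mastery_points=None):
        if not ratings:
            return [], None  # with no ratings, the mastery_points parameter is ignored
        filled = [{'description': r.get('description', 'No description'),
                   'points': r.get('points', 0)} for r in ratings]
        if mastery_points is None:
            mastery_points = max(r['points'] for r in filled)
        return filled, mastery_points

    # e.g. apply_rating_defaults([{'points': 3}, {'description': 'Exceeds', 'points': 5}])
    # -> ([{'description': 'No description', 'points': 3},
    #      {'description': 'Exceeds', 'points': 5}], 5)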
def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):

    path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'
    payload = {
        'title' : title,
        'display_name' : display_name,
        'description' : description,
        'vendor_guid' : vendor_guid,
        'mastery_points' : mastery_points,
        'ratings[description]' : ratings_description,
        'ratings[points]' : ratings_points,
    }
    url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)
    response = client.put(request_ctx, url, payload=payload, **request_kwargs)

    return response
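A hedged usage sketch for the function above; request_ctx stands in for an already-configured client context (its construction is library-specific and assumed here), and the ids are illustrative:

    # Link the existing global outcome 42 into global outcome group 7 via PUT.
    # Because outcome_id identifies an existing outcome, the title,
    # description, and rating parameters would be ignored by the API.
    response = create_link_outcome_global_outcome_id(request_ctx, id=7, outcome_id=42)

To define a new outcome and link it in one call, the collection endpoint takes a POST with the title and rating fields instead (see create_link_outcome_global among the related functions below).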
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : 
ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in self.model.vocabulary[meaning]:\n 
self.model.vocabulary[meaning][word] = [self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = 
client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n 
AssignmentDurationInSeconds=final_duration,\r\n )\r\n additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s", "def addLink(self, name=None, **kwargs):\n if isinstance(name, rigmech):\n self.sym_prefix = name.sym_prefix + \"_\"\n self.addLink(\n name=name.name,\n mass=name.global_syms[\"mass\"],\n inertia=name.global_syms[\"Mq\"],\n origin_xyz=name.global_syms[\"xyz_com\"],\n )\n else:\n kwargs[\"name\"] = name\n LinkArgs = rigmech._check_field_inputs(\n \"addLink\", self._DefaultLinkFields, kwargs\n )\n self.Links[LinkArgs[\"name\"]] = LinkArgs", "def add_link(self, target, rel, title=None, title_star=None,\n anchor=None, hreflang=None, type_hint=None):\n\n # PERF(kgriffs): Heuristic to detect possiblity of an extension\n # relation type, in which case it will be a URL that may contain\n # reserved characters. 
Otherwise, don't waste time running the\n # string through uri.encode\n #\n # Example values for rel:\n #\n # \"next\"\n # \"http://example.com/ext-type\"\n # \"https://example.com/ext-type\"\n # \"alternate http://example.com/ext-type\"\n # \"http://example.com/ext-type alternate\"\n #\n if '//' in rel:\n if ' ' in rel:\n rel = ('\"' +\n ' '.join([uri.encode(r) for r in rel.split()]) +\n '\"')\n else:\n rel = '\"' + uri.encode(rel) + '\"'\n\n value = '<' + uri.encode(target) + '>; rel=' + rel\n\n if title is not None:\n value += '; title=\"' + title + '\"'\n\n if title_star is not None:\n value += (\"; title*=UTF-8'\" + title_star[0] + \"'\" +\n uri.encode_value(title_star[1]))\n\n if type_hint is not None:\n value += '; type=\"' + type_hint + '\"'\n\n if hreflang is not None:\n if isinstance(hreflang, six.string_types):\n value += '; hreflang=' + hreflang\n else:\n value += '; '\n value += '; '.join(['hreflang=' + lang for lang in hreflang])\n\n if anchor is not None:\n value += '; anchor=\"' + uri.encode(anchor) + '\"'\n\n _headers = self._headers\n if 'link' in _headers:\n _headers['link'] += ', ' + value\n else:\n _headers['link'] = value", "def reward(self, history_id, reward):\n pass", "def add(self, destination, kind):\n if destination in self.__links:\n raise SarasvatiException(\"Link to specified thought already exist\")\n if kind not in self.__correct_kinds:\n raise SarasvatiException(\"Link kind is not correct: \" + kind)\n if self.__source is destination:\n raise SarasvatiException(\"Unable link thought to itself\")\n link = Link(self.__source, destination, kind)\n return self.add_link(link)", "def add_outcome(self, node, cost=0, weight=1, classifier=None):\n\n if classifier is None:\n self.outcomes.append((Edge(self, node, cost=cost), weight))\n else:\n self.outcomes.append((Edge(self, node, cost=cost), classifier))", "def relate(a, b, **kwargs):\n return lib.relate(a, b, **kwargs)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def relate_object(self, obj):\n suffix = self._get_api_suffix(obj.__class__)\n endpoint = self._get_api_endpoint() + '/' + suffix\n obj_id = obj._id()\n results = self.tq.post(endpoint, data={'id': obj_id})\n\n results = results.get('data')\n if not results or 'pivot' not in results[0]:\n raise ActionFailedError('Relate indicators')", "def createPooledReward(self, name, rewardPoolId, product_key_name, instructions=None):\n param = {\"name\": name, product_key_name: 'pooled:%s' % rewardPoolId}\n if instructions:\n param[\"instructions\"] = instructions\n self.post_json('/reward', param)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = 
request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def get_reward(self, state, action, next_state, absorbing):\n raise NotImplementedError", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def append(self, state, action, reward, next_state=None, next_action=None,\n is_state_terminal=False):\n raise NotImplementedError", "def add_hyperlink(paragraph, url, text, color, underline):\r\n\r\n # This gets access to the document.xml.rels file and gets a new relation id value\r\n part = paragraph.part\r\n r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)\r\n\r\n # Create the w:hyperlink tag and add needed values\r\n hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')\r\n hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )\r\n\r\n # Create a w:r element\r\n new_run = docx.oxml.shared.OxmlElement('w:r')\r\n\r\n # Create a new w:rPr element\r\n rPr = docx.oxml.shared.OxmlElement('w:rPr')\r\n\r\n # Add color if it is given\r\n if not color is None:\r\n c = docx.oxml.shared.OxmlElement('w:color')\r\n c.set(docx.oxml.shared.qn('w:val'), color)\r\n rPr.append(c)\r\n\r\n # Remove underlining if it is requested\r\n if not underline:\r\n u = docx.oxml.shared.OxmlElement('w:u')\r\n u.set(docx.oxml.shared.qn('w:val'), 'none')\r\n rPr.append(u)\r\n\r\n # Join all the xml elements together add add the required text to the w:r element\r\n new_run.append(rPr)\r\n new_run.text = text\r\n hyperlink.append(new_run)\r\n\r\n paragraph._p.append(hyperlink)\r\n\r\n return hyperlink", "def add_link(self, link):\n raise NotImplementedError", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def link(self, s_id):\r\n\r\n # Take the link entires from TOML file\r\n schedules = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n if 'id' in self.schedules[-1]:\r\n payload['schedule'] = self.schedules[-1].get('id')\r\n if 'id' in self.workouts[-1]:\r\n payload['workout'] = self.workouts[-1].get('id')\r\n return self.add_post(payload, API.url_link, self.links)", "def post(self, category_id, name, description, weight, category_ref, sponsor_id):\n\t\tproperties = {\"id\": category_id, \"name\": name, \"description\": description, \"weight\": weight, \"categoryRef\": category_ref, \"sponsorId\": sponsor_id}\n\t\treturn self.service.post(self.base_uri, 
json.dumps(properties))", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"", "def reward(self, history_id, reward):\n reward_action = self._historystorage.unrewarded_histories[history_id].action\n reward_action_idx = self._actions.index(reward_action)\n context = self._historystorage.unrewarded_histories[history_id].context[reward_action_idx]\n context = np.matrix(context)\n\n # Update the model\n matrix_a = self._modelstorage.get_model()['matrix_a']\n matrix_ainv = self._modelstorage.get_model()['matrix_ainv']\n b = self._modelstorage.get_model()['b']\n theta = self._modelstorage.get_model()['theta']\n matrix_a[reward_action] += np.dot(context.T, context)\n matrix_ainv[reward_action] = np.linalg.solve(matrix_a[reward_action], np.identity(self.d))\n b[reward_action] += reward * context.T\n theta[reward_action] = np.dot(matrix_ainv[reward_action], b[reward_action])\n self._modelstorage.save_model({'matrix_a': matrix_a, 'matrix_ainv': matrix_ainv, 'b': b, 'theta': theta})\n\n # Update the history\n self._historystorage.add_reward(history_id, reward)", "def link_room(self, room_to_link, direction):\n self.linked_rooms[direction] = room_to_link\n # print(self.name + \" linked rooms :\" + repr(self.linked_rooms) )", "def add_new_event(self,\n event_type: str,\n event_datetime: str,\n covid_status: str = \"U\",\n death: int = 0,\n critical_care_admission: int = 0,\n component: str or None = None,\n source: str or None = None,\n source_type: str or None = None,\n wimd: int or None = None,\n **kwargs):\n # Parse datetime and check validity (None for date if invalid)\n event_datetime = parse_datetime(event_datetime)\n if event_datetime.get(\"date\") is None:\n err = f\"Datetime parsed when trying to generate a new outcome event for {self.patientId} was invalid!\"\n 
self._config.write_to_log(err)\n raise ValueError(err)\n # Create outcome document\n new_outcome = Event(patientId=self.patientId,\n eventType=event_type.strip(),\n eventDate=event_datetime.get(\"date\"),\n covidStatus=covid_status,\n death=death,\n criticalCareAdmission=critical_care_admission,\n **kwargs)\n # Populate with optional parameters if given\n new_outcome = _add_if_value(new_outcome, [(\"component\", component),\n (\"source\", source),\n (\"sourceType\", source_type),\n (\"wimd\", wimd),\n (\"eventTime\", event_datetime.get(\"time\"))])\n new_outcome = new_outcome.save()\n self.outcomeEvents.append(new_outcome)\n self.save()\n self._config.write_to_log(f\"Outcome event {new_outcome.id} for patient {self.patientId}\")", "def make_link(first, second):\n manager = Actions()\n manager.make_link(first, second)", "def add_sample(self, img, action, reward, terminal):\n self.imgs[:, :, self.top] = img\n self.actions[self.top] = action\n self.rewards[self.top] = reward\n self.terminal[self.top] = terminal\n\n if self.size == self.max_steps:\n self.bottom = (self.bottom + 1) % self.max_steps\n else:\n self.size += 1\n\n self.top = (self.top + 1) % self.max_steps", "async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def create_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN create_data_link\n duid, sna, update = _create_data_link_params(params)\n as_admin, user = _get_admin_request_from_object(params, 'as_admin', 'as_user')\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.FULL,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'create_data_link', ctx.log_info, as_user=user, skip_check=not as_admin)\n link = self._samples.create_data_link(\n user if user else _UserID(ctx[_CTX_USER]),\n duid,\n sna,\n update,\n as_admin=as_admin)\n results = {'new_link': _links_to_dicts([link])[0]}\n #END create_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method create_data_link return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]", "def related_url(self) -> pulumi.Output[Sequence['outputs.RelatedUrlResponse']]:\n return pulumi.get(self, \"related_url\")", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def get_outcome(self):\n if not self.is_paid:\n raise ValueError(\"There isn't an outcome.\")\n return self.team_a if self.outcome else self.team_b", "def PostReward(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
_publish_reward_topic(self, reward, steps, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)\n self.reward_list.append(reward)\n self.episode_list.append(episode_number)\n self.step_list.append(steps)\n list = str(reward) + \";\" + str(episode_number) + \";\" + str(steps) + \"\\n\"\n\n with open(self.csv_name + '.csv', 'a') as csv:\n csv.write(str(list))", "def add_link(\n self,\n url: str,\n label: Optional[str] = None,\n ) -> None:\n if not label:\n label = url\n self._client.add_element(\n Markdown(\n f\"[{label}]({url})\",\n on_tap_link=lambda e: self._client.page.launch_url(e.data),\n )\n )", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)", "def add(self, context, action, reward):\n\n if self.intercept:\n c = np.array(context[:])\n c = np.append(c, 1.0).reshape((1, self.context_dim + 1))\n else:\n c = np.array(context[:]).reshape((1, self.context_dim))\n\n if self.contexts is None:\n self.contexts = c\n else:\n self.contexts = np.vstack((self.contexts, c))\n\n r = np.zeros((1, self.num_actions))\n r[0, action] = reward\n if self.rewards is None:\n self.rewards = r\n else:\n self.rewards = np.vstack((self.rewards, r))\n\n self.actions.append(action)", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n board.empty_cell(action['action']['at'])\n reward += 1\n else:\n if action['action_type'] == YoteActionType.ADD:\n state.boring_moves += 1\n state.in_hand[player] -= 1\n board.fill_cell(action['action']['to'], Color(player))\n elif action['action_type'] == YoteActionType.MOVE:\n at = action['action']['at']\n to = action['action']['to']\n\n def distance(cell_1, cell_2):\n import math\n return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n board.empty_cell(at)\n board.fill_cell(to, Color(player))\n if int(distance(at, to)) == 1:\n state.boring_moves += 1\n 
elif int(distance(at, to)) > 1:\n state.boring_moves = 0\n next_is_reward = True\n board.fill_cell(to, Color(player))\n if at[0] == to[0] and at[1] < to[1]:\n board.empty_cell((at[0], at[1] + 1))\n captured = (at[0], at[1] + 1)\n elif at[0] == to[0] and at[1] > to[1]:\n board.empty_cell((at[0], at[1] - 1))\n captured = (at[0], at[1] - 1)\n elif at[1] == to[1] and at[0] < to[0]:\n board.empty_cell((at[0] + 1, at[1]))\n captured = (at[0] + 1, at[1])\n elif at[1] == to[1] and at[0] > to[0]:\n board.empty_cell((at[0] - 1, at[1]))\n captured = (at[0] - 1, at[1])\n reward += 1\n\n state.set_board(board)\n state.score[player] += reward\n state.captured = captured\n state.rewarding_move = next_is_reward\n state.previous_is_reward = previous_is_reward\n state.set_latest_player(player)\n state.set_latest_move(json_action)\n if next_is_reward:\n state.set_next_player(player)\n else:\n state.set_next_player(player * -1)\n\n done = YoteRules.is_end_game(state)\n return state, done, next_is_reward", "def link_to(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph)\n else:\n target_node = criterion_or_node\n return self.send(target_node, 'accept_link',\n originating_node=self.id)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, 
done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def strategy(self,\r\n opponent:Player,\r\n message=torch.zeros(10))->Action:\r\n #need to update name for posterity's sake\r\n self.finished_opponent = opponent.name\r\n #Regardless of intent for first few turns, do the base action.\r\n if len(self.history) == 0: return self.action_base\r\n \r\n #get overall reward\r\n self.reward = self.find_reward(opponent) \r\n\r\n # assess perceived intent message in opponent.sent_message\r\n self.intent_received_prev = self.intent_received\r\n self.intent_received = opponent.intent_sent\r\n self.assessment_prev = self.assessment\r\n self.assessment = self.assess_received_intent(opponent)#this is the estimate of what the opponent is doing\r\n \r\n # store for testing later\r\n self.list_reward.append(self.reward)\r\n self.list_intent_received.append(self.intent_received_prev)\r\n self.list_intent_sent.append(self.intent_sent_prev)\r\n self.list_intent_assessment.append(self.assessment_prev)\r\n self.list_intent_true.append(opponent.history[-1])\r\n \r\n # receive assessment and decide to stay with self.base_Action\r\n # OR change it to the other action. 
\r\n self.old_decision = self.decision\r\n self.decision = self.decide_based_on_new_intel(opponent) # what the opponent actually did last turn\r\n self.list_decision.append(self.old_decision)\r\n \r\n return self.decision", "def addReagentTargetedGene(\n self,\n reagent_id,\n gene_id,\n targeted_gene_id=None,\n targeted_gene_label=None,\n description=None,\n reagent_category=None\n ):\n\n # akin to a variant locus\n # is this some sort of pseudo bnode?\n if targeted_gene_id is None:\n targeted_gene_id = '_' + gene_id + '-' + reagent_id\n targeted_gene_id = targeted_gene_id.replace(\":\", \"\")\n self.model.addIndividualToGraph(\n targeted_gene_id,\n targeted_gene_label,\n self.globaltt['reagent_targeted_gene'],\n description,\n ind_category=reagent_category\n )\n\n if gene_id is not None:\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_expression_variant_of'], gene_id\n )\n\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_targeted_by'], reagent_id\n )", "def __init__(__self__, *,\n individual_outcome: Optional[pulumi.Input[Sequence[pulumi.Input['IndividualOutcomeArgs']]]] = None,\n roll_up: Optional[pulumi.Input['PrimaryStepRollUp']] = None):\n if individual_outcome is not None:\n pulumi.set(__self__, \"individual_outcome\", individual_outcome)\n if roll_up is not None:\n pulumi.set(__self__, \"roll_up\", roll_up)", "def _send_lti2_outcome(self):\r\n payload = textwrap.dedent(\"\"\"\r\n {{\r\n \"@context\" : \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\" : \"Result\",\r\n \"resultScore\" : {score},\r\n \"comment\" : \"This is awesome.\"\r\n }}\r\n \"\"\")\r\n data = payload.format(score=0.8)\r\n return self._send_lti2(data)", "def join(self, rewards_s3_path, obs_time_window=None, ratio=0.8, wait=True):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n if obs_time_window is None:\n logger.warning(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with all the observation data\"\n )\n obs_end_time = None\n obs_start_time = None\n else:\n logger.info(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with observation \"\n f\"data in the past {obs_time_window} hours\"\n )\n obs_end_time = datetime.utcnow()\n obs_start_time = obs_end_time - timedelta(hours=obs_time_window)\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n input_obs_data_s3_path = (\n f\"s3://{self.resource_manager.firehose_bucket}/{self.experiment_id}\"\n )\n input_obs_data_s3_path = f\"{input_obs_data_s3_path}/inference_data\"\n # init joining job, update join table\n logger.info(\"Creating resource for joining job...\")\n\n try:\n self.next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=input_obs_data_s3_path,\n obs_start_time=obs_start_time,\n obs_end_time=obs_end_time,\n input_reward_data_s3_path=rewards_s3_path,\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started joining job...\")\n self.next_join_job.start_join(ratio=ratio, wait=wait)\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n succeeded_state = (\n 
self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check Athena queries logs \"\n \"for more information.\"\n )", "def test__put_two_way_link_into():\n for input_value, defaults, expected_output in (\n (False, False, {}),\n (False, True, {'two_way_link': False}),\n (True, False, {'two_way_link': True}),\n ):\n data = put_two_way_link_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def _act_impl(self, observation, reward,\n done):\n if done:\n raise core.EpisodeDoneError(\"Called act on a done episode.\")\n\n if not self.observation_space.contains(observation):\n raise core.InvalidObservationError(\"Invalid ovservation: %s\" %\n observation)\n if self.params.observation_adjustment_fn:\n observation = self.params.observation_adjustment_fn(\n self.rng, self.beliefs, observation)\n\n features = self.feature_selection_fn(observation)\n self.beliefs = self._update_beliefs(features, self.beliefs)\n action = self._allocate(self._n_resource, self.beliefs)\n\n if not self.action_space.contains(action):\n raise gym.error.InvalidAction(\"Invalid action: %s\" % action)\n\n return action", "async def link(self, ctx: Context) -> None:\r\n try:\r\n params: List[str] = get_cmd_params(ctx)\r\n\r\n if len(params) < 1 or not params[0].isdigit() or int(params[0]) > len(self.yt_result.ids):\r\n await ctx.send(\"Please enter a valid video number from 0 to 5\")\r\n return\r\n\r\n self.yt_link = await self.message.edit(content=self.yt_result.get_link(int(params[0])))\r\n await ctx.message.delete()\r\n except Exception as e:\r\n await self.channels.log_error(e, \"ytl\")", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = 
self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def scoring_opportunities(self, scoring_opportunities):\n\n self._scoring_opportunities = scoring_opportunities", "def link_sample(self, other):\n with other.entry.nxfile:\n if 'sample' in self.entry:\n if 'sample' in other.entry:\n del other.entry['sample']\n other.entry.makelink(self.entry['sample'])", "async def cmd_galaddlinkuwl(self, ctx):\n\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('`Useage: [p]galaddlinkuwl <startoflink>, [Bot Owner] Adds a link from gallery link whitelist.`')\n \n # ===== ADD THE NEW LINKS TO THE WHITELIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) + set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await 
ctx.channel.send(content=\"{}\\n are already in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been added to the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def add_sample(self, state, action, state_new, reward, endstate, episode):\r\n new_sample = np.array([state, action, state_new, reward, endstate])\r\n if self.step_counter == 0 and episode == 0:\r\n self.experience_batch = new_sample\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # first sample twice in the batch to be able to index over the rows\r\n elif len(self.experience_batch) < self.experience_batch_size:\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # add new sample to batch when it is not full\r\n else:\r\n self.experience_batch[self.step_counter % self.experience_batch_size, :] = new_sample # override the components of the batch when it is full\r", "def actor_add_relation():\r\n\r\n data = get_request_data()\r\n if 'id' in data.keys():\r\n try:\r\n row_id = int(data['id'])\r\n relation_id = int(data['relation_id'])\r\n except:\r\n err = 'Id must be integer'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n obj = Movie.query.filter_by(id=relation_id).first()\r\n try:\r\n actor = Actor.add_relation(row_id, obj)\r\n rel_actor = {k: v for k, v in actor.__dict__.items() if k in ACTOR_FIELDS}\r\n rel_actor['filmography'] = str(actor.filmography)\r\n except:\r\n err = 'Record with such id does not exist'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n return make_response(jsonify(rel_actor), 200)\r\n\r\n else:\r\n err = 'No id specified'\r\n return make_response(jsonify(error=err), 400)", "def add_reward(self, choice, count=1):\n self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, \"%s:rewards\" % choice, count)\n self._choices = None", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority", "def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):\n\n # Keep track of total reward\n self.episode_reward += next_reward\n if self.verbose > 0:\n logger.debug(\n f\"Agent acknowledges receiving a reward of {next_reward}, episode reward so far {self.episode_reward}\"\n )\n\n # Update MCTS tree\n if not done:\n self.mcts_head = self.mcts_head.children[action]\n self.mcts_head.prune() # This updates the node.path\n\n # Train\n if self.training:\n return self._train(kwargs[\"log_prob\"])\n else:\n return 0.0", "def test_shows_issues_from_grouplink(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n 
GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def append(self, state, action, reward, done):\n assert state.shape == self._state_shape, \\\n 'Invalid state shape (required: %s, got: %s)' % (self._state_shape, state.shape)\n\n self._states[self._pos] = state\n self._actions[self._pos] = action\n self._rewards[self._pos] = reward\n self._terminals[self._pos] = done\n\n self._count = max(self._count, self._pos + 1)\n self._pos = (self._pos + 1) % self._max_size" ]
[ "0.7362988", "0.7163847", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296", "0.42303044", "0.42303044", "0.4199248", "0.41974282", "0.41635618", "0.41283748", "0.4120942", "0.41087726", "0.41035667", "0.40950674", "0.408618", "0.40834093", "0.40813103", "0.40780374", "0.40770057", "0.4075737", "0.40686032", "0.40659428", "0.40503526", "0.40333", "0.40275708", "0.4023863", "0.4007657", "0.40069127", "0.40017104", "0.39986208", "0.39897925", "0.39840496", "0.39749613", "0.39745083", "0.3972324", "0.39586034", "0.39508075", "0.3946143", "0.39457676", "0.3932967", "0.39325184", "0.39297178", "0.39295676", "0.39276245", "0.39270565", "0.39229208", "0.39168912", "0.39096275", "0.3907056", "0.39025453", "0.39004102", "0.38994065", "0.38956124", "0.38943377", "0.38892362", "0.38891497", "0.38870266", "0.38820302", "0.38809907", "0.38804775", "0.38761795", "0.38738146", "0.3869238", "0.38686046", "0.38684267", "0.38673568", "0.38626942", "0.3862313", "0.3860057", "0.38582462", "0.38560754", "0.38555625", "0.38539255", "0.3853633" ]
0.717459
1
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection.

If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored.

If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group.

If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):

    path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'
    payload = {
        'outcome_id' : outcome_id,
        'title' : title,
        'display_name' : display_name,
        'description' : description,
        'vendor_guid' : vendor_guid,
        'mastery_points' : mastery_points,
        'ratings[description]' : ratings_description,
        'ratings[points]' : ratings_points,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
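A minimal usage sketch for the function above, assuming `request_ctx` is an authenticated request context exposing `base_api_url` (as the other functions in this record expect); the account, group, and outcome ids and the outcome fields are hypothetical placeholders, not values from the dataset.

# Minimal sketch; `request_ctx` and all ids below are assumed placeholders.

# Link an existing outcome (outcome_id=42) into outcome group 7 of account 1.
# With outcome_id present, the remaining outcome parameters are ignored.
response = create_link_outcome_accounts(request_ctx, account_id=1, id=7, outcome_id=42)

# Or create and link a new outcome in one call; title is required,
# all other outcome fields (description, ratings, mastery_points) are optional.
response = create_link_outcome_accounts(
    request_ctx,
    account_id=1,
    id=7,
    title='Critical Thinking',
    description='Evaluates evidence before drawing conclusions.',
    mastery_points=3,
)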
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' 
: ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in self.model.vocabulary[meaning]:\n self.model.vocabulary[meaning][word] = 
[self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, 
**request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n AssignmentDurationInSeconds=final_duration,\r\n )\r\n 
additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s", "def addLink(self, name=None, **kwargs):\n if isinstance(name, rigmech):\n self.sym_prefix = name.sym_prefix + \"_\"\n self.addLink(\n name=name.name,\n mass=name.global_syms[\"mass\"],\n inertia=name.global_syms[\"Mq\"],\n origin_xyz=name.global_syms[\"xyz_com\"],\n )\n else:\n kwargs[\"name\"] = name\n LinkArgs = rigmech._check_field_inputs(\n \"addLink\", self._DefaultLinkFields, kwargs\n )\n self.Links[LinkArgs[\"name\"]] = LinkArgs", "def add_link(self, target, rel, title=None, title_star=None,\n anchor=None, hreflang=None, type_hint=None):\n\n # PERF(kgriffs): Heuristic to detect possiblity of an extension\n # relation type, in which case it will be a URL that may contain\n # reserved characters. 
Otherwise, don't waste time running the\n # string through uri.encode\n #\n # Example values for rel:\n #\n # \"next\"\n # \"http://example.com/ext-type\"\n # \"https://example.com/ext-type\"\n # \"alternate http://example.com/ext-type\"\n # \"http://example.com/ext-type alternate\"\n #\n if '//' in rel:\n if ' ' in rel:\n rel = ('\"' +\n ' '.join([uri.encode(r) for r in rel.split()]) +\n '\"')\n else:\n rel = '\"' + uri.encode(rel) + '\"'\n\n value = '<' + uri.encode(target) + '>; rel=' + rel\n\n if title is not None:\n value += '; title=\"' + title + '\"'\n\n if title_star is not None:\n value += (\"; title*=UTF-8'\" + title_star[0] + \"'\" +\n uri.encode_value(title_star[1]))\n\n if type_hint is not None:\n value += '; type=\"' + type_hint + '\"'\n\n if hreflang is not None:\n if isinstance(hreflang, six.string_types):\n value += '; hreflang=' + hreflang\n else:\n value += '; '\n value += '; '.join(['hreflang=' + lang for lang in hreflang])\n\n if anchor is not None:\n value += '; anchor=\"' + uri.encode(anchor) + '\"'\n\n _headers = self._headers\n if 'link' in _headers:\n _headers['link'] += ', ' + value\n else:\n _headers['link'] = value", "def reward(self, history_id, reward):\n pass", "def add(self, destination, kind):\n if destination in self.__links:\n raise SarasvatiException(\"Link to specified thought already exist\")\n if kind not in self.__correct_kinds:\n raise SarasvatiException(\"Link kind is not correct: \" + kind)\n if self.__source is destination:\n raise SarasvatiException(\"Unable link thought to itself\")\n link = Link(self.__source, destination, kind)\n return self.add_link(link)", "def add_outcome(self, node, cost=0, weight=1, classifier=None):\n\n if classifier is None:\n self.outcomes.append((Edge(self, node, cost=cost), weight))\n else:\n self.outcomes.append((Edge(self, node, cost=cost), classifier))", "def relate(a, b, **kwargs):\n return lib.relate(a, b, **kwargs)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def relate_object(self, obj):\n suffix = self._get_api_suffix(obj.__class__)\n endpoint = self._get_api_endpoint() + '/' + suffix\n obj_id = obj._id()\n results = self.tq.post(endpoint, data={'id': obj_id})\n\n results = results.get('data')\n if not results or 'pivot' not in results[0]:\n raise ActionFailedError('Relate indicators')", "def createPooledReward(self, name, rewardPoolId, product_key_name, instructions=None):\n param = {\"name\": name, product_key_name: 'pooled:%s' % rewardPoolId}\n if instructions:\n param[\"instructions\"] = instructions\n self.post_json('/reward', param)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = 
request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def get_reward(self, state, action, next_state, absorbing):\n raise NotImplementedError", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def append(self, state, action, reward, next_state=None, next_action=None,\n is_state_terminal=False):\n raise NotImplementedError", "def add_hyperlink(paragraph, url, text, color, underline):\r\n\r\n # This gets access to the document.xml.rels file and gets a new relation id value\r\n part = paragraph.part\r\n r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)\r\n\r\n # Create the w:hyperlink tag and add needed values\r\n hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')\r\n hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )\r\n\r\n # Create a w:r element\r\n new_run = docx.oxml.shared.OxmlElement('w:r')\r\n\r\n # Create a new w:rPr element\r\n rPr = docx.oxml.shared.OxmlElement('w:rPr')\r\n\r\n # Add color if it is given\r\n if not color is None:\r\n c = docx.oxml.shared.OxmlElement('w:color')\r\n c.set(docx.oxml.shared.qn('w:val'), color)\r\n rPr.append(c)\r\n\r\n # Remove underlining if it is requested\r\n if not underline:\r\n u = docx.oxml.shared.OxmlElement('w:u')\r\n u.set(docx.oxml.shared.qn('w:val'), 'none')\r\n rPr.append(u)\r\n\r\n # Join all the xml elements together add add the required text to the w:r element\r\n new_run.append(rPr)\r\n new_run.text = text\r\n hyperlink.append(new_run)\r\n\r\n paragraph._p.append(hyperlink)\r\n\r\n return hyperlink", "def add_link(self, link):\n raise NotImplementedError", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def link(self, s_id):\r\n\r\n # Take the link entires from TOML file\r\n schedules = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n if 'id' in self.schedules[-1]:\r\n payload['schedule'] = self.schedules[-1].get('id')\r\n if 'id' in self.workouts[-1]:\r\n payload['workout'] = self.workouts[-1].get('id')\r\n return self.add_post(payload, API.url_link, self.links)", "def post(self, category_id, name, description, weight, category_ref, sponsor_id):\n\t\tproperties = {\"id\": category_id, \"name\": name, \"description\": description, \"weight\": weight, \"categoryRef\": category_ref, \"sponsorId\": sponsor_id}\n\t\treturn self.service.post(self.base_uri, 
json.dumps(properties))", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"", "def reward(self, history_id, reward):\n reward_action = self._historystorage.unrewarded_histories[history_id].action\n reward_action_idx = self._actions.index(reward_action)\n context = self._historystorage.unrewarded_histories[history_id].context[reward_action_idx]\n context = np.matrix(context)\n\n # Update the model\n matrix_a = self._modelstorage.get_model()['matrix_a']\n matrix_ainv = self._modelstorage.get_model()['matrix_ainv']\n b = self._modelstorage.get_model()['b']\n theta = self._modelstorage.get_model()['theta']\n matrix_a[reward_action] += np.dot(context.T, context)\n matrix_ainv[reward_action] = np.linalg.solve(matrix_a[reward_action], np.identity(self.d))\n b[reward_action] += reward * context.T\n theta[reward_action] = np.dot(matrix_ainv[reward_action], b[reward_action])\n self._modelstorage.save_model({'matrix_a': matrix_a, 'matrix_ainv': matrix_ainv, 'b': b, 'theta': theta})\n\n # Update the history\n self._historystorage.add_reward(history_id, reward)", "def link_room(self, room_to_link, direction):\n self.linked_rooms[direction] = room_to_link\n # print(self.name + \" linked rooms :\" + repr(self.linked_rooms) )", "def add_new_event(self,\n event_type: str,\n event_datetime: str,\n covid_status: str = \"U\",\n death: int = 0,\n critical_care_admission: int = 0,\n component: str or None = None,\n source: str or None = None,\n source_type: str or None = None,\n wimd: int or None = None,\n **kwargs):\n # Parse datetime and check validity (None for date if invalid)\n event_datetime = parse_datetime(event_datetime)\n if event_datetime.get(\"date\") is None:\n err = f\"Datetime parsed when trying to generate a new outcome event for {self.patientId} was invalid!\"\n 
self._config.write_to_log(err)\n raise ValueError(err)\n # Create outcome document\n new_outcome = Event(patientId=self.patientId,\n eventType=event_type.strip(),\n eventDate=event_datetime.get(\"date\"),\n covidStatus=covid_status,\n death=death,\n criticalCareAdmission=critical_care_admission,\n **kwargs)\n # Populate with optional parameters if given\n new_outcome = _add_if_value(new_outcome, [(\"component\", component),\n (\"source\", source),\n (\"sourceType\", source_type),\n (\"wimd\", wimd),\n (\"eventTime\", event_datetime.get(\"time\"))])\n new_outcome = new_outcome.save()\n self.outcomeEvents.append(new_outcome)\n self.save()\n self._config.write_to_log(f\"Outcome event {new_outcome.id} for patient {self.patientId}\")", "def make_link(first, second):\n manager = Actions()\n manager.make_link(first, second)", "def add_sample(self, img, action, reward, terminal):\n self.imgs[:, :, self.top] = img\n self.actions[self.top] = action\n self.rewards[self.top] = reward\n self.terminal[self.top] = terminal\n\n if self.size == self.max_steps:\n self.bottom = (self.bottom + 1) % self.max_steps\n else:\n self.size += 1\n\n self.top = (self.top + 1) % self.max_steps", "async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def create_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN create_data_link\n duid, sna, update = _create_data_link_params(params)\n as_admin, user = _get_admin_request_from_object(params, 'as_admin', 'as_user')\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.FULL,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'create_data_link', ctx.log_info, as_user=user, skip_check=not as_admin)\n link = self._samples.create_data_link(\n user if user else _UserID(ctx[_CTX_USER]),\n duid,\n sna,\n update,\n as_admin=as_admin)\n results = {'new_link': _links_to_dicts([link])[0]}\n #END create_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method create_data_link return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]", "def related_url(self) -> pulumi.Output[Sequence['outputs.RelatedUrlResponse']]:\n return pulumi.get(self, \"related_url\")", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def get_outcome(self):\n if not self.is_paid:\n raise ValueError(\"There isn't an outcome.\")\n return self.team_a if self.outcome else self.team_b", "def PostReward(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
_publish_reward_topic(self, reward, steps, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)\n self.reward_list.append(reward)\n self.episode_list.append(episode_number)\n self.step_list.append(steps)\n list = str(reward) + \";\" + str(episode_number) + \";\" + str(steps) + \"\\n\"\n\n with open(self.csv_name + '.csv', 'a') as csv:\n csv.write(str(list))", "def add_link(\n self,\n url: str,\n label: Optional[str] = None,\n ) -> None:\n if not label:\n label = url\n self._client.add_element(\n Markdown(\n f\"[{label}]({url})\",\n on_tap_link=lambda e: self._client.page.launch_url(e.data),\n )\n )", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)", "def add(self, context, action, reward):\n\n if self.intercept:\n c = np.array(context[:])\n c = np.append(c, 1.0).reshape((1, self.context_dim + 1))\n else:\n c = np.array(context[:]).reshape((1, self.context_dim))\n\n if self.contexts is None:\n self.contexts = c\n else:\n self.contexts = np.vstack((self.contexts, c))\n\n r = np.zeros((1, self.num_actions))\n r[0, action] = reward\n if self.rewards is None:\n self.rewards = r\n else:\n self.rewards = np.vstack((self.rewards, r))\n\n self.actions.append(action)", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n board.empty_cell(action['action']['at'])\n reward += 1\n else:\n if action['action_type'] == YoteActionType.ADD:\n state.boring_moves += 1\n state.in_hand[player] -= 1\n board.fill_cell(action['action']['to'], Color(player))\n elif action['action_type'] == YoteActionType.MOVE:\n at = action['action']['at']\n to = action['action']['to']\n\n def distance(cell_1, cell_2):\n import math\n return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n board.empty_cell(at)\n board.fill_cell(to, Color(player))\n if int(distance(at, to)) == 1:\n state.boring_moves += 1\n 
elif int(distance(at, to)) > 1:\n state.boring_moves = 0\n next_is_reward = True\n board.fill_cell(to, Color(player))\n if at[0] == to[0] and at[1] < to[1]:\n board.empty_cell((at[0], at[1] + 1))\n captured = (at[0], at[1] + 1)\n elif at[0] == to[0] and at[1] > to[1]:\n board.empty_cell((at[0], at[1] - 1))\n captured = (at[0], at[1] - 1)\n elif at[1] == to[1] and at[0] < to[0]:\n board.empty_cell((at[0] + 1, at[1]))\n captured = (at[0] + 1, at[1])\n elif at[1] == to[1] and at[0] > to[0]:\n board.empty_cell((at[0] - 1, at[1]))\n captured = (at[0] - 1, at[1])\n reward += 1\n\n state.set_board(board)\n state.score[player] += reward\n state.captured = captured\n state.rewarding_move = next_is_reward\n state.previous_is_reward = previous_is_reward\n state.set_latest_player(player)\n state.set_latest_move(json_action)\n if next_is_reward:\n state.set_next_player(player)\n else:\n state.set_next_player(player * -1)\n\n done = YoteRules.is_end_game(state)\n return state, done, next_is_reward", "def link_to(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph)\n else:\n target_node = criterion_or_node\n return self.send(target_node, 'accept_link',\n originating_node=self.id)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, 
done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def strategy(self,\r\n opponent:Player,\r\n message=torch.zeros(10))->Action:\r\n #need to update name for posterity's sake\r\n self.finished_opponent = opponent.name\r\n #Regardless of intent for first few turns, do the base action.\r\n if len(self.history) == 0: return self.action_base\r\n \r\n #get overall reward\r\n self.reward = self.find_reward(opponent) \r\n\r\n # assess perceived intent message in opponent.sent_message\r\n self.intent_received_prev = self.intent_received\r\n self.intent_received = opponent.intent_sent\r\n self.assessment_prev = self.assessment\r\n self.assessment = self.assess_received_intent(opponent)#this is the estimate of what the opponent is doing\r\n \r\n # store for testing later\r\n self.list_reward.append(self.reward)\r\n self.list_intent_received.append(self.intent_received_prev)\r\n self.list_intent_sent.append(self.intent_sent_prev)\r\n self.list_intent_assessment.append(self.assessment_prev)\r\n self.list_intent_true.append(opponent.history[-1])\r\n \r\n # receive assessment and decide to stay with self.base_Action\r\n # OR change it to the other action. 
\r\n self.old_decision = self.decision\r\n self.decision = self.decide_based_on_new_intel(opponent) # what the opponent actually did last turn\r\n self.list_decision.append(self.old_decision)\r\n \r\n return self.decision", "def addReagentTargetedGene(\n self,\n reagent_id,\n gene_id,\n targeted_gene_id=None,\n targeted_gene_label=None,\n description=None,\n reagent_category=None\n ):\n\n # akin to a variant locus\n # is this some sort of pseudo bnode?\n if targeted_gene_id is None:\n targeted_gene_id = '_' + gene_id + '-' + reagent_id\n targeted_gene_id = targeted_gene_id.replace(\":\", \"\")\n self.model.addIndividualToGraph(\n targeted_gene_id,\n targeted_gene_label,\n self.globaltt['reagent_targeted_gene'],\n description,\n ind_category=reagent_category\n )\n\n if gene_id is not None:\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_expression_variant_of'], gene_id\n )\n\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_targeted_by'], reagent_id\n )", "def __init__(__self__, *,\n individual_outcome: Optional[pulumi.Input[Sequence[pulumi.Input['IndividualOutcomeArgs']]]] = None,\n roll_up: Optional[pulumi.Input['PrimaryStepRollUp']] = None):\n if individual_outcome is not None:\n pulumi.set(__self__, \"individual_outcome\", individual_outcome)\n if roll_up is not None:\n pulumi.set(__self__, \"roll_up\", roll_up)", "def _send_lti2_outcome(self):\r\n payload = textwrap.dedent(\"\"\"\r\n {{\r\n \"@context\" : \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\" : \"Result\",\r\n \"resultScore\" : {score},\r\n \"comment\" : \"This is awesome.\"\r\n }}\r\n \"\"\")\r\n data = payload.format(score=0.8)\r\n return self._send_lti2(data)", "def join(self, rewards_s3_path, obs_time_window=None, ratio=0.8, wait=True):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n if obs_time_window is None:\n logger.warning(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with all the observation data\"\n )\n obs_end_time = None\n obs_start_time = None\n else:\n logger.info(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with observation \"\n f\"data in the past {obs_time_window} hours\"\n )\n obs_end_time = datetime.utcnow()\n obs_start_time = obs_end_time - timedelta(hours=obs_time_window)\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n input_obs_data_s3_path = (\n f\"s3://{self.resource_manager.firehose_bucket}/{self.experiment_id}\"\n )\n input_obs_data_s3_path = f\"{input_obs_data_s3_path}/inference_data\"\n # init joining job, update join table\n logger.info(\"Creating resource for joining job...\")\n\n try:\n self.next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=input_obs_data_s3_path,\n obs_start_time=obs_start_time,\n obs_end_time=obs_end_time,\n input_reward_data_s3_path=rewards_s3_path,\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started joining job...\")\n self.next_join_job.start_join(ratio=ratio, wait=wait)\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n succeeded_state = (\n 
self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check Athena queries logs \"\n \"for more information.\"\n )", "def test__put_two_way_link_into():\n for input_value, defaults, expected_output in (\n (False, False, {}),\n (False, True, {'two_way_link': False}),\n (True, False, {'two_way_link': True}),\n ):\n data = put_two_way_link_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def _act_impl(self, observation, reward,\n done):\n if done:\n raise core.EpisodeDoneError(\"Called act on a done episode.\")\n\n if not self.observation_space.contains(observation):\n raise core.InvalidObservationError(\"Invalid ovservation: %s\" %\n observation)\n if self.params.observation_adjustment_fn:\n observation = self.params.observation_adjustment_fn(\n self.rng, self.beliefs, observation)\n\n features = self.feature_selection_fn(observation)\n self.beliefs = self._update_beliefs(features, self.beliefs)\n action = self._allocate(self._n_resource, self.beliefs)\n\n if not self.action_space.contains(action):\n raise gym.error.InvalidAction(\"Invalid action: %s\" % action)\n\n return action", "async def link(self, ctx: Context) -> None:\r\n try:\r\n params: List[str] = get_cmd_params(ctx)\r\n\r\n if len(params) < 1 or not params[0].isdigit() or int(params[0]) > len(self.yt_result.ids):\r\n await ctx.send(\"Please enter a valid video number from 0 to 5\")\r\n return\r\n\r\n self.yt_link = await self.message.edit(content=self.yt_result.get_link(int(params[0])))\r\n await ctx.message.delete()\r\n except Exception as e:\r\n await self.channels.log_error(e, \"ytl\")", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = 
self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def scoring_opportunities(self, scoring_opportunities):\n\n self._scoring_opportunities = scoring_opportunities", "def link_sample(self, other):\n with other.entry.nxfile:\n if 'sample' in self.entry:\n if 'sample' in other.entry:\n del other.entry['sample']\n other.entry.makelink(self.entry['sample'])", "async def cmd_galaddlinkuwl(self, ctx):\n\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('`Useage: [p]galaddlinkuwl <startoflink>, [Bot Owner] Adds a link from gallery link whitelist.`')\n \n # ===== ADD THE NEW LINKS TO THE WHITELIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) + set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await 
ctx.channel.send(content=\"{}\\n are already in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been added to the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def add_sample(self, state, action, state_new, reward, endstate, episode):\r\n new_sample = np.array([state, action, state_new, reward, endstate])\r\n if self.step_counter == 0 and episode == 0:\r\n self.experience_batch = new_sample\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # first sample twice in the batch to be able to index over the rows\r\n elif len(self.experience_batch) < self.experience_batch_size:\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # add new sample to batch when it is not full\r\n else:\r\n self.experience_batch[self.step_counter % self.experience_batch_size, :] = new_sample # override the components of the batch when it is full\r", "def actor_add_relation():\r\n\r\n data = get_request_data()\r\n if 'id' in data.keys():\r\n try:\r\n row_id = int(data['id'])\r\n relation_id = int(data['relation_id'])\r\n except:\r\n err = 'Id must be integer'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n obj = Movie.query.filter_by(id=relation_id).first()\r\n try:\r\n actor = Actor.add_relation(row_id, obj)\r\n rel_actor = {k: v for k, v in actor.__dict__.items() if k in ACTOR_FIELDS}\r\n rel_actor['filmography'] = str(actor.filmography)\r\n except:\r\n err = 'Record with such id does not exist'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n return make_response(jsonify(rel_actor), 200)\r\n\r\n else:\r\n err = 'No id specified'\r\n return make_response(jsonify(error=err), 400)", "def add_reward(self, choice, count=1):\n self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, \"%s:rewards\" % choice, count)\n self._choices = None", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority", "def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):\n\n # Keep track of total reward\n self.episode_reward += next_reward\n if self.verbose > 0:\n logger.debug(\n f\"Agent acknowledges receiving a reward of {next_reward}, episode reward so far {self.episode_reward}\"\n )\n\n # Update MCTS tree\n if not done:\n self.mcts_head = self.mcts_head.children[action]\n self.mcts_head.prune() # This updates the node.path\n\n # Train\n if self.training:\n return self._train(kwargs[\"log_prob\"])\n else:\n return 0.0", "def test_shows_issues_from_grouplink(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n 
GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def append(self, state, action, reward, done):\n assert state.shape == self._state_shape, \\\n 'Invalid state shape (required: %s, got: %s)' % (self._state_shape, state.shape)\n\n self._states[self._pos] = state\n self._actions[self._pos] = action\n self._rewards[self._pos] = reward\n self._terminals[self._pos] = done\n\n self._count = max(self._count, self._pos + 1)\n self._pos = (self._pos + 1) % self._max_size" ]
[ "0.7362988", "0.717459", "0.7163847", "0.7061997", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296", "0.42303044", "0.42303044", "0.4199248", "0.41974282", "0.41635618", "0.41283748", "0.4120942", "0.41087726", "0.41035667", "0.40950674", "0.408618", "0.40834093", "0.40813103", "0.40780374", "0.40770057", "0.4075737", "0.40686032", "0.40659428", "0.40503526", "0.40333", "0.40275708", "0.4023863", "0.4007657", "0.40069127", "0.40017104", "0.39986208", "0.39897925", "0.39840496", "0.39749613", "0.39745083", "0.3972324", "0.39586034", "0.39508075", "0.3946143", "0.39457676", "0.3932967", "0.39325184", "0.39297178", "0.39295676", "0.39276245", "0.39270565", "0.39229208", "0.39168912", "0.39096275", "0.3907056", "0.39025453", "0.39004102", "0.38994065", "0.38956124", "0.38943377", "0.38892362", "0.38891497", "0.38870266", "0.38820302", "0.38809907", "0.38804775", "0.38761795", "0.38738146", "0.3869238", "0.38686046", "0.38684267", "0.38673568", "0.38626942", "0.3862313", "0.3860057", "0.38582462", "0.38560754", "0.38555625", "0.38539255", "0.3853633" ]
0.6714336
4
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection.

If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored.

If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group.

If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
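A brief sketch of the two request shapes described above. The account, group, and outcome ids and all field values are illustrative assumptions, not taken from a real course; the ratings are sketched as a plain list of dicts, which the wrapper below flattens into ratings[description] / ratings[points] parameters.

# 1) Link an existing outcome: PUT to the link URL. The outcome_id in the
#    URL identifies the outcome, so any body parameters are ignored.
#    PUT /v1/accounts/1/outcome_groups/42/outcomes/7

# 2) Create and link a new outcome: POST to the collection URL
#    /v1/accounts/1/outcome_groups/42/outcomes. Only the title is required.
#    With the ratings below, mastery_points would default to 3 (the highest
#    rating's points) if left unset, and a rating missing a description
#    would default to "No description"; a missing point value defaults to 0.
new_outcome = {
    'title': 'Effective communication',  # required; all other fields optional
    'ratings': [
        {'description': 'Exceeds expectations', 'points': 3},
        {'description': 'Meets expectations', 'points': 2},
    ],
}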
def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs): path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}' payload = { 'title' : title, 'display_name' : display_name, 'description' : description, 'vendor_guid' : vendor_guid, 'mastery_points' : mastery_points, 'ratings[description]' : ratings_description, 'ratings[points]' : ratings_points, } url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
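A hypothetical usage sketch of the wrapper above; request_ctx is assumed to be a canvas_sdk-style RequestContext carrying authentication and base_api_url, and the ids are invented for illustration.

# Link existing outcome 7 into outcome group 42 under account 1. Because
# outcome_id appears in the PUT URL, the remaining keyword parameters
# (title, ratings, etc.) are ignored by the API for this call.
response = create_link_outcome_accounts_outcome_id(
    request_ctx,
    account_id=1,
    id=42,         # the outcome group id
    outcome_id=7,  # the existing outcome to link
)
outcome_link = response.json()  # the newly created outcome link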
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : 
ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in self.model.vocabulary[meaning]:\n self.model.vocabulary[meaning][word] = 
[self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, 
**request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n AssignmentDurationInSeconds=final_duration,\r\n )\r\n 
additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s", "def addLink(self, name=None, **kwargs):\n if isinstance(name, rigmech):\n self.sym_prefix = name.sym_prefix + \"_\"\n self.addLink(\n name=name.name,\n mass=name.global_syms[\"mass\"],\n inertia=name.global_syms[\"Mq\"],\n origin_xyz=name.global_syms[\"xyz_com\"],\n )\n else:\n kwargs[\"name\"] = name\n LinkArgs = rigmech._check_field_inputs(\n \"addLink\", self._DefaultLinkFields, kwargs\n )\n self.Links[LinkArgs[\"name\"]] = LinkArgs", "def add_link(self, target, rel, title=None, title_star=None,\n anchor=None, hreflang=None, type_hint=None):\n\n # PERF(kgriffs): Heuristic to detect possiblity of an extension\n # relation type, in which case it will be a URL that may contain\n # reserved characters. 
Otherwise, don't waste time running the\n # string through uri.encode\n #\n # Example values for rel:\n #\n # \"next\"\n # \"http://example.com/ext-type\"\n # \"https://example.com/ext-type\"\n # \"alternate http://example.com/ext-type\"\n # \"http://example.com/ext-type alternate\"\n #\n if '//' in rel:\n if ' ' in rel:\n rel = ('\"' +\n ' '.join([uri.encode(r) for r in rel.split()]) +\n '\"')\n else:\n rel = '\"' + uri.encode(rel) + '\"'\n\n value = '<' + uri.encode(target) + '>; rel=' + rel\n\n if title is not None:\n value += '; title=\"' + title + '\"'\n\n if title_star is not None:\n value += (\"; title*=UTF-8'\" + title_star[0] + \"'\" +\n uri.encode_value(title_star[1]))\n\n if type_hint is not None:\n value += '; type=\"' + type_hint + '\"'\n\n if hreflang is not None:\n if isinstance(hreflang, six.string_types):\n value += '; hreflang=' + hreflang\n else:\n value += '; '\n value += '; '.join(['hreflang=' + lang for lang in hreflang])\n\n if anchor is not None:\n value += '; anchor=\"' + uri.encode(anchor) + '\"'\n\n _headers = self._headers\n if 'link' in _headers:\n _headers['link'] += ', ' + value\n else:\n _headers['link'] = value", "def reward(self, history_id, reward):\n pass", "def add(self, destination, kind):\n if destination in self.__links:\n raise SarasvatiException(\"Link to specified thought already exist\")\n if kind not in self.__correct_kinds:\n raise SarasvatiException(\"Link kind is not correct: \" + kind)\n if self.__source is destination:\n raise SarasvatiException(\"Unable link thought to itself\")\n link = Link(self.__source, destination, kind)\n return self.add_link(link)", "def add_outcome(self, node, cost=0, weight=1, classifier=None):\n\n if classifier is None:\n self.outcomes.append((Edge(self, node, cost=cost), weight))\n else:\n self.outcomes.append((Edge(self, node, cost=cost), classifier))", "def relate(a, b, **kwargs):\n return lib.relate(a, b, **kwargs)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def relate_object(self, obj):\n suffix = self._get_api_suffix(obj.__class__)\n endpoint = self._get_api_endpoint() + '/' + suffix\n obj_id = obj._id()\n results = self.tq.post(endpoint, data={'id': obj_id})\n\n results = results.get('data')\n if not results or 'pivot' not in results[0]:\n raise ActionFailedError('Relate indicators')", "def createPooledReward(self, name, rewardPoolId, product_key_name, instructions=None):\n param = {\"name\": name, product_key_name: 'pooled:%s' % rewardPoolId}\n if instructions:\n param[\"instructions\"] = instructions\n self.post_json('/reward', param)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = 
request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def get_reward(self, state, action, next_state, absorbing):\n raise NotImplementedError", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def append(self, state, action, reward, next_state=None, next_action=None,\n is_state_terminal=False):\n raise NotImplementedError", "def add_hyperlink(paragraph, url, text, color, underline):\r\n\r\n # This gets access to the document.xml.rels file and gets a new relation id value\r\n part = paragraph.part\r\n r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)\r\n\r\n # Create the w:hyperlink tag and add needed values\r\n hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')\r\n hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )\r\n\r\n # Create a w:r element\r\n new_run = docx.oxml.shared.OxmlElement('w:r')\r\n\r\n # Create a new w:rPr element\r\n rPr = docx.oxml.shared.OxmlElement('w:rPr')\r\n\r\n # Add color if it is given\r\n if not color is None:\r\n c = docx.oxml.shared.OxmlElement('w:color')\r\n c.set(docx.oxml.shared.qn('w:val'), color)\r\n rPr.append(c)\r\n\r\n # Remove underlining if it is requested\r\n if not underline:\r\n u = docx.oxml.shared.OxmlElement('w:u')\r\n u.set(docx.oxml.shared.qn('w:val'), 'none')\r\n rPr.append(u)\r\n\r\n # Join all the xml elements together add add the required text to the w:r element\r\n new_run.append(rPr)\r\n new_run.text = text\r\n hyperlink.append(new_run)\r\n\r\n paragraph._p.append(hyperlink)\r\n\r\n return hyperlink", "def add_link(self, link):\n raise NotImplementedError", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def link(self, s_id):\r\n\r\n # Take the link entires from TOML file\r\n schedules = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n if 'id' in self.schedules[-1]:\r\n payload['schedule'] = self.schedules[-1].get('id')\r\n if 'id' in self.workouts[-1]:\r\n payload['workout'] = self.workouts[-1].get('id')\r\n return self.add_post(payload, API.url_link, self.links)", "def post(self, category_id, name, description, weight, category_ref, sponsor_id):\n\t\tproperties = {\"id\": category_id, \"name\": name, \"description\": description, \"weight\": weight, \"categoryRef\": category_ref, \"sponsorId\": sponsor_id}\n\t\treturn self.service.post(self.base_uri, 
json.dumps(properties))", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"", "def reward(self, history_id, reward):\n reward_action = self._historystorage.unrewarded_histories[history_id].action\n reward_action_idx = self._actions.index(reward_action)\n context = self._historystorage.unrewarded_histories[history_id].context[reward_action_idx]\n context = np.matrix(context)\n\n # Update the model\n matrix_a = self._modelstorage.get_model()['matrix_a']\n matrix_ainv = self._modelstorage.get_model()['matrix_ainv']\n b = self._modelstorage.get_model()['b']\n theta = self._modelstorage.get_model()['theta']\n matrix_a[reward_action] += np.dot(context.T, context)\n matrix_ainv[reward_action] = np.linalg.solve(matrix_a[reward_action], np.identity(self.d))\n b[reward_action] += reward * context.T\n theta[reward_action] = np.dot(matrix_ainv[reward_action], b[reward_action])\n self._modelstorage.save_model({'matrix_a': matrix_a, 'matrix_ainv': matrix_ainv, 'b': b, 'theta': theta})\n\n # Update the history\n self._historystorage.add_reward(history_id, reward)", "def link_room(self, room_to_link, direction):\n self.linked_rooms[direction] = room_to_link\n # print(self.name + \" linked rooms :\" + repr(self.linked_rooms) )", "def add_new_event(self,\n event_type: str,\n event_datetime: str,\n covid_status: str = \"U\",\n death: int = 0,\n critical_care_admission: int = 0,\n component: str or None = None,\n source: str or None = None,\n source_type: str or None = None,\n wimd: int or None = None,\n **kwargs):\n # Parse datetime and check validity (None for date if invalid)\n event_datetime = parse_datetime(event_datetime)\n if event_datetime.get(\"date\") is None:\n err = f\"Datetime parsed when trying to generate a new outcome event for {self.patientId} was invalid!\"\n 
self._config.write_to_log(err)\n raise ValueError(err)\n # Create outcome document\n new_outcome = Event(patientId=self.patientId,\n eventType=event_type.strip(),\n eventDate=event_datetime.get(\"date\"),\n covidStatus=covid_status,\n death=death,\n criticalCareAdmission=critical_care_admission,\n **kwargs)\n # Populate with optional parameters if given\n new_outcome = _add_if_value(new_outcome, [(\"component\", component),\n (\"source\", source),\n (\"sourceType\", source_type),\n (\"wimd\", wimd),\n (\"eventTime\", event_datetime.get(\"time\"))])\n new_outcome = new_outcome.save()\n self.outcomeEvents.append(new_outcome)\n self.save()\n self._config.write_to_log(f\"Outcome event {new_outcome.id} for patient {self.patientId}\")", "def make_link(first, second):\n manager = Actions()\n manager.make_link(first, second)", "def add_sample(self, img, action, reward, terminal):\n self.imgs[:, :, self.top] = img\n self.actions[self.top] = action\n self.rewards[self.top] = reward\n self.terminal[self.top] = terminal\n\n if self.size == self.max_steps:\n self.bottom = (self.bottom + 1) % self.max_steps\n else:\n self.size += 1\n\n self.top = (self.top + 1) % self.max_steps", "async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def create_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN create_data_link\n duid, sna, update = _create_data_link_params(params)\n as_admin, user = _get_admin_request_from_object(params, 'as_admin', 'as_user')\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.FULL,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'create_data_link', ctx.log_info, as_user=user, skip_check=not as_admin)\n link = self._samples.create_data_link(\n user if user else _UserID(ctx[_CTX_USER]),\n duid,\n sna,\n update,\n as_admin=as_admin)\n results = {'new_link': _links_to_dicts([link])[0]}\n #END create_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method create_data_link return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]", "def related_url(self) -> pulumi.Output[Sequence['outputs.RelatedUrlResponse']]:\n return pulumi.get(self, \"related_url\")", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def get_outcome(self):\n if not self.is_paid:\n raise ValueError(\"There isn't an outcome.\")\n return self.team_a if self.outcome else self.team_b", "def PostReward(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
_publish_reward_topic(self, reward, steps, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)\n self.reward_list.append(reward)\n self.episode_list.append(episode_number)\n self.step_list.append(steps)\n list = str(reward) + \";\" + str(episode_number) + \";\" + str(steps) + \"\\n\"\n\n with open(self.csv_name + '.csv', 'a') as csv:\n csv.write(str(list))", "def add_link(\n self,\n url: str,\n label: Optional[str] = None,\n ) -> None:\n if not label:\n label = url\n self._client.add_element(\n Markdown(\n f\"[{label}]({url})\",\n on_tap_link=lambda e: self._client.page.launch_url(e.data),\n )\n )", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)", "def add(self, context, action, reward):\n\n if self.intercept:\n c = np.array(context[:])\n c = np.append(c, 1.0).reshape((1, self.context_dim + 1))\n else:\n c = np.array(context[:]).reshape((1, self.context_dim))\n\n if self.contexts is None:\n self.contexts = c\n else:\n self.contexts = np.vstack((self.contexts, c))\n\n r = np.zeros((1, self.num_actions))\n r[0, action] = reward\n if self.rewards is None:\n self.rewards = r\n else:\n self.rewards = np.vstack((self.rewards, r))\n\n self.actions.append(action)", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n board.empty_cell(action['action']['at'])\n reward += 1\n else:\n if action['action_type'] == YoteActionType.ADD:\n state.boring_moves += 1\n state.in_hand[player] -= 1\n board.fill_cell(action['action']['to'], Color(player))\n elif action['action_type'] == YoteActionType.MOVE:\n at = action['action']['at']\n to = action['action']['to']\n\n def distance(cell_1, cell_2):\n import math\n return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n board.empty_cell(at)\n board.fill_cell(to, Color(player))\n if int(distance(at, to)) == 1:\n state.boring_moves += 1\n 
elif int(distance(at, to)) > 1:\n state.boring_moves = 0\n next_is_reward = True\n board.fill_cell(to, Color(player))\n if at[0] == to[0] and at[1] < to[1]:\n board.empty_cell((at[0], at[1] + 1))\n captured = (at[0], at[1] + 1)\n elif at[0] == to[0] and at[1] > to[1]:\n board.empty_cell((at[0], at[1] - 1))\n captured = (at[0], at[1] - 1)\n elif at[1] == to[1] and at[0] < to[0]:\n board.empty_cell((at[0] + 1, at[1]))\n captured = (at[0] + 1, at[1])\n elif at[1] == to[1] and at[0] > to[0]:\n board.empty_cell((at[0] - 1, at[1]))\n captured = (at[0] - 1, at[1])\n reward += 1\n\n state.set_board(board)\n state.score[player] += reward\n state.captured = captured\n state.rewarding_move = next_is_reward\n state.previous_is_reward = previous_is_reward\n state.set_latest_player(player)\n state.set_latest_move(json_action)\n if next_is_reward:\n state.set_next_player(player)\n else:\n state.set_next_player(player * -1)\n\n done = YoteRules.is_end_game(state)\n return state, done, next_is_reward", "def link_to(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph)\n else:\n target_node = criterion_or_node\n return self.send(target_node, 'accept_link',\n originating_node=self.id)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, 
done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def strategy(self,\r\n opponent:Player,\r\n message=torch.zeros(10))->Action:\r\n #need to update name for posterity's sake\r\n self.finished_opponent = opponent.name\r\n #Regardless of intent for first few turns, do the base action.\r\n if len(self.history) == 0: return self.action_base\r\n \r\n #get overall reward\r\n self.reward = self.find_reward(opponent) \r\n\r\n # assess perceived intent message in opponent.sent_message\r\n self.intent_received_prev = self.intent_received\r\n self.intent_received = opponent.intent_sent\r\n self.assessment_prev = self.assessment\r\n self.assessment = self.assess_received_intent(opponent)#this is the estimate of what the opponent is doing\r\n \r\n # store for testing later\r\n self.list_reward.append(self.reward)\r\n self.list_intent_received.append(self.intent_received_prev)\r\n self.list_intent_sent.append(self.intent_sent_prev)\r\n self.list_intent_assessment.append(self.assessment_prev)\r\n self.list_intent_true.append(opponent.history[-1])\r\n \r\n # receive assessment and decide to stay with self.base_Action\r\n # OR change it to the other action. 
\r\n self.old_decision = self.decision\r\n self.decision = self.decide_based_on_new_intel(opponent) # what the opponent actually did last turn\r\n self.list_decision.append(self.old_decision)\r\n \r\n return self.decision", "def addReagentTargetedGene(\n self,\n reagent_id,\n gene_id,\n targeted_gene_id=None,\n targeted_gene_label=None,\n description=None,\n reagent_category=None\n ):\n\n # akin to a variant locus\n # is this some sort of pseudo bnode?\n if targeted_gene_id is None:\n targeted_gene_id = '_' + gene_id + '-' + reagent_id\n targeted_gene_id = targeted_gene_id.replace(\":\", \"\")\n self.model.addIndividualToGraph(\n targeted_gene_id,\n targeted_gene_label,\n self.globaltt['reagent_targeted_gene'],\n description,\n ind_category=reagent_category\n )\n\n if gene_id is not None:\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_expression_variant_of'], gene_id\n )\n\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_targeted_by'], reagent_id\n )", "def __init__(__self__, *,\n individual_outcome: Optional[pulumi.Input[Sequence[pulumi.Input['IndividualOutcomeArgs']]]] = None,\n roll_up: Optional[pulumi.Input['PrimaryStepRollUp']] = None):\n if individual_outcome is not None:\n pulumi.set(__self__, \"individual_outcome\", individual_outcome)\n if roll_up is not None:\n pulumi.set(__self__, \"roll_up\", roll_up)", "def _send_lti2_outcome(self):\r\n payload = textwrap.dedent(\"\"\"\r\n {{\r\n \"@context\" : \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\" : \"Result\",\r\n \"resultScore\" : {score},\r\n \"comment\" : \"This is awesome.\"\r\n }}\r\n \"\"\")\r\n data = payload.format(score=0.8)\r\n return self._send_lti2(data)", "def join(self, rewards_s3_path, obs_time_window=None, ratio=0.8, wait=True):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n if obs_time_window is None:\n logger.warning(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with all the observation data\"\n )\n obs_end_time = None\n obs_start_time = None\n else:\n logger.info(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with observation \"\n f\"data in the past {obs_time_window} hours\"\n )\n obs_end_time = datetime.utcnow()\n obs_start_time = obs_end_time - timedelta(hours=obs_time_window)\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n input_obs_data_s3_path = (\n f\"s3://{self.resource_manager.firehose_bucket}/{self.experiment_id}\"\n )\n input_obs_data_s3_path = f\"{input_obs_data_s3_path}/inference_data\"\n # init joining job, update join table\n logger.info(\"Creating resource for joining job...\")\n\n try:\n self.next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=input_obs_data_s3_path,\n obs_start_time=obs_start_time,\n obs_end_time=obs_end_time,\n input_reward_data_s3_path=rewards_s3_path,\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started joining job...\")\n self.next_join_job.start_join(ratio=ratio, wait=wait)\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n succeeded_state = (\n 
self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check Athena queries logs \"\n \"for more information.\"\n )", "def test__put_two_way_link_into():\n for input_value, defaults, expected_output in (\n (False, False, {}),\n (False, True, {'two_way_link': False}),\n (True, False, {'two_way_link': True}),\n ):\n data = put_two_way_link_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def _act_impl(self, observation, reward,\n done):\n if done:\n raise core.EpisodeDoneError(\"Called act on a done episode.\")\n\n if not self.observation_space.contains(observation):\n raise core.InvalidObservationError(\"Invalid ovservation: %s\" %\n observation)\n if self.params.observation_adjustment_fn:\n observation = self.params.observation_adjustment_fn(\n self.rng, self.beliefs, observation)\n\n features = self.feature_selection_fn(observation)\n self.beliefs = self._update_beliefs(features, self.beliefs)\n action = self._allocate(self._n_resource, self.beliefs)\n\n if not self.action_space.contains(action):\n raise gym.error.InvalidAction(\"Invalid action: %s\" % action)\n\n return action", "async def link(self, ctx: Context) -> None:\r\n try:\r\n params: List[str] = get_cmd_params(ctx)\r\n\r\n if len(params) < 1 or not params[0].isdigit() or int(params[0]) > len(self.yt_result.ids):\r\n await ctx.send(\"Please enter a valid video number from 0 to 5\")\r\n return\r\n\r\n self.yt_link = await self.message.edit(content=self.yt_result.get_link(int(params[0])))\r\n await ctx.message.delete()\r\n except Exception as e:\r\n await self.channels.log_error(e, \"ytl\")", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = 
self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def scoring_opportunities(self, scoring_opportunities):\n\n self._scoring_opportunities = scoring_opportunities", "def link_sample(self, other):\n with other.entry.nxfile:\n if 'sample' in self.entry:\n if 'sample' in other.entry:\n del other.entry['sample']\n other.entry.makelink(self.entry['sample'])", "async def cmd_galaddlinkuwl(self, ctx):\n\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('`Useage: [p]galaddlinkuwl <startoflink>, [Bot Owner] Adds a link from gallery link whitelist.`')\n \n # ===== ADD THE NEW LINKS TO THE WHITELIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) + set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await 
ctx.channel.send(content=\"{}\\n are already in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been added to the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def add_sample(self, state, action, state_new, reward, endstate, episode):\r\n new_sample = np.array([state, action, state_new, reward, endstate])\r\n if self.step_counter == 0 and episode == 0:\r\n self.experience_batch = new_sample\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # first sample twice in the batch to be able to index over the rows\r\n elif len(self.experience_batch) < self.experience_batch_size:\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # add new sample to batch when it is not full\r\n else:\r\n self.experience_batch[self.step_counter % self.experience_batch_size, :] = new_sample # override the components of the batch when it is full\r", "def actor_add_relation():\r\n\r\n data = get_request_data()\r\n if 'id' in data.keys():\r\n try:\r\n row_id = int(data['id'])\r\n relation_id = int(data['relation_id'])\r\n except:\r\n err = 'Id must be integer'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n obj = Movie.query.filter_by(id=relation_id).first()\r\n try:\r\n actor = Actor.add_relation(row_id, obj)\r\n rel_actor = {k: v for k, v in actor.__dict__.items() if k in ACTOR_FIELDS}\r\n rel_actor['filmography'] = str(actor.filmography)\r\n except:\r\n err = 'Record with such id does not exist'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n return make_response(jsonify(rel_actor), 200)\r\n\r\n else:\r\n err = 'No id specified'\r\n return make_response(jsonify(error=err), 400)", "def add_reward(self, choice, count=1):\n self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, \"%s:rewards\" % choice, count)\n self._choices = None", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority", "def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):\n\n # Keep track of total reward\n self.episode_reward += next_reward\n if self.verbose > 0:\n logger.debug(\n f\"Agent acknowledges receiving a reward of {next_reward}, episode reward so far {self.episode_reward}\"\n )\n\n # Update MCTS tree\n if not done:\n self.mcts_head = self.mcts_head.children[action]\n self.mcts_head.prune() # This updates the node.path\n\n # Train\n if self.training:\n return self._train(kwargs[\"log_prob\"])\n else:\n return 0.0", "def test_shows_issues_from_grouplink(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n 
GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def append(self, state, action, reward, done):\n assert state.shape == self._state_shape, \\\n 'Invalid state shape (required: %s, got: %s)' % (self._state_shape, state.shape)\n\n self._states[self._pos] = state\n self._actions[self._pos] = action\n self._rewards[self._pos] = reward\n self._terminals[self._pos] = done\n\n self._count = max(self._count, self._pos + 1)\n self._pos = (self._pos + 1) % self._max_size" ]
[ "0.7362988", "0.717459", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296", "0.42303044", "0.42303044", "0.4199248", "0.41974282", "0.41635618", "0.41283748", "0.4120942", "0.41087726", "0.41035667", "0.40950674", "0.408618", "0.40834093", "0.40813103", "0.40780374", "0.40770057", "0.4075737", "0.40686032", "0.40659428", "0.40503526", "0.40333", "0.40275708", "0.4023863", "0.4007657", "0.40069127", "0.40017104", "0.39986208", "0.39897925", "0.39840496", "0.39749613", "0.39745083", "0.3972324", "0.39586034", "0.39508075", "0.3946143", "0.39457676", "0.3932967", "0.39325184", "0.39297178", "0.39295676", "0.39276245", "0.39270565", "0.39229208", "0.39168912", "0.39096275", "0.3907056", "0.39025453", "0.39004102", "0.38994065", "0.38956124", "0.38943377", "0.38892362", "0.38891497", "0.38870266", "0.38820302", "0.38809907", "0.38804775", "0.38761795", "0.38738146", "0.3869238", "0.38686046", "0.38684267", "0.38673568", "0.38626942", "0.3862313", "0.3860057", "0.38582462", "0.38560754", "0.38555625", "0.38539255", "0.3853633" ]
0.7163847
2
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection.

If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored.

If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group.

If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):
    """
    Link an outcome into a course-level outcome group.

    Pass outcome_id to link an existing outcome (the API then ignores the
    other outcome fields), or omit it and supply title (required) plus the
    optional description/ratings/mastery fields to create a new outcome and
    link it in a single call.
    """
    path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'
    payload = {
        'outcome_id': outcome_id,
        'title': title,
        'display_name': display_name,
        'description': description,
        'vendor_guid': vendor_guid,
        'mastery_points': mastery_points,
        'ratings[description]': ratings_description,
        'ratings[points]': ratings_points,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    # `client` is the module-level HTTP helper this SDK uses for all requests.
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
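For context, a minimal usage sketch of the function above. The RequestContext import path and constructor arguments are assumptions modeled on canvas_sdk-style clients, and the course, group, and outcome ids are placeholders; only the create_link_outcome_courses call shapes come from the code itself.

# Minimal usage sketch, assuming a canvas_sdk-style client. The import path
# and RequestContext signature are assumptions, not confirmed by this module;
# adapt them to your SDK installation.
from canvas_sdk.client.base import RequestContext  # hypothetical path

request_ctx = RequestContext('my-oauth2-token', 'https://canvas.example.edu/api')

# Link an existing outcome (id 42) into outcome group 7 of course 101;
# with outcome_id present, the API ignores the other outcome fields.
create_link_outcome_courses(request_ctx, course_id=101, id=7, outcome_id=42)

# Create and link a new outcome in one call; title is required, the rest is
# optional. The ratings become an embedded rubric criterion on the outcome.
create_link_outcome_courses(
    request_ctx, course_id=101, id=7,
    title='Grammar and mechanics',
    mastery_points=3,
    ratings_description=['Exceeds', 'Meets', 'Does not meet'],
    ratings_points=[5, 3, 0],
)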
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 
'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in self.model.vocabulary[meaning]:\n 
self.model.vocabulary[meaning][word] = [self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = 
client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n 
AssignmentDurationInSeconds=final_duration,\r\n )\r\n additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s", "def addLink(self, name=None, **kwargs):\n if isinstance(name, rigmech):\n self.sym_prefix = name.sym_prefix + \"_\"\n self.addLink(\n name=name.name,\n mass=name.global_syms[\"mass\"],\n inertia=name.global_syms[\"Mq\"],\n origin_xyz=name.global_syms[\"xyz_com\"],\n )\n else:\n kwargs[\"name\"] = name\n LinkArgs = rigmech._check_field_inputs(\n \"addLink\", self._DefaultLinkFields, kwargs\n )\n self.Links[LinkArgs[\"name\"]] = LinkArgs", "def add_link(self, target, rel, title=None, title_star=None,\n anchor=None, hreflang=None, type_hint=None):\n\n # PERF(kgriffs): Heuristic to detect possiblity of an extension\n # relation type, in which case it will be a URL that may contain\n # reserved characters. 
Otherwise, don't waste time running the\n # string through uri.encode\n #\n # Example values for rel:\n #\n # \"next\"\n # \"http://example.com/ext-type\"\n # \"https://example.com/ext-type\"\n # \"alternate http://example.com/ext-type\"\n # \"http://example.com/ext-type alternate\"\n #\n if '//' in rel:\n if ' ' in rel:\n rel = ('\"' +\n ' '.join([uri.encode(r) for r in rel.split()]) +\n '\"')\n else:\n rel = '\"' + uri.encode(rel) + '\"'\n\n value = '<' + uri.encode(target) + '>; rel=' + rel\n\n if title is not None:\n value += '; title=\"' + title + '\"'\n\n if title_star is not None:\n value += (\"; title*=UTF-8'\" + title_star[0] + \"'\" +\n uri.encode_value(title_star[1]))\n\n if type_hint is not None:\n value += '; type=\"' + type_hint + '\"'\n\n if hreflang is not None:\n if isinstance(hreflang, six.string_types):\n value += '; hreflang=' + hreflang\n else:\n value += '; '\n value += '; '.join(['hreflang=' + lang for lang in hreflang])\n\n if anchor is not None:\n value += '; anchor=\"' + uri.encode(anchor) + '\"'\n\n _headers = self._headers\n if 'link' in _headers:\n _headers['link'] += ', ' + value\n else:\n _headers['link'] = value", "def reward(self, history_id, reward):\n pass", "def add(self, destination, kind):\n if destination in self.__links:\n raise SarasvatiException(\"Link to specified thought already exist\")\n if kind not in self.__correct_kinds:\n raise SarasvatiException(\"Link kind is not correct: \" + kind)\n if self.__source is destination:\n raise SarasvatiException(\"Unable link thought to itself\")\n link = Link(self.__source, destination, kind)\n return self.add_link(link)", "def add_outcome(self, node, cost=0, weight=1, classifier=None):\n\n if classifier is None:\n self.outcomes.append((Edge(self, node, cost=cost), weight))\n else:\n self.outcomes.append((Edge(self, node, cost=cost), classifier))", "def relate(a, b, **kwargs):\n return lib.relate(a, b, **kwargs)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def relate_object(self, obj):\n suffix = self._get_api_suffix(obj.__class__)\n endpoint = self._get_api_endpoint() + '/' + suffix\n obj_id = obj._id()\n results = self.tq.post(endpoint, data={'id': obj_id})\n\n results = results.get('data')\n if not results or 'pivot' not in results[0]:\n raise ActionFailedError('Relate indicators')", "def createPooledReward(self, name, rewardPoolId, product_key_name, instructions=None):\n param = {\"name\": name, product_key_name: 'pooled:%s' % rewardPoolId}\n if instructions:\n param[\"instructions\"] = instructions\n self.post_json('/reward', param)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = 
request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def get_reward(self, state, action, next_state, absorbing):\n raise NotImplementedError", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def append(self, state, action, reward, next_state=None, next_action=None,\n is_state_terminal=False):\n raise NotImplementedError", "def add_hyperlink(paragraph, url, text, color, underline):\r\n\r\n # This gets access to the document.xml.rels file and gets a new relation id value\r\n part = paragraph.part\r\n r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)\r\n\r\n # Create the w:hyperlink tag and add needed values\r\n hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')\r\n hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )\r\n\r\n # Create a w:r element\r\n new_run = docx.oxml.shared.OxmlElement('w:r')\r\n\r\n # Create a new w:rPr element\r\n rPr = docx.oxml.shared.OxmlElement('w:rPr')\r\n\r\n # Add color if it is given\r\n if not color is None:\r\n c = docx.oxml.shared.OxmlElement('w:color')\r\n c.set(docx.oxml.shared.qn('w:val'), color)\r\n rPr.append(c)\r\n\r\n # Remove underlining if it is requested\r\n if not underline:\r\n u = docx.oxml.shared.OxmlElement('w:u')\r\n u.set(docx.oxml.shared.qn('w:val'), 'none')\r\n rPr.append(u)\r\n\r\n # Join all the xml elements together add add the required text to the w:r element\r\n new_run.append(rPr)\r\n new_run.text = text\r\n hyperlink.append(new_run)\r\n\r\n paragraph._p.append(hyperlink)\r\n\r\n return hyperlink", "def add_link(self, link):\n raise NotImplementedError", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def link(self, s_id):\r\n\r\n # Take the link entires from TOML file\r\n schedules = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n if 'id' in self.schedules[-1]:\r\n payload['schedule'] = self.schedules[-1].get('id')\r\n if 'id' in self.workouts[-1]:\r\n payload['workout'] = self.workouts[-1].get('id')\r\n return self.add_post(payload, API.url_link, self.links)", "def post(self, category_id, name, description, weight, category_ref, sponsor_id):\n\t\tproperties = {\"id\": category_id, \"name\": name, \"description\": description, \"weight\": weight, \"categoryRef\": category_ref, \"sponsorId\": sponsor_id}\n\t\treturn self.service.post(self.base_uri, 
json.dumps(properties))", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"", "def reward(self, history_id, reward):\n reward_action = self._historystorage.unrewarded_histories[history_id].action\n reward_action_idx = self._actions.index(reward_action)\n context = self._historystorage.unrewarded_histories[history_id].context[reward_action_idx]\n context = np.matrix(context)\n\n # Update the model\n matrix_a = self._modelstorage.get_model()['matrix_a']\n matrix_ainv = self._modelstorage.get_model()['matrix_ainv']\n b = self._modelstorage.get_model()['b']\n theta = self._modelstorage.get_model()['theta']\n matrix_a[reward_action] += np.dot(context.T, context)\n matrix_ainv[reward_action] = np.linalg.solve(matrix_a[reward_action], np.identity(self.d))\n b[reward_action] += reward * context.T\n theta[reward_action] = np.dot(matrix_ainv[reward_action], b[reward_action])\n self._modelstorage.save_model({'matrix_a': matrix_a, 'matrix_ainv': matrix_ainv, 'b': b, 'theta': theta})\n\n # Update the history\n self._historystorage.add_reward(history_id, reward)", "def link_room(self, room_to_link, direction):\n self.linked_rooms[direction] = room_to_link\n # print(self.name + \" linked rooms :\" + repr(self.linked_rooms) )", "def add_new_event(self,\n event_type: str,\n event_datetime: str,\n covid_status: str = \"U\",\n death: int = 0,\n critical_care_admission: int = 0,\n component: str or None = None,\n source: str or None = None,\n source_type: str or None = None,\n wimd: int or None = None,\n **kwargs):\n # Parse datetime and check validity (None for date if invalid)\n event_datetime = parse_datetime(event_datetime)\n if event_datetime.get(\"date\") is None:\n err = f\"Datetime parsed when trying to generate a new outcome event for {self.patientId} was invalid!\"\n 
self._config.write_to_log(err)\n raise ValueError(err)\n # Create outcome document\n new_outcome = Event(patientId=self.patientId,\n eventType=event_type.strip(),\n eventDate=event_datetime.get(\"date\"),\n covidStatus=covid_status,\n death=death,\n criticalCareAdmission=critical_care_admission,\n **kwargs)\n # Populate with optional parameters if given\n new_outcome = _add_if_value(new_outcome, [(\"component\", component),\n (\"source\", source),\n (\"sourceType\", source_type),\n (\"wimd\", wimd),\n (\"eventTime\", event_datetime.get(\"time\"))])\n new_outcome = new_outcome.save()\n self.outcomeEvents.append(new_outcome)\n self.save()\n self._config.write_to_log(f\"Outcome event {new_outcome.id} for patient {self.patientId}\")", "def make_link(first, second):\n manager = Actions()\n manager.make_link(first, second)", "def add_sample(self, img, action, reward, terminal):\n self.imgs[:, :, self.top] = img\n self.actions[self.top] = action\n self.rewards[self.top] = reward\n self.terminal[self.top] = terminal\n\n if self.size == self.max_steps:\n self.bottom = (self.bottom + 1) % self.max_steps\n else:\n self.size += 1\n\n self.top = (self.top + 1) % self.max_steps", "async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def create_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN create_data_link\n duid, sna, update = _create_data_link_params(params)\n as_admin, user = _get_admin_request_from_object(params, 'as_admin', 'as_user')\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.FULL,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'create_data_link', ctx.log_info, as_user=user, skip_check=not as_admin)\n link = self._samples.create_data_link(\n user if user else _UserID(ctx[_CTX_USER]),\n duid,\n sna,\n update,\n as_admin=as_admin)\n results = {'new_link': _links_to_dicts([link])[0]}\n #END create_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method create_data_link return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]", "def related_url(self) -> pulumi.Output[Sequence['outputs.RelatedUrlResponse']]:\n return pulumi.get(self, \"related_url\")", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def get_outcome(self):\n if not self.is_paid:\n raise ValueError(\"There isn't an outcome.\")\n return self.team_a if self.outcome else self.team_b", "def PostReward(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
_publish_reward_topic(self, reward, steps, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)\n self.reward_list.append(reward)\n self.episode_list.append(episode_number)\n self.step_list.append(steps)\n list = str(reward) + \";\" + str(episode_number) + \";\" + str(steps) + \"\\n\"\n\n with open(self.csv_name + '.csv', 'a') as csv:\n csv.write(str(list))", "def add_link(\n self,\n url: str,\n label: Optional[str] = None,\n ) -> None:\n if not label:\n label = url\n self._client.add_element(\n Markdown(\n f\"[{label}]({url})\",\n on_tap_link=lambda e: self._client.page.launch_url(e.data),\n )\n )", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)", "def add(self, context, action, reward):\n\n if self.intercept:\n c = np.array(context[:])\n c = np.append(c, 1.0).reshape((1, self.context_dim + 1))\n else:\n c = np.array(context[:]).reshape((1, self.context_dim))\n\n if self.contexts is None:\n self.contexts = c\n else:\n self.contexts = np.vstack((self.contexts, c))\n\n r = np.zeros((1, self.num_actions))\n r[0, action] = reward\n if self.rewards is None:\n self.rewards = r\n else:\n self.rewards = np.vstack((self.rewards, r))\n\n self.actions.append(action)", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n board.empty_cell(action['action']['at'])\n reward += 1\n else:\n if action['action_type'] == YoteActionType.ADD:\n state.boring_moves += 1\n state.in_hand[player] -= 1\n board.fill_cell(action['action']['to'], Color(player))\n elif action['action_type'] == YoteActionType.MOVE:\n at = action['action']['at']\n to = action['action']['to']\n\n def distance(cell_1, cell_2):\n import math\n return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n board.empty_cell(at)\n board.fill_cell(to, Color(player))\n if int(distance(at, to)) == 1:\n state.boring_moves += 1\n 
elif int(distance(at, to)) > 1:\n state.boring_moves = 0\n next_is_reward = True\n board.fill_cell(to, Color(player))\n if at[0] == to[0] and at[1] < to[1]:\n board.empty_cell((at[0], at[1] + 1))\n captured = (at[0], at[1] + 1)\n elif at[0] == to[0] and at[1] > to[1]:\n board.empty_cell((at[0], at[1] - 1))\n captured = (at[0], at[1] - 1)\n elif at[1] == to[1] and at[0] < to[0]:\n board.empty_cell((at[0] + 1, at[1]))\n captured = (at[0] + 1, at[1])\n elif at[1] == to[1] and at[0] > to[0]:\n board.empty_cell((at[0] - 1, at[1]))\n captured = (at[0] - 1, at[1])\n reward += 1\n\n state.set_board(board)\n state.score[player] += reward\n state.captured = captured\n state.rewarding_move = next_is_reward\n state.previous_is_reward = previous_is_reward\n state.set_latest_player(player)\n state.set_latest_move(json_action)\n if next_is_reward:\n state.set_next_player(player)\n else:\n state.set_next_player(player * -1)\n\n done = YoteRules.is_end_game(state)\n return state, done, next_is_reward", "def link_to(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph)\n else:\n target_node = criterion_or_node\n return self.send(target_node, 'accept_link',\n originating_node=self.id)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, 
done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def strategy(self,\r\n opponent:Player,\r\n message=torch.zeros(10))->Action:\r\n #need to update name for posterity's sake\r\n self.finished_opponent = opponent.name\r\n #Regardless of intent for first few turns, do the base action.\r\n if len(self.history) == 0: return self.action_base\r\n \r\n #get overall reward\r\n self.reward = self.find_reward(opponent) \r\n\r\n # assess perceived intent message in opponent.sent_message\r\n self.intent_received_prev = self.intent_received\r\n self.intent_received = opponent.intent_sent\r\n self.assessment_prev = self.assessment\r\n self.assessment = self.assess_received_intent(opponent)#this is the estimate of what the opponent is doing\r\n \r\n # store for testing later\r\n self.list_reward.append(self.reward)\r\n self.list_intent_received.append(self.intent_received_prev)\r\n self.list_intent_sent.append(self.intent_sent_prev)\r\n self.list_intent_assessment.append(self.assessment_prev)\r\n self.list_intent_true.append(opponent.history[-1])\r\n \r\n # receive assessment and decide to stay with self.base_Action\r\n # OR change it to the other action. 
\r\n self.old_decision = self.decision\r\n self.decision = self.decide_based_on_new_intel(opponent) # what the opponent actually did last turn\r\n self.list_decision.append(self.old_decision)\r\n \r\n return self.decision", "def addReagentTargetedGene(\n self,\n reagent_id,\n gene_id,\n targeted_gene_id=None,\n targeted_gene_label=None,\n description=None,\n reagent_category=None\n ):\n\n # akin to a variant locus\n # is this some sort of pseudo bnode?\n if targeted_gene_id is None:\n targeted_gene_id = '_' + gene_id + '-' + reagent_id\n targeted_gene_id = targeted_gene_id.replace(\":\", \"\")\n self.model.addIndividualToGraph(\n targeted_gene_id,\n targeted_gene_label,\n self.globaltt['reagent_targeted_gene'],\n description,\n ind_category=reagent_category\n )\n\n if gene_id is not None:\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_expression_variant_of'], gene_id\n )\n\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_targeted_by'], reagent_id\n )", "def __init__(__self__, *,\n individual_outcome: Optional[pulumi.Input[Sequence[pulumi.Input['IndividualOutcomeArgs']]]] = None,\n roll_up: Optional[pulumi.Input['PrimaryStepRollUp']] = None):\n if individual_outcome is not None:\n pulumi.set(__self__, \"individual_outcome\", individual_outcome)\n if roll_up is not None:\n pulumi.set(__self__, \"roll_up\", roll_up)", "def _send_lti2_outcome(self):\r\n payload = textwrap.dedent(\"\"\"\r\n {{\r\n \"@context\" : \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\" : \"Result\",\r\n \"resultScore\" : {score},\r\n \"comment\" : \"This is awesome.\"\r\n }}\r\n \"\"\")\r\n data = payload.format(score=0.8)\r\n return self._send_lti2(data)", "def join(self, rewards_s3_path, obs_time_window=None, ratio=0.8, wait=True):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n if obs_time_window is None:\n logger.warning(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with all the observation data\"\n )\n obs_end_time = None\n obs_start_time = None\n else:\n logger.info(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with observation \"\n f\"data in the past {obs_time_window} hours\"\n )\n obs_end_time = datetime.utcnow()\n obs_start_time = obs_end_time - timedelta(hours=obs_time_window)\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n input_obs_data_s3_path = (\n f\"s3://{self.resource_manager.firehose_bucket}/{self.experiment_id}\"\n )\n input_obs_data_s3_path = f\"{input_obs_data_s3_path}/inference_data\"\n # init joining job, update join table\n logger.info(\"Creating resource for joining job...\")\n\n try:\n self.next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=input_obs_data_s3_path,\n obs_start_time=obs_start_time,\n obs_end_time=obs_end_time,\n input_reward_data_s3_path=rewards_s3_path,\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started joining job...\")\n self.next_join_job.start_join(ratio=ratio, wait=wait)\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n succeeded_state = (\n 
self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check Athena queries logs \"\n \"for more information.\"\n )", "def test__put_two_way_link_into():\n for input_value, defaults, expected_output in (\n (False, False, {}),\n (False, True, {'two_way_link': False}),\n (True, False, {'two_way_link': True}),\n ):\n data = put_two_way_link_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def _act_impl(self, observation, reward,\n done):\n if done:\n raise core.EpisodeDoneError(\"Called act on a done episode.\")\n\n if not self.observation_space.contains(observation):\n raise core.InvalidObservationError(\"Invalid ovservation: %s\" %\n observation)\n if self.params.observation_adjustment_fn:\n observation = self.params.observation_adjustment_fn(\n self.rng, self.beliefs, observation)\n\n features = self.feature_selection_fn(observation)\n self.beliefs = self._update_beliefs(features, self.beliefs)\n action = self._allocate(self._n_resource, self.beliefs)\n\n if not self.action_space.contains(action):\n raise gym.error.InvalidAction(\"Invalid action: %s\" % action)\n\n return action", "async def link(self, ctx: Context) -> None:\r\n try:\r\n params: List[str] = get_cmd_params(ctx)\r\n\r\n if len(params) < 1 or not params[0].isdigit() or int(params[0]) > len(self.yt_result.ids):\r\n await ctx.send(\"Please enter a valid video number from 0 to 5\")\r\n return\r\n\r\n self.yt_link = await self.message.edit(content=self.yt_result.get_link(int(params[0])))\r\n await ctx.message.delete()\r\n except Exception as e:\r\n await self.channels.log_error(e, \"ytl\")", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = 
self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def scoring_opportunities(self, scoring_opportunities):\n\n self._scoring_opportunities = scoring_opportunities", "def link_sample(self, other):\n with other.entry.nxfile:\n if 'sample' in self.entry:\n if 'sample' in other.entry:\n del other.entry['sample']\n other.entry.makelink(self.entry['sample'])", "async def cmd_galaddlinkuwl(self, ctx):\n\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('`Useage: [p]galaddlinkuwl <startoflink>, [Bot Owner] Adds a link from gallery link whitelist.`')\n \n # ===== ADD THE NEW LINKS TO THE WHITELIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) + set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await 
ctx.channel.send(content=\"{}\\n are already in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been added to the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def add_sample(self, state, action, state_new, reward, endstate, episode):\r\n new_sample = np.array([state, action, state_new, reward, endstate])\r\n if self.step_counter == 0 and episode == 0:\r\n self.experience_batch = new_sample\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # first sample twice in the batch to be able to index over the rows\r\n elif len(self.experience_batch) < self.experience_batch_size:\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # add new sample to batch when it is not full\r\n else:\r\n self.experience_batch[self.step_counter % self.experience_batch_size, :] = new_sample # override the components of the batch when it is full\r", "def actor_add_relation():\r\n\r\n data = get_request_data()\r\n if 'id' in data.keys():\r\n try:\r\n row_id = int(data['id'])\r\n relation_id = int(data['relation_id'])\r\n except:\r\n err = 'Id must be integer'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n obj = Movie.query.filter_by(id=relation_id).first()\r\n try:\r\n actor = Actor.add_relation(row_id, obj)\r\n rel_actor = {k: v for k, v in actor.__dict__.items() if k in ACTOR_FIELDS}\r\n rel_actor['filmography'] = str(actor.filmography)\r\n except:\r\n err = 'Record with such id does not exist'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n return make_response(jsonify(rel_actor), 200)\r\n\r\n else:\r\n err = 'No id specified'\r\n return make_response(jsonify(error=err), 400)", "def add_reward(self, choice, count=1):\n self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, \"%s:rewards\" % choice, count)\n self._choices = None", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority", "def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):\n\n # Keep track of total reward\n self.episode_reward += next_reward\n if self.verbose > 0:\n logger.debug(\n f\"Agent acknowledges receiving a reward of {next_reward}, episode reward so far {self.episode_reward}\"\n )\n\n # Update MCTS tree\n if not done:\n self.mcts_head = self.mcts_head.children[action]\n self.mcts_head.prune() # This updates the node.path\n\n # Train\n if self.training:\n return self._train(kwargs[\"log_prob\"])\n else:\n return 0.0", "def test_shows_issues_from_grouplink(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n 
GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def append(self, state, action, reward, done):\n assert state.shape == self._state_shape, \\\n 'Invalid state shape (required: %s, got: %s)' % (self._state_shape, state.shape)\n\n self._states[self._pos] = state\n self._actions[self._pos] = action\n self._rewards[self._pos] = reward\n self._terminals[self._pos] = done\n\n self._count = max(self._count, self._pos + 1)\n self._pos = (self._pos + 1) % self._max_size" ]
[ "0.7362988", "0.717459", "0.7163847", "0.7061997", "0.6714336", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296", "0.42303044", "0.42303044", "0.4199248", "0.41974282", "0.41635618", "0.41283748", "0.4120942", "0.41087726", "0.41035667", "0.40950674", "0.408618", "0.40834093", "0.40813103", "0.40780374", "0.40770057", "0.4075737", "0.40686032", "0.40659428", "0.40503526", "0.40333", "0.40275708", "0.4023863", "0.4007657", "0.40069127", "0.40017104", "0.39986208", "0.39897925", "0.39840496", "0.39749613", "0.39745083", "0.3972324", "0.39586034", "0.39508075", "0.3946143", "0.39457676", "0.3932967", "0.39325184", "0.39297178", "0.39295676", "0.39276245", "0.39270565", "0.39229208", "0.39168912", "0.39096275", "0.3907056", "0.39025453", "0.39004102", "0.38994065", "0.38956124", "0.38943377", "0.38892362", "0.38891497", "0.38870266", "0.38820302", "0.38809907", "0.38804775", "0.38761795", "0.38738146", "0.3869238", "0.38686046", "0.38684267", "0.38673568", "0.38626942", "0.3862313", "0.3860057", "0.38582462", "0.38560754", "0.38555625", "0.38539255", "0.3853633" ]
0.66986096
5
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection.

If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored.

If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group.

If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
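The description above covers two distinct call patterns. A minimal sketch of both follows, using the plain `requests` library; the base URL, token, and ids are hypothetical placeholders, and the `ratings[...]` payload keys simply mirror the parameter names used by this API's helpers rather than a confirmed wire format.

import requests

BASE = "https://canvas.example.edu/api/v1"            # hypothetical Canvas instance
HEADERS = {"Authorization": "Bearer <access-token>"}  # hypothetical token

# Pattern 1: link an EXISTING outcome -- PUT to the link URL for that outcome.
# With outcome_id present in the URL, any other parameters are ignored.
resp = requests.put(
    f"{BASE}/courses/101/outcome_groups/5/outcomes/42",
    headers=HEADERS,
)
resp.raise_for_status()

# Pattern 2: define a NEW outcome -- POST to the collection. Only title is
# required; mastery_points defaults to the highest rating's points (5 here).
resp = requests.post(
    f"{BASE}/courses/101/outcome_groups/5/outcomes",
    headers=HEADERS,
    data={
        "title": "Solve quadratic equations",
        "ratings[description]": ["Exceeds", "Meets"],
        "ratings[points]": [5, 3],
    },
)
resp.raise_for_status()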
def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):

    path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'
    payload = {
        'title' : title,
        'display_name' : display_name,
        'description' : description,
        'vendor_guid' : vendor_guid,
        'mastery_points' : mastery_points,
        'ratings[description]' : ratings_description,
        'ratings[points]' : ratings_points,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)
    response = client.put(request_ctx, url, payload=payload, **request_kwargs)

    return response
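A usage sketch for the helper above. `request_ctx` and `client` belong to whatever Canvas SDK this function is part of, so the setup is assumed, and the course, group, and outcome ids are made up.

# Assumed: request_ctx supplies base_api_url plus auth for client.put(),
# and client.put() returns a requests-style Response object.
response = create_link_outcome_courses_outcome_id(
    request_ctx,
    course_id=101,   # course that owns the outcome group
    id=5,            # outcome group id
    outcome_id=42,   # existing outcome to link; remaining params are ignored
)
outcome_link = response.json()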
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : 
ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in self.model.vocabulary[meaning]:\n self.model.vocabulary[meaning][word] = 
[self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, 
**request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n AssignmentDurationInSeconds=final_duration,\r\n )\r\n 
additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s", "def addLink(self, name=None, **kwargs):\n if isinstance(name, rigmech):\n self.sym_prefix = name.sym_prefix + \"_\"\n self.addLink(\n name=name.name,\n mass=name.global_syms[\"mass\"],\n inertia=name.global_syms[\"Mq\"],\n origin_xyz=name.global_syms[\"xyz_com\"],\n )\n else:\n kwargs[\"name\"] = name\n LinkArgs = rigmech._check_field_inputs(\n \"addLink\", self._DefaultLinkFields, kwargs\n )\n self.Links[LinkArgs[\"name\"]] = LinkArgs", "def add_link(self, target, rel, title=None, title_star=None,\n anchor=None, hreflang=None, type_hint=None):\n\n # PERF(kgriffs): Heuristic to detect possiblity of an extension\n # relation type, in which case it will be a URL that may contain\n # reserved characters. 
Otherwise, don't waste time running the\n # string through uri.encode\n #\n # Example values for rel:\n #\n # \"next\"\n # \"http://example.com/ext-type\"\n # \"https://example.com/ext-type\"\n # \"alternate http://example.com/ext-type\"\n # \"http://example.com/ext-type alternate\"\n #\n if '//' in rel:\n if ' ' in rel:\n rel = ('\"' +\n ' '.join([uri.encode(r) for r in rel.split()]) +\n '\"')\n else:\n rel = '\"' + uri.encode(rel) + '\"'\n\n value = '<' + uri.encode(target) + '>; rel=' + rel\n\n if title is not None:\n value += '; title=\"' + title + '\"'\n\n if title_star is not None:\n value += (\"; title*=UTF-8'\" + title_star[0] + \"'\" +\n uri.encode_value(title_star[1]))\n\n if type_hint is not None:\n value += '; type=\"' + type_hint + '\"'\n\n if hreflang is not None:\n if isinstance(hreflang, six.string_types):\n value += '; hreflang=' + hreflang\n else:\n value += '; '\n value += '; '.join(['hreflang=' + lang for lang in hreflang])\n\n if anchor is not None:\n value += '; anchor=\"' + uri.encode(anchor) + '\"'\n\n _headers = self._headers\n if 'link' in _headers:\n _headers['link'] += ', ' + value\n else:\n _headers['link'] = value", "def reward(self, history_id, reward):\n pass", "def add(self, destination, kind):\n if destination in self.__links:\n raise SarasvatiException(\"Link to specified thought already exist\")\n if kind not in self.__correct_kinds:\n raise SarasvatiException(\"Link kind is not correct: \" + kind)\n if self.__source is destination:\n raise SarasvatiException(\"Unable link thought to itself\")\n link = Link(self.__source, destination, kind)\n return self.add_link(link)", "def add_outcome(self, node, cost=0, weight=1, classifier=None):\n\n if classifier is None:\n self.outcomes.append((Edge(self, node, cost=cost), weight))\n else:\n self.outcomes.append((Edge(self, node, cost=cost), classifier))", "def relate(a, b, **kwargs):\n return lib.relate(a, b, **kwargs)", "def set_outcome(self, outcome):\r\n if outcome < self._outcome:\r\n self._outcome = outcome\r\n self.choose(0, 0, 0, 0, 0) # Dummy call, to validate outcome.\r\n if self.parent: self.parent.set_outcome(self._outcome)", "def relate_object(self, obj):\n suffix = self._get_api_suffix(obj.__class__)\n endpoint = self._get_api_endpoint() + '/' + suffix\n obj_id = obj._id()\n results = self.tq.post(endpoint, data={'id': obj_id})\n\n results = results.get('data')\n if not results or 'pivot' not in results[0]:\n raise ActionFailedError('Relate indicators')", "def createPooledReward(self, name, rewardPoolId, product_key_name, instructions=None):\n param = {\"name\": name, product_key_name: 'pooled:%s' % rewardPoolId}\n if instructions:\n param[\"instructions\"] = instructions\n self.post_json('/reward', param)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = 
request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def get_reward(self, state, action, next_state, absorbing):\n raise NotImplementedError", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def add(self, obs, action, reward, new_obs, done):\n experience = (obs, action, reward, new_obs, done)\n insert_index = self.fix_index()\n if insert_index > 0:\n if insert_index in self._storage:\n del self._storage[insert_index]\n self._storage[insert_index] = experience\n # add to priority queue\n priority = self.priority_queue.get_max_priority()\n self.priority_queue.update(priority, insert_index)\n return True\n else:\n sys.stderr.write('Insert failed\\n')\n return False", "def append(self, state, action, reward, next_state=None, next_action=None,\n is_state_terminal=False):\n raise NotImplementedError", "def add_hyperlink(paragraph, url, text, color, underline):\r\n\r\n # This gets access to the document.xml.rels file and gets a new relation id value\r\n part = paragraph.part\r\n r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)\r\n\r\n # Create the w:hyperlink tag and add needed values\r\n hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')\r\n hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )\r\n\r\n # Create a w:r element\r\n new_run = docx.oxml.shared.OxmlElement('w:r')\r\n\r\n # Create a new w:rPr element\r\n rPr = docx.oxml.shared.OxmlElement('w:rPr')\r\n\r\n # Add color if it is given\r\n if not color is None:\r\n c = docx.oxml.shared.OxmlElement('w:color')\r\n c.set(docx.oxml.shared.qn('w:val'), color)\r\n rPr.append(c)\r\n\r\n # Remove underlining if it is requested\r\n if not underline:\r\n u = docx.oxml.shared.OxmlElement('w:u')\r\n u.set(docx.oxml.shared.qn('w:val'), 'none')\r\n rPr.append(u)\r\n\r\n # Join all the xml elements together add add the required text to the w:r element\r\n new_run.append(rPr)\r\n new_run.text = text\r\n hyperlink.append(new_run)\r\n\r\n paragraph._p.append(hyperlink)\r\n\r\n return hyperlink", "def add_link(self, link):\n raise NotImplementedError", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def link(self, s_id):\r\n\r\n # Take the link entires from TOML file\r\n schedules = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n if 'id' in self.schedules[-1]:\r\n payload['schedule'] = self.schedules[-1].get('id')\r\n if 'id' in self.workouts[-1]:\r\n payload['workout'] = self.workouts[-1].get('id')\r\n return self.add_post(payload, API.url_link, self.links)", "def post(self, category_id, name, description, weight, category_ref, sponsor_id):\n\t\tproperties = {\"id\": category_id, \"name\": name, \"description\": description, \"weight\": weight, \"categoryRef\": category_ref, \"sponsorId\": sponsor_id}\n\t\treturn self.service.post(self.base_uri, 
json.dumps(properties))", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"", "def reward(self, history_id, reward):\n reward_action = self._historystorage.unrewarded_histories[history_id].action\n reward_action_idx = self._actions.index(reward_action)\n context = self._historystorage.unrewarded_histories[history_id].context[reward_action_idx]\n context = np.matrix(context)\n\n # Update the model\n matrix_a = self._modelstorage.get_model()['matrix_a']\n matrix_ainv = self._modelstorage.get_model()['matrix_ainv']\n b = self._modelstorage.get_model()['b']\n theta = self._modelstorage.get_model()['theta']\n matrix_a[reward_action] += np.dot(context.T, context)\n matrix_ainv[reward_action] = np.linalg.solve(matrix_a[reward_action], np.identity(self.d))\n b[reward_action] += reward * context.T\n theta[reward_action] = np.dot(matrix_ainv[reward_action], b[reward_action])\n self._modelstorage.save_model({'matrix_a': matrix_a, 'matrix_ainv': matrix_ainv, 'b': b, 'theta': theta})\n\n # Update the history\n self._historystorage.add_reward(history_id, reward)", "def link_room(self, room_to_link, direction):\n self.linked_rooms[direction] = room_to_link\n # print(self.name + \" linked rooms :\" + repr(self.linked_rooms) )", "def add_new_event(self,\n event_type: str,\n event_datetime: str,\n covid_status: str = \"U\",\n death: int = 0,\n critical_care_admission: int = 0,\n component: str or None = None,\n source: str or None = None,\n source_type: str or None = None,\n wimd: int or None = None,\n **kwargs):\n # Parse datetime and check validity (None for date if invalid)\n event_datetime = parse_datetime(event_datetime)\n if event_datetime.get(\"date\") is None:\n err = f\"Datetime parsed when trying to generate a new outcome event for {self.patientId} was invalid!\"\n 
self._config.write_to_log(err)\n raise ValueError(err)\n # Create outcome document\n new_outcome = Event(patientId=self.patientId,\n eventType=event_type.strip(),\n eventDate=event_datetime.get(\"date\"),\n covidStatus=covid_status,\n death=death,\n criticalCareAdmission=critical_care_admission,\n **kwargs)\n # Populate with optional parameters if given\n new_outcome = _add_if_value(new_outcome, [(\"component\", component),\n (\"source\", source),\n (\"sourceType\", source_type),\n (\"wimd\", wimd),\n (\"eventTime\", event_datetime.get(\"time\"))])\n new_outcome = new_outcome.save()\n self.outcomeEvents.append(new_outcome)\n self.save()\n self._config.write_to_log(f\"Outcome event {new_outcome.id} for patient {self.patientId}\")", "def make_link(first, second):\n manager = Actions()\n manager.make_link(first, second)", "def add_sample(self, img, action, reward, terminal):\n self.imgs[:, :, self.top] = img\n self.actions[self.top] = action\n self.rewards[self.top] = reward\n self.terminal[self.top] = terminal\n\n if self.size == self.max_steps:\n self.bottom = (self.bottom + 1) % self.max_steps\n else:\n self.size += 1\n\n self.top = (self.top + 1) % self.max_steps", "async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def create_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN create_data_link\n duid, sna, update = _create_data_link_params(params)\n as_admin, user = _get_admin_request_from_object(params, 'as_admin', 'as_user')\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.FULL,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'create_data_link', ctx.log_info, as_user=user, skip_check=not as_admin)\n link = self._samples.create_data_link(\n user if user else _UserID(ctx[_CTX_USER]),\n duid,\n sna,\n update,\n as_admin=as_admin)\n results = {'new_link': _links_to_dicts([link])[0]}\n #END create_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method create_data_link return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]", "def related_url(self) -> pulumi.Output[Sequence['outputs.RelatedUrlResponse']]:\n return pulumi.get(self, \"related_url\")", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def get_outcome(self):\n if not self.is_paid:\n raise ValueError(\"There isn't an outcome.\")\n return self.team_a if self.outcome else self.team_b", "def PostReward(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
_publish_reward_topic(self, reward, steps, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)\n self.reward_list.append(reward)\n self.episode_list.append(episode_number)\n self.step_list.append(steps)\n list = str(reward) + \";\" + str(episode_number) + \";\" + str(steps) + \"\\n\"\n\n with open(self.csv_name + '.csv', 'a') as csv:\n csv.write(str(list))", "def add_link(\n self,\n url: str,\n label: Optional[str] = None,\n ) -> None:\n if not label:\n label = url\n self._client.add_element(\n Markdown(\n f\"[{label}]({url})\",\n on_tap_link=lambda e: self._client.page.launch_url(e.data),\n )\n )", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)", "def add(self, context, action, reward):\n\n if self.intercept:\n c = np.array(context[:])\n c = np.append(c, 1.0).reshape((1, self.context_dim + 1))\n else:\n c = np.array(context[:]).reshape((1, self.context_dim))\n\n if self.contexts is None:\n self.contexts = c\n else:\n self.contexts = np.vstack((self.contexts, c))\n\n r = np.zeros((1, self.num_actions))\n r[0, action] = reward\n if self.rewards is None:\n self.rewards = r\n else:\n self.rewards = np.vstack((self.rewards, r))\n\n self.actions.append(action)", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n board.empty_cell(action['action']['at'])\n reward += 1\n else:\n if action['action_type'] == YoteActionType.ADD:\n state.boring_moves += 1\n state.in_hand[player] -= 1\n board.fill_cell(action['action']['to'], Color(player))\n elif action['action_type'] == YoteActionType.MOVE:\n at = action['action']['at']\n to = action['action']['to']\n\n def distance(cell_1, cell_2):\n import math\n return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n board.empty_cell(at)\n board.fill_cell(to, Color(player))\n if int(distance(at, to)) == 1:\n state.boring_moves += 1\n 
elif int(distance(at, to)) > 1:\n state.boring_moves = 0\n next_is_reward = True\n board.fill_cell(to, Color(player))\n if at[0] == to[0] and at[1] < to[1]:\n board.empty_cell((at[0], at[1] + 1))\n captured = (at[0], at[1] + 1)\n elif at[0] == to[0] and at[1] > to[1]:\n board.empty_cell((at[0], at[1] - 1))\n captured = (at[0], at[1] - 1)\n elif at[1] == to[1] and at[0] < to[0]:\n board.empty_cell((at[0] + 1, at[1]))\n captured = (at[0] + 1, at[1])\n elif at[1] == to[1] and at[0] > to[0]:\n board.empty_cell((at[0] - 1, at[1]))\n captured = (at[0] - 1, at[1])\n reward += 1\n\n state.set_board(board)\n state.score[player] += reward\n state.captured = captured\n state.rewarding_move = next_is_reward\n state.previous_is_reward = previous_is_reward\n state.set_latest_player(player)\n state.set_latest_move(json_action)\n if next_is_reward:\n state.set_next_player(player)\n else:\n state.set_next_player(player * -1)\n\n done = YoteRules.is_end_game(state)\n return state, done, next_is_reward", "def link_to(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph)\n else:\n target_node = criterion_or_node\n return self.send(target_node, 'accept_link',\n originating_node=self.id)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, 
done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def strategy(self,\r\n opponent:Player,\r\n message=torch.zeros(10))->Action:\r\n #need to update name for posterity's sake\r\n self.finished_opponent = opponent.name\r\n #Regardless of intent for first few turns, do the base action.\r\n if len(self.history) == 0: return self.action_base\r\n \r\n #get overall reward\r\n self.reward = self.find_reward(opponent) \r\n\r\n # assess perceived intent message in opponent.sent_message\r\n self.intent_received_prev = self.intent_received\r\n self.intent_received = opponent.intent_sent\r\n self.assessment_prev = self.assessment\r\n self.assessment = self.assess_received_intent(opponent)#this is the estimate of what the opponent is doing\r\n \r\n # store for testing later\r\n self.list_reward.append(self.reward)\r\n self.list_intent_received.append(self.intent_received_prev)\r\n self.list_intent_sent.append(self.intent_sent_prev)\r\n self.list_intent_assessment.append(self.assessment_prev)\r\n self.list_intent_true.append(opponent.history[-1])\r\n \r\n # receive assessment and decide to stay with self.base_Action\r\n # OR change it to the other action. 
\r\n self.old_decision = self.decision\r\n self.decision = self.decide_based_on_new_intel(opponent) # what the opponent actually did last turn\r\n self.list_decision.append(self.old_decision)\r\n \r\n return self.decision", "def addReagentTargetedGene(\n self,\n reagent_id,\n gene_id,\n targeted_gene_id=None,\n targeted_gene_label=None,\n description=None,\n reagent_category=None\n ):\n\n # akin to a variant locus\n # is this some sort of pseudo bnode?\n if targeted_gene_id is None:\n targeted_gene_id = '_' + gene_id + '-' + reagent_id\n targeted_gene_id = targeted_gene_id.replace(\":\", \"\")\n self.model.addIndividualToGraph(\n targeted_gene_id,\n targeted_gene_label,\n self.globaltt['reagent_targeted_gene'],\n description,\n ind_category=reagent_category\n )\n\n if gene_id is not None:\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_expression_variant_of'], gene_id\n )\n\n self.graph.addTriple(\n targeted_gene_id, self.globaltt['is_targeted_by'], reagent_id\n )", "def __init__(__self__, *,\n individual_outcome: Optional[pulumi.Input[Sequence[pulumi.Input['IndividualOutcomeArgs']]]] = None,\n roll_up: Optional[pulumi.Input['PrimaryStepRollUp']] = None):\n if individual_outcome is not None:\n pulumi.set(__self__, \"individual_outcome\", individual_outcome)\n if roll_up is not None:\n pulumi.set(__self__, \"roll_up\", roll_up)", "def _send_lti2_outcome(self):\r\n payload = textwrap.dedent(\"\"\"\r\n {{\r\n \"@context\" : \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\" : \"Result\",\r\n \"resultScore\" : {score},\r\n \"comment\" : \"This is awesome.\"\r\n }}\r\n \"\"\")\r\n data = payload.format(score=0.8)\r\n return self._send_lti2(data)", "def join(self, rewards_s3_path, obs_time_window=None, ratio=0.8, wait=True):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n if obs_time_window is None:\n logger.warning(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with all the observation data\"\n )\n obs_end_time = None\n obs_start_time = None\n else:\n logger.info(\n f\"Start a join job to join reward data \"\n f\"under '{rewards_s3_path}' with observation \"\n f\"data in the past {obs_time_window} hours\"\n )\n obs_end_time = datetime.utcnow()\n obs_start_time = obs_end_time - timedelta(hours=obs_time_window)\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n input_obs_data_s3_path = (\n f\"s3://{self.resource_manager.firehose_bucket}/{self.experiment_id}\"\n )\n input_obs_data_s3_path = f\"{input_obs_data_s3_path}/inference_data\"\n # init joining job, update join table\n logger.info(\"Creating resource for joining job...\")\n\n try:\n self.next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=input_obs_data_s3_path,\n obs_start_time=obs_start_time,\n obs_end_time=obs_end_time,\n input_reward_data_s3_path=rewards_s3_path,\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started joining job...\")\n self.next_join_job.start_join(ratio=ratio, wait=wait)\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n succeeded_state = (\n 
self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check Athena queries logs \"\n \"for more information.\"\n )", "def test__put_two_way_link_into():\n for input_value, defaults, expected_output in (\n (False, False, {}),\n (False, True, {'two_way_link': False}),\n (True, False, {'two_way_link': True}),\n ):\n data = put_two_way_link_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def _act_impl(self, observation, reward,\n done):\n if done:\n raise core.EpisodeDoneError(\"Called act on a done episode.\")\n\n if not self.observation_space.contains(observation):\n raise core.InvalidObservationError(\"Invalid ovservation: %s\" %\n observation)\n if self.params.observation_adjustment_fn:\n observation = self.params.observation_adjustment_fn(\n self.rng, self.beliefs, observation)\n\n features = self.feature_selection_fn(observation)\n self.beliefs = self._update_beliefs(features, self.beliefs)\n action = self._allocate(self._n_resource, self.beliefs)\n\n if not self.action_space.contains(action):\n raise gym.error.InvalidAction(\"Invalid action: %s\" % action)\n\n return action", "async def link(self, ctx: Context) -> None:\r\n try:\r\n params: List[str] = get_cmd_params(ctx)\r\n\r\n if len(params) < 1 or not params[0].isdigit() or int(params[0]) > len(self.yt_result.ids):\r\n await ctx.send(\"Please enter a valid video number from 0 to 5\")\r\n return\r\n\r\n self.yt_link = await self.message.edit(content=self.yt_result.get_link(int(params[0])))\r\n await ctx.message.delete()\r\n except Exception as e:\r\n await self.channels.log_error(e, \"ytl\")", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = 
self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def scoring_opportunities(self, scoring_opportunities):\n\n self._scoring_opportunities = scoring_opportunities", "def link_sample(self, other):\n with other.entry.nxfile:\n if 'sample' in self.entry:\n if 'sample' in other.entry:\n del other.entry['sample']\n other.entry.makelink(self.entry['sample'])", "async def cmd_galaddlinkuwl(self, ctx):\n\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('`Useage: [p]galaddlinkuwl <startoflink>, [Bot Owner] Adds a link from gallery link whitelist.`')\n \n # ===== ADD THE NEW LINKS TO THE WHITELIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) + set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await 
ctx.channel.send(content=\"{}\\n are already in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been added to the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def add_sample(self, state, action, state_new, reward, endstate, episode):\r\n new_sample = np.array([state, action, state_new, reward, endstate])\r\n if self.step_counter == 0 and episode == 0:\r\n self.experience_batch = new_sample\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # first sample twice in the batch to be able to index over the rows\r\n elif len(self.experience_batch) < self.experience_batch_size:\r\n self.experience_batch = np.vstack([self.experience_batch, new_sample]) # add new sample to batch when it is not full\r\n else:\r\n self.experience_batch[self.step_counter % self.experience_batch_size, :] = new_sample # override the components of the batch when it is full\r", "def actor_add_relation():\r\n\r\n data = get_request_data()\r\n if 'id' in data.keys():\r\n try:\r\n row_id = int(data['id'])\r\n relation_id = int(data['relation_id'])\r\n except:\r\n err = 'Id must be integer'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n obj = Movie.query.filter_by(id=relation_id).first()\r\n try:\r\n actor = Actor.add_relation(row_id, obj)\r\n rel_actor = {k: v for k, v in actor.__dict__.items() if k in ACTOR_FIELDS}\r\n rel_actor['filmography'] = str(actor.filmography)\r\n except:\r\n err = 'Record with such id does not exist'\r\n return make_response(jsonify(error=err), 400)\r\n\r\n return make_response(jsonify(rel_actor), 200)\r\n\r\n else:\r\n err = 'No id specified'\r\n return make_response(jsonify(error=err), 400)", "def add_reward(self, choice, count=1):\n self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, \"%s:rewards\" % choice, count)\n self._choices = None", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority", "def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):\n\n # Keep track of total reward\n self.episode_reward += next_reward\n if self.verbose > 0:\n logger.debug(\n f\"Agent acknowledges receiving a reward of {next_reward}, episode reward so far {self.episode_reward}\"\n )\n\n # Update MCTS tree\n if not done:\n self.mcts_head = self.mcts_head.children[action]\n self.mcts_head.prune() # This updates the node.path\n\n # Train\n if self.training:\n return self._train(kwargs[\"log_prob\"])\n else:\n return 0.0", "def test_shows_issues_from_grouplink(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n 
GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def append(self, state, action, reward, done):\n assert state.shape == self._state_shape, \\\n 'Invalid state shape (required: %s, got: %s)' % (self._state_shape, state.shape)\n\n self._states[self._pos] = state\n self._actions[self._pos] = action\n self._rewards[self._pos] = reward\n self._terminals[self._pos] = done\n\n self._count = max(self._count, self._pos + 1)\n self._pos = (self._pos + 1) % self._max_size" ]
[ "0.7362988", "0.717459", "0.7163847", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296", "0.42303044", "0.42303044", "0.4199248", "0.41974282", "0.41635618", "0.41283748", "0.4120942", "0.41087726", "0.41035667", "0.40950674", "0.408618", "0.40834093", "0.40813103", "0.40780374", "0.40770057", "0.4075737", "0.40686032", "0.40659428", "0.40503526", "0.40333", "0.40275708", "0.4023863", "0.4007657", "0.40069127", "0.40017104", "0.39986208", "0.39897925", "0.39840496", "0.39749613", "0.39745083", "0.3972324", "0.39586034", "0.39508075", "0.3946143", "0.39457676", "0.3932967", "0.39325184", "0.39297178", "0.39295676", "0.39276245", "0.39270565", "0.39229208", "0.39168912", "0.39096275", "0.3907056", "0.39025453", "0.39004102", "0.38994065", "0.38956124", "0.38943377", "0.38892362", "0.38891497", "0.38870266", "0.38820302", "0.38809907", "0.38804775", "0.38761795", "0.38738146", "0.3869238", "0.38686046", "0.38684267", "0.38673568", "0.38626942", "0.3862313", "0.3860057", "0.38582462", "0.38560754", "0.38555625", "0.38539255", "0.3853633" ]
0.7061997
3
Unlinking an outcome deletes the outcome itself only if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail.
def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):

    path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)
    response = client.delete(request_ctx, url, **request_kwargs)

    return response
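A minimal, self-contained usage sketch for the helper above. Everything in it beyond the function itself is an assumption invented for illustration: the RequestContext class, the client shim, the base URL, the token, and the ids are stand-ins for whatever the real SDK supplies.

import requests

class RequestContext:
    # Stand-in for the SDK's request context: just a base URL and a token.
    def __init__(self, base_api_url, token):
        self.base_api_url = base_api_url
        self.token = token

class client:
    # Stand-in for the SDK's client module; forwards DELETEs to requests.
    @staticmethod
    def delete(request_ctx, url, **kwargs):
        headers = {'Authorization': 'Bearer %s' % request_ctx.token}
        return requests.delete(url, headers=headers, **kwargs)

ctx = RequestContext('https://canvas.example.com/api', 'hypothetical-token')
response = unlink_outcome_global(ctx, id=17, outcome_id=42)

# The server refuses to unlink the last link to an aligned outcome, so an
# error status here is an expected outcome rather than a bug.
if not response.ok:
    print('unlink rejected:', response.status_code)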
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink(self, link_id):", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def remove_link():", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def unlink(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.UNLINK_FROM_TEMPLATE,\n EntityType.ROLE.value, None)", "def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)", "def unlink(self):\n self._linked = False\n self.is_dirty = False\n return self", "def unlink_pivot(remote, pivot_id):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_UnlinkPivot(pivot_id)\n remote.runCommand(cmd)", "async def unlink(self, ctx: MyContext):\n query = \"SELECT * FROM wormhole_channel WHERE channelID = ?\"\n wh_channel = self.bot.db_query(\n query, (ctx.channel.id,), astuple=True, fetchone=True\n )\n # comes as: (name, channelID, guildID, type, webhookID, webhookTOKEN)\n if len(wh_channel) == 0:\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-linked\"))\n return\n query = \"DELETE FROM wormhole_channel WHERE channelID = ? 
AND name = ?\"\n async with ClientSession() as session:\n webhook = discord.Webhook.partial(\n wh_channel[4], wh_channel[5], session=session\n )\n await webhook.delete()\n self.bot.db_query(query, (wh_channel[0], ctx.channel.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.channel-unlinked\")\n )", "def delete_sense_relation(wn, source, target, change_list=None):\n delete_sense_rel(wn, source, target, change_list)\n delete_sense_rel(wn, target, source, change_list)", "def delete_relation(wn, source, target, change_list=None):\n delete_rel(source, target, change_list)\n delete_rel(target, source, change_list)", "def _remove_link(self, name, object_id):\n if not name in self.data:\n return\n\n if self.data[name] and object_id in self.data[name]:\n self.data[name] = self.data[name].remove(object_id)", "def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None", "def remove_link(self, dest):\n for i, link in enumerate(self.runscript.links):\n if link[1] == dest:\n del self.runscript.links[i]\n break", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n wn_source = wn\n entry = wn_source.entry_by_id(source_entry)\n if entry:\n sense = [sense for sense in entry.senses if sense.id == source][0]\n if not any(r for r in sense.sense_relations if r.target == target):\n print(\"No sense relations deleted\")\n else:\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]\n if change_list:\n change_list.change_entry(wn, entry)\n else:\n print(\"No entry for \" + source_entry)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n entry = wn.entry_by_id(source_entry)\n if change_list:\n change_list.change_entry(wn, entry)\n sense = [sense for sense in entry.senses if sense.id == source][0]\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]", "def remove(self, thought):\n # delete references to thought\n for linked in thought.links.all:\n linked.links.remove(thought)\n self.update(linked)\n\n # remove thought itself\n if self.__db.contains(thought.key):\n self.__db.remove(thought.key)\n self.__cache.remove(thought)\n else:\n raise SarasvatiException(\"Unable to remove a non-existent thought\")", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def unlink(self):\n if self._context.get('is_landlord_rent'):\n rent_ids = []\n for tenancy_rec in self:\n analytic_ids = self.env['account.analytic.line'].search(\n [('account_id', '=', tenancy_rec.id)])\n if analytic_ids and analytic_ids.ids:\n analytic_ids.unlink()\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', tenancy_rec.id)])\n post_rent = [x.id for x in rent_ids if x.move_check is True]\n if post_rent:\n raise Warning(\n _('''You cannot delete Tenancy record, if any related 
Rent'''\n '''Schedule entries are in posted.'''))\n else:\n rent_ids.unlink()\n return super(AccountAnalyticAccount, self).unlink()", "def unlink(address):", "def unlink(self, cr, uid, ids, context=None):\n allowances_archive = self.read(cr, uid, ids, ['transfer','state'], context=context)\n unlink_ids = []\n for record in allowances_archive:\n if record['transfer'] == False and record['state'] in ['draft','cancel']:\n unlink_ids.append(record['id'])\n else:\n raise osv.except_osv(_('Invalid action !'), _('Sorry you can not Delete this record(s), Because The request is in Process , You have To cancelled Firest or It already Transtered To account Voucher!'))\n for id in unlink_ids:\n allowances_archive_name = self.browse(cr, uid, id, context=context).name\n message = _(\"Env and Safety allowances archive '%s' has been deleted.\") % allowances_archive_name\n self.log(cr, uid, id, message)\n return super(env_and_safety_allowances_archive, self).unlink(cr, uid, unlink_ids, context=context)", "def deletelink(self, link_index=1, child=None):\n child = self.getnodenamed(child) # Verify pointer.\n\n # (int link_index, node_bn* child)\n cnetica.DeleteLink_bn.argtypes = [c_int, c_void_p]\n cnetica.DeleteLink_bn.restype = None\n cnetica.DeleteLink_bn(link_index, child)", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def unlink_action(self):\n self.check_access_rights('write', raise_exception=True)\n self.filtered('binding_model_id').write({'binding_model_id': False})\n return True", "def delete_rel(source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source.id, target.id))\n ss = source\n source.synset_relations = [\n r for r in ss.synset_relations if r.target != target.id]\n if change_list:\n change_list.change_synset(source)", "def unShare(sharedItem):\n sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()", "def problem_relationship_delete(self, src_identifier, relation_dict):\n self._delete(\"problems/%d/relationships\" % src_identifier, json=relation_dict)", "def delete(self):\n\n lod_history = self.repo._get_lod_history(self.lod)\n assert lod_history.exists()\n lod_history.update(self.repo._youngest, None)\n self._mark_deleted()", "def pre_delete_crossing(sender, instance, **kwargs):\n Link.objects.filter(origin=instance.id).delete()\n Link.objects.filter(destination=instance.id).delete()", "def unlink(self):\n for activity in self:\n if activity.state != 'draft':\n raise ValidationError(_('You cannot delete activity'))\n return super(inagro_crop_activity, self).unlink()", "def unlink_all(self):\n del self._links[:]", "def unlink_from(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph.handle)\n else:\n target_node = criterion_or_node\n self.send(target_node, 'drop_link',\n originating_node=self.id)", "def delete_leader(self):", "def unlink(self, cr, uid, ids, context=None):\n if len(self.pool.get('payment.enrich').search(cr, uid,[('enrich_category','in',ids),('state','!=','draft')], context=context)) > 0:\n raise osv.except_osv(_('Invalid Action Error'), _('Can not delete category(categories), Where there are some enrich with this category'))\n return super(enrich_category, self).unlink(cr, uid, ids, context=context)", "def UnlinkPackages():\n for path in packagesToUnlink:\n if dirMod.isFolder(path):\n dirMod.deleteFolder(path)\n elif dirMod.isFile(path):\n dirMod.deleteFile(path)\n else:\n print(errMod.formatError(\"Sequestrum\", \"uwu This was not 
supposed to happen... uwu\"))", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def test_unlink_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.link_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n linked_snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(linked_snap_details.get('linked'))\n self.replication.unlink_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def DeleteTarget(self, target_instance_id):", "def test_delete_link_no_resources(self):\n g = groups.get_by_name(\"fifth group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you want to remove group {0} (id={1})\".format(g.name, g.id), alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def free_references(source_action):\n for act2_ref in source_action.references:\n act2 = act2_ref.action2\n act2.num_refs -= 1\n if act2.num_refs == 0: free_action2_ids.append(act2.id)", "def unlink(self, path: PathLike):", "def link_delete_callback(self):\n pass", "def remove(self, destination):\n if destination not in self.__links:\n raise SarasvatiException(\"Link to specified thought does not exist\")\n del self.__links[destination]", "def delete_data(request, result_id):\n result = TestResult.objects.get(id=result_id)\n result.delete()\n gun = result.bullet.gun\n return HttpResponseRedirect(reverse('gun', args=[gun.id]))", "def _die(self):\n\t\tself.site.agents_in_site.remove(self)\n\t\tself.site = None\n\t\tif self.debt_link != None:\n\t\t\tself.debt_link.lender.loans.remove(self.debt_link)\n\t\t\tself.debt_link = None\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tloan.borrower.debt_link = None\n\t\t\tdel self.loans[l]\n\t\tif self.gift_link != None:\n\t\t\tself.gift_link.giver.gifts.remove(self.gift_link)\n\t\t\tself.gift_link = None\n\t\tfor g, gift in enumerate(self.gifts):\n\t\t\tgift.taker.gift_link = None\n\t\t\tdel self.gifts[g]\n\t\tself.agents_list.remove(self)", "async def unlink(self, ctx):\n # Remove all link tokens and spotify details for this user\n remove_tokens(ctx.author.id)\n remove_spotify_details(ctx.author.id)\n await ctx.reply(\"All your linked accounts were removed, if you had any!\")", "def unfollow(source_id, destination_id):\n Forward.objects.filter(source_id=source_id,\n destination_id=destination_id).delete()\n Backward.objects.filter(destination_id=destination_id,\n source_id=source_id).delete()", "def unlink(self):\n context = self._context or {}\n for src_brw in self.browse():\n if src_brw.state != 'cancel':\n raise exceptions.except_orm(\n _(\"Invalid Procedure!!\"),\n _(\"The withholding document needs to be in cancel state to\"\n \" be deleted.\"))\n else:\n super(AccountWhSrc, self).unlink(\n )\n return True", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = 
rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def unlink(self):\n raise ValidationError(_(\"Products may not be deleted. Please archive them instead.\"))", "def test_modify_storage_group_snapshot_unlink_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, link=True)\n linked_snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(linked_snap_details.get('linked'))\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, unlink=True)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def delete_link(self):\n self.link_layout.links_list.remove_widget(self)\n self.link_layout.links.remove(self.text)\n utils.update_data()\n utils.data[self.link_layout.parent_screen.name]['links'] = self.link_layout.links\n utils.save_project_data(utils.data[self.link_layout.parent_screen.name],\n f\"{utils.data[self.link_layout.parent_screen.name]['proj_path']}/project_data.json\")", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def unsetReference(self):\n return _libsbml.Association_unsetReference(self)", "def _unshare_file(target, force=False):\n logging.debug(\"Un-sharing file %s\" % target)\n if not force and os.stat(target).st_nlink == 1:\n msg = \"File %s has ONE hard link. Un-sharing this file will delete it! 
Apply \\'--force\\' to do so.\" % target\n logging.error(msg)\n raise FileNotFoundError(msg)\n os.unlink(target)", "def middledelalllistitems(self):\n self._linklist.delete()", "def check_post_delete_purge_links_metadata(integrated_ff):\n post_data = {\n 'biosource_type': 'immortalized cell line',\n 'award': '1U01CA200059-01',\n 'lab': '4dn-dcic-lab'\n }\n post_res = ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n post_item = post_res['@graph'][0]\n assert 'uuid' in post_item\n assert post_item['biosource_type'] == post_data['biosource_type']\n # make sure there is a 409 when posting to an existing item\n post_data['uuid'] = post_item['uuid']\n with pytest.raises(Exception) as exec_info:\n ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n assert '409' in str(exec_info.value) # 409 is conflict error\n\n # make a biosample that links to the biosource\n bios_data = {'biosource': [post_data['uuid']], 'status': 'deleted',\n 'lab': '4dn-dcic-lab', 'award': '1U01CA200059-01'}\n bios_res = ff_utils.post_metadata(bios_data, 'biosample', key=integrated_ff['ff_key'])\n bios_item = bios_res['@graph'][0]\n assert 'uuid' in bios_item\n\n # delete the biosource\n del_res = ff_utils.delete_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert del_res['status'] == 'success'\n assert del_res['@graph'][0]['status'] == 'deleted'\n\n # test get_metadata_links function (this will ensure everything is indexed, as well)\n links = []\n while not links or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 1\n assert links[0]['uuid'] == bios_item['uuid']\n assert links[0]['field'] == 'biosource[0].uuid'\n\n # purge biosource first, which will failed because biosample is still linked\n purge_res1 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res1['status'] == 'error'\n assert bios_item['uuid'] in [purge['uuid'] for purge in purge_res1['comment']]\n\n # purge biosample and then biosource\n purge_res2 = ff_utils.purge_metadata(bios_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res2['status'] == 'success'\n\n # wait for indexing to catch up\n while len(links) > 0 or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 0\n\n purge_res3 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res3['status'] == 'success'\n # make sure it is purged\n with pytest.raises(Exception) as exec_info:\n ff_utils.get_metadata(post_item['uuid'], key=integrated_ff['ff_key'],\n add_on='datastore=database')\n assert 'The resource could not be found' in str(exec_info.value)", "def test_groups_group_ref_delete(self):\n pass", "def drop_unlinked(data):\n data['exchanges'] = [exc\n for exc in data.get('exchanges', [])\n if exc['activity_code'] and exc['flow_code']\n ]\n return data", "def _delete(self):\n self.prev.next = self.next\n self.next.prev = self.prev", "def target_remove():\r\n try:\r\n target_id = request.post_vars[\"target\"]\r\n group_id = request.post_vars[\"group\"]\r\n except KeyError:\r\n pass\r\n else:\r\n result = gl.remove_from_targetgroup(target_id, group_id)\r\n if result:\r\n return 
response.json({'success': 'true'})\r\n return response.json({'success': 'false'})", "def remove_contained_cards_relations(event):\n resource = event.resource\n wall = find_interface(resource, IWall)\n for rid in event.contained_rids:\n for relation_id in wall.relations_map.find_relations(rid):\n del wall.relations_map[relation_id]", "def delete_link(update: Update, context: CallbackContext):\n query = update.callback_query\n link_id = query.data.split(\"delete:\")[1]\n\n with db.connect() as connection:\n link = db.get_link(connection, link_id)\n\n context.bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=f'You are about to delete a link to \"{link.title}\" ({link.url})\\nAre you sure?',\n reply_markup=keyboards.link_delete(link),\n )\n\n query.answer()", "def test_move_delete(self):\r\n source_course = CourseLocator(org='testx', offering='GreekHero', branch='draft')\r\n dest_course = CourseLocator(org='testx', offering='GreekHero', branch=\"published\")\r\n head = source_course.make_usage_key('course', \"head12345\")\r\n chapter2 = source_course.make_usage_key('chapter', 'chapter2')\r\n problem1 = source_course.make_usage_key('problem', 'problem1')\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\", \"problem3_2\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\"])\r\n # now move problem1 and delete problem3_2\r\n chapter1 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter1\"))\r\n chapter3 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter3\"))\r\n chapter1.children.append(problem1)\r\n chapter3.children.remove(problem1.map_into_course(chapter3.location.course_key))\r\n modulestore().delete_item(source_course.make_usage_key(\"problem\", \"problem3_2\"), self.user)\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\", \"problem3_2\"])", "def unlink ( self, fspath ):\n return", "def unlink(self):\n if not self:\n return True\n \n # for recomputing fields\n self.modified(self._fields)\n \n self._check_concurrency()\n \n self.check_access_rights('unlink')\n \n # Check if the records are used as default properties.\n refs = ['%s,%s' % (self._name, i) for i in self.ids]\n if self.env['ir.property'].search([('res_id', '=', False), ('value_reference', 'in', refs)]):\n raise UserError(_('Unable to delete this document because it is used as a default property'))\n \n # Delete the records' properties.\n with self.env.norecompute():\n self.env['ir.property'].search([('res_id', 'in', refs)]).unlink()\n self.delete_workflow()\n self.check_access_rule('unlink')\n \n cr = self._cr\n Data = self.env['ir.model.data'].sudo().with_context({})\n Defaults = self.env['ir.default'].sudo()\n Attachment = self.env['ir.attachment']\n \n for sub_ids in cr.split_for_in_conditions(self.ids):\n query = \"DELETE FROM %s WHERE id IN %%s\" % self._table\n cr.execute(query, (sub_ids,))\n \n # Removing the ir_model_data reference if the record being deleted\n # is a record created by xml/csv file, as these are not connected\n # with real database foreign keys, and would be dangling references.\n #\n # Note: the following steps are performed as superuser to avoid\n # access rights restrictions, and 
with no context to avoid possible\n # side-effects during admin calls.\n data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])\n if data:\n data.unlink()\n \n # For the same reason, remove the defaults having some of the\n # records as value\n Defaults.discard_records(self.browse(sub_ids))\n \n # For the same reason, remove the relevant records in ir_attachment\n # (the search is performed with sql as the search method of\n # ir_attachment is overridden to hide attachments of deleted\n # records)\n query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'\n cr.execute(query, (self._name, sub_ids))\n attachments = Attachment.browse([row[0] for row in cr.fetchall()])\n if attachments:\n attachments.unlink()\n \n # invalidate the *whole* cache, since the orm does not handle all\n # changes made in the database, like cascading delete!\n self.invalidate_cache()\n \n # recompute new-style fields\n if self.env.recompute and self._context.get('recompute', True):\n self.recompute()\n # auditing: deletions are infrequent and leave no trace in the database\n _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)\n return True", "def unfollow(user, actor, send_action=False):\n Follow.objects.filter(user = user, object_id = actor.pk,\n content_type = ContentType.objects.get_for_model(actor)).delete()\n if send_action:\n action.send(user, verb=_('stopped following'), target=actor)", "def cleanOrphanedLearners(self):\n\n # Before deleting Learners, ensure that if any Learners that are about to be\n # deleted point to a Team as their action, then that Team's count of\n # referincing Learners is decremented.\n for learner in self.learner_pop:\n if learner.getNumReferencingTeams() == 0 and not learner.isActionAtomic():\n learner.action.decrementNumReferencingLearners()\n\n # Remove all orphaned Learners from the Learner population\n self.learner_pop = [l for l in self.learner_pop if not l.getNumReferencingTeams() == 0]", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. 
But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'", "def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)", "def detach_typed_link(self, typed_link_specifier: Dict[str, Any]):\n return cd_client.detach_typed_link(\n DirectoryArn=self._dir_arn,\n TypedLinkSpecifier=typed_link_specifier\n )", "def delete_relatives(self):\n category_ratings = list(self.category_ratings.all())\n self.category_ratings.clear()\n for category_rating in category_ratings:\n if category_rating.isOrphaned():\n category_rating.delete()\n\n word_counts = list(self.word_counts.all())\n self.word_counts.clear()\n for word_count in word_counts:\n if word_count.isOrphaned():\n word_count.delete()", "def test_unlink_gen_snapshot(self):\n if self.is_v4:\n self.skipTest('Getting storage group list by generation does '\n 'not work on the V4. Will need logic in this test '\n 'based on uCode.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.link_gen_snapshot(\n sg_id=sg_name, link_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(snap_details.get('isLinked'))\n self.replication.unlink_gen_snapshot(\n sg_id=sg_name, unlink_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def unlink(self, *keys: KeyT) -> ResponseT:\n return self._split_command_across_slots(\"UNLINK\", *keys)", "def test__removeRelObject(t):\n t.adm._removeRelObject(\"device\", \"objmap\", \"relname\")", "def delete():", "def unfollow(alias):\n s = db.Series.alias_lookup(alias)\n s.following = False\n db.session.commit()\n output.series('Removing follow for {}'.format(s.name))", "def self_destruct(self, force_file_removal=False):\n self._unshare_linked_tree(directory=self.directory, force_file_removal=force_file_removal)", "def test_modify_storage_group_snapshot_unlink(self):\n if self.is_v4:\n self.skipTest(\n 'Modify storage group snapshot unlink by generation does '\n 'not work on the V4.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, link=True)\n linked_snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(linked_snap_details.get('isLinked'))\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, unlink=True)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def attachment_delete_link(context, attachment):\n if 
context['user'].has_perm('delete_foreign_attachments') \\\n or (context['user'] == attachment.creator and \\\n context['user'].has_perm('attachments.delete_attachment')):\n return {\n 'next': context['request'].build_absolute_uri(),\n 'delete_url': reverse('delete_attachment', kwargs={'attachment_pk': attachment.pk})\n }\n return {'delete_url': None,}", "def del_edge (self, src, dst):\n raise NotImplementedError", "def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev", "def delete_order():", "def unlink_obj(self, ref_frame, obj_name=None, delete=True):\n self.scene.remove_attached_object(ref_frame, obj_name)\n if delete:\n self.remove_obj(obj_name)", "def _unshare_dir(target):\n logging.debug(\"Un-sharing directory %s\" % target)\n os.rmdir(target)", "def unfollow(self,id):\n # DELETE /followings/$id\n debugMain('unfollow')\n resource = '/followings/%s'%id\n requestUrl = self.apiRootUrls[0] + resource\n debugRequest('unfollowing: %s'%requestUrl)\n r = self.session.delete(requestUrl)\n \n debugDetail('request headers:')\n debugJson(r.request.headers)\n debugDetail()\n debugDetail(' -- -- -- --')\n debugDetail()\n debugDetail('response headers:')\n debugJson(r.headers)\n debugDetail()\n \n if r.status_code is not 200:\n debugError('failed to unfollow.')\n debugDetail()\n return False\n return True", "def delete(self):\n if jwthandler.authorize_action(self, 1) == False:\n return None\n\n userdata = jwthandler.decode_userdata(self.request.headers[\"Authorization\"])\n\n body_categories = {\"link_id\": 1}\n link_dict = errorutil.check_fields(self.request.body.decode(), body_categories, self)\n\n if link_dict == False or linkutil.delete_link(link_dict[\"link_id\"], self) == False:\n return None\n\n formatted_message = loggerhandler.form_delete_message_dictionary(userdata, \n \"link\", \n link_dict[\"link_id\"])\n\n\n loggerhandler.log_message(\"delete\", formatted_message)\n\n self.write({\"message\":\"Success\"})", "async def remove_img(self, ctx: BBContext, url: str):\n\n con = await ctx.get_connection()\n query = f'DELETE FROM {TABLE_ARTS} WHERE url = $1'\n\n await con.execute(query, url)\n await ctx.tick(True)", "def POST_delete_link_img(self, res, link, name):\r\n # just in case we need to kill this feature from XSS\r\n if g.css_killswitch:\r\n return self.abort(403,'forbidden')\r\n link.del_image(name)\r\n link._commit()\r\n # hide the image and it's container\r\n res._hide(\"img-li_%s\" % name)\r\n # reset the status\r\n res._update('img-status', innerHTML = _(\"Deleted\"))", "def unmanaged_delete(task_id, url):\n\n PoolManager.db.query('DELETE FROM `unmanaged_deletions` WHERE `id` = %s', task_id)\n\n try:\n stat_result = gfal_exec('stat', (url,), return_value = True)\n except:\n return 0, None, None, 'stat error', ''\n\n if stat.S_ISDIR(stat_result.st_mode):\n # this is a directory\n result = gfal_exec('rmdir', (url,))\n else:\n result = gfal_exec('unlink', (url,))\n\n return (0,) + rmdir_result[1:]", "def remove_links(self, item):\r\n if item.get('link'):\r\n item.pop('link')\r\n if item.get('links'):\r\n item.pop('links')\r\n return item", "def del_data_reference(self, target):\n\n if target in self:\n target = self._storage[target]\n if self._data_pointer_key in target:\n del target[self._data_pointer_key]", "def unfollow(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def _reset_unlinked_constituencies():\n 
UnlinkedConstituency.objects.all().delete()", "def DeleteConceptRelations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unlink_venue_account(venue_account, after_action, owner, user):\n if after_action == 'move_events':\n _transfer_venue_events_to_owner(venue_account, owner, user)\n elif after_action == 'remove_events':\n _delete_venue_events(venue_account, user)\n\n venue_account.delete()", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}" ]
[ "0.6474747", "0.63120115", "0.6281923", "0.61563367", "0.61477256", "0.6030046", "0.58153516", "0.56244844", "0.5609927", "0.5545071", "0.55448717", "0.54989296", "0.5490254", "0.5487666", "0.5484662", "0.54513216", "0.5432963", "0.54169786", "0.53966224", "0.5357432", "0.5355181", "0.5353978", "0.5317241", "0.52846473", "0.526616", "0.52355725", "0.5235444", "0.5225721", "0.5221762", "0.52011555", "0.51998115", "0.51822364", "0.5181612", "0.5170645", "0.5159116", "0.51543283", "0.5142953", "0.51375484", "0.5136116", "0.5124606", "0.5112891", "0.5108877", "0.509462", "0.5080735", "0.507185", "0.50639564", "0.5056037", "0.5041625", "0.5040202", "0.50355184", "0.5034844", "0.5034677", "0.5023138", "0.5010116", "0.50096446", "0.5001961", "0.49926782", "0.49647552", "0.49629745", "0.4960695", "0.49548632", "0.49460593", "0.49451593", "0.494213", "0.494208", "0.4938155", "0.4934565", "0.49276358", "0.4923267", "0.4920343", "0.4911879", "0.49112195", "0.49108583", "0.491012", "0.49076423", "0.49072933", "0.4903375", "0.49016306", "0.48945874", "0.48834962", "0.4878512", "0.48702112", "0.48701078", "0.48660323", "0.4865588", "0.4865385", "0.48601672", "0.4847724", "0.48429334", "0.48383126", "0.48318407", "0.4826131", "0.48255196", "0.48250464", "0.482382", "0.48189095", "0.48171067", "0.48119032", "0.48106897", "0.48075652" ]
0.6547148
0
Unlinking an outcome deletes the outcome itself only if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail.
def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):

    path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)
    response = client.delete(request_ctx, url, **request_kwargs)

    return response
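The accounts-scoped variant differs only in the extra account_id path segment; reusing the hypothetical ctx stand-in from the sketch above (the ids remain placeholders):

# Same plumbing as before, just scoped to a specific account.
response = unlink_outcome_accounts(ctx, account_id=1, id=17, outcome_id=42)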
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink(self, link_id):", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def remove_link():", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def unlink(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.UNLINK_FROM_TEMPLATE,\n EntityType.ROLE.value, None)", "def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)", "def unlink(self):\n self._linked = False\n self.is_dirty = False\n return self", "def unlink_pivot(remote, pivot_id):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_UnlinkPivot(pivot_id)\n remote.runCommand(cmd)", "async def unlink(self, ctx: MyContext):\n query = \"SELECT * FROM wormhole_channel WHERE channelID = ?\"\n wh_channel = self.bot.db_query(\n query, (ctx.channel.id,), astuple=True, fetchone=True\n )\n # comes as: (name, channelID, guildID, type, webhookID, webhookTOKEN)\n if len(wh_channel) == 0:\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-linked\"))\n return\n query = \"DELETE FROM wormhole_channel WHERE channelID = ? 
AND name = ?\"\n async with ClientSession() as session:\n webhook = discord.Webhook.partial(\n wh_channel[4], wh_channel[5], session=session\n )\n await webhook.delete()\n self.bot.db_query(query, (wh_channel[0], ctx.channel.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.channel-unlinked\")\n )", "def delete_sense_relation(wn, source, target, change_list=None):\n delete_sense_rel(wn, source, target, change_list)\n delete_sense_rel(wn, target, source, change_list)", "def delete_relation(wn, source, target, change_list=None):\n delete_rel(source, target, change_list)\n delete_rel(target, source, change_list)", "def _remove_link(self, name, object_id):\n if not name in self.data:\n return\n\n if self.data[name] and object_id in self.data[name]:\n self.data[name] = self.data[name].remove(object_id)", "def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None", "def remove_link(self, dest):\n for i, link in enumerate(self.runscript.links):\n if link[1] == dest:\n del self.runscript.links[i]\n break", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n wn_source = wn\n entry = wn_source.entry_by_id(source_entry)\n if entry:\n sense = [sense for sense in entry.senses if sense.id == source][0]\n if not any(r for r in sense.sense_relations if r.target == target):\n print(\"No sense relations deleted\")\n else:\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]\n if change_list:\n change_list.change_entry(wn, entry)\n else:\n print(\"No entry for \" + source_entry)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n entry = wn.entry_by_id(source_entry)\n if change_list:\n change_list.change_entry(wn, entry)\n sense = [sense for sense in entry.senses if sense.id == source][0]\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]", "def remove(self, thought):\n # delete references to thought\n for linked in thought.links.all:\n linked.links.remove(thought)\n self.update(linked)\n\n # remove thought itself\n if self.__db.contains(thought.key):\n self.__db.remove(thought.key)\n self.__cache.remove(thought)\n else:\n raise SarasvatiException(\"Unable to remove a non-existent thought\")", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def unlink(self):\n if self._context.get('is_landlord_rent'):\n rent_ids = []\n for tenancy_rec in self:\n analytic_ids = self.env['account.analytic.line'].search(\n [('account_id', '=', tenancy_rec.id)])\n if analytic_ids and analytic_ids.ids:\n analytic_ids.unlink()\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', tenancy_rec.id)])\n post_rent = [x.id for x in rent_ids if x.move_check is True]\n if post_rent:\n raise Warning(\n _('''You cannot delete Tenancy record, if any related 
Rent'''\n '''Schedule entries are in posted.'''))\n else:\n rent_ids.unlink()\n return super(AccountAnalyticAccount, self).unlink()", "def unlink(address):", "def unlink(self, cr, uid, ids, context=None):\n allowances_archive = self.read(cr, uid, ids, ['transfer','state'], context=context)\n unlink_ids = []\n for record in allowances_archive:\n if record['transfer'] == False and record['state'] in ['draft','cancel']:\n unlink_ids.append(record['id'])\n else:\n raise osv.except_osv(_('Invalid action !'), _('Sorry you can not Delete this record(s), Because The request is in Process , You have To cancelled Firest or It already Transtered To account Voucher!'))\n for id in unlink_ids:\n allowances_archive_name = self.browse(cr, uid, id, context=context).name\n message = _(\"Env and Safety allowances archive '%s' has been deleted.\") % allowances_archive_name\n self.log(cr, uid, id, message)\n return super(env_and_safety_allowances_archive, self).unlink(cr, uid, unlink_ids, context=context)", "def deletelink(self, link_index=1, child=None):\n child = self.getnodenamed(child) # Verify pointer.\n\n # (int link_index, node_bn* child)\n cnetica.DeleteLink_bn.argtypes = [c_int, c_void_p]\n cnetica.DeleteLink_bn.restype = None\n cnetica.DeleteLink_bn(link_index, child)", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def unlink_action(self):\n self.check_access_rights('write', raise_exception=True)\n self.filtered('binding_model_id').write({'binding_model_id': False})\n return True", "def delete_rel(source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source.id, target.id))\n ss = source\n source.synset_relations = [\n r for r in ss.synset_relations if r.target != target.id]\n if change_list:\n change_list.change_synset(source)", "def unShare(sharedItem):\n sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()", "def problem_relationship_delete(self, src_identifier, relation_dict):\n self._delete(\"problems/%d/relationships\" % src_identifier, json=relation_dict)", "def delete(self):\n\n lod_history = self.repo._get_lod_history(self.lod)\n assert lod_history.exists()\n lod_history.update(self.repo._youngest, None)\n self._mark_deleted()", "def pre_delete_crossing(sender, instance, **kwargs):\n Link.objects.filter(origin=instance.id).delete()\n Link.objects.filter(destination=instance.id).delete()", "def unlink(self):\n for activity in self:\n if activity.state != 'draft':\n raise ValidationError(_('You cannot delete activity'))\n return super(inagro_crop_activity, self).unlink()", "def unlink_all(self):\n del self._links[:]", "def unlink_from(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph.handle)\n else:\n target_node = criterion_or_node\n self.send(target_node, 'drop_link',\n originating_node=self.id)", "def delete_leader(self):", "def unlink(self, cr, uid, ids, context=None):\n if len(self.pool.get('payment.enrich').search(cr, uid,[('enrich_category','in',ids),('state','!=','draft')], context=context)) > 0:\n raise osv.except_osv(_('Invalid Action Error'), _('Can not delete category(categories), Where there are some enrich with this category'))\n return super(enrich_category, self).unlink(cr, uid, ids, context=context)", "def UnlinkPackages():\n for path in packagesToUnlink:\n if dirMod.isFolder(path):\n dirMod.deleteFolder(path)\n elif dirMod.isFile(path):\n dirMod.deleteFile(path)\n else:\n print(errMod.formatError(\"Sequestrum\", \"uwu This was not 
supposed to happen... uwu\"))", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def test_unlink_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.link_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n linked_snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(linked_snap_details.get('linked'))\n self.replication.unlink_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def DeleteTarget(self, target_instance_id):", "def test_delete_link_no_resources(self):\n g = groups.get_by_name(\"fifth group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you want to remove group {0} (id={1})\".format(g.name, g.id), alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def free_references(source_action):\n for act2_ref in source_action.references:\n act2 = act2_ref.action2\n act2.num_refs -= 1\n if act2.num_refs == 0: free_action2_ids.append(act2.id)", "def unlink(self, path: PathLike):", "def link_delete_callback(self):\n pass", "def remove(self, destination):\n if destination not in self.__links:\n raise SarasvatiException(\"Link to specified thought does not exist\")\n del self.__links[destination]", "def delete_data(request, result_id):\n result = TestResult.objects.get(id=result_id)\n result.delete()\n gun = result.bullet.gun\n return HttpResponseRedirect(reverse('gun', args=[gun.id]))", "def _die(self):\n\t\tself.site.agents_in_site.remove(self)\n\t\tself.site = None\n\t\tif self.debt_link != None:\n\t\t\tself.debt_link.lender.loans.remove(self.debt_link)\n\t\t\tself.debt_link = None\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tloan.borrower.debt_link = None\n\t\t\tdel self.loans[l]\n\t\tif self.gift_link != None:\n\t\t\tself.gift_link.giver.gifts.remove(self.gift_link)\n\t\t\tself.gift_link = None\n\t\tfor g, gift in enumerate(self.gifts):\n\t\t\tgift.taker.gift_link = None\n\t\t\tdel self.gifts[g]\n\t\tself.agents_list.remove(self)", "async def unlink(self, ctx):\n # Remove all link tokens and spotify details for this user\n remove_tokens(ctx.author.id)\n remove_spotify_details(ctx.author.id)\n await ctx.reply(\"All your linked accounts were removed, if you had any!\")", "def unfollow(source_id, destination_id):\n Forward.objects.filter(source_id=source_id,\n destination_id=destination_id).delete()\n Backward.objects.filter(destination_id=destination_id,\n source_id=source_id).delete()", "def unlink(self):\n context = self._context or {}\n for src_brw in self.browse():\n if src_brw.state != 'cancel':\n raise exceptions.except_orm(\n _(\"Invalid Procedure!!\"),\n _(\"The withholding document needs to be in cancel state to\"\n \" be deleted.\"))\n else:\n super(AccountWhSrc, self).unlink(\n )\n return True", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = 
rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def unlink(self):\n raise ValidationError(_(\"Products may not be deleted. Please archive them instead.\"))", "def test_modify_storage_group_snapshot_unlink_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, link=True)\n linked_snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(linked_snap_details.get('linked'))\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, unlink=True)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def delete_link(self):\n self.link_layout.links_list.remove_widget(self)\n self.link_layout.links.remove(self.text)\n utils.update_data()\n utils.data[self.link_layout.parent_screen.name]['links'] = self.link_layout.links\n utils.save_project_data(utils.data[self.link_layout.parent_screen.name],\n f\"{utils.data[self.link_layout.parent_screen.name]['proj_path']}/project_data.json\")", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def unsetReference(self):\n return _libsbml.Association_unsetReference(self)", "def _unshare_file(target, force=False):\n logging.debug(\"Un-sharing file %s\" % target)\n if not force and os.stat(target).st_nlink == 1:\n msg = \"File %s has ONE hard link. Un-sharing this file will delete it! 
Apply \\'--force\\' to do so.\" % target\n logging.error(msg)\n raise FileNotFoundError(msg)\n os.unlink(target)", "def middledelalllistitems(self):\n self._linklist.delete()", "def check_post_delete_purge_links_metadata(integrated_ff):\n post_data = {\n 'biosource_type': 'immortalized cell line',\n 'award': '1U01CA200059-01',\n 'lab': '4dn-dcic-lab'\n }\n post_res = ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n post_item = post_res['@graph'][0]\n assert 'uuid' in post_item\n assert post_item['biosource_type'] == post_data['biosource_type']\n # make sure there is a 409 when posting to an existing item\n post_data['uuid'] = post_item['uuid']\n with pytest.raises(Exception) as exec_info:\n ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n assert '409' in str(exec_info.value) # 409 is conflict error\n\n # make a biosample that links to the biosource\n bios_data = {'biosource': [post_data['uuid']], 'status': 'deleted',\n 'lab': '4dn-dcic-lab', 'award': '1U01CA200059-01'}\n bios_res = ff_utils.post_metadata(bios_data, 'biosample', key=integrated_ff['ff_key'])\n bios_item = bios_res['@graph'][0]\n assert 'uuid' in bios_item\n\n # delete the biosource\n del_res = ff_utils.delete_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert del_res['status'] == 'success'\n assert del_res['@graph'][0]['status'] == 'deleted'\n\n # test get_metadata_links function (this will ensure everything is indexed, as well)\n links = []\n while not links or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 1\n assert links[0]['uuid'] == bios_item['uuid']\n assert links[0]['field'] == 'biosource[0].uuid'\n\n # purge biosource first, which will failed because biosample is still linked\n purge_res1 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res1['status'] == 'error'\n assert bios_item['uuid'] in [purge['uuid'] for purge in purge_res1['comment']]\n\n # purge biosample and then biosource\n purge_res2 = ff_utils.purge_metadata(bios_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res2['status'] == 'success'\n\n # wait for indexing to catch up\n while len(links) > 0 or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 0\n\n purge_res3 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res3['status'] == 'success'\n # make sure it is purged\n with pytest.raises(Exception) as exec_info:\n ff_utils.get_metadata(post_item['uuid'], key=integrated_ff['ff_key'],\n add_on='datastore=database')\n assert 'The resource could not be found' in str(exec_info.value)", "def test_groups_group_ref_delete(self):\n pass", "def drop_unlinked(data):\n data['exchanges'] = [exc\n for exc in data.get('exchanges', [])\n if exc['activity_code'] and exc['flow_code']\n ]\n return data", "def _delete(self):\n self.prev.next = self.next\n self.next.prev = self.prev", "def target_remove():\r\n try:\r\n target_id = request.post_vars[\"target\"]\r\n group_id = request.post_vars[\"group\"]\r\n except KeyError:\r\n pass\r\n else:\r\n result = gl.remove_from_targetgroup(target_id, group_id)\r\n if result:\r\n return 
response.json({'success': 'true'})\r\n return response.json({'success': 'false'})", "def remove_contained_cards_relations(event):\n resource = event.resource\n wall = find_interface(resource, IWall)\n for rid in event.contained_rids:\n for relation_id in wall.relations_map.find_relations(rid):\n del wall.relations_map[relation_id]", "def delete_link(update: Update, context: CallbackContext):\n query = update.callback_query\n link_id = query.data.split(\"delete:\")[1]\n\n with db.connect() as connection:\n link = db.get_link(connection, link_id)\n\n context.bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=f'You are about to delete a link to \"{link.title}\" ({link.url})\\nAre you sure?',\n reply_markup=keyboards.link_delete(link),\n )\n\n query.answer()", "def test_move_delete(self):\r\n source_course = CourseLocator(org='testx', offering='GreekHero', branch='draft')\r\n dest_course = CourseLocator(org='testx', offering='GreekHero', branch=\"published\")\r\n head = source_course.make_usage_key('course', \"head12345\")\r\n chapter2 = source_course.make_usage_key('chapter', 'chapter2')\r\n problem1 = source_course.make_usage_key('problem', 'problem1')\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\", \"problem3_2\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\"])\r\n # now move problem1 and delete problem3_2\r\n chapter1 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter1\"))\r\n chapter3 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter3\"))\r\n chapter1.children.append(problem1)\r\n chapter3.children.remove(problem1.map_into_course(chapter3.location.course_key))\r\n modulestore().delete_item(source_course.make_usage_key(\"problem\", \"problem3_2\"), self.user)\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\", \"problem3_2\"])", "def unlink ( self, fspath ):\n return", "def unlink(self):\n if not self:\n return True\n \n # for recomputing fields\n self.modified(self._fields)\n \n self._check_concurrency()\n \n self.check_access_rights('unlink')\n \n # Check if the records are used as default properties.\n refs = ['%s,%s' % (self._name, i) for i in self.ids]\n if self.env['ir.property'].search([('res_id', '=', False), ('value_reference', 'in', refs)]):\n raise UserError(_('Unable to delete this document because it is used as a default property'))\n \n # Delete the records' properties.\n with self.env.norecompute():\n self.env['ir.property'].search([('res_id', 'in', refs)]).unlink()\n self.delete_workflow()\n self.check_access_rule('unlink')\n \n cr = self._cr\n Data = self.env['ir.model.data'].sudo().with_context({})\n Defaults = self.env['ir.default'].sudo()\n Attachment = self.env['ir.attachment']\n \n for sub_ids in cr.split_for_in_conditions(self.ids):\n query = \"DELETE FROM %s WHERE id IN %%s\" % self._table\n cr.execute(query, (sub_ids,))\n \n # Removing the ir_model_data reference if the record being deleted\n # is a record created by xml/csv file, as these are not connected\n # with real database foreign keys, and would be dangling references.\n #\n # Note: the following steps are performed as superuser to avoid\n # access rights restrictions, and 
with no context to avoid possible\n # side-effects during admin calls.\n data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])\n if data:\n data.unlink()\n \n # For the same reason, remove the defaults having some of the\n # records as value\n Defaults.discard_records(self.browse(sub_ids))\n \n # For the same reason, remove the relevant records in ir_attachment\n # (the search is performed with sql as the search method of\n # ir_attachment is overridden to hide attachments of deleted\n # records)\n query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'\n cr.execute(query, (self._name, sub_ids))\n attachments = Attachment.browse([row[0] for row in cr.fetchall()])\n if attachments:\n attachments.unlink()\n \n # invalidate the *whole* cache, since the orm does not handle all\n # changes made in the database, like cascading delete!\n self.invalidate_cache()\n \n # recompute new-style fields\n if self.env.recompute and self._context.get('recompute', True):\n self.recompute()\n # auditing: deletions are infrequent and leave no trace in the database\n _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)\n return True", "def unfollow(user, actor, send_action=False):\n Follow.objects.filter(user = user, object_id = actor.pk,\n content_type = ContentType.objects.get_for_model(actor)).delete()\n if send_action:\n action.send(user, verb=_('stopped following'), target=actor)", "def cleanOrphanedLearners(self):\n\n # Before deleting Learners, ensure that if any Learners that are about to be\n # deleted point to a Team as their action, then that Team's count of\n # referincing Learners is decremented.\n for learner in self.learner_pop:\n if learner.getNumReferencingTeams() == 0 and not learner.isActionAtomic():\n learner.action.decrementNumReferencingLearners()\n\n # Remove all orphaned Learners from the Learner population\n self.learner_pop = [l for l in self.learner_pop if not l.getNumReferencingTeams() == 0]", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. 
But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'", "def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)", "def detach_typed_link(self, typed_link_specifier: Dict[str, Any]):\n return cd_client.detach_typed_link(\n DirectoryArn=self._dir_arn,\n TypedLinkSpecifier=typed_link_specifier\n )", "def delete_relatives(self):\n category_ratings = list(self.category_ratings.all())\n self.category_ratings.clear()\n for category_rating in category_ratings:\n if category_rating.isOrphaned():\n category_rating.delete()\n\n word_counts = list(self.word_counts.all())\n self.word_counts.clear()\n for word_count in word_counts:\n if word_count.isOrphaned():\n word_count.delete()", "def test_unlink_gen_snapshot(self):\n if self.is_v4:\n self.skipTest('Getting storage group list by generation does '\n 'not work on the V4. Will need logic in this test '\n 'based on uCode.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.link_gen_snapshot(\n sg_id=sg_name, link_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(snap_details.get('isLinked'))\n self.replication.unlink_gen_snapshot(\n sg_id=sg_name, unlink_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def unlink(self, *keys: KeyT) -> ResponseT:\n return self._split_command_across_slots(\"UNLINK\", *keys)", "def test__removeRelObject(t):\n t.adm._removeRelObject(\"device\", \"objmap\", \"relname\")", "def delete():", "def unfollow(alias):\n s = db.Series.alias_lookup(alias)\n s.following = False\n db.session.commit()\n output.series('Removing follow for {}'.format(s.name))", "def self_destruct(self, force_file_removal=False):\n self._unshare_linked_tree(directory=self.directory, force_file_removal=force_file_removal)", "def test_modify_storage_group_snapshot_unlink(self):\n if self.is_v4:\n self.skipTest(\n 'Modify storage group snapshot unlink by generation does '\n 'not work on the V4.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, link=True)\n linked_snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(linked_snap_details.get('isLinked'))\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, unlink=True)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def attachment_delete_link(context, attachment):\n if 
context['user'].has_perm('delete_foreign_attachments') \\\n or (context['user'] == attachment.creator and \\\n context['user'].has_perm('attachments.delete_attachment')):\n return {\n 'next': context['request'].build_absolute_uri(),\n 'delete_url': reverse('delete_attachment', kwargs={'attachment_pk': attachment.pk})\n }\n return {'delete_url': None,}", "def del_edge (self, src, dst):\n raise NotImplementedError", "def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev", "def delete_order():", "def unlink_obj(self, ref_frame, obj_name=None, delete=True):\n self.scene.remove_attached_object(ref_frame, obj_name)\n if delete:\n self.remove_obj(obj_name)", "def _unshare_dir(target):\n logging.debug(\"Un-sharing directory %s\" % target)\n os.rmdir(target)", "def unfollow(self,id):\n # DELETE /followings/$id\n debugMain('unfollow')\n resource = '/followings/%s'%id\n requestUrl = self.apiRootUrls[0] + resource\n debugRequest('unfollowing: %s'%requestUrl)\n r = self.session.delete(requestUrl)\n \n debugDetail('request headers:')\n debugJson(r.request.headers)\n debugDetail()\n debugDetail(' -- -- -- --')\n debugDetail()\n debugDetail('response headers:')\n debugJson(r.headers)\n debugDetail()\n \n if r.status_code is not 200:\n debugError('failed to unfollow.')\n debugDetail()\n return False\n return True", "def delete(self):\n if jwthandler.authorize_action(self, 1) == False:\n return None\n\n userdata = jwthandler.decode_userdata(self.request.headers[\"Authorization\"])\n\n body_categories = {\"link_id\": 1}\n link_dict = errorutil.check_fields(self.request.body.decode(), body_categories, self)\n\n if link_dict == False or linkutil.delete_link(link_dict[\"link_id\"], self) == False:\n return None\n\n formatted_message = loggerhandler.form_delete_message_dictionary(userdata, \n \"link\", \n link_dict[\"link_id\"])\n\n\n loggerhandler.log_message(\"delete\", formatted_message)\n\n self.write({\"message\":\"Success\"})", "async def remove_img(self, ctx: BBContext, url: str):\n\n con = await ctx.get_connection()\n query = f'DELETE FROM {TABLE_ARTS} WHERE url = $1'\n\n await con.execute(query, url)\n await ctx.tick(True)", "def POST_delete_link_img(self, res, link, name):\r\n # just in case we need to kill this feature from XSS\r\n if g.css_killswitch:\r\n return self.abort(403,'forbidden')\r\n link.del_image(name)\r\n link._commit()\r\n # hide the image and it's container\r\n res._hide(\"img-li_%s\" % name)\r\n # reset the status\r\n res._update('img-status', innerHTML = _(\"Deleted\"))", "def unmanaged_delete(task_id, url):\n\n PoolManager.db.query('DELETE FROM `unmanaged_deletions` WHERE `id` = %s', task_id)\n\n try:\n stat_result = gfal_exec('stat', (url,), return_value = True)\n except:\n return 0, None, None, 'stat error', ''\n\n if stat.S_ISDIR(stat_result.st_mode):\n # this is a directory\n result = gfal_exec('rmdir', (url,))\n else:\n result = gfal_exec('unlink', (url,))\n\n return (0,) + rmdir_result[1:]", "def remove_links(self, item):\r\n if item.get('link'):\r\n item.pop('link')\r\n if item.get('links'):\r\n item.pop('links')\r\n return item", "def del_data_reference(self, target):\n\n if target in self:\n target = self._storage[target]\n if self._data_pointer_key in target:\n del target[self._data_pointer_key]", "def unfollow(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def _reset_unlinked_constituencies():\n 
UnlinkedConstituency.objects.all().delete()", "def DeleteConceptRelations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unlink_venue_account(venue_account, after_action, owner, user):\n if after_action == 'move_events':\n _transfer_venue_events_to_owner(venue_account, owner, user)\n elif after_action == 'remove_events':\n _delete_venue_events(venue_account, user)\n\n venue_account.delete()", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}" ]
[ "0.6547148", "0.6474747", "0.6281923", "0.61563367", "0.61477256", "0.6030046", "0.58153516", "0.56244844", "0.5609927", "0.5545071", "0.55448717", "0.54989296", "0.5490254", "0.5487666", "0.5484662", "0.54513216", "0.5432963", "0.54169786", "0.53966224", "0.5357432", "0.5355181", "0.5353978", "0.5317241", "0.52846473", "0.526616", "0.52355725", "0.5235444", "0.5225721", "0.5221762", "0.52011555", "0.51998115", "0.51822364", "0.5181612", "0.5170645", "0.5159116", "0.51543283", "0.5142953", "0.51375484", "0.5136116", "0.5124606", "0.5112891", "0.5108877", "0.509462", "0.5080735", "0.507185", "0.50639564", "0.5056037", "0.5041625", "0.5040202", "0.50355184", "0.5034844", "0.5034677", "0.5023138", "0.5010116", "0.50096446", "0.5001961", "0.49926782", "0.49647552", "0.49629745", "0.4960695", "0.49548632", "0.49460593", "0.49451593", "0.494213", "0.494208", "0.4938155", "0.4934565", "0.49276358", "0.4923267", "0.4920343", "0.4911879", "0.49112195", "0.49108583", "0.491012", "0.49076423", "0.49072933", "0.4903375", "0.49016306", "0.48945874", "0.48834962", "0.4878512", "0.48702112", "0.48701078", "0.48660323", "0.4865588", "0.4865385", "0.48601672", "0.4847724", "0.48429334", "0.48383126", "0.48318407", "0.4826131", "0.48255196", "0.48250464", "0.482382", "0.48189095", "0.48171067", "0.48119032", "0.48106897", "0.48075652" ]
0.63120115
2
Unlinking an outcome only deletes the outcome itself if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail.
def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):
    path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)
    response = client.delete(request_ctx, url, **request_kwargs)
    return response
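For context, a hedged usage sketch of the positive document above. Here `request_ctx` is assumed to be the API client's request-context object (carrying `base_api_url` and authentication), and every id is a hypothetical placeholder; neither is defined in this record.

# Hypothetical ids: unlink outcome 42 from outcome group 7 in course 101.
# request_ctx is an assumed stand-in for the library's request context.
response = unlink_outcome_courses(request_ctx, course_id=101, id=7, outcome_id=42)
# Expected request: DELETE {base_api_url}/v1/courses/101/outcome_groups/7/outcomes/42
# Per the query text, the call fails if this is the last link to an aligned outcome.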
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink(self, link_id):", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def remove_link():", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def unlink(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.UNLINK_FROM_TEMPLATE,\n EntityType.ROLE.value, None)", "def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)", "def unlink(self):\n self._linked = False\n self.is_dirty = False\n return self", "def unlink_pivot(remote, pivot_id):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_UnlinkPivot(pivot_id)\n remote.runCommand(cmd)", "async def unlink(self, ctx: MyContext):\n query = \"SELECT * FROM wormhole_channel WHERE channelID = ?\"\n wh_channel = self.bot.db_query(\n query, (ctx.channel.id,), astuple=True, fetchone=True\n )\n # comes as: (name, channelID, guildID, type, webhookID, webhookTOKEN)\n if len(wh_channel) == 0:\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-linked\"))\n return\n query = \"DELETE FROM wormhole_channel WHERE channelID = ? 
AND name = ?\"\n async with ClientSession() as session:\n webhook = discord.Webhook.partial(\n wh_channel[4], wh_channel[5], session=session\n )\n await webhook.delete()\n self.bot.db_query(query, (wh_channel[0], ctx.channel.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.channel-unlinked\")\n )", "def delete_sense_relation(wn, source, target, change_list=None):\n delete_sense_rel(wn, source, target, change_list)\n delete_sense_rel(wn, target, source, change_list)", "def delete_relation(wn, source, target, change_list=None):\n delete_rel(source, target, change_list)\n delete_rel(target, source, change_list)", "def _remove_link(self, name, object_id):\n if not name in self.data:\n return\n\n if self.data[name] and object_id in self.data[name]:\n self.data[name] = self.data[name].remove(object_id)", "def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None", "def remove_link(self, dest):\n for i, link in enumerate(self.runscript.links):\n if link[1] == dest:\n del self.runscript.links[i]\n break", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n wn_source = wn\n entry = wn_source.entry_by_id(source_entry)\n if entry:\n sense = [sense for sense in entry.senses if sense.id == source][0]\n if not any(r for r in sense.sense_relations if r.target == target):\n print(\"No sense relations deleted\")\n else:\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]\n if change_list:\n change_list.change_entry(wn, entry)\n else:\n print(\"No entry for \" + source_entry)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n entry = wn.entry_by_id(source_entry)\n if change_list:\n change_list.change_entry(wn, entry)\n sense = [sense for sense in entry.senses if sense.id == source][0]\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]", "def remove(self, thought):\n # delete references to thought\n for linked in thought.links.all:\n linked.links.remove(thought)\n self.update(linked)\n\n # remove thought itself\n if self.__db.contains(thought.key):\n self.__db.remove(thought.key)\n self.__cache.remove(thought)\n else:\n raise SarasvatiException(\"Unable to remove a non-existent thought\")", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def unlink(self):\n if self._context.get('is_landlord_rent'):\n rent_ids = []\n for tenancy_rec in self:\n analytic_ids = self.env['account.analytic.line'].search(\n [('account_id', '=', tenancy_rec.id)])\n if analytic_ids and analytic_ids.ids:\n analytic_ids.unlink()\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', tenancy_rec.id)])\n post_rent = [x.id for x in rent_ids if x.move_check is True]\n if post_rent:\n raise Warning(\n _('''You cannot delete Tenancy record, if any related 
Rent'''\n '''Schedule entries are in posted.'''))\n else:\n rent_ids.unlink()\n return super(AccountAnalyticAccount, self).unlink()", "def unlink(address):", "def unlink(self, cr, uid, ids, context=None):\n allowances_archive = self.read(cr, uid, ids, ['transfer','state'], context=context)\n unlink_ids = []\n for record in allowances_archive:\n if record['transfer'] == False and record['state'] in ['draft','cancel']:\n unlink_ids.append(record['id'])\n else:\n raise osv.except_osv(_('Invalid action !'), _('Sorry you can not Delete this record(s), Because The request is in Process , You have To cancelled Firest or It already Transtered To account Voucher!'))\n for id in unlink_ids:\n allowances_archive_name = self.browse(cr, uid, id, context=context).name\n message = _(\"Env and Safety allowances archive '%s' has been deleted.\") % allowances_archive_name\n self.log(cr, uid, id, message)\n return super(env_and_safety_allowances_archive, self).unlink(cr, uid, unlink_ids, context=context)", "def deletelink(self, link_index=1, child=None):\n child = self.getnodenamed(child) # Verify pointer.\n\n # (int link_index, node_bn* child)\n cnetica.DeleteLink_bn.argtypes = [c_int, c_void_p]\n cnetica.DeleteLink_bn.restype = None\n cnetica.DeleteLink_bn(link_index, child)", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def unlink_action(self):\n self.check_access_rights('write', raise_exception=True)\n self.filtered('binding_model_id').write({'binding_model_id': False})\n return True", "def delete_rel(source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source.id, target.id))\n ss = source\n source.synset_relations = [\n r for r in ss.synset_relations if r.target != target.id]\n if change_list:\n change_list.change_synset(source)", "def unShare(sharedItem):\n sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()", "def problem_relationship_delete(self, src_identifier, relation_dict):\n self._delete(\"problems/%d/relationships\" % src_identifier, json=relation_dict)", "def delete(self):\n\n lod_history = self.repo._get_lod_history(self.lod)\n assert lod_history.exists()\n lod_history.update(self.repo._youngest, None)\n self._mark_deleted()", "def pre_delete_crossing(sender, instance, **kwargs):\n Link.objects.filter(origin=instance.id).delete()\n Link.objects.filter(destination=instance.id).delete()", "def unlink(self):\n for activity in self:\n if activity.state != 'draft':\n raise ValidationError(_('You cannot delete activity'))\n return super(inagro_crop_activity, self).unlink()", "def unlink_all(self):\n del self._links[:]", "def unlink_from(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph.handle)\n else:\n target_node = criterion_or_node\n self.send(target_node, 'drop_link',\n originating_node=self.id)", "def delete_leader(self):", "def unlink(self, cr, uid, ids, context=None):\n if len(self.pool.get('payment.enrich').search(cr, uid,[('enrich_category','in',ids),('state','!=','draft')], context=context)) > 0:\n raise osv.except_osv(_('Invalid Action Error'), _('Can not delete category(categories), Where there are some enrich with this category'))\n return super(enrich_category, self).unlink(cr, uid, ids, context=context)", "def UnlinkPackages():\n for path in packagesToUnlink:\n if dirMod.isFolder(path):\n dirMod.deleteFolder(path)\n elif dirMod.isFile(path):\n dirMod.deleteFile(path)\n else:\n print(errMod.formatError(\"Sequestrum\", \"uwu This was not 
supposed to happen... uwu\"))", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def test_unlink_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.link_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n linked_snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(linked_snap_details.get('linked'))\n self.replication.unlink_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def DeleteTarget(self, target_instance_id):", "def test_delete_link_no_resources(self):\n g = groups.get_by_name(\"fifth group\")\n \n self.open_url('/group/list')\n \n deletelink = self.wd.find_element(By.ID, \"delete-link-{0}\".format(g.id))\n deletelink.click()\n \n alert = self.wd.switch_to_alert()\n self.assertEqual(\"Are you sure you want to remove group {0} (id={1})\".format(g.name, g.id), alert.text)\n alert.accept()\n \n self.assert_notification(\"Group deleted: {0} (id={1})\".format(g.name, g.id))\n self.assert_not_in_list_table(g.name)", "def free_references(source_action):\n for act2_ref in source_action.references:\n act2 = act2_ref.action2\n act2.num_refs -= 1\n if act2.num_refs == 0: free_action2_ids.append(act2.id)", "def unlink(self, path: PathLike):", "def link_delete_callback(self):\n pass", "def remove(self, destination):\n if destination not in self.__links:\n raise SarasvatiException(\"Link to specified thought does not exist\")\n del self.__links[destination]", "def delete_data(request, result_id):\n result = TestResult.objects.get(id=result_id)\n result.delete()\n gun = result.bullet.gun\n return HttpResponseRedirect(reverse('gun', args=[gun.id]))", "def _die(self):\n\t\tself.site.agents_in_site.remove(self)\n\t\tself.site = None\n\t\tif self.debt_link != None:\n\t\t\tself.debt_link.lender.loans.remove(self.debt_link)\n\t\t\tself.debt_link = None\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tloan.borrower.debt_link = None\n\t\t\tdel self.loans[l]\n\t\tif self.gift_link != None:\n\t\t\tself.gift_link.giver.gifts.remove(self.gift_link)\n\t\t\tself.gift_link = None\n\t\tfor g, gift in enumerate(self.gifts):\n\t\t\tgift.taker.gift_link = None\n\t\t\tdel self.gifts[g]\n\t\tself.agents_list.remove(self)", "async def unlink(self, ctx):\n # Remove all link tokens and spotify details for this user\n remove_tokens(ctx.author.id)\n remove_spotify_details(ctx.author.id)\n await ctx.reply(\"All your linked accounts were removed, if you had any!\")", "def unfollow(source_id, destination_id):\n Forward.objects.filter(source_id=source_id,\n destination_id=destination_id).delete()\n Backward.objects.filter(destination_id=destination_id,\n source_id=source_id).delete()", "def unlink(self):\n context = self._context or {}\n for src_brw in self.browse():\n if src_brw.state != 'cancel':\n raise exceptions.except_orm(\n _(\"Invalid Procedure!!\"),\n _(\"The withholding document needs to be in cancel state to\"\n \" be deleted.\"))\n else:\n super(AccountWhSrc, self).unlink(\n )\n return True", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = 
rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def unlink(self):\n raise ValidationError(_(\"Products may not be deleted. Please archive them instead.\"))", "def test_modify_storage_group_snapshot_unlink_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, link=True)\n linked_snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(linked_snap_details.get('linked'))\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, unlink=True)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def delete_link(self):\n self.link_layout.links_list.remove_widget(self)\n self.link_layout.links.remove(self.text)\n utils.update_data()\n utils.data[self.link_layout.parent_screen.name]['links'] = self.link_layout.links\n utils.save_project_data(utils.data[self.link_layout.parent_screen.name],\n f\"{utils.data[self.link_layout.parent_screen.name]['proj_path']}/project_data.json\")", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def unsetReference(self):\n return _libsbml.Association_unsetReference(self)", "def _unshare_file(target, force=False):\n logging.debug(\"Un-sharing file %s\" % target)\n if not force and os.stat(target).st_nlink == 1:\n msg = \"File %s has ONE hard link. Un-sharing this file will delete it! 
Apply \\'--force\\' to do so.\" % target\n logging.error(msg)\n raise FileNotFoundError(msg)\n os.unlink(target)", "def middledelalllistitems(self):\n self._linklist.delete()", "def check_post_delete_purge_links_metadata(integrated_ff):\n post_data = {\n 'biosource_type': 'immortalized cell line',\n 'award': '1U01CA200059-01',\n 'lab': '4dn-dcic-lab'\n }\n post_res = ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n post_item = post_res['@graph'][0]\n assert 'uuid' in post_item\n assert post_item['biosource_type'] == post_data['biosource_type']\n # make sure there is a 409 when posting to an existing item\n post_data['uuid'] = post_item['uuid']\n with pytest.raises(Exception) as exec_info:\n ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n assert '409' in str(exec_info.value) # 409 is conflict error\n\n # make a biosample that links to the biosource\n bios_data = {'biosource': [post_data['uuid']], 'status': 'deleted',\n 'lab': '4dn-dcic-lab', 'award': '1U01CA200059-01'}\n bios_res = ff_utils.post_metadata(bios_data, 'biosample', key=integrated_ff['ff_key'])\n bios_item = bios_res['@graph'][0]\n assert 'uuid' in bios_item\n\n # delete the biosource\n del_res = ff_utils.delete_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert del_res['status'] == 'success'\n assert del_res['@graph'][0]['status'] == 'deleted'\n\n # test get_metadata_links function (this will ensure everything is indexed, as well)\n links = []\n while not links or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 1\n assert links[0]['uuid'] == bios_item['uuid']\n assert links[0]['field'] == 'biosource[0].uuid'\n\n # purge biosource first, which will failed because biosample is still linked\n purge_res1 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res1['status'] == 'error'\n assert bios_item['uuid'] in [purge['uuid'] for purge in purge_res1['comment']]\n\n # purge biosample and then biosource\n purge_res2 = ff_utils.purge_metadata(bios_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res2['status'] == 'success'\n\n # wait for indexing to catch up\n while len(links) > 0 or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 0\n\n purge_res3 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res3['status'] == 'success'\n # make sure it is purged\n with pytest.raises(Exception) as exec_info:\n ff_utils.get_metadata(post_item['uuid'], key=integrated_ff['ff_key'],\n add_on='datastore=database')\n assert 'The resource could not be found' in str(exec_info.value)", "def test_groups_group_ref_delete(self):\n pass", "def drop_unlinked(data):\n data['exchanges'] = [exc\n for exc in data.get('exchanges', [])\n if exc['activity_code'] and exc['flow_code']\n ]\n return data", "def _delete(self):\n self.prev.next = self.next\n self.next.prev = self.prev", "def target_remove():\r\n try:\r\n target_id = request.post_vars[\"target\"]\r\n group_id = request.post_vars[\"group\"]\r\n except KeyError:\r\n pass\r\n else:\r\n result = gl.remove_from_targetgroup(target_id, group_id)\r\n if result:\r\n return 
response.json({'success': 'true'})\r\n return response.json({'success': 'false'})", "def remove_contained_cards_relations(event):\n resource = event.resource\n wall = find_interface(resource, IWall)\n for rid in event.contained_rids:\n for relation_id in wall.relations_map.find_relations(rid):\n del wall.relations_map[relation_id]", "def delete_link(update: Update, context: CallbackContext):\n query = update.callback_query\n link_id = query.data.split(\"delete:\")[1]\n\n with db.connect() as connection:\n link = db.get_link(connection, link_id)\n\n context.bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=f'You are about to delete a link to \"{link.title}\" ({link.url})\\nAre you sure?',\n reply_markup=keyboards.link_delete(link),\n )\n\n query.answer()", "def test_move_delete(self):\r\n source_course = CourseLocator(org='testx', offering='GreekHero', branch='draft')\r\n dest_course = CourseLocator(org='testx', offering='GreekHero', branch=\"published\")\r\n head = source_course.make_usage_key('course', \"head12345\")\r\n chapter2 = source_course.make_usage_key('chapter', 'chapter2')\r\n problem1 = source_course.make_usage_key('problem', 'problem1')\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\", \"problem3_2\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\"])\r\n # now move problem1 and delete problem3_2\r\n chapter1 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter1\"))\r\n chapter3 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter3\"))\r\n chapter1.children.append(problem1)\r\n chapter3.children.remove(problem1.map_into_course(chapter3.location.course_key))\r\n modulestore().delete_item(source_course.make_usage_key(\"problem\", \"problem3_2\"), self.user)\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\", \"problem3_2\"])", "def unlink ( self, fspath ):\n return", "def unlink(self):\n if not self:\n return True\n \n # for recomputing fields\n self.modified(self._fields)\n \n self._check_concurrency()\n \n self.check_access_rights('unlink')\n \n # Check if the records are used as default properties.\n refs = ['%s,%s' % (self._name, i) for i in self.ids]\n if self.env['ir.property'].search([('res_id', '=', False), ('value_reference', 'in', refs)]):\n raise UserError(_('Unable to delete this document because it is used as a default property'))\n \n # Delete the records' properties.\n with self.env.norecompute():\n self.env['ir.property'].search([('res_id', 'in', refs)]).unlink()\n self.delete_workflow()\n self.check_access_rule('unlink')\n \n cr = self._cr\n Data = self.env['ir.model.data'].sudo().with_context({})\n Defaults = self.env['ir.default'].sudo()\n Attachment = self.env['ir.attachment']\n \n for sub_ids in cr.split_for_in_conditions(self.ids):\n query = \"DELETE FROM %s WHERE id IN %%s\" % self._table\n cr.execute(query, (sub_ids,))\n \n # Removing the ir_model_data reference if the record being deleted\n # is a record created by xml/csv file, as these are not connected\n # with real database foreign keys, and would be dangling references.\n #\n # Note: the following steps are performed as superuser to avoid\n # access rights restrictions, and 
with no context to avoid possible\n # side-effects during admin calls.\n data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])\n if data:\n data.unlink()\n \n # For the same reason, remove the defaults having some of the\n # records as value\n Defaults.discard_records(self.browse(sub_ids))\n \n # For the same reason, remove the relevant records in ir_attachment\n # (the search is performed with sql as the search method of\n # ir_attachment is overridden to hide attachments of deleted\n # records)\n query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'\n cr.execute(query, (self._name, sub_ids))\n attachments = Attachment.browse([row[0] for row in cr.fetchall()])\n if attachments:\n attachments.unlink()\n \n # invalidate the *whole* cache, since the orm does not handle all\n # changes made in the database, like cascading delete!\n self.invalidate_cache()\n \n # recompute new-style fields\n if self.env.recompute and self._context.get('recompute', True):\n self.recompute()\n # auditing: deletions are infrequent and leave no trace in the database\n _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)\n return True", "def unfollow(user, actor, send_action=False):\n Follow.objects.filter(user = user, object_id = actor.pk,\n content_type = ContentType.objects.get_for_model(actor)).delete()\n if send_action:\n action.send(user, verb=_('stopped following'), target=actor)", "def cleanOrphanedLearners(self):\n\n # Before deleting Learners, ensure that if any Learners that are about to be\n # deleted point to a Team as their action, then that Team's count of\n # referincing Learners is decremented.\n for learner in self.learner_pop:\n if learner.getNumReferencingTeams() == 0 and not learner.isActionAtomic():\n learner.action.decrementNumReferencingLearners()\n\n # Remove all orphaned Learners from the Learner population\n self.learner_pop = [l for l in self.learner_pop if not l.getNumReferencingTeams() == 0]", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. 
But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'", "def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)", "def detach_typed_link(self, typed_link_specifier: Dict[str, Any]):\n return cd_client.detach_typed_link(\n DirectoryArn=self._dir_arn,\n TypedLinkSpecifier=typed_link_specifier\n )", "def delete_relatives(self):\n category_ratings = list(self.category_ratings.all())\n self.category_ratings.clear()\n for category_rating in category_ratings:\n if category_rating.isOrphaned():\n category_rating.delete()\n\n word_counts = list(self.word_counts.all())\n self.word_counts.clear()\n for word_count in word_counts:\n if word_count.isOrphaned():\n word_count.delete()", "def test_unlink_gen_snapshot(self):\n if self.is_v4:\n self.skipTest('Getting storage group list by generation does '\n 'not work on the V4. Will need logic in this test '\n 'based on uCode.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.link_gen_snapshot(\n sg_id=sg_name, link_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(snap_details.get('isLinked'))\n self.replication.unlink_gen_snapshot(\n sg_id=sg_name, unlink_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def unlink(self, *keys: KeyT) -> ResponseT:\n return self._split_command_across_slots(\"UNLINK\", *keys)", "def test__removeRelObject(t):\n t.adm._removeRelObject(\"device\", \"objmap\", \"relname\")", "def delete():", "def unfollow(alias):\n s = db.Series.alias_lookup(alias)\n s.following = False\n db.session.commit()\n output.series('Removing follow for {}'.format(s.name))", "def self_destruct(self, force_file_removal=False):\n self._unshare_linked_tree(directory=self.directory, force_file_removal=force_file_removal)", "def test_modify_storage_group_snapshot_unlink(self):\n if self.is_v4:\n self.skipTest(\n 'Modify storage group snapshot unlink by generation does '\n 'not work on the V4.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, link=True)\n linked_snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(linked_snap_details.get('isLinked'))\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, unlink=True)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def attachment_delete_link(context, attachment):\n if 
context['user'].has_perm('delete_foreign_attachments') \\\n or (context['user'] == attachment.creator and \\\n context['user'].has_perm('attachments.delete_attachment')):\n return {\n 'next': context['request'].build_absolute_uri(),\n 'delete_url': reverse('delete_attachment', kwargs={'attachment_pk': attachment.pk})\n }\n return {'delete_url': None,}", "def del_edge (self, src, dst):\n raise NotImplementedError", "def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev", "def delete_order():", "def unlink_obj(self, ref_frame, obj_name=None, delete=True):\n self.scene.remove_attached_object(ref_frame, obj_name)\n if delete:\n self.remove_obj(obj_name)", "def _unshare_dir(target):\n logging.debug(\"Un-sharing directory %s\" % target)\n os.rmdir(target)", "def unfollow(self,id):\n # DELETE /followings/$id\n debugMain('unfollow')\n resource = '/followings/%s'%id\n requestUrl = self.apiRootUrls[0] + resource\n debugRequest('unfollowing: %s'%requestUrl)\n r = self.session.delete(requestUrl)\n \n debugDetail('request headers:')\n debugJson(r.request.headers)\n debugDetail()\n debugDetail(' -- -- -- --')\n debugDetail()\n debugDetail('response headers:')\n debugJson(r.headers)\n debugDetail()\n \n if r.status_code is not 200:\n debugError('failed to unfollow.')\n debugDetail()\n return False\n return True", "def delete(self):\n if jwthandler.authorize_action(self, 1) == False:\n return None\n\n userdata = jwthandler.decode_userdata(self.request.headers[\"Authorization\"])\n\n body_categories = {\"link_id\": 1}\n link_dict = errorutil.check_fields(self.request.body.decode(), body_categories, self)\n\n if link_dict == False or linkutil.delete_link(link_dict[\"link_id\"], self) == False:\n return None\n\n formatted_message = loggerhandler.form_delete_message_dictionary(userdata, \n \"link\", \n link_dict[\"link_id\"])\n\n\n loggerhandler.log_message(\"delete\", formatted_message)\n\n self.write({\"message\":\"Success\"})", "async def remove_img(self, ctx: BBContext, url: str):\n\n con = await ctx.get_connection()\n query = f'DELETE FROM {TABLE_ARTS} WHERE url = $1'\n\n await con.execute(query, url)\n await ctx.tick(True)", "def POST_delete_link_img(self, res, link, name):\r\n # just in case we need to kill this feature from XSS\r\n if g.css_killswitch:\r\n return self.abort(403,'forbidden')\r\n link.del_image(name)\r\n link._commit()\r\n # hide the image and it's container\r\n res._hide(\"img-li_%s\" % name)\r\n # reset the status\r\n res._update('img-status', innerHTML = _(\"Deleted\"))", "def unmanaged_delete(task_id, url):\n\n PoolManager.db.query('DELETE FROM `unmanaged_deletions` WHERE `id` = %s', task_id)\n\n try:\n stat_result = gfal_exec('stat', (url,), return_value = True)\n except:\n return 0, None, None, 'stat error', ''\n\n if stat.S_ISDIR(stat_result.st_mode):\n # this is a directory\n result = gfal_exec('rmdir', (url,))\n else:\n result = gfal_exec('unlink', (url,))\n\n return (0,) + rmdir_result[1:]", "def remove_links(self, item):\r\n if item.get('link'):\r\n item.pop('link')\r\n if item.get('links'):\r\n item.pop('links')\r\n return item", "def del_data_reference(self, target):\n\n if target in self:\n target = self._storage[target]\n if self._data_pointer_key in target:\n del target[self._data_pointer_key]", "def unfollow(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def _reset_unlinked_constituencies():\n 
UnlinkedConstituency.objects.all().delete()", "def DeleteConceptRelations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unlink_venue_account(venue_account, after_action, owner, user):\n if after_action == 'move_events':\n _transfer_venue_events_to_owner(venue_account, owner, user)\n elif after_action == 'remove_events':\n _delete_venue_events(venue_account, user)\n\n venue_account.delete()", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}" ]
[ "0.6547148", "0.6474747", "0.63120115", "0.61563367", "0.61477256", "0.6030046", "0.58153516", "0.56244844", "0.5609927", "0.5545071", "0.55448717", "0.54989296", "0.5490254", "0.5487666", "0.5484662", "0.54513216", "0.5432963", "0.54169786", "0.53966224", "0.5357432", "0.5355181", "0.5353978", "0.5317241", "0.52846473", "0.526616", "0.52355725", "0.5235444", "0.5225721", "0.5221762", "0.52011555", "0.51998115", "0.51822364", "0.5181612", "0.5170645", "0.5159116", "0.51543283", "0.5142953", "0.51375484", "0.5136116", "0.5124606", "0.5112891", "0.5108877", "0.509462", "0.5080735", "0.507185", "0.50639564", "0.5056037", "0.5041625", "0.5040202", "0.50355184", "0.5034844", "0.5034677", "0.5023138", "0.5010116", "0.50096446", "0.5001961", "0.49926782", "0.49647552", "0.49629745", "0.4960695", "0.49548632", "0.49460593", "0.49451593", "0.494213", "0.494208", "0.4938155", "0.4934565", "0.49276358", "0.4923267", "0.4920343", "0.4911879", "0.49112195", "0.49108583", "0.491012", "0.49076423", "0.49072933", "0.4903375", "0.49016306", "0.48945874", "0.48834962", "0.4878512", "0.48702112", "0.48701078", "0.48660323", "0.4865588", "0.4865385", "0.48601672", "0.4847724", "0.48429334", "0.48383126", "0.48318407", "0.4826131", "0.48255196", "0.48250464", "0.482382", "0.48189095", "0.48171067", "0.48119032", "0.48106897", "0.48075652" ]
0.6281923
3
List the immediate OutcomeGroup children of the outcome group. Paginated.
def list_subgroups_global(request_ctx, id, per_page=None, **request_kwargs):
    if per_page is None:
        per_page = request_ctx.per_page
    path = '/v1/global/outcome_groups/{id}/subgroups'
    payload = {
        'per_page': per_page,
    }
    url = request_ctx.base_api_url + path.format(id=id)
    response = client.get(request_ctx, url, payload=payload, **request_kwargs)
    return response
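Likewise, a minimal sketch of calling this retrieved function, under the same assumption of a hypothetical `request_ctx`; the group id and page size are placeholder values, and the query-parameter shape is inferred from the `payload` argument rather than confirmed by the record.

# Hypothetical call: list the immediate subgroups of global outcome group 5,
# 50 results per page (per_page falls back to request_ctx.per_page if omitted).
response = list_subgroups_global(request_ctx, id=5, per_page=50)
# Expected request: GET {base_api_url}/v1/global/outcome_groups/5/subgroups?per_page=50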
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_children(self):\n\n pass", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent", "def get_children(self):\r\n return self.children", "def children(self): # noqa: ANN201", "def get_children(self):\n return []", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def getChildren(self):\n return []", "def get_children(self):\n return self.children", "def get_children(self):\r\n return self._children", "def get_children(self):\n return self.items", "def get_children(self):\n raise NotImplementedError()", "def children(self):\n return self._children", "def children(self):\n return self._children", "def children(self):\n \n return self._children", "def get_children(self):\n return self._children", "def children(self) -> Iterable[Heirarchical]:\n return []", "def getChildren(self):\n \n return self._children", "def children(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in sorted(self._children_ids, key=self._sort_by_birth)]", "def get_childs(self):\n\t\treturn self.__childs", "def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]", "def children(self):\n return list(self._children)", "def children(self):\r\n return self.location_set.filter(hidden=False)", "def getChildren():", "def GetChildren(self):\r\n\r\n return self._children", "def descendants(self):\n for a in self._related(set(), 'children'):\n yield a", "def fm_all_children(self):\n return self._relation_lst[self.CHILD].copy()", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def GetChildren(self, *args, **kwargs):\n pass", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def test_get_all_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def child_views(self):\n return self.children", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def test_get_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def children(self) -> List[Region]:\n return []", "def get_children_elements(self):\n\n pass", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def children(self) -> List[Region]:\n return self._children", "def getChildren(self):\n return self.child_edges", "def get_children_queryset(self):\n pass", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n 
return []", "def all_children(self):\n\n for child in self.children:\n yield child\n for sub_child in child.all_children():\n yield sub_child", "def getChildren(self):\n return self.directories.values()", "def children(self):\n startkey, endkey = self._key_bounds\n depth = len(self.path) + 2 # 1 for domain, 1 for next location level\n q = self.view('locations/hierarchy', startkey=startkey, endkey=endkey, group_level=depth)\n keys = [e['key'] for e in q if len(e['key']) == depth]\n return self.view('locations/hierarchy', keys=keys, reduce=False, include_docs=True).all()", "def get_children(self):\n return [node for node in self._children.values()]", "def get_organization_group_children_url(og_id):\n\n return '{organization_group_api_path}/{organization_group_id}/children'. \\\n format(organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH, organization_group_id=og_id)", "def __iter__(self):\n return iter(self.__children)", "def __iter__(self):\n\n for i in self._children:\n yield i", "def get_children(self):\r\n\r\n # FIXME: Expose iteration from CIndex, PR6125.\r\n def visitor(child, parent, children):\r\n # FIXME: Document this assertion in API.\r\n # FIXME: There should just be an isNull method.\r\n assert child != conf.lib.clang_getNullCursor()\r\n\r\n # Create reference to TU so it isn't GC'd before Cursor.\r\n child._tu = self._tu\r\n children.append(child)\r\n return 1 # continue\r\n children = []\r\n conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),\r\n children)\r\n return iter(children)", "def children(self) -> List[str]:\n return self._children", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def __iter__(self):\n return iter(self._children)", "def print_children(self, offset=None, outputfile=None):\n if offset is None:\n offset = ''\n if outputfile is None:\n print(offset + self.name)\n else:\n outputfile.write(offset + self.name + '\\n')\n for i in range(len(self.children)):\n self.children[i].print_children(offset=offset + ' ', outputfile=outputfile)", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def get_all_children(self):\n return tuple(self.children)", "def __iter__(self):\n for child in self.children:\n yield child", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)", "def children(self):\n address = self.address\n if address:\n address += \"/\"\n\n # Escape the address for re matching\n addres = re.escape(address)\n regex = \"^\" + address + \"[^/]+$\"\n children = Page.objects.filter(address__regex=regex).order_by(\"address\")\n return list(children)", "def descendants(self):\n for child in self.children:\n yield child\n if isinstance(child, LoggedAction):\n for descendant in child.descendants():\n yield descendant", "def get_children(self, item, 
level):\n return item.children", "def children_ids(self):\n return self._children_ids", "def getchildren(self):\n return self.root.getchildren()", "def children(self):\n if self._children is None:\n return set()\n else:\n return self._children", "def children(self):\n return self.leaves", "def get_children(self):\n\n return self._children.copy()", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def children(self, i):\n if i < 0:\n raise IndexError()\n return self._children[i]", "def get_children(target, concept_map):\n child_inds = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n target_row = concept_map[ADJ_MAT_STR][target_index]\n for ind in range(len(target_row)): # for each ind in row of adj mat\n val = target_row[ind]\n if(val>0 and ind != target_index): # don't care concept is child of itself\n child_inds.append(ind)\n return list(map(lambda ind: concept_map[CONCEPTS_STR][ind], child_inds))", "def get_children(self):\r\n\r\n if not self.has_children:\r\n return []\r\n\r\n if getattr(self, '_child_instances', None) is None:\r\n self._child_instances = [] # pylint: disable=attribute-defined-outside-init\r\n for child_loc in self.children:\r\n try:\r\n child = self.runtime.get_block(child_loc)\r\n child.runtime.export_fs = self.runtime.export_fs\r\n except ItemNotFoundError:\r\n log.exception(u'Unable to load item {loc}, skipping'.format(loc=child_loc))\r\n continue\r\n self._child_instances.append(child)\r\n\r\n return self._child_instances", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def get_list(self):\n obj_list = []\n for group in self.root_item.child_items:\n for item in group.child_items:\n obj_list.append(item.obj)\n\n return obj_list", "def _children(self):\n for codeobj in self.body:\n if isinstance(codeobj, CodeEntity):\n yield codeobj", "def get_object_childs(self, obj_name):\n index = 0\n children_list = []\n child = 0\n parent_handle = self.get_object_handle(obj_name)\n while child != -1:\n res, child = vrep.simxGetObjectChild(self.client_id, parent_handle, index, vrep.simx_opmode_blocking)\n if res == vrep.simx_return_ok:\n children_list.append(child)\n index = index + 1\n else:\n print('Remote fucntion get_object_childs call failed.')\n return []\n del children_list[len(children_list) - 1]\n return children_list", "def Children(self) -> _n_1_t_2:", "def get_b_children(self, b_obj):\n return [child for child in Blender.Object.Get()\n if child.parent == b_obj]", "def _get_children(self):\n return set()", "def children(self):\n return self.hashring_watch.get_children()", "def visit_children(self, tree: lark.Tree) -> List[Result]:\n self.level += 1\n result = 
super().visit_children(tree)\n self.level -= 1\n return result", "def getAllChildren(self):\n \n l = []\n for child in self._children:\n l.append(child)\n l.extend(child.getAllChildren())\n \n return l", "def children(self):\n return {x[1] for x in self.outgoing}", "def children(self):\n return [self.cut]", "def make_children(self):\n\t\tchildren = []\n\t\tfor action in self.observations:\n\t\t\tchildren.append(\"empty\")\n\n\t\treturn children", "def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def __next__(self):\n for child in self.children:\n yield child", "def get_next(self):\n return self.childs", "def children(self, child_class=None):\n if not child_class:\n child_class = self.CHILD_CLASS\n # pylint: disable=no-member\n return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(\n lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results", "def get_children(self, child_range=None):\n child_c, child_r = self.collection.get_child()\n child_c = [\"{}\".format(c) for c in child_c]\n res = child_c + child_r\n if child_range:\n start, stop = (int(el) for el in child_range.split(\"-\", 1))\n # map CDMI range value to python index\n stop += 1\n else:\n start = 0\n stop = len(res)\n return res[start:stop]", "def current_container_children(self):\n # noinspection PyProtectedMember\n return self.current_container._all_children", "def children(self):\r\n descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(\r\n lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results\r\n\r\n # Now remove any non-direct descendants.\r\n grandkids = []\r\n for descendant in descendants:\r\n grandkids.extend(descendant.children)\r\n\r\n grand_locators = [grandkid.locator for grandkid in grandkids]\r\n return [descendant for descendant in descendants if not descendant.locator in grand_locators]", "def walk(root_group):\n groups = [root_group]\n sheets = list(root_group.child_sheets)\n for child_group in root_group.child_groups:\n descendent_groups, descendent_sheets = walk(child_group)\n groups += descendent_groups\n sheets += descendent_sheets\n return groups, sheets", "def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents", "def get_child_resource_nodes(self):\n raise errors.Unimplemented()", "def get_children(self, context: ResourceCommandContext, obj_ref: str, child_type: str) -> list:\n return self.handler.get_children(obj_ref, child_type)", "def outcomes(self):\n return self._get_child_page_of_type(LearningOutcomesPage)", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ..." ]
[ "0.6219274", "0.62047005", "0.6197958", "0.6164997", "0.6146464", "0.6123517", "0.6123517", "0.6123517", "0.6090403", "0.60803944", "0.6052547", "0.6046702", "0.6037661", "0.60363", "0.60363", "0.6025189", "0.59819186", "0.59583235", "0.5945395", "0.59407395", "0.593865", "0.5887591", "0.58798635", "0.58404845", "0.5832817", "0.58281183", "0.58222735", "0.57788885", "0.57717764", "0.5765069", "0.5743773", "0.5743773", "0.57257074", "0.5677659", "0.56605566", "0.56603783", "0.56386477", "0.5634943", "0.5632342", "0.56318647", "0.5605921", "0.56046265", "0.55895394", "0.5587971", "0.55785775", "0.5572745", "0.5563347", "0.5562666", "0.5551925", "0.55442244", "0.5519042", "0.55178314", "0.5515718", "0.55053073", "0.5502807", "0.54746705", "0.546689", "0.5466064", "0.54638535", "0.5459027", "0.54564965", "0.5432159", "0.5413271", "0.5391215", "0.5380636", "0.53701955", "0.536629", "0.53609014", "0.5326579", "0.5305718", "0.5301742", "0.529065", "0.5282745", "0.5281266", "0.5279338", "0.5279338", "0.527048", "0.5270165", "0.52657396", "0.5260169", "0.5245433", "0.5244383", "0.5242271", "0.5235315", "0.5232728", "0.52284276", "0.5226746", "0.52235687", "0.52220285", "0.52199864", "0.5218219", "0.52095497", "0.51861894", "0.517719", "0.51614636", "0.51556623", "0.515261", "0.5150056", "0.514544", "0.51428765", "0.5139288" ]
0.0
-1
List the immediate OutcomeGroup children of the outcome group. Paginated.
def list_subgroups_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):
    if per_page is None:
        per_page = request_ctx.per_page
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'
    payload = {
        'per_page': per_page,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    response = client.get(request_ctx, url, payload=payload, **request_kwargs)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_children(self):\n\n pass", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent", "def get_children(self):\r\n return self.children", "def children(self): # noqa: ANN201", "def get_children(self):\n return []", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def getChildren(self):\n return []", "def get_children(self):\n return self.children", "def get_children(self):\r\n return self._children", "def get_children(self):\n return self.items", "def get_children(self):\n raise NotImplementedError()", "def children(self):\n return self._children", "def children(self):\n return self._children", "def children(self):\n \n return self._children", "def get_children(self):\n return self._children", "def children(self) -> Iterable[Heirarchical]:\n return []", "def getChildren(self):\n \n return self._children", "def children(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in sorted(self._children_ids, key=self._sort_by_birth)]", "def get_childs(self):\n\t\treturn self.__childs", "def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]", "def children(self):\n return list(self._children)", "def children(self):\r\n return self.location_set.filter(hidden=False)", "def getChildren():", "def GetChildren(self):\r\n\r\n return self._children", "def descendants(self):\n for a in self._related(set(), 'children'):\n yield a", "def fm_all_children(self):\n return self._relation_lst[self.CHILD].copy()", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def GetChildren(self, *args, **kwargs):\n pass", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def test_get_all_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def child_views(self):\n return self.children", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def test_get_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def children(self) -> List[Region]:\n return []", "def get_children_elements(self):\n\n pass", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def children(self) -> List[Region]:\n return self._children", "def getChildren(self):\n return self.child_edges", "def get_children_queryset(self):\n pass", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n 
return []", "def all_children(self):\n\n for child in self.children:\n yield child\n for sub_child in child.all_children():\n yield sub_child", "def getChildren(self):\n return self.directories.values()", "def children(self):\n startkey, endkey = self._key_bounds\n depth = len(self.path) + 2 # 1 for domain, 1 for next location level\n q = self.view('locations/hierarchy', startkey=startkey, endkey=endkey, group_level=depth)\n keys = [e['key'] for e in q if len(e['key']) == depth]\n return self.view('locations/hierarchy', keys=keys, reduce=False, include_docs=True).all()", "def get_children(self):\n return [node for node in self._children.values()]", "def get_organization_group_children_url(og_id):\n\n return '{organization_group_api_path}/{organization_group_id}/children'. \\\n format(organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH, organization_group_id=og_id)", "def __iter__(self):\n return iter(self.__children)", "def __iter__(self):\n\n for i in self._children:\n yield i", "def get_children(self):\r\n\r\n # FIXME: Expose iteration from CIndex, PR6125.\r\n def visitor(child, parent, children):\r\n # FIXME: Document this assertion in API.\r\n # FIXME: There should just be an isNull method.\r\n assert child != conf.lib.clang_getNullCursor()\r\n\r\n # Create reference to TU so it isn't GC'd before Cursor.\r\n child._tu = self._tu\r\n children.append(child)\r\n return 1 # continue\r\n children = []\r\n conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),\r\n children)\r\n return iter(children)", "def children(self) -> List[str]:\n return self._children", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def __iter__(self):\n return iter(self._children)", "def print_children(self, offset=None, outputfile=None):\n if offset is None:\n offset = ''\n if outputfile is None:\n print(offset + self.name)\n else:\n outputfile.write(offset + self.name + '\\n')\n for i in range(len(self.children)):\n self.children[i].print_children(offset=offset + ' ', outputfile=outputfile)", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def get_all_children(self):\n return tuple(self.children)", "def __iter__(self):\n for child in self.children:\n yield child", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)", "def children(self):\n address = self.address\n if address:\n address += \"/\"\n\n # Escape the address for re matching\n addres = re.escape(address)\n regex = \"^\" + address + \"[^/]+$\"\n children = Page.objects.filter(address__regex=regex).order_by(\"address\")\n return list(children)", "def descendants(self):\n for child in self.children:\n yield child\n if isinstance(child, LoggedAction):\n for descendant in child.descendants():\n yield descendant", "def get_children(self, item, 
level):\n return item.children", "def children_ids(self):\n return self._children_ids", "def getchildren(self):\n return self.root.getchildren()", "def children(self):\n if self._children is None:\n return set()\n else:\n return self._children", "def children(self):\n return self.leaves", "def get_children(self):\n\n return self._children.copy()", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def children(self, i):\n if i < 0:\n raise IndexError()\n return self._children[i]", "def get_children(target, concept_map):\n child_inds = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n target_row = concept_map[ADJ_MAT_STR][target_index]\n for ind in range(len(target_row)): # for each ind in row of adj mat\n val = target_row[ind]\n if(val>0 and ind != target_index): # don't care concept is child of itself\n child_inds.append(ind)\n return list(map(lambda ind: concept_map[CONCEPTS_STR][ind], child_inds))", "def get_children(self):\r\n\r\n if not self.has_children:\r\n return []\r\n\r\n if getattr(self, '_child_instances', None) is None:\r\n self._child_instances = [] # pylint: disable=attribute-defined-outside-init\r\n for child_loc in self.children:\r\n try:\r\n child = self.runtime.get_block(child_loc)\r\n child.runtime.export_fs = self.runtime.export_fs\r\n except ItemNotFoundError:\r\n log.exception(u'Unable to load item {loc}, skipping'.format(loc=child_loc))\r\n continue\r\n self._child_instances.append(child)\r\n\r\n return self._child_instances", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def get_list(self):\n obj_list = []\n for group in self.root_item.child_items:\n for item in group.child_items:\n obj_list.append(item.obj)\n\n return obj_list", "def _children(self):\n for codeobj in self.body:\n if isinstance(codeobj, CodeEntity):\n yield codeobj", "def get_object_childs(self, obj_name):\n index = 0\n children_list = []\n child = 0\n parent_handle = self.get_object_handle(obj_name)\n while child != -1:\n res, child = vrep.simxGetObjectChild(self.client_id, parent_handle, index, vrep.simx_opmode_blocking)\n if res == vrep.simx_return_ok:\n children_list.append(child)\n index = index + 1\n else:\n print('Remote fucntion get_object_childs call failed.')\n return []\n del children_list[len(children_list) - 1]\n return children_list", "def Children(self) -> _n_1_t_2:", "def get_b_children(self, b_obj):\n return [child for child in Blender.Object.Get()\n if child.parent == b_obj]", "def _get_children(self):\n return set()", "def children(self):\n return self.hashring_watch.get_children()", "def visit_children(self, tree: lark.Tree) -> List[Result]:\n self.level += 1\n result = 
super().visit_children(tree)\n self.level -= 1\n return result", "def getAllChildren(self):\n \n l = []\n for child in self._children:\n l.append(child)\n l.extend(child.getAllChildren())\n \n return l", "def children(self):\n return {x[1] for x in self.outgoing}", "def children(self):\n return [self.cut]", "def make_children(self):\n\t\tchildren = []\n\t\tfor action in self.observations:\n\t\t\tchildren.append(\"empty\")\n\n\t\treturn children", "def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def __next__(self):\n for child in self.children:\n yield child", "def get_next(self):\n return self.childs", "def children(self, child_class=None):\n if not child_class:\n child_class = self.CHILD_CLASS\n # pylint: disable=no-member\n return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(\n lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results", "def get_children(self, child_range=None):\n child_c, child_r = self.collection.get_child()\n child_c = [\"{}\".format(c) for c in child_c]\n res = child_c + child_r\n if child_range:\n start, stop = (int(el) for el in child_range.split(\"-\", 1))\n # map CDMI range value to python index\n stop += 1\n else:\n start = 0\n stop = len(res)\n return res[start:stop]", "def current_container_children(self):\n # noinspection PyProtectedMember\n return self.current_container._all_children", "def children(self):\r\n descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(\r\n lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results\r\n\r\n # Now remove any non-direct descendants.\r\n grandkids = []\r\n for descendant in descendants:\r\n grandkids.extend(descendant.children)\r\n\r\n grand_locators = [grandkid.locator for grandkid in grandkids]\r\n return [descendant for descendant in descendants if not descendant.locator in grand_locators]", "def walk(root_group):\n groups = [root_group]\n sheets = list(root_group.child_sheets)\n for child_group in root_group.child_groups:\n descendent_groups, descendent_sheets = walk(child_group)\n groups += descendent_groups\n sheets += descendent_sheets\n return groups, sheets", "def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents", "def get_child_resource_nodes(self):\n raise errors.Unimplemented()", "def get_children(self, context: ResourceCommandContext, obj_ref: str, child_type: str) -> list:\n return self.handler.get_children(obj_ref, child_type)", "def outcomes(self):\n return self._get_child_page_of_type(LearningOutcomesPage)", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ..." ]
[ "0.6219274", "0.62047005", "0.6197958", "0.6164997", "0.6146464", "0.6123517", "0.6123517", "0.6123517", "0.6090403", "0.60803944", "0.6052547", "0.6046702", "0.6037661", "0.60363", "0.60363", "0.6025189", "0.59819186", "0.59583235", "0.5945395", "0.59407395", "0.593865", "0.5887591", "0.58798635", "0.58404845", "0.5832817", "0.58281183", "0.58222735", "0.57788885", "0.57717764", "0.5765069", "0.5743773", "0.5743773", "0.57257074", "0.5677659", "0.56605566", "0.56603783", "0.56386477", "0.5634943", "0.5632342", "0.56318647", "0.5605921", "0.56046265", "0.55895394", "0.5587971", "0.55785775", "0.5572745", "0.5563347", "0.5562666", "0.5551925", "0.55442244", "0.5519042", "0.55178314", "0.5515718", "0.55053073", "0.5502807", "0.54746705", "0.546689", "0.5466064", "0.54638535", "0.5459027", "0.54564965", "0.5432159", "0.5413271", "0.5391215", "0.5380636", "0.53701955", "0.536629", "0.53609014", "0.5326579", "0.5305718", "0.5301742", "0.529065", "0.5282745", "0.5281266", "0.5279338", "0.5279338", "0.527048", "0.5270165", "0.52657396", "0.5260169", "0.5245433", "0.5244383", "0.5242271", "0.5235315", "0.5232728", "0.52284276", "0.5226746", "0.52235687", "0.52220285", "0.52199864", "0.5218219", "0.52095497", "0.51861894", "0.517719", "0.51614636", "0.51556623", "0.515261", "0.5150056", "0.514544", "0.51428765", "0.5139288" ]
0.0
-1
List the immediate OutcomeGroup children of the outcome group. Paginated.
def list_subgroups_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):
    if per_page is None:
        per_page = request_ctx.per_page
    path = '/v1/courses/{course_id}/outcome_groups/{id}/subgroups'
    payload = {
        'per_page': per_page,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    response = client.get(request_ctx, url, payload=payload, **request_kwargs)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_children(self):\n\n pass", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent", "def get_children(self):\r\n return self.children", "def children(self): # noqa: ANN201", "def get_children(self):\n return []", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def getChildren(self):\n return []", "def get_children(self):\n return self.children", "def get_children(self):\r\n return self._children", "def get_children(self):\n return self.items", "def get_children(self):\n raise NotImplementedError()", "def children(self):\n return self._children", "def children(self):\n return self._children", "def children(self):\n \n return self._children", "def get_children(self):\n return self._children", "def children(self) -> Iterable[Heirarchical]:\n return []", "def getChildren(self):\n \n return self._children", "def children(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in sorted(self._children_ids, key=self._sort_by_birth)]", "def get_childs(self):\n\t\treturn self.__childs", "def children_of(self, member_name):\n for relationship in self.relationships[member_name]:\n yield self.members[relationship.child]", "def children(self):\n return list(self._children)", "def children(self):\r\n return self.location_set.filter(hidden=False)", "def getChildren():", "def GetChildren(self):\r\n\r\n return self._children", "def descendants(self):\n for a in self._related(set(), 'children'):\n yield a", "def fm_all_children(self):\n return self._relation_lst[self.CHILD].copy()", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def GetChildren(self, *args, **kwargs):\n pass", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def test_get_all_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def child_views(self):\n return self.children", "def get_epic_children(self) -> list:\n\n children = [i['key'] for i in self.repo.api_call(requests.get, f\"search?jql=cf[10008]='{self.jira_key}'\")['issues']]\n return children", "def test_get_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def children(self) -> List[Region]:\n return []", "def get_children_elements(self):\n\n pass", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def children(self) -> List[Region]:\n return self._children", "def getChildren(self):\n return self.child_edges", "def get_children_queryset(self):\n pass", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n 
return []", "def all_children(self):\n\n for child in self.children:\n yield child\n for sub_child in child.all_children():\n yield sub_child", "def getChildren(self):\n return self.directories.values()", "def children(self):\n startkey, endkey = self._key_bounds\n depth = len(self.path) + 2 # 1 for domain, 1 for next location level\n q = self.view('locations/hierarchy', startkey=startkey, endkey=endkey, group_level=depth)\n keys = [e['key'] for e in q if len(e['key']) == depth]\n return self.view('locations/hierarchy', keys=keys, reduce=False, include_docs=True).all()", "def get_children(self):\n return [node for node in self._children.values()]", "def get_organization_group_children_url(og_id):\n\n return '{organization_group_api_path}/{organization_group_id}/children'. \\\n format(organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH, organization_group_id=og_id)", "def __iter__(self):\n return iter(self.__children)", "def __iter__(self):\n\n for i in self._children:\n yield i", "def get_children(self):\r\n\r\n # FIXME: Expose iteration from CIndex, PR6125.\r\n def visitor(child, parent, children):\r\n # FIXME: Document this assertion in API.\r\n # FIXME: There should just be an isNull method.\r\n assert child != conf.lib.clang_getNullCursor()\r\n\r\n # Create reference to TU so it isn't GC'd before Cursor.\r\n child._tu = self._tu\r\n children.append(child)\r\n return 1 # continue\r\n children = []\r\n conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),\r\n children)\r\n return iter(children)", "def children(self) -> List[str]:\n return self._children", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def __iter__(self):\n return iter(self._children)", "def print_children(self, offset=None, outputfile=None):\n if offset is None:\n offset = ''\n if outputfile is None:\n print(offset + self.name)\n else:\n outputfile.write(offset + self.name + '\\n')\n for i in range(len(self.children)):\n self.children[i].print_children(offset=offset + ' ', outputfile=outputfile)", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def get_all_children(self):\n return tuple(self.children)", "def __iter__(self):\n for child in self.children:\n yield child", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def _get_children(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_sub_properties(self)", "def children(self):\n address = self.address\n if address:\n address += \"/\"\n\n # Escape the address for re matching\n addres = re.escape(address)\n regex = \"^\" + address + \"[^/]+$\"\n children = Page.objects.filter(address__regex=regex).order_by(\"address\")\n return list(children)", "def descendants(self):\n for child in self.children:\n yield child\n if isinstance(child, LoggedAction):\n for descendant in child.descendants():\n yield descendant", "def get_children(self, item, 
level):\n return item.children", "def children_ids(self):\n return self._children_ids", "def getchildren(self):\n return self.root.getchildren()", "def children(self):\n if self._children is None:\n return set()\n else:\n return self._children", "def children(self):\n return self.leaves", "def get_children(self):\n\n return self._children.copy()", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def children(self, i):\n if i < 0:\n raise IndexError()\n return self._children[i]", "def get_children(target, concept_map):\n child_inds = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n target_row = concept_map[ADJ_MAT_STR][target_index]\n for ind in range(len(target_row)): # for each ind in row of adj mat\n val = target_row[ind]\n if(val>0 and ind != target_index): # don't care concept is child of itself\n child_inds.append(ind)\n return list(map(lambda ind: concept_map[CONCEPTS_STR][ind], child_inds))", "def get_children(self):\r\n\r\n if not self.has_children:\r\n return []\r\n\r\n if getattr(self, '_child_instances', None) is None:\r\n self._child_instances = [] # pylint: disable=attribute-defined-outside-init\r\n for child_loc in self.children:\r\n try:\r\n child = self.runtime.get_block(child_loc)\r\n child.runtime.export_fs = self.runtime.export_fs\r\n except ItemNotFoundError:\r\n log.exception(u'Unable to load item {loc}, skipping'.format(loc=child_loc))\r\n continue\r\n self._child_instances.append(child)\r\n\r\n return self._child_instances", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def get_list(self):\n obj_list = []\n for group in self.root_item.child_items:\n for item in group.child_items:\n obj_list.append(item.obj)\n\n return obj_list", "def _children(self):\n for codeobj in self.body:\n if isinstance(codeobj, CodeEntity):\n yield codeobj", "def get_object_childs(self, obj_name):\n index = 0\n children_list = []\n child = 0\n parent_handle = self.get_object_handle(obj_name)\n while child != -1:\n res, child = vrep.simxGetObjectChild(self.client_id, parent_handle, index, vrep.simx_opmode_blocking)\n if res == vrep.simx_return_ok:\n children_list.append(child)\n index = index + 1\n else:\n print('Remote fucntion get_object_childs call failed.')\n return []\n del children_list[len(children_list) - 1]\n return children_list", "def Children(self) -> _n_1_t_2:", "def get_b_children(self, b_obj):\n return [child for child in Blender.Object.Get()\n if child.parent == b_obj]", "def _get_children(self):\n return set()", "def children(self):\n return self.hashring_watch.get_children()", "def visit_children(self, tree: lark.Tree) -> List[Result]:\n self.level += 1\n result = 
super().visit_children(tree)\n self.level -= 1\n return result", "def getAllChildren(self):\n \n l = []\n for child in self._children:\n l.append(child)\n l.extend(child.getAllChildren())\n \n return l", "def children(self):\n return {x[1] for x in self.outgoing}", "def children(self):\n return [self.cut]", "def make_children(self):\n\t\tchildren = []\n\t\tfor action in self.observations:\n\t\t\tchildren.append(\"empty\")\n\n\t\treturn children", "def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def __next__(self):\n for child in self.children:\n yield child", "def get_next(self):\n return self.childs", "def children(self, child_class=None):\n if not child_class:\n child_class = self.CHILD_CLASS\n # pylint: disable=no-member\n return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(\n lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results", "def get_children(self, child_range=None):\n child_c, child_r = self.collection.get_child()\n child_c = [\"{}\".format(c) for c in child_c]\n res = child_c + child_r\n if child_range:\n start, stop = (int(el) for el in child_range.split(\"-\", 1))\n # map CDMI range value to python index\n stop += 1\n else:\n start = 0\n stop = len(res)\n return res[start:stop]", "def current_container_children(self):\n # noinspection PyProtectedMember\n return self.current_container._all_children", "def children(self):\r\n descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(\r\n lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results\r\n\r\n # Now remove any non-direct descendants.\r\n grandkids = []\r\n for descendant in descendants:\r\n grandkids.extend(descendant.children)\r\n\r\n grand_locators = [grandkid.locator for grandkid in grandkids]\r\n return [descendant for descendant in descendants if not descendant.locator in grand_locators]", "def walk(root_group):\n groups = [root_group]\n sheets = list(root_group.child_sheets)\n for child_group in root_group.child_groups:\n descendent_groups, descendent_sheets = walk(child_group)\n groups += descendent_groups\n sheets += descendent_sheets\n return groups, sheets", "def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents", "def get_child_resource_nodes(self):\n raise errors.Unimplemented()", "def get_children(self, context: ResourceCommandContext, obj_ref: str, child_type: str) -> list:\n return self.handler.get_children(obj_ref, child_type)", "def outcomes(self):\n return self._get_child_page_of_type(LearningOutcomesPage)", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ..." ]
[ "0.6219274", "0.62047005", "0.6197958", "0.6164997", "0.6146464", "0.6123517", "0.6123517", "0.6123517", "0.6090403", "0.60803944", "0.6052547", "0.6046702", "0.6037661", "0.60363", "0.60363", "0.6025189", "0.59819186", "0.59583235", "0.5945395", "0.59407395", "0.593865", "0.5887591", "0.58798635", "0.58404845", "0.5832817", "0.58281183", "0.58222735", "0.57788885", "0.57717764", "0.5765069", "0.5743773", "0.5743773", "0.57257074", "0.5677659", "0.56605566", "0.56603783", "0.56386477", "0.5634943", "0.5632342", "0.56318647", "0.5605921", "0.56046265", "0.55895394", "0.5587971", "0.55785775", "0.5572745", "0.5563347", "0.5562666", "0.5551925", "0.55442244", "0.5519042", "0.55178314", "0.5515718", "0.55053073", "0.5502807", "0.54746705", "0.546689", "0.5466064", "0.54638535", "0.5459027", "0.54564965", "0.5432159", "0.5413271", "0.5391215", "0.5380636", "0.53701955", "0.536629", "0.53609014", "0.5326579", "0.5305718", "0.5301742", "0.529065", "0.5282745", "0.5281266", "0.5279338", "0.5279338", "0.527048", "0.5270165", "0.52657396", "0.5260169", "0.5245433", "0.5244383", "0.5242271", "0.5235315", "0.5232728", "0.52284276", "0.5226746", "0.52235687", "0.52220285", "0.52199864", "0.5218219", "0.52095497", "0.51861894", "0.517719", "0.51614636", "0.51556623", "0.515261", "0.5150056", "0.514544", "0.51428765", "0.5139288" ]
0.0
-1
Creates a new empty subgroup under the outcome group with the given title and description.
def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):
    path = '/v1/global/outcome_groups/{id}/subgroups'
    payload = {
        'title': title,
        'description': description,
        'vendor_guid': vendor_guid,
    }
    url = request_ctx.base_api_url + path.format(id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def with_group(title: str) -> Generator[None, None, None]:\n if os.environ.get(\"GITHUB_ACTIONS\", \"false\") != \"true\":\n console.print(\"#\" * 10 + \" [bright_blue]\" + title + \"[/] \" + \"#\" * 10)\n yield\n return\n console.print(f\"::group::[bright_blue]{title}[/]\")\n yield\n console.print(\"::endgroup::\")", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_cannot_create_group_with_empty_field(self):\n\n utils.create_user_and_authenticate(self)\n group_fields = ['name', 'description']\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def create_subgroup_courses(request_ctx, course_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def test_empty_description_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={'message': \"Description cannot be empty.\", 'status':\"error\"},\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('Project', '')", "def write_group_start(self, title):\n self.write('H', GROUP_START)\n self.write('i', ((len(title) + 1) * 2) + DB_STRING_SIZE_SZ)\n self.write('H', len(title) + 1)\n self.write_string(title, double_byte=True)", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_create_project_title_delimiter(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': 'New{}Project'.format(CAT_DELIMITER),\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def add_group(self, *args, **kwds):\n title = kwds.pop('title', None)\n description = kwds.pop('description', None)\n if kwds:\n raise Exception('unknown keyword arguments: %s' % kwds)\n\n # set title, description if args[0] is string\n if isinstance(args[0], string_types):\n title = args[0]\n args = args[1:]\n if isinstance(args[0], string_types):\n description = args[0]\n args = args[1:]\n\n assert all(isinstance(arg, Command) for arg in args), 'all args should be instance of Command'\n self._arg_stack.append(('group', args, {'title': title, 'description': description}))\n return self", "def create_project(self, conn, name, description=\"\"):\n group = conn.group.allocate(name, description)\n # returns Project object\n return group", "def test_required_group_empty(self):\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n groups={'empty': GroupBuilder('empty')},\n )\n self.assertBuilderEqual(expected, builder)", "def _assert_create_group(self, personality, response=400):\n group_response = self.autoscale_behaviors.create_scaling_group_given(\n lc_personality=personality)\n self.assertEquals(group_response.status_code, response, msg='Create group '\n 'with invalid lc_personality returned {0} as against '\n '{1}'.format(group_response.status_code, response))\n if response is 200:\n group = 
group_response.entity\n self.resources.add(group, self.empty_scaling_group)\n return group", "def test_trivial(self):\n group = Group()", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' )\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' )\n return group_dict", "def test_create_resource_group(self):\n pass", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def put_in_groupbox(widget, title):\n box = QtGui.QGroupBox(title)\n layout = QtGui.QHBoxLayout(box)\n layout.addWidget(widget)\n return box", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def _create_course(self):\r\n super(TestPublish, self)._create_course(split=False)\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion1',\r\n \"discussion discussion_category=\\\"Lecture 1\\\" discussion_id=\\\"a08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 1\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 1\",\r\n \"discussion_target\": \"Lecture 1\",\r\n \"display_name\": \"Lecture 1 Discussion\",\r\n \"discussion_id\": \"a08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert1',\r\n split=False\r\n )\r\n self._create_item('html', 'Html2', \"<p>Hellow</p>\", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion2',\r\n \"discussion discussion_category=\\\"Lecture 2\\\" discussion_id=\\\"b08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 2\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 2\",\r\n 
\"discussion_target\": \"Lecture 2\",\r\n \"display_name\": \"Lecture 2 Discussion\",\r\n \"discussion_id\": \"b08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert2',\r\n split=False\r\n )\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None, split=False)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None, split=False)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None, split=False)", "def make_new_post(title: str, category: int, description: str):\n slug = _get_slug(title)\n header = _make_header(title, category, description, slug)\n filename = _get_filename(slug)\n with open(filename, \"w\") as fp:\n fp.write(header)\n print(f\"Created {filename}\")", "def test_createGroup(self):\n tabGroup = widgets.TabGroup(u'group1', u'Group', tabs=[\n widgets.Tab(u'id4', u'Title 4', self.contentFactory)])\n tabs = self.tabs + [\n tabGroup,\n widgets.Tab(u'id5', u'Title 5', self.contentFactory)]\n tabView = widgets.TabView(tabs)\n self.assertEquals(\n tabView.getTabIDs(),\n [u'id1', u'id2', u'id3', u'id4', u'id5'])\n self.assertEquals(\n tabView._tabGroups,\n {u'group1': tabGroup})", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def add_argument_group(self, title=None, description=None):\n\n if title is None:\n raise NameError('Missing arguments group title.')\n\n group = self._parser.add_argument_group(title, description)\n self._argumentGroups.append(group)\n\n return group", "def test_create_project_duplicate_title(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': self.project.title,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_add_group(self):\n pass", "def creategroup(body):\n group = body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def create_division(self, division_title):\n request = post(url=self.base_url + 'api/services/etender/division/CreateDivision',\n headers=self.headers,\n data=json.dumps({\"title\": division_title}))\n self.division = json.loads(request.content).get('result')\n print('Created division:', self.division)\n return self.division", "def add_group(self, groupname, grouptitle, path_to_group='/'):\n self.open_db()\n group = 
self.group_exists(path_to_group, groupname)\n if group is False:\n group = self.h5file.create_group(path_to_group, groupname,\n grouptitle)\n return group", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_new_lab(title):\n\n lab = Lab(title=title)\n db.session.add(lab)\n db.session.commit()\n\n return lab", "def _create(self, title=''):\n return ContentObject(title)", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def argument_group(self, *, title: str = None, description: str = None):\n return self.parser.add_argument_group(title, description)", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def make(self):\n numberOfGroups = np.random.randint(1, len(self.getFirstParent().getGroups()))\n secParentGroups = np.random.choice(self.secondParent.getGroups(), numberOfGroups, replace=False)\n groups = []\n allSecElements = []\n numberOfElements = 0\n\n for grpSec in secParentGroups:\n allSecElements += grpSec.getElements()\n\n for grpFst in self.getFirstParent().getGroups():\n numberOfElements += len(grpFst.getElements())\n elements = list(set(grpFst.getElements()) - set(allSecElements))\n group = Group(grpFst.getIndex(), grpFst.getMinElements(), grpFst.getMaxElements())\n group.setElements(elements)\n groups.append(group)\n\n for grpSec in secParentGroups:\n for grpFst in groups:\n if grpSec.getIndex() == grpFst.getIndex():\n grpFst.addElements(grpSec.getElements())\n\n child = Individual(np.zeros(numberOfElements))\n child.setGroups(groups)\n\n return child", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def _add_create_command(subparser: _SubParsersAction):\r\n parser = subparser.add_parser('create', help='Create a new folder.') \r\n parser.add_argument(\r\n '--project',\r\n required=True,\r\n help='Project key of the project that the folder will be created under.'\r\n )\r\n 
parser.add_argument(\r\n '--name',\r\n required=False,\r\n help='Name of the folder.'\r\n )\r\n parser.add_argument(\r\n '--type',\r\n required=False,\r\n choices=['plan', 'case', 'cycle'],\r\n help='Type of folder to create.',\r\n )\r\n parser.set_defaults(cmd=CreateFolderCommand(parser))", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_subgroup_accounts(request_ctx, account_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def _problem(self, parent, group):\r\n return ItemFactory.create(\r\n parent_location=parent.location,\r\n category=\"problem\",\r\n display_name=\"Group {} Sees This Problem\".format(group),\r\n data=\"<h1>No Problem Defined Yet!</h1>\",\r\n )", "def create_tag_with_no_entries(title):\n return Tag.objects.create(title=title)", "def createFolder(self, title, description=\"\", index=None):\n assert isinstance(index, int) or index is None\n\n try:\n if index is None:\n url = self.metaData.getLink(\"create-folder\")\n else:\n url = self.getFolders()[index].getLink(\"create-folder\")\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n\n skeletonFolder = {\"title\" : title, \"description\" : description}\n jsonString = json.dumps(skeletonFolder)\n response = self._adapter.postRequest(url, header, jsonString)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + index + \" does not exist in the list of folder numbers we have\")", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code 
== 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def create_app_policy_group(self, name, **kwargs):\n post_body = {'application_policy_group': {'name': name}}\n if kwargs.get('description'):\n post_body['description'] = kwargs.get('description')\n post_body = json.dumps(post_body)\n resp, body = self.post(self.get_uri(self.resource), post_body)\n body = json.loads(body)\n self.expected_success(http_client.CREATED, resp.status)\n return rest_client.ResponseBody(resp, body)", "def __init__(self, name, desc):\n self.name = name\n self.desc = desc\n self.group = None", "def test_optional_group_empty(self):\n\n self.mapper.map_spec('attr3', self.mapper.spec.get_group('empty').get_attribute('attr3'))\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n bar_inst1._Bar__attr3 = None # force attr3 to be None\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n )\n self.assertBuilderEqual(expected, builder)", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def add_subgroup(self, new_subgroup):\n self.subgroups[new_subgroup.get_title()] = new_subgroup", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def 
test_verify_that_you_can_create_a_new_group():", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_argument_group(self, *args, **kwargs):\n title = kwargs.get('title', args[0])\n for group in self._action_groups:\n if group.title == title:\n return group\n group = MutableArgumentGroup(self, *args, **kwargs)\n self._action_groups.append(group)\n return group", "def new_entry(self, entry=\"entry\", program_name=\"pyFAI\",\n title=\"description of experiment\",\n force_time=None, force_name=False):\n\n if not force_name:\n nb_entries = len(self.get_entries())\n entry = \"%s_%04i\" % (entry, nb_entries)\n entry_grp = self.h5.require_group(entry)\n entry_grp.attrs[\"NX_class\"] = numpy.string_(\"NXentry\")\n entry_grp[\"title\"] = numpy.string_(title)\n entry_grp[\"program_name\"] = numpy.string_(program_name)\n if force_time:\n entry_grp[\"start_time\"] = numpy.string_(force_time)\n else:\n entry_grp[\"start_time\"] = numpy.string_(get_isotime())\n self.to_close.append(entry_grp)\n return entry_grp", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def create(self, title):\n return self.app.post('/new-board', data = dict(\n title = title\n ), follow_redirects = True)", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "def create_test_portgroup(**kw):\n portgroup = get_test_portgroup(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del portgroup['id']\n dbapi = db_api.get_instance()\n return dbapi.create_portgroup(portgroup)", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_single_group(self) -> None:\n self.settings.set(\"show_group_caption\", True)\n setting: ShowGroupCaptionTabSetting = ShowGroupCaptionTabSetting(\n self.settings,\n sublime.active_window()\n )\n scratch_view: sublime.View = sublime.active_window().new_file()\n tabs: List[Tab] = [Tab(scratch_view)]\n\n # single column layout\n layout: Dict[str, List] = {\n \"cells\": [[0, 0, 1, 1]],\n \"cols\": [0.0, 1.0],\n \"rows\": [0.0, 1.0]\n }\n\n sublime.active_window().set_layout(layout)\n\n self.assertFalse(setting.is_enabled())\n self.assertListEqual(tabs, setting.apply(tabs))\n self.assertListEqual([], tabs[0].get_captions())", "def groups(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for group in extracted:\n # pylint: disable=no-member\n 
self.groups.add(group)", "def test_optional_group_not_empty(self):\n\n self.mapper.map_spec('attr3', self.mapper.spec.get_group('empty').get_attribute('attr3'))\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10, attr3=1.23)\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n groups={'empty': GroupBuilder(\n name='empty',\n attributes={'attr3': 1.23},\n )},\n )\n self.assertBuilderEqual(expected, builder)", "def group(*args, absolute: bool=True, empty: bool=True, name: AnyStr=\"\", parent: AnyStr=\"\",\n relative: bool=True, useAsGroup: AnyStr=\"\", world: bool=True, **kwargs)->AnyStr:\n pass", "def create_research_group(self, name, code=None, description=None):\n ResearchGroupRepository = get_repository('ResearchGroupRepository')\n\n research_group = ResearchGroup(unit_id=self.id, code=code)\n\n research_group.name = name\n\n research_group.description=description\n research_group.user_id= self.user_id\n research_group.startdate = None\n research_group.enddate = None\n research_group.license = None\n research_group.ids = None\n ResearchGroupRepository.save(research_group)\n\n return research_group", "def test_validate_title_identical(self):\n with self.assertRaises(ValidationError):\n self.make_project(\n title='TestCategory',\n type=PROJECT_TYPE_PROJECT,\n parent=self.category,\n )", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def create(self, title: str, levels: list, always_included: bool, elasticube: str = None) -> Resource:\n elasticube = elasticube if elasticube else self._elasticube\n data = {'title': title, 'levels': levels, 'alwaysIncluded': always_included}\n\n content = self._api.post(f'elasticubes/localhost/{elasticube}/hierarchies', data=data)\n return Hierarchy(self._api, content, elasticube)", "def create_human(self):\n self._type = Genre.HUMAN\n self._team = 0", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def get_existing_test_group(self, obj: object) -> str:", "def create_seurity_group(self):\n return True", "def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert 
group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)", "def create_group(name, nodes, description=None):\n group, created = Group.get_or_create(name=name)\n if created:\n print('Group created with PK={} and name {}'.format(group.pk, group.name))\n else:\n print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk))\n answer = raw_input()\n if answer.strip().lower() == 'y':\n pass\n else:\n return\n nodes2 = []\n nodes2_pks = []\n for node in nodes:\n try:\n node = int(node)\n except ValueError:\n pass\n nodes2_pks.append(node)\n try:\n nodes2.append(load_node(node))\n except:# NotExistentError:\n pass\n\n group.add_nodes(nodes2)\n print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))\n\n if description:\n group.description = description\n\n return group", "def save_new_valid_exploration(\n self, exploration_id, owner_id, title='A title'):\n exploration = exp_domain.Exploration.create_default_exploration(\n exploration_id, title, 'A category')\n exploration.states[exploration.init_state_name].widget.handlers[\n 0].rule_specs[0].dest = feconf.END_DEST\n exploration.objective = 'An objective'\n exp_services.save_new_exploration(owner_id, exploration)\n return exploration", "def test_add_existing_emptygroup_unauthorized(self, inventoryloader):\n assert 'glance_api' in inventoryloader.groups\n inventoryloader.add_group(u'glance_api', allow_update=False)\n # but ensures variables didn't get overriden\n assert 'management_bridge' in inventoryloader.groups['glance_api'].vars", "async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def new_group(request):\n return edit_group(request, None)", "def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n 
group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n 
create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group" ]
[ "0.5995512", "0.57896537", "0.5699493", "0.55475307", "0.55475307", "0.55366933", "0.55120105", "0.5418014", "0.5416043", "0.53805554", "0.53521186", "0.53274035", "0.5322022", "0.53077036", "0.529829", "0.5248793", "0.5231281", "0.5218727", "0.5214925", "0.5195291", "0.5195138", "0.5189096", "0.51839924", "0.51741415", "0.51573783", "0.515698", "0.51557213", "0.5152268", "0.51454943", "0.51309705", "0.5130206", "0.51195765", "0.51194304", "0.5108653", "0.51021045", "0.50960755", "0.50933313", "0.5093023", "0.50878716", "0.5082816", "0.5067029", "0.5058224", "0.505791", "0.50524807", "0.50453764", "0.5044077", "0.5039243", "0.5038738", "0.5036768", "0.50341004", "0.5023766", "0.5018026", "0.5015707", "0.5010489", "0.5003552", "0.49816284", "0.4981605", "0.49783713", "0.49771774", "0.49681202", "0.49603915", "0.49591765", "0.49568456", "0.49547973", "0.493865", "0.49354917", "0.4927037", "0.49233532", "0.49122274", "0.49097893", "0.49006355", "0.48986998", "0.4893442", "0.487306", "0.48612246", "0.48570105", "0.48569962", "0.4849877", "0.48407304", "0.48302618", "0.48292556", "0.48209947", "0.48117328", "0.4810394", "0.48062333", "0.48048976", "0.48024464", "0.47973624", "0.4793661", "0.4792301", "0.47766462", "0.47622278", "0.47609714", "0.47486022", "0.47466755", "0.47379294", "0.47363847", "0.4735766", "0.47146642", "0.47127828" ]
0.6064545
0
Creates a new empty subgroup under the outcome group with the given title and description.
def create_subgroup_accounts(request_ctx, account_id, id, title, description=None, vendor_guid=None, **request_kwargs):

    path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'
    payload = {
        'title' : title,
        'description' : description,
        'vendor_guid' : vendor_guid,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def with_group(title: str) -> Generator[None, None, None]:\n if os.environ.get(\"GITHUB_ACTIONS\", \"false\") != \"true\":\n console.print(\"#\" * 10 + \" [bright_blue]\" + title + \"[/] \" + \"#\" * 10)\n yield\n return\n console.print(f\"::group::[bright_blue]{title}[/]\")\n yield\n console.print(\"::endgroup::\")", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_cannot_create_group_with_empty_field(self):\n\n utils.create_user_and_authenticate(self)\n group_fields = ['name', 'description']\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def create_subgroup_courses(request_ctx, course_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def test_empty_description_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={'message': \"Description cannot be empty.\", 'status':\"error\"},\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('Project', '')", "def write_group_start(self, title):\n self.write('H', GROUP_START)\n self.write('i', ((len(title) + 1) * 2) + DB_STRING_SIZE_SZ)\n self.write('H', len(title) + 1)\n self.write_string(title, double_byte=True)", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_create_project_title_delimiter(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': 'New{}Project'.format(CAT_DELIMITER),\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def add_group(self, *args, **kwds):\n title = kwds.pop('title', None)\n description = kwds.pop('description', None)\n if kwds:\n raise Exception('unknown keyword arguments: %s' % kwds)\n\n # set title, description if args[0] is string\n if isinstance(args[0], string_types):\n title = args[0]\n args = args[1:]\n if isinstance(args[0], string_types):\n description = args[0]\n args = args[1:]\n\n assert all(isinstance(arg, Command) for arg in args), 'all args should be instance of Command'\n self._arg_stack.append(('group', args, {'title': title, 'description': description}))\n return self", "def create_project(self, conn, name, description=\"\"):\n group = conn.group.allocate(name, description)\n # returns Project object\n return group", "def test_required_group_empty(self):\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n groups={'empty': GroupBuilder('empty')},\n )\n self.assertBuilderEqual(expected, builder)", "def _assert_create_group(self, personality, response=400):\n group_response = self.autoscale_behaviors.create_scaling_group_given(\n lc_personality=personality)\n self.assertEquals(group_response.status_code, response, msg='Create group '\n 'with invalid lc_personality returned {0} as against '\n '{1}'.format(group_response.status_code, response))\n if response is 200:\n group = 
group_response.entity\n self.resources.add(group, self.empty_scaling_group)\n return group", "def test_trivial(self):\n group = Group()", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' )\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' )\n return group_dict", "def test_create_resource_group(self):\n pass", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def put_in_groupbox(widget, title):\n box = QtGui.QGroupBox(title)\n layout = QtGui.QHBoxLayout(box)\n layout.addWidget(widget)\n return box", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def _create_course(self):\r\n super(TestPublish, self)._create_course(split=False)\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion1',\r\n \"discussion discussion_category=\\\"Lecture 1\\\" discussion_id=\\\"a08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 1\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 1\",\r\n \"discussion_target\": \"Lecture 1\",\r\n \"display_name\": \"Lecture 1 Discussion\",\r\n \"discussion_id\": \"a08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert1',\r\n split=False\r\n )\r\n self._create_item('html', 'Html2', \"<p>Hellow</p>\", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion2',\r\n \"discussion discussion_category=\\\"Lecture 2\\\" discussion_id=\\\"b08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 2\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 2\",\r\n 
\"discussion_target\": \"Lecture 2\",\r\n \"display_name\": \"Lecture 2 Discussion\",\r\n \"discussion_id\": \"b08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert2',\r\n split=False\r\n )\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None, split=False)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None, split=False)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None, split=False)", "def make_new_post(title: str, category: int, description: str):\n slug = _get_slug(title)\n header = _make_header(title, category, description, slug)\n filename = _get_filename(slug)\n with open(filename, \"w\") as fp:\n fp.write(header)\n print(f\"Created {filename}\")", "def test_createGroup(self):\n tabGroup = widgets.TabGroup(u'group1', u'Group', tabs=[\n widgets.Tab(u'id4', u'Title 4', self.contentFactory)])\n tabs = self.tabs + [\n tabGroup,\n widgets.Tab(u'id5', u'Title 5', self.contentFactory)]\n tabView = widgets.TabView(tabs)\n self.assertEquals(\n tabView.getTabIDs(),\n [u'id1', u'id2', u'id3', u'id4', u'id5'])\n self.assertEquals(\n tabView._tabGroups,\n {u'group1': tabGroup})", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def add_argument_group(self, title=None, description=None):\n\n if title is None:\n raise NameError('Missing arguments group title.')\n\n group = self._parser.add_argument_group(title, description)\n self._argumentGroups.append(group)\n\n return group", "def test_create_project_duplicate_title(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': self.project.title,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_add_group(self):\n pass", "def creategroup(body):\n group = body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def create_division(self, division_title):\n request = post(url=self.base_url + 'api/services/etender/division/CreateDivision',\n headers=self.headers,\n data=json.dumps({\"title\": division_title}))\n self.division = json.loads(request.content).get('result')\n print('Created division:', self.division)\n return self.division", "def add_group(self, groupname, grouptitle, path_to_group='/'):\n self.open_db()\n group = 
self.group_exists(path_to_group, groupname)\n if group is False:\n group = self.h5file.create_group(path_to_group, groupname,\n grouptitle)\n return group", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_new_lab(title):\n\n lab = Lab(title=title)\n db.session.add(lab)\n db.session.commit()\n\n return lab", "def _create(self, title=''):\n return ContentObject(title)", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def argument_group(self, *, title: str = None, description: str = None):\n return self.parser.add_argument_group(title, description)", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def make(self):\n numberOfGroups = np.random.randint(1, len(self.getFirstParent().getGroups()))\n secParentGroups = np.random.choice(self.secondParent.getGroups(), numberOfGroups, replace=False)\n groups = []\n allSecElements = []\n numberOfElements = 0\n\n for grpSec in secParentGroups:\n allSecElements += grpSec.getElements()\n\n for grpFst in self.getFirstParent().getGroups():\n numberOfElements += len(grpFst.getElements())\n elements = list(set(grpFst.getElements()) - set(allSecElements))\n group = Group(grpFst.getIndex(), grpFst.getMinElements(), grpFst.getMaxElements())\n group.setElements(elements)\n groups.append(group)\n\n for grpSec in secParentGroups:\n for grpFst in groups:\n if grpSec.getIndex() == grpFst.getIndex():\n grpFst.addElements(grpSec.getElements())\n\n child = Individual(np.zeros(numberOfElements))\n child.setGroups(groups)\n\n return child", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def _add_create_command(subparser: _SubParsersAction):\r\n parser = subparser.add_parser('create', help='Create a new folder.') \r\n parser.add_argument(\r\n '--project',\r\n required=True,\r\n help='Project key of the project that the folder will be created under.'\r\n )\r\n 
parser.add_argument(\r\n '--name',\r\n required=False,\r\n help='Name of the folder.'\r\n )\r\n parser.add_argument(\r\n '--type',\r\n required=False,\r\n choices=['plan', 'case', 'cycle'],\r\n help='Type of folder to create.',\r\n )\r\n parser.set_defaults(cmd=CreateFolderCommand(parser))", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def _problem(self, parent, group):\r\n return ItemFactory.create(\r\n parent_location=parent.location,\r\n category=\"problem\",\r\n display_name=\"Group {} Sees This Problem\".format(group),\r\n data=\"<h1>No Problem Defined Yet!</h1>\",\r\n )", "def create_tag_with_no_entries(title):\n return Tag.objects.create(title=title)", "def createFolder(self, title, description=\"\", index=None):\n assert isinstance(index, int) or index is None\n\n try:\n if index is None:\n url = self.metaData.getLink(\"create-folder\")\n else:\n url = self.getFolders()[index].getLink(\"create-folder\")\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n\n skeletonFolder = {\"title\" : title, \"description\" : description}\n jsonString = json.dumps(skeletonFolder)\n response = self._adapter.postRequest(url, header, jsonString)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + index + \" does not exist in the list of folder numbers we have\")", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def create_app_policy_group(self, name, **kwargs):\n post_body = {'application_policy_group': {'name': name}}\n if kwargs.get('description'):\n post_body['description'] = kwargs.get('description')\n post_body = json.dumps(post_body)\n resp, body = self.post(self.get_uri(self.resource), post_body)\n body = 
json.loads(body)\n self.expected_success(http_client.CREATED, resp.status)\n return rest_client.ResponseBody(resp, body)", "def __init__(self, name, desc):\n self.name = name\n self.desc = desc\n self.group = None", "def test_optional_group_empty(self):\n\n self.mapper.map_spec('attr3', self.mapper.spec.get_group('empty').get_attribute('attr3'))\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n bar_inst1._Bar__attr3 = None # force attr3 to be None\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n )\n self.assertBuilderEqual(expected, builder)", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def add_subgroup(self, new_subgroup):\n self.subgroups[new_subgroup.get_title()] = new_subgroup", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def test_verify_that_you_can_create_a_new_group():", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : 
vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_argument_group(self, *args, **kwargs):\n title = kwargs.get('title', args[0])\n for group in self._action_groups:\n if group.title == title:\n return group\n group = MutableArgumentGroup(self, *args, **kwargs)\n self._action_groups.append(group)\n return group", "def new_entry(self, entry=\"entry\", program_name=\"pyFAI\",\n title=\"description of experiment\",\n force_time=None, force_name=False):\n\n if not force_name:\n nb_entries = len(self.get_entries())\n entry = \"%s_%04i\" % (entry, nb_entries)\n entry_grp = self.h5.require_group(entry)\n entry_grp.attrs[\"NX_class\"] = numpy.string_(\"NXentry\")\n entry_grp[\"title\"] = numpy.string_(title)\n entry_grp[\"program_name\"] = numpy.string_(program_name)\n if force_time:\n entry_grp[\"start_time\"] = numpy.string_(force_time)\n else:\n entry_grp[\"start_time\"] = numpy.string_(get_isotime())\n self.to_close.append(entry_grp)\n return entry_grp", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def create(self, title):\n return self.app.post('/new-board', data = dict(\n title = title\n ), follow_redirects = True)", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "def create_test_portgroup(**kw):\n portgroup = get_test_portgroup(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del portgroup['id']\n dbapi = db_api.get_instance()\n return dbapi.create_portgroup(portgroup)", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_single_group(self) -> None:\n self.settings.set(\"show_group_caption\", True)\n setting: ShowGroupCaptionTabSetting = ShowGroupCaptionTabSetting(\n self.settings,\n sublime.active_window()\n )\n scratch_view: sublime.View = sublime.active_window().new_file()\n tabs: List[Tab] = [Tab(scratch_view)]\n\n # single column layout\n layout: Dict[str, List] = {\n \"cells\": [[0, 0, 1, 1]],\n \"cols\": [0.0, 1.0],\n \"rows\": [0.0, 1.0]\n }\n\n sublime.active_window().set_layout(layout)\n\n self.assertFalse(setting.is_enabled())\n self.assertListEqual(tabs, setting.apply(tabs))\n self.assertListEqual([], tabs[0].get_captions())", "def groups(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for group in extracted:\n # pylint: disable=no-member\n self.groups.add(group)", "def test_optional_group_not_empty(self):\n\n self.mapper.map_spec('attr3', self.mapper.spec.get_group('empty').get_attribute('attr3'))\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10, attr3=1.23)\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n groups={'empty': GroupBuilder(\n name='empty',\n attributes={'attr3': 1.23},\n )},\n )\n self.assertBuilderEqual(expected, 
builder)", "def group(*args, absolute: bool=True, empty: bool=True, name: AnyStr=\"\", parent: AnyStr=\"\",\n relative: bool=True, useAsGroup: AnyStr=\"\", world: bool=True, **kwargs)->AnyStr:\n pass", "def create_research_group(self, name, code=None, description=None):\n ResearchGroupRepository = get_repository('ResearchGroupRepository')\n\n research_group = ResearchGroup(unit_id=self.id, code=code)\n\n research_group.name = name\n\n research_group.description=description\n research_group.user_id= self.user_id\n research_group.startdate = None\n research_group.enddate = None\n research_group.license = None\n research_group.ids = None\n ResearchGroupRepository.save(research_group)\n\n return research_group", "def test_validate_title_identical(self):\n with self.assertRaises(ValidationError):\n self.make_project(\n title='TestCategory',\n type=PROJECT_TYPE_PROJECT,\n parent=self.category,\n )", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def create(self, title: str, levels: list, always_included: bool, elasticube: str = None) -> Resource:\n elasticube = elasticube if elasticube else self._elasticube\n data = {'title': title, 'levels': levels, 'alwaysIncluded': always_included}\n\n content = self._api.post(f'elasticubes/localhost/{elasticube}/hierarchies', data=data)\n return Hierarchy(self._api, content, elasticube)", "def create_human(self):\n self._type = Genre.HUMAN\n self._team = 0", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def get_existing_test_group(self, obj: object) -> str:", "def create_seurity_group(self):\n return True", "def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = 
incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)", "def create_group(name, nodes, description=None):\n group, created = Group.get_or_create(name=name)\n if created:\n print('Group created with PK={} and name {}'.format(group.pk, group.name))\n else:\n print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk))\n answer = raw_input()\n if answer.strip().lower() == 'y':\n pass\n else:\n return\n nodes2 = []\n nodes2_pks = []\n for node in nodes:\n try:\n node = int(node)\n except ValueError:\n pass\n nodes2_pks.append(node)\n try:\n nodes2.append(load_node(node))\n except:# NotExistentError:\n pass\n\n group.add_nodes(nodes2)\n print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))\n\n if description:\n group.description = description\n\n return group", "def save_new_valid_exploration(\n self, exploration_id, owner_id, title='A title'):\n exploration = exp_domain.Exploration.create_default_exploration(\n exploration_id, title, 'A category')\n exploration.states[exploration.init_state_name].widget.handlers[\n 0].rule_specs[0].dest = feconf.END_DEST\n exploration.objective = 'An objective'\n exp_services.save_new_exploration(owner_id, exploration)\n return exploration", "def test_add_existing_emptygroup_unauthorized(self, inventoryloader):\n assert 'glance_api' in inventoryloader.groups\n inventoryloader.add_group(u'glance_api', allow_update=False)\n # but ensures variables didn't get overriden\n assert 'management_bridge' in inventoryloader.groups['glance_api'].vars", "async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def new_group(request):\n return edit_group(request, None)", "def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n 
api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, 
['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group" ]
[ "0.6064545", "0.5995512", "0.57896537", "0.5699493", "0.55475307", "0.55475307", "0.55366933", "0.55120105", "0.5418014", "0.5416043", "0.53805554", "0.53521186", "0.53274035", "0.5322022", "0.53077036", "0.529829", "0.5248793", "0.5231281", "0.5218727", "0.5214925", "0.5195291", "0.5195138", "0.5189096", "0.51839924", "0.51741415", "0.51573783", "0.515698", "0.51557213", "0.5152268", "0.51454943", "0.51309705", "0.5130206", "0.51195765", "0.51194304", "0.5108653", "0.51021045", "0.50960755", "0.50933313", "0.5093023", "0.50878716", "0.5082816", "0.5067029", "0.5058224", "0.505791", "0.50524807", "0.50453764", "0.5044077", "0.5039243", "0.5038738", "0.5036768", "0.50341004", "0.5023766", "0.5015707", "0.5010489", "0.5003552", "0.49816284", "0.4981605", "0.49783713", "0.49771774", "0.49681202", "0.49603915", "0.49591765", "0.49568456", "0.49547973", "0.493865", "0.49354917", "0.4927037", "0.49233532", "0.49122274", "0.49097893", "0.49006355", "0.48986998", "0.4893442", "0.487306", "0.48612246", "0.48570105", "0.48569962", "0.4849877", "0.48407304", "0.48302618", "0.48292556", "0.48209947", "0.48117328", "0.4810394", "0.48062333", "0.48048976", "0.48024464", "0.47973624", "0.4793661", "0.4792301", "0.47766462", "0.47622278", "0.47609714", "0.47486022", "0.47466755", "0.47379294", "0.47363847", "0.4735766", "0.47146642", "0.47127828" ]
0.5018026
52
Creates a new empty subgroup under the outcome group with the given title and description.
def create_subgroup_courses(request_ctx, course_id, id, title, description=None, vendor_guid=None, **request_kwargs):
    path = '/v1/courses/{course_id}/outcome_groups/{id}/subgroups'
    payload = {
        'title' : title,
        'description' : description,
        'vendor_guid' : vendor_guid,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)
    return response
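A minimal usage sketch for the wrapper above. The `RequestContext` construction, the course and group ids, and the `.json()` accessor on the response are illustrative assumptions; the real objects come from the surrounding API-client library.

# Hypothetical setup -- the real RequestContext comes from the client library.
request_ctx = RequestContext(base_api_url='https://canvas.example.com/api',
                             auth_token='<token>')  # assumed constructor

# Create an empty subgroup titled 'Writing Skills' under outcome group 42
# of course 101; vendor_guid is optional and omitted here.
response = create_subgroup_courses(request_ctx, course_id=101, id=42,
                                   title='Writing Skills',
                                   description='Outcomes for written work')
new_subgroup = response.json()  # assuming a requests-style response object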
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def with_group(title: str) -> Generator[None, None, None]:\n if os.environ.get(\"GITHUB_ACTIONS\", \"false\") != \"true\":\n console.print(\"#\" * 10 + \" [bright_blue]\" + title + \"[/] \" + \"#\" * 10)\n yield\n return\n console.print(f\"::group::[bright_blue]{title}[/]\")\n yield\n console.print(\"::endgroup::\")", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_cannot_create_group_with_empty_field(self):\n\n utils.create_user_and_authenticate(self)\n group_fields = ['name', 'description']\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def test_empty_description_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={'message': \"Description cannot be empty.\", 'status':\"error\"},\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('Project', '')", "def write_group_start(self, title):\n self.write('H', GROUP_START)\n self.write('i', ((len(title) + 1) * 2) + DB_STRING_SIZE_SZ)\n self.write('H', len(title) + 1)\n self.write_string(title, double_byte=True)", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_create_project_title_delimiter(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': 'New{}Project'.format(CAT_DELIMITER),\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def add_group(self, *args, **kwds):\n title = kwds.pop('title', None)\n description = kwds.pop('description', None)\n if kwds:\n raise Exception('unknown keyword arguments: %s' % kwds)\n\n # set title, description if args[0] is string\n if isinstance(args[0], string_types):\n title = args[0]\n args = args[1:]\n if isinstance(args[0], string_types):\n description = args[0]\n args = args[1:]\n\n assert all(isinstance(arg, Command) for arg in args), 'all args should be instance of Command'\n self._arg_stack.append(('group', args, {'title': title, 'description': description}))\n return self", "def create_project(self, conn, name, description=\"\"):\n group = conn.group.allocate(name, description)\n # returns Project object\n return group", "def test_required_group_empty(self):\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n groups={'empty': GroupBuilder('empty')},\n )\n self.assertBuilderEqual(expected, builder)", "def _assert_create_group(self, personality, response=400):\n group_response = self.autoscale_behaviors.create_scaling_group_given(\n lc_personality=personality)\n self.assertEquals(group_response.status_code, response, msg='Create group '\n 'with invalid lc_personality returned {0} as against '\n '{1}'.format(group_response.status_code, response))\n if response is 200:\n group = 
group_response.entity\n self.resources.add(group, self.empty_scaling_group)\n return group", "def test_trivial(self):\n group = Group()", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' )\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' )\n return group_dict", "def test_create_resource_group(self):\n pass", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def put_in_groupbox(widget, title):\n box = QtGui.QGroupBox(title)\n layout = QtGui.QHBoxLayout(box)\n layout.addWidget(widget)\n return box", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def _create_course(self):\r\n super(TestPublish, self)._create_course(split=False)\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion1',\r\n \"discussion discussion_category=\\\"Lecture 1\\\" discussion_id=\\\"a08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 1\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 1\",\r\n \"discussion_target\": \"Lecture 1\",\r\n \"display_name\": \"Lecture 1 Discussion\",\r\n \"discussion_id\": \"a08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert1',\r\n split=False\r\n )\r\n self._create_item('html', 'Html2', \"<p>Hellow</p>\", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion2',\r\n \"discussion discussion_category=\\\"Lecture 2\\\" discussion_id=\\\"b08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 2\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 2\",\r\n 
\"discussion_target\": \"Lecture 2\",\r\n \"display_name\": \"Lecture 2 Discussion\",\r\n \"discussion_id\": \"b08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert2',\r\n split=False\r\n )\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None, split=False)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None, split=False)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None, split=False)", "def make_new_post(title: str, category: int, description: str):\n slug = _get_slug(title)\n header = _make_header(title, category, description, slug)\n filename = _get_filename(slug)\n with open(filename, \"w\") as fp:\n fp.write(header)\n print(f\"Created {filename}\")", "def test_createGroup(self):\n tabGroup = widgets.TabGroup(u'group1', u'Group', tabs=[\n widgets.Tab(u'id4', u'Title 4', self.contentFactory)])\n tabs = self.tabs + [\n tabGroup,\n widgets.Tab(u'id5', u'Title 5', self.contentFactory)]\n tabView = widgets.TabView(tabs)\n self.assertEquals(\n tabView.getTabIDs(),\n [u'id1', u'id2', u'id3', u'id4', u'id5'])\n self.assertEquals(\n tabView._tabGroups,\n {u'group1': tabGroup})", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def add_argument_group(self, title=None, description=None):\n\n if title is None:\n raise NameError('Missing arguments group title.')\n\n group = self._parser.add_argument_group(title, description)\n self._argumentGroups.append(group)\n\n return group", "def test_create_project_duplicate_title(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': self.project.title,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_add_group(self):\n pass", "def creategroup(body):\n group = body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def create_division(self, division_title):\n request = post(url=self.base_url + 'api/services/etender/division/CreateDivision',\n headers=self.headers,\n data=json.dumps({\"title\": division_title}))\n self.division = json.loads(request.content).get('result')\n print('Created division:', self.division)\n return self.division", "def add_group(self, groupname, grouptitle, path_to_group='/'):\n self.open_db()\n group = 
self.group_exists(path_to_group, groupname)\n if group is False:\n group = self.h5file.create_group(path_to_group, groupname,\n grouptitle)\n return group", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_new_lab(title):\n\n lab = Lab(title=title)\n db.session.add(lab)\n db.session.commit()\n\n return lab", "def _create(self, title=''):\n return ContentObject(title)", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def argument_group(self, *, title: str = None, description: str = None):\n return self.parser.add_argument_group(title, description)", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def make(self):\n numberOfGroups = np.random.randint(1, len(self.getFirstParent().getGroups()))\n secParentGroups = np.random.choice(self.secondParent.getGroups(), numberOfGroups, replace=False)\n groups = []\n allSecElements = []\n numberOfElements = 0\n\n for grpSec in secParentGroups:\n allSecElements += grpSec.getElements()\n\n for grpFst in self.getFirstParent().getGroups():\n numberOfElements += len(grpFst.getElements())\n elements = list(set(grpFst.getElements()) - set(allSecElements))\n group = Group(grpFst.getIndex(), grpFst.getMinElements(), grpFst.getMaxElements())\n group.setElements(elements)\n groups.append(group)\n\n for grpSec in secParentGroups:\n for grpFst in groups:\n if grpSec.getIndex() == grpFst.getIndex():\n grpFst.addElements(grpSec.getElements())\n\n child = Individual(np.zeros(numberOfElements))\n child.setGroups(groups)\n\n return child", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def _add_create_command(subparser: _SubParsersAction):\r\n parser = subparser.add_parser('create', help='Create a new folder.') \r\n parser.add_argument(\r\n '--project',\r\n required=True,\r\n help='Project key of the project that the folder will be created under.'\r\n )\r\n 
parser.add_argument(\r\n '--name',\r\n required=False,\r\n help='Name of the folder.'\r\n )\r\n parser.add_argument(\r\n '--type',\r\n required=False,\r\n choices=['plan', 'case', 'cycle'],\r\n help='Type of folder to create.',\r\n )\r\n parser.set_defaults(cmd=CreateFolderCommand(parser))", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_subgroup_accounts(request_ctx, account_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def _problem(self, parent, group):\r\n return ItemFactory.create(\r\n parent_location=parent.location,\r\n category=\"problem\",\r\n display_name=\"Group {} Sees This Problem\".format(group),\r\n data=\"<h1>No Problem Defined Yet!</h1>\",\r\n )", "def create_tag_with_no_entries(title):\n return Tag.objects.create(title=title)", "def createFolder(self, title, description=\"\", index=None):\n assert isinstance(index, int) or index is None\n\n try:\n if index is None:\n url = self.metaData.getLink(\"create-folder\")\n else:\n url = self.getFolders()[index].getLink(\"create-folder\")\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n\n skeletonFolder = {\"title\" : title, \"description\" : description}\n jsonString = json.dumps(skeletonFolder)\n response = self._adapter.postRequest(url, header, jsonString)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + index + \" does not exist in the list of folder numbers we have\")", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code 
== 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def create_app_policy_group(self, name, **kwargs):\n post_body = {'application_policy_group': {'name': name}}\n if kwargs.get('description'):\n post_body['description'] = kwargs.get('description')\n post_body = json.dumps(post_body)\n resp, body = self.post(self.get_uri(self.resource), post_body)\n body = json.loads(body)\n self.expected_success(http_client.CREATED, resp.status)\n return rest_client.ResponseBody(resp, body)", "def __init__(self, name, desc):\n self.name = name\n self.desc = desc\n self.group = None", "def test_optional_group_empty(self):\n\n self.mapper.map_spec('attr3', self.mapper.spec.get_group('empty').get_attribute('attr3'))\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n bar_inst1._Bar__attr3 = None # force attr3 to be None\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n )\n self.assertBuilderEqual(expected, builder)", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def add_subgroup(self, new_subgroup):\n self.subgroups[new_subgroup.get_title()] = new_subgroup", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def 
test_verify_that_you_can_create_a_new_group():", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_argument_group(self, *args, **kwargs):\n title = kwargs.get('title', args[0])\n for group in self._action_groups:\n if group.title == title:\n return group\n group = MutableArgumentGroup(self, *args, **kwargs)\n self._action_groups.append(group)\n return group", "def new_entry(self, entry=\"entry\", program_name=\"pyFAI\",\n title=\"description of experiment\",\n force_time=None, force_name=False):\n\n if not force_name:\n nb_entries = len(self.get_entries())\n entry = \"%s_%04i\" % (entry, nb_entries)\n entry_grp = self.h5.require_group(entry)\n entry_grp.attrs[\"NX_class\"] = numpy.string_(\"NXentry\")\n entry_grp[\"title\"] = numpy.string_(title)\n entry_grp[\"program_name\"] = numpy.string_(program_name)\n if force_time:\n entry_grp[\"start_time\"] = numpy.string_(force_time)\n else:\n entry_grp[\"start_time\"] = numpy.string_(get_isotime())\n self.to_close.append(entry_grp)\n return entry_grp", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def create(self, title):\n return self.app.post('/new-board', data = dict(\n title = title\n ), follow_redirects = True)", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "def create_test_portgroup(**kw):\n portgroup = get_test_portgroup(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del portgroup['id']\n dbapi = db_api.get_instance()\n return dbapi.create_portgroup(portgroup)", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_single_group(self) -> None:\n self.settings.set(\"show_group_caption\", True)\n setting: ShowGroupCaptionTabSetting = ShowGroupCaptionTabSetting(\n self.settings,\n sublime.active_window()\n )\n scratch_view: sublime.View = sublime.active_window().new_file()\n tabs: List[Tab] = [Tab(scratch_view)]\n\n # single column layout\n layout: Dict[str, List] = {\n \"cells\": [[0, 0, 1, 1]],\n \"cols\": [0.0, 1.0],\n \"rows\": [0.0, 1.0]\n }\n\n sublime.active_window().set_layout(layout)\n\n self.assertFalse(setting.is_enabled())\n self.assertListEqual(tabs, setting.apply(tabs))\n self.assertListEqual([], tabs[0].get_captions())", "def groups(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for group in extracted:\n # pylint: disable=no-member\n 
self.groups.add(group)", "def test_optional_group_not_empty(self):\n\n self.mapper.map_spec('attr3', self.mapper.spec.get_group('empty').get_attribute('attr3'))\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10, attr3=1.23)\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n groups={'empty': GroupBuilder(\n name='empty',\n attributes={'attr3': 1.23},\n )},\n )\n self.assertBuilderEqual(expected, builder)", "def group(*args, absolute: bool=True, empty: bool=True, name: AnyStr=\"\", parent: AnyStr=\"\",\n relative: bool=True, useAsGroup: AnyStr=\"\", world: bool=True, **kwargs)->AnyStr:\n pass", "def create_research_group(self, name, code=None, description=None):\n ResearchGroupRepository = get_repository('ResearchGroupRepository')\n\n research_group = ResearchGroup(unit_id=self.id, code=code)\n\n research_group.name = name\n\n research_group.description=description\n research_group.user_id= self.user_id\n research_group.startdate = None\n research_group.enddate = None\n research_group.license = None\n research_group.ids = None\n ResearchGroupRepository.save(research_group)\n\n return research_group", "def test_validate_title_identical(self):\n with self.assertRaises(ValidationError):\n self.make_project(\n title='TestCategory',\n type=PROJECT_TYPE_PROJECT,\n parent=self.category,\n )", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def create(self, title: str, levels: list, always_included: bool, elasticube: str = None) -> Resource:\n elasticube = elasticube if elasticube else self._elasticube\n data = {'title': title, 'levels': levels, 'alwaysIncluded': always_included}\n\n content = self._api.post(f'elasticubes/localhost/{elasticube}/hierarchies', data=data)\n return Hierarchy(self._api, content, elasticube)", "def create_human(self):\n self._type = Genre.HUMAN\n self._team = 0", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def get_existing_test_group(self, obj: object) -> str:", "def create_seurity_group(self):\n return True", "def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert 
group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)", "def create_group(name, nodes, description=None):\n group, created = Group.get_or_create(name=name)\n if created:\n print('Group created with PK={} and name {}'.format(group.pk, group.name))\n else:\n print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk))\n answer = raw_input()\n if answer.strip().lower() == 'y':\n pass\n else:\n return\n nodes2 = []\n nodes2_pks = []\n for node in nodes:\n try:\n node = int(node)\n except ValueError:\n pass\n nodes2_pks.append(node)\n try:\n nodes2.append(load_node(node))\n except:# NotExistentError:\n pass\n\n group.add_nodes(nodes2)\n print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))\n\n if description:\n group.description = description\n\n return group", "def save_new_valid_exploration(\n self, exploration_id, owner_id, title='A title'):\n exploration = exp_domain.Exploration.create_default_exploration(\n exploration_id, title, 'A category')\n exploration.states[exploration.init_state_name].widget.handlers[\n 0].rule_specs[0].dest = feconf.END_DEST\n exploration.objective = 'An objective'\n exp_services.save_new_exploration(owner_id, exploration)\n return exploration", "def test_add_existing_emptygroup_unauthorized(self, inventoryloader):\n assert 'glance_api' in inventoryloader.groups\n inventoryloader.add_group(u'glance_api', allow_update=False)\n # but ensures variables didn't get overriden\n assert 'management_bridge' in inventoryloader.groups['glance_api'].vars", "async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def new_group(request):\n return edit_group(request, None)", "def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n 
group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n 
create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group" ]
[ "0.6064545", "0.5995512", "0.57896537", "0.5699493", "0.55475307", "0.55475307", "0.55366933", "0.55120105", "0.5418014", "0.5416043", "0.53521186", "0.53274035", "0.5322022", "0.53077036", "0.529829", "0.5248793", "0.5231281", "0.5218727", "0.5214925", "0.5195291", "0.5195138", "0.5189096", "0.51839924", "0.51741415", "0.51573783", "0.515698", "0.51557213", "0.5152268", "0.51454943", "0.51309705", "0.5130206", "0.51195765", "0.51194304", "0.5108653", "0.51021045", "0.50960755", "0.50933313", "0.5093023", "0.50878716", "0.5082816", "0.5067029", "0.5058224", "0.505791", "0.50524807", "0.50453764", "0.5044077", "0.5039243", "0.5038738", "0.5036768", "0.50341004", "0.5023766", "0.5018026", "0.5015707", "0.5010489", "0.5003552", "0.49816284", "0.4981605", "0.49783713", "0.49771774", "0.49681202", "0.49603915", "0.49591765", "0.49568456", "0.49547973", "0.493865", "0.49354917", "0.4927037", "0.49233532", "0.49122274", "0.49097893", "0.49006355", "0.48986998", "0.4893442", "0.487306", "0.48612246", "0.48570105", "0.48569962", "0.4849877", "0.48407304", "0.48302618", "0.48292556", "0.48209947", "0.48117328", "0.4810394", "0.48062333", "0.48048976", "0.48024464", "0.47973624", "0.4793661", "0.4792301", "0.47766462", "0.47622278", "0.47609714", "0.47486022", "0.47466755", "0.47379294", "0.47363847", "0.4735766", "0.47146642", "0.47127828" ]
0.53805554
10
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):
    path = '/v1/global/outcome_groups/{id}/import'
    payload = {
        'source_outcome_group_id' : source_outcome_group_id,
    }
    url = request_ctx.base_api_url + path.format(id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)
    return response
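A matching usage sketch, under the same assumptions about `request_ctx` as above; the ids and the `.json()` call are illustrative.

# Recreate the subgroup/link structure of source group 7 inside global
# outcome group 3. Per the description above, only links are created --
# the outcomes themselves are not duplicated.
response = import_outcome_group_global(request_ctx, id=3,
                                       source_outcome_group_id=7)
imported_group = response.json()  # assuming a requests-style response object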
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = 
'/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_convert_to_existing_group2(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance_all')\n inventoryloader.convert_group('glance_registry', 'glance_all')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance_registry' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert not inventoryloader.groups['glance_all'].has_group('glance_registry')\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.groups['glance_all'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_all'].vars", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def 
test_convert_to_existing_group(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance_registry')\n assert 'glance_api' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance_registry'].has_host('localhost')\n assert inventoryloader.groups['glance_registry'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_registry'].vars", "def addgroup(self, abspath=None, sourcetree=pbxconsts.SOURCE_TREE.group, name=None, move=True):\n group_name = os.path.basename(abspath) if name is None or len(name) == 0 else name\n abspath = abspath if not abspath is None else self.realpath()\n subgroup = func.get_list_item(func.take(\\\n lambda o: o.isa == u'PBXGroup' and o.realpath() == abspath \\\n and o.displayname() == group_name, self.pbx_children), 0)\n if subgroup is None:\n subgroup = self.project().new_object(u'PBXGroup')\n pbxpath.set_path_with_source_tree(subgroup, abspath, source_tree=sourcetree, \\\n parent_group=self)\n if not name is None:\n subgroup.pbx_name = name\n self.addchild(subgroup, move=move)\n return subgroup", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def clone(self):\n return _libsbml.Group_clone(self)", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, 
ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_from_src(self, cgsnapshot_id, source_cgid, name=None,\n description=None, user_id=None,\n project_id=None):\n body = {'consistencygroup-from-src': {'name': name,\n 'description': description,\n 'cgsnapshot_id': cgsnapshot_id,\n 'source_cgid': source_cgid,\n 'user_id': user_id,\n 'project_id': project_id,\n 'status': \"creating\",\n }}\n\n self.run_hooks('modify_body_for_update', body,\n 'consistencygroup-from-src')\n resp, body = self.api.client.post(\n \"/consistencygroups/create_from_src\", body=body)\n return common_base.DictWithMeta(body['consistencygroup'], resp)", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in 
group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def copyGroupFrom(self, groupName, sourceDesign, sourceProject=None, sourceProjectPath=None):\n oName = self.project_name\n if sourceProject == oName or sourceProject is None:\n oSrcProject = self._desktop.GetActiveProject()\n else:\n self._desktop.OpenProject(sourceProjectPath)\n oSrcProject = self._desktop.SetActiveProject(sourceProject)\n\n oDesign = oSrcProject.SetActiveDesign(sourceDesign)\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\n oEditor.Copy([\"NAME:Selections\", \"Selections:=\", groupName])\n\n self.modeler.oeditor.Paste()\n self.modeler.primitives.refresh_all_ids()\n self.materials._load_from_project()\n return True", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def test_does_not_return_duplicate_groups(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n 
)\n commit2 = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n GroupResolution.objects.create(\n group=self.group,\n release=self.release,\n type=GroupResolution.Type.in_release,\n )\n\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def test_mergeGroups(self):\n tabs = [\n widgets.Tab(u'id1', u'Title 1', None),\n widgets.Tab(u'id2', u'Title 2', None)]\n tabGroup1 = widgets.TabGroup(u'id', u'Title', tabs=tabs)\n tabs = [\n widgets.Tab(u'id3', u'Title 3', None)]\n tabGroup2 = widgets.TabGroup(u'id', u'Hello', tabs=tabs)\n\n newGroup = widgets.TabGroup.mergeGroups(tabGroup1, tabGroup2)\n self.assertEquals(newGroup.id, u'id')\n self.assertEquals(newGroup.title, u'Hello')\n self.assertEquals(newGroup.tabs, tabGroup1.tabs + tabGroup2.tabs)", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def CopyAllSubElementsTo(self, other_group, ignore):\n # pylint: disable=protected-access\n collections_to_update = [\n (self._groups_to_load, other_group._groups_to_load),\n (self._commands_to_load, other_group._commands_to_load)]\n\n for src, dst in collections_to_update:\n for name, info in src.iteritems():\n if name in ignore:\n continue\n (module_dir, module_path, name, unused_track) = info\n dst[name] = (module_dir, module_path, name,\n other_group.ReleaseTrack())", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def 
test_drag_group_into_group(self):\r\n expected_ordering = [{self.container_title: [self.group_a, self.group_empty]},\r\n {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]},\r\n {self.group_b: [self.group_b_item_1, self.group_b_item_2]},\r\n {self.group_empty: []}]\r\n self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering)", "def test_list_role_assignment_using_inherited_sourced_groups(self):\n test_plan = {\n # A domain with 3 users, 3 groups, 3 projects, a second domain,\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},\n 1],\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'domain': 0,\n 'inherited_to_projects': True},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1,\n 'inherited_to_projects': True},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1.\n # We should see the inherited group assigned on the 3 projects\n # from domain 0, as well as the direct assignments.\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 2,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)", "def _copy_from_template(\n self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation\n ):\n new_blocks = set()\n\n new_children = list() # ordered list of the new children of new_parent_block_key\n\n for usage_key in source_keys:\n src_course_key = usage_key.course_key\n hashable_source_id = src_course_key.for_version(None)\n block_key = BlockKey(usage_key.block_type, usage_key.block_id)\n source_structure = source_structures[src_course_key]\n\n if block_key not in source_structure['blocks']:\n raise ItemNotFoundError(usage_key)\n source_block_info = source_structure['blocks'][block_key]\n\n # Compute a new block ID. 
This new block ID must be consistent when this\n # method is called with the same (source_key, dest_structure) pair\n unique_data = \"{}:{}:{}\".format(\n str(hashable_source_id).encode(\"utf-8\"),\n block_key.id,\n new_parent_block_key.id,\n )\n new_block_id = hashlib.sha1(unique_data.encode('utf-8')).hexdigest()[:20]\n new_block_key = BlockKey(block_key.type, new_block_id)\n\n # Now clone block_key to new_block_key:\n new_block_info = copy.deepcopy(source_block_info)\n # Note that new_block_info now points to the same definition ID entry as source_block_info did\n existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())\n # Inherit the Scope.settings values from 'fields' to 'defaults'\n new_block_info.defaults = new_block_info.fields\n\n # <workaround>\n # CAPA modules store their 'markdown' value (an alternate representation of their content)\n # in Scope.settings rather than Scope.content :-/\n # markdown is a field that really should not be overridable - it fundamentally changes the content.\n # capa modules also use a custom editor that always saves their markdown field to the metadata,\n # even if it hasn't changed, which breaks our override system.\n # So until capa modules are fixed, we special-case them and remove their markdown fields,\n # forcing the inherited version to use XML only.\n if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:\n del new_block_info.defaults['markdown']\n # </workaround>\n\n # Preserve any existing overrides\n new_block_info.fields = existing_block_info.fields\n\n if 'children' in new_block_info.defaults:\n del new_block_info.defaults['children'] # Will be set later\n\n new_block_info.edit_info = existing_block_info.edit_info\n new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version\n new_block_info.edit_info.update_version = dest_structure['_id']\n # Note we do not set 'source_version' - it's only used for copying identical blocks\n # from draft to published as part of publishing workflow.\n # Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.\n new_block_info.edit_info.edited_by = user_id\n new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)\n new_block_info.edit_info.original_usage = str(usage_key.replace(branch=None, version_guid=None))\n new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version\n dest_structure['blocks'][new_block_key] = new_block_info\n\n children = source_block_info.fields.get('children')\n if children:\n children = [src_course_key.make_usage_key(child.type, child.id) for child in children]\n new_blocks |= self._copy_from_template(\n source_structures, children, dest_structure, new_block_key, user_id, head_validation\n )\n\n new_blocks.add(new_block_key)\n # And add new_block_key to the list of new_parent_block_key's new children:\n new_children.append(new_block_key)\n\n # Update the children of new_parent_block_key\n dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children\n\n return new_blocks", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = 
Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 
'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def make(self):\n numberOfGroups = np.random.randint(1, len(self.getFirstParent().getGroups()))\n secParentGroups = np.random.choice(self.secondParent.getGroups(), numberOfGroups, replace=False)\n groups = []\n allSecElements = []\n numberOfElements = 0\n\n for grpSec in secParentGroups:\n allSecElements += grpSec.getElements()\n\n for grpFst in self.getFirstParent().getGroups():\n numberOfElements += len(grpFst.getElements())\n elements = list(set(grpFst.getElements()) - set(allSecElements))\n group = Group(grpFst.getIndex(), grpFst.getMinElements(), grpFst.getMaxElements())\n group.setElements(elements)\n groups.append(group)\n\n for grpSec in secParentGroups:\n for grpFst in groups:\n if grpSec.getIndex() == grpFst.getIndex():\n grpFst.addElements(grpSec.getElements())\n\n child = Individual(np.zeros(numberOfElements))\n child.setGroups(groups)\n\n return child", "def test_add_group(self):\n pass", "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def _resolve_grouping_node(group_node, group_tree, group_graph,\n target_graph):\n\n # Get the predecessors of the grouping node\n preds = nx.dfs_predecessors(group_tree, group_node)\n\n # Get a list of unique node identifiers among predecessors. 
These are\n # the nodes on which a subgraph will be induced.\n preds = list(set(list(preds.keys()) + list(preds.values())))\n\n # Induce a subgraph based on the nodes\n pred_group = group_graph.subgraph(preds).copy()\n\n # Set up edge dictionary\n edge_attrs = {}\n\n # Encode edge type information\n for s, t in pred_group.edges():\n\n # Add edge attributes to the dictionary\n edge_attrs[(s, t)] = {'kind': 'grouping'}\n\n # Set edge attributes\n nx.set_edge_attributes(pred_group, edge_attrs)\n\n # Add the nodes and edges from the subgraph to the connectivity graph\n target_graph.add_nodes_from(pred_group.nodes(data=True))\n target_graph.add_edges_from(pred_group.edges(data=True))", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def setup_group_workspaces(context):\n if context.readDataFile(\"marker.txt\") is None:\n return\n\n portal = context.getSite()\n if \"groups\" not in portal.objectIds():\n\n groups = portal[\n portal.invokeFactory(\"Folder\",id=\"groups\")]\n\n # set default properties\n groups.setTitle(\"groups\")\n groups.setDescription(\"Group workspaces container.\")\n groups._getWorkflowTool().doActionFor(groups, \"publish\" \"\")\n groups.setExcludeFromNav(True)\n groups.update() \n logger.info(\"Groups container created.\")", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def ensure_target_group_created(vpc, environment):\n name = environment + '-web'\n\n # If it already exists, create returns the existing data\n response = ELB.create_target_group(\n Name=name,\n Protocol='HTTP',\n Port=9000,\n VpcId=vpc.id,\n Matcher={\n 'HttpCode': '200,301'\n }\n )\n\n arn = response['TargetGroups'][0]['TargetGroupArn']\n\n return arn", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def add_source(self, group_source):\n if group_source.name in self._sources:\n raise ValueError(\"GroupSource '%s': name collision\" % \\\n group_source.name)\n self._sources[group_source.name] = group_source", "def test_list_role_assignment_using_sourced_groups(self):\n test_plan = {\n # The default domain with 3 users, 3 groups, 3 projects,\n # plus 3 roles.\n 'entities': {'domains': {'id': CONF.identity.default_domain_id,\n 'users': 3, 'groups': 3, 'projects': 3},\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 2,\n 'effective': True},\n 'results': [{'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)", "def copy_all_except_candidates(self):\n groupcopy = 
AssignmentGroup(parentnode=self.parentnode,\n name=self.name,\n is_open=self.is_open,\n delivery_status=self.delivery_status)\n groupcopy.full_clean()\n groupcopy.save()\n for examiner in self.examiners.all():\n groupcopy.examiners.create(relatedexaminer=examiner.relatedexaminer)\n for tagobj in self.tags.all():\n groupcopy.tags.create(tag=tagobj.tag)\n return groupcopy", "def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):\n orphans = set()\n destination_block = destination_blocks.get(block_key)\n new_block = source_blocks[block_key]\n if destination_block:\n # reorder children to correspond to whatever order holds for source.\n # remove any which source no longer claims (put into orphans)\n # add any which are being copied\n source_children = new_block.fields.get('children', [])\n existing_children = destination_block.fields.get('children', [])\n destination_reordered = SparseList()\n for child in existing_children:\n try:\n index = source_children.index(child)\n destination_reordered[index] = child\n except ValueError:\n orphans.add(BlockKey(*child))\n if blacklist != EXCLUDE_ALL:\n for index, child in enumerate(source_children):\n if child not in blacklist:\n destination_reordered[index] = child\n # the history of the published leaps between publications and only points to\n # previously published versions.\n previous_version = destination_block.edit_info.update_version\n destination_block = copy.deepcopy(new_block)\n destination_block.fields['children'] = destination_reordered.compact_list()\n destination_block.edit_info.previous_version = previous_version\n destination_block.edit_info.update_version = destination_version\n destination_block.edit_info.edited_by = user_id\n destination_block.edit_info.edited_on = datetime.datetime.now(UTC)\n else:\n destination_block = self._new_block(\n user_id, new_block.block_type,\n self._filter_blacklist(copy.copy(new_block.fields), blacklist),\n new_block.definition,\n destination_version,\n raw=True,\n asides=new_block.asides,\n block_defaults=new_block.defaults\n )\n # Extend the block's new edit_info with any extra edit_info fields from the source (e.g. 
original_usage):\n for key, val in new_block.edit_info.to_storable().items():\n if getattr(destination_block.edit_info, key) is None:\n setattr(destination_block.edit_info, key, val)\n\n # If the block we are copying from was itself a copy, then just\n # reference the original source, rather than the copy.\n destination_block.edit_info.source_version = (\n new_block.edit_info.source_version or new_block.edit_info.update_version\n )\n\n if blacklist != EXCLUDE_ALL:\n for child in destination_block.fields.get('children', []):\n if child not in blacklist:\n orphans.update(\n self._copy_subdag(\n user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist\n )\n )\n destination_blocks[block_key] = destination_block\n return orphans", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def test_taskgroup_shift(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1 >> Label(\"Group label\") >> group >> op4\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this 
case will never run\n pass", "def group_by_source(self, group_by_source):\n\n self._group_by_source = group_by_source", "async def add_parent_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n stmt = select(Group).filter(Group.parent_id is None).options(selectinload(Group.parent))\n result = await dbsession.execute(stmt)\n stmt = select(func.count(Group.id)).filter(Group.parent_id is None)\n result_count = await dbsession.execute(stmt)\n with click.progressbar(\n result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n ) as progress:\n for group in progress:\n if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n categories = apply_aat(group.value, merge=False)\n if categories:\n for category_list in categories:\n mapped = False\n for category in category_list:\n stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if not parent_group:\n parent_group = Group(\n value=category, label=category[0].upper() + category[1:], split=\"parent\"\n )\n dbsession.add(group)\n group.parent = parent_group\n mapped = True\n group = parent_group # noqa: PLW2901\n if group.parent_id:\n break\n if mapped:\n break\n else:\n mapped = False\n for category in apply_nlp(group.value):\n stmt = select(Group).filter(\n or_(Group.value == category, Group.value == inflection.pluralize(category))\n )\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if parent_group:\n group.parent = parent_group\n await dbsession.commit()\n mapped = True\n break\n if not mapped:\n if group.value not in [\"styles and periods\"]:\n for category in apply_nlp(group.value):\n hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()", "def _duplicate_item(parent_usage_key, duplicate_source_usage_key, display_name=None, user=None):\r\n store = get_modulestore(duplicate_source_usage_key)\r\n source_item = store.get_item(duplicate_source_usage_key)\r\n # Change the blockID to be unique.\r\n dest_usage_key = duplicate_source_usage_key.replace(name=uuid4().hex)\r\n category = dest_usage_key.category\r\n\r\n # Update the display name to indicate this is a duplicate (unless display name provided).\r\n duplicate_metadata = own_metadata(source_item)\r\n if display_name is not None:\r\n duplicate_metadata['display_name'] = display_name\r\n else:\r\n if source_item.display_name is None:\r\n duplicate_metadata['display_name'] = _(\"Duplicate of {0}\").format(source_item.category)\r\n else:\r\n duplicate_metadata['display_name'] = _(\"Duplicate of '{0}'\").format(source_item.display_name)\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=source_item.data if hasattr(source_item, 'data') else None,\r\n metadata=duplicate_metadata,\r\n system=source_item.runtime,\r\n )\r\n\r\n dest_module = get_modulestore(category).get_item(dest_usage_key)\r\n # Children are not automatically 
copied over (and not all xblocks have a 'children' attribute).\r\n # Because DAGs are not fully supported, we need to actually duplicate each child as well.\r\n if source_item.has_children:\r\n dest_module.children = []\r\n for child in source_item.children:\r\n dupe = _duplicate_item(dest_usage_key, child, user=user)\r\n dest_module.children.append(dupe)\r\n get_modulestore(dest_usage_key).update_item(dest_module, user.id if user else None)\r\n\r\n if not 'detached' in source_item.runtime.load_block_type(category)._class_tags:\r\n parent = get_modulestore(parent_usage_key).get_item(parent_usage_key)\r\n # If source was already a child of the parent, add duplicate immediately afterward.\r\n # Otherwise, add child to end.\r\n if duplicate_source_usage_key in parent.children:\r\n source_index = parent.children.index(duplicate_source_usage_key)\r\n parent.children.insert(source_index + 1, dest_usage_key)\r\n else:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent_usage_key).update_item(parent, user.id if user else None)\r\n\r\n return dest_usage_key", "def _duplicate_as_linked_tree(self, source_root):\n logging.debug(\"Started traversing %s \\'s tree for file linkage and directory duplication.\" % self.directory)\n # Create the containing directory that resides within the share\n within_share_dir_path = os.path.join(self.directory, os.path.basename(source_root))\n self._makedir(within_share_dir_path)\n for root, subdirectories, files in os.walk(source_root, followlinks=True):\n share_root = root.replace(str(source_root), within_share_dir_path, 1)\n for subdir in subdirectories:\n target = os.path.join(share_root, subdir)\n self._makedir(target)\n for file in files:\n source = os.path.join(root, file)\n target = os.path.join(share_root, file)\n self._link_files(source, target)", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def test_add_parent(self):\n _, _, groupa, groupb = create_objects()\n groupa.add_parent(groupb)\n assert groupb in groupa.parents\n assert groupa in groupb.children\n return (groupa, groupb)", "def add_subgroup(self, new_subgroup):\n self.subgroups[new_subgroup.get_title()] = new_subgroup", "def add_group(self):\n items = self.group_list.selectedItems()\n for item in items:\n self.parent.add_group_data(item.text())", "def clone(self):\n return _libsbml.GroupsExtension_clone(self)", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def group_image(directory, image, group):\r\n\tif os.path.exists(directory + \"\\\\\" + group):\r\n\t\tpass\r\n\telse:\r\n\t\ttry:\r\n\t\t\tos.mkdir(directory + '\\\\' + group)\r\n\t\t\tprint(\"Successfully created directory\", group)\r\n\t\texcept OSError:\r\n\t\t\tprint(\"Creation of directory failed.\")\r\n\ttry:\r\n\t\tshutil.copy(str(directory + '\\\\' + image), str(directory + \"\\\\\" + group + \"\\\\\" + 
image))\r\n\texcept OSError as OSe:\r\n\t\tprint(OSe)", "def generate_website_group_edges(website_group_json, dst):\n with open(website_group_json) as f_h:\n with gremlin_writer(GremlinEdgeCSV, dst, attributes=[]) as writer:\n for data in json_lines_file(f_h):\n root_id = data[\"id\"]\n websites = data[\"websites\"]\n for website in websites:\n writer.add(\n _id=get_id(root_id, website, {}),\n _from=root_id,\n to=website,\n label=WEBISTE_GROUP_EDGE_LABEL,\n attribute_map={}\n )", "def test_list_role_assignment_fails_with_userid_and_source_groups(self):\n group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)\n group = self.identity_api.create_group(group)\n self.assertRaises(exception.UnexpectedError,\n self.assignment_api.list_role_assignments,\n effective=True,\n user_id=self.user_foo['id'],\n source_from_group_ids=[group['id']])", "def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()", "def merge_groups(self, groups):\n if len(groups) < 2:\n raise ValidationError(gettext_lazy('Cannot merge less than 2 groups'))\n\n from devilry.apps.core.models import AssignmentGroupHistory\n\n target_group = groups.pop(0)\n # Check if we can merge\n for group in groups:\n group.can_merge(target_group)\n\n # Create or get target group history\n try:\n grouphistory = target_group.assignmentgrouphistory\n except AssignmentGroupHistory.DoesNotExist:\n grouphistory = AssignmentGroupHistory(assignment_group=target_group)\n # Insert groups in history\n grouphistory.merge_assignment_group_history(groups)\n\n # Merge groups\n with transaction.atomic():\n for group in groups:\n group.merge_into(target=target_group)\n group.set_all_target_feedbacksets_to_merge_type(target=target_group)\n group.create_new_first_attempt_for_target_group(target=target_group)\n grouphistory.save()", "def write_copy(file_source_path, original_file_name, **kwargs):\r\n\r\n group_dir, subject_dir, year_session_dir = None, None, None\r\n\r\n matched_groups = kwargs.get(\"matched_groups\")\r\n if matched_groups is not None:\r\n \"\"\"\r\n matched_groups[0] = year\r\n matched_groups[1] = session\r\n matched_groups[2] = subject group number with hyphen (if applicable)\r\n matched_groups[3] = subject group number (if applicable)\r\n matched_groups[4] = subject group name\r\n matched_groups[5] = subject name\r\n matched_groups[8] = paper number\r\n matched_groups[9] = further info\r\n \"\"\"\r\n\r\n # Handling computer science's change of group\r\n if \"Computer_science\" in matched_groups[5] and \"Mathematics\" in matched_groups[4]:\r\n group_dir = \"Group 4 - Sciences\"\r\n # Continuing regular group handing\r\n elif matched_groups[3] is None:\r\n group_dir = format_group_name(matched_groups[4])\r\n else:\r\n group_dir = matched_groups[2] + matched_groups[4]\r\n\r\n # Handling difficulty. 
Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n # This is where we handle deprecated/changed subject names\r\n subject = matched_groups[5]\r\n if \"Business_and_management\" in subject:\r\n subject = subject.replace(\"Business_and_management\", \"Business_management\")\r\n elif \"Belarussian\" in subject:\r\n subject = subject.replace(\"Belarussian\", \"Belarusian\")\r\n elif \"Biology_HL\" in subject:\r\n subject = subject.replace(\"Biology_HL\", \"Biology\")\r\n elif \"Biology_SL\" in subject:\r\n subject = subject.replace(\"Biology_SL\", \"Biology\")\r\n elif \"Ecosystems_and_societies_SL\" in subject:\r\n subject = subject.replace(\"Ecosystems_and_societies_SL\", \"Ecosystems_and_societies\")\r\n elif \"Environmental_systems_SL\" in subject:\r\n subject = subject.replace(\"Environmental_systems_SL\", \"Environmental_systems\")\r\n elif \"History_route_1\" in subject:\r\n subject = subject.replace(\"History_route_1\", \"History\")\r\n elif \"History_route_2\" in subject:\r\n subject = subject.replace(\"History_route_2\", \"History\")\r\n elif \"History_of_the_Islamic_World\" in subject:\r\n subject = subject.replace(\"History_of_the_Islamic_World\", \"Islamic_history\")\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{matched_groups[0]} {matched_groups[1]} Examination Session\"\r\n\r\n music_groups = kwargs.get(\"music_groups\")\r\n if music_groups is not None:\r\n \"\"\"\r\n music_groups[0] = year\r\n music_groups[1] = session\r\n music_groups[2] = subject group number with hyphen (if applicable)\r\n music_groups[3] = subject group number (if applicable)\r\n music_groups[4] = subject group name\r\n music_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n subject = \"Music\"\r\n\r\n # Handling difficulty. 
Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{music_groups[0]} {music_groups[1]} Examination Session\"\r\n\r\n audio_groups = kwargs.get(\"audio_groups\")\r\n if audio_groups is not None:\r\n \"\"\"\r\n audio_groups[0] = year\r\n audio_groups[1] = session\r\n audio_groups[2] = subject group number with hyphen (if applicable)\r\n audio_groups[3] = subject group number (if applicable)\r\n audio_groups[4] = subject group name (contains '\\\\audio' in some instances)\r\n audio_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n else:\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"Music_{difficulty}\"\r\n year_session_dir = f\"{audio_groups[0]} {audio_groups[1]} Examination Session\"\r\n year_session_dir = os.path.join(year_session_dir, \"audio\")\r\n\r\n if None not in [group_dir, subject_dir, year_session_dir]:\r\n new_filepath = os.path.join(abs_destination_directory, group_dir, subject_dir, year_session_dir,\r\n original_file_name)\r\n os.makedirs(os.path.dirname(new_filepath), exist_ok=True)\r\n shutil.copy(file_source_path, new_filepath)\r\n else:\r\n print(f\"CRITICAL ERROR: File had 'None' path attributes: {file_source_path}\")", "def test_clone_scenario(self):\n pass", "def _create_group_rules(self, group_object):\n\n for rule in ctx.node.properties['rules']:\n\n if 'src_group_id' in rule:\n\n if 'cidr_ip' in rule:\n raise NonRecoverableError(\n 'You need to pass either src_group_id OR cidr_ip.')\n\n if not group_object.vpc_id:\n src_group_object = self.get_resource()\n else:\n src_group_object = self._get_vpc_security_group_from_name(\n rule['src_group_id'])\n\n if not src_group_object:\n raise NonRecoverableError(\n 'Supplied src_group_id {0} does not exist in '\n 'the given account.'.format(rule['src_group_id']))\n\n del rule['src_group_id']\n rule['src_group'] = src_group_object\n\n elif 'cidr_ip' not in rule:\n raise NonRecoverableError(\n 'You need to pass either src_group_id OR cidr_ip.')\n\n try:\n group_object.authorize(**rule)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n except Exception as e:\n self._delete_security_group(group_object.id)\n raise", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_groups = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in 
res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def add_to_group(self, group):\n\n if not self.in_group(group):\n self.secondary_groups.append(group)\n return self", "def create_subgroup_accounts(request_ctx, account_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def construct_relation_group(\n self,\n group_name,\n domain_type,\n range_type,\n group_members = None):\n if not group_members:\n group_members = sorted([\n rel for rel in self.get_relation_names()\n if self.get_domain(rel) == domain_type and\n self.get_range(rel) == range_type\n ])\n if self.is_type(group_name):\n raise RelationNameError(group_name, 'Group already exists.')\n\n self.declare_entity_type(\n group_name, fixed_vocab=group_members, unknown_marker=None)\n\n for r in group_members:\n if self.is_dense(r):\n raise ValueError('Dense relation %r is unsupported.' % r)\n\n group = RelationGroup(group_name, group_members)\n self._group[group_name] = group\n # declare the schema for the necessary extension to the KG\n self.declare_relation(group.relation_rel, group.triple_type, group_name)\n self.declare_relation(group.subject_rel, group.triple_type, domain_type)\n self.declare_relation(group.object_rel, group.triple_type, range_type)\n self.declare_relation(group.weight_rel, group.triple_type,\n group.triple_type)\n # relation i in this group has num_rows[i] rows\n try:\n num_rows = [self._np_initval[r].data.shape[0] for r in group.members]\n except KeyError as err:\n raise RelationNameError(\n str(err), 'An undefined relation was encountered. 
'\n 'All relations in a relation group must be defined before calling '\n 'construct_relation_group.')\n total_num_rows = sum(num_rows)\n # names of all those triples\n self.extend_type(\n group.triple_type,\n [group.triple_prefix + str(i) for i in range(total_num_rows)])\n # now populate the sparse matrixes\n triple_indices = np.arange(total_num_rows, dtype='int32')\n rel_indices = np.hstack([\n np.ones(num_rows[i], dtype='int32') * i\n for i in range(len(group.members))\n ])\n subj_indices = np.hstack([self._np_initval[r].col for r in group.members])\n obj_indices = np.hstack([self._np_initval[r].row for r in group.members])\n weight_data = np.hstack([self._np_initval[r].data for r in group.members])\n ones_data = np.ones_like(weight_data)\n # weights are in a diagonal matrix\n self._np_initval[group.weight_rel] = scipy.sparse.coo_matrix(\n (weight_data, (triple_indices, triple_indices)),\n shape=(total_num_rows, total_num_rows),\n dtype='float32')\n self._np_initval[group.relation_rel] = scipy.sparse.coo_matrix(\n (weight_data, (rel_indices, triple_indices)),\n shape=(len(group.members), total_num_rows),\n dtype='float32')\n self._np_initval[group.subject_rel] = scipy.sparse.coo_matrix(\n (ones_data, (subj_indices, triple_indices)),\n shape=(self.get_max_id(domain_type), total_num_rows),\n dtype='float32')\n self._np_initval[group.object_rel] = scipy.sparse.coo_matrix(\n (ones_data, (obj_indices, triple_indices)),\n shape=(self.get_max_id(range_type), total_num_rows),\n dtype='float32')\n self.freeze(group.triple_type, unknown_marker=None)\n return group", "def test_merge_nooverlap(self):\n self.open_url('/group/list')\n \n # Sanity check\n el = self.wd.find_element(By.LINK_TEXT, \"Second Group\")\n el.click()\n time.sleep(0.5)\n \n self.assert_num_rows(1)\n \n self.open_url('/group/list')\n self.wd.find_element(By.ID, \"subnav-merge\").click()\n time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...\n self.assertEquals('Merge Group', self.wd.title)\n \n sel = Select(self.wd.find_element(By.ID, \"from_group_id\"))\n sel.select_by_visible_text(\"6th group\")\n \n sel = Select(self.wd.find_element(By.ID, \"to_group_id\"))\n sel.select_by_visible_text(\"Second Group\")\n \n self.submit_form(\"merge_form\")\n \n self.open_url('/group/list')\n self.assert_not_in_list_table(\"6th group\")\n \n el = self.wd.find_element(By.LINK_TEXT, \"Second Group\")\n el.click()\n \n self.assert_num_rows(3)", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def interactive_insert(group_name=None, package_name=None):\n file_name = \"\"\n file_destination = \"\"\n file_source = None\n file_create_link = False\n file_sudo = False\n file_comments = []\n\n def ask_file_source():\n return message.question(\"What is the full source file path?\")\n\n def ask_sudo():\n return message.question(\"Is sudo needed for this operation?\", \"boolean\")\n\n while True:\n file_source = None\n file_create_link = False\n file_sudo = False\n\n message.heading(\"Creating a new file. 
(${vars} is supported, '~' is not)\")\n if group_name is not None:\n message.info(f\"Current group: {group_name}\")\n if package_name is not None:\n message.info(f\"Current package: {package_name}\")\n\n file_destination = message.question(\n \"Where will this file be (created/linked/copied) to? (no basename)\"\n )\n\n if message.question(\n \"Will this file be linked to [destination]?\", \"boolean\"\n ):\n file_create_link = True\n file_source = ask_file_source()\n file_sudo = ask_sudo()\n elif message.question(\n \"Will this file be copied to [destination]?\", \"boolean\"\n ):\n file_source = ask_file_source()\n file_sudo = ask_sudo()\n\n if file_source is not None:\n [_, file_name] = os.path.split(os.path.expandvars(file_source))\n else:\n file_name = message.question(\"What will be the file's name?\")\n\n if message.question(\n \"Will the file have comments to aid the user?\", \"boolean\"\n ):\n while True:\n comment = message.question(\"New comment:\")\n file_comments.append(comment)\n if not message.question(\"Add another comment?\", \"boolean\"):\n break\n\n new_file = File(\n file_name,\n file_destination,\n os.path.split(file_source)[0] if file_source is not None else None,\n \"\",\n file_create_link,\n file_sudo,\n file_comments,\n )\n\n new_file.evaluate()\n\n message.info(\n f\"\"\"File info:\n [Name]: '{new_file.name}'\n [Destination]: '{new_file.path_destination}'\n [Source]: '{new_file.path_source}'\n [Link?]: '{'Yes' if new_file.create_link else 'No'}'\n [Need superuser?]: '{'Yes' if new_file.sudo else 'No'}'\n [Comments]: {new_file.comments}\"\"\"\n )\n if message.question(\"Confirm?\", \"boolean\"):\n break\n\n return new_file", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def do_push_group(dbsync, group):\n pass", "def test_taskgroup_set(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1.set_downstream(group, Label(\"Group label\"))\n group.set_downstream(op4)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def clone(self):\n return _libsbml.ListOfGroups_clone(self)", "def add_group(self, groupname, grouptitle, path_to_group='/'):\n self.open_db()\n group = self.group_exists(path_to_group, groupname)\n if group is False:\n group = self.h5file.create_group(path_to_group, groupname,\n grouptitle)\n return group", "def _merge_feedbackset_into(self, target):\n from devilry.devilry_group.models import FeedbackSet\n\n # Map feedbackset_type to merge prefix\n feedbackset_type_merge_map = {\n FeedbackSet.FEEDBACKSET_TYPE_FIRST_ATTEMPT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_FIRST_ATTEMPT,\n FeedbackSet.FEEDBACKSET_TYPE_NEW_ATTEMPT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_NEW_ATTEMPT,\n FeedbackSet.FEEDBACKSET_TYPE_RE_EDIT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_RE_EDIT\n }\n\n feedbacksets = 
self.feedbackset_set.order_by_deadline_datetime()\\\n .select_related('group__parentnode')\n\n for feedbackset in feedbacksets:\n # change feedbackset_type to merge prefix\n if feedbackset.feedbackset_type in list(feedbackset_type_merge_map.keys()):\n feedbackset.feedbackset_type = feedbackset_type_merge_map[feedbackset.feedbackset_type]\n feedbackset.group = target\n feedbackset.save()", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def test_groups_group_ref_put(self):\n pass", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def test_duplicate_groups(self):\n\n auag = UsersAndGroups()\n\n # create a duplicate with default flag to raise an error.\n auag.add_group(Group(name=\"group1\"))\n with self.assertRaises(Exception):\n auag.add_group(Group(name=\"group1\"))\n\n # create with overwrite.\n auag.add_group(\n Group(name=\"group2\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group2\")\n self.assertEqual(u.name, \"group2\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_group(\n Group(name=\"group2\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group2\")\n self.assertEqual(u.name, \"group2\")\n self.assertEqual(u.groupNames, [\"group3\"])\n\n # create with update.\n auag.add_group(\n Group(name=\"group3\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group3\")\n self.assertEqual(u.name, \"group3\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_group(\n Group(name=\"group3\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.UPDATE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group3\")\n self.assertEqual(u.groupNames, [\"group2\", \"group3\"])", "def test_rsync_set_group(self):\n \n root = tempfile.mkdtemp(prefix=\"rsync_test_set_group_\")\n avail_groups = os.getgroups()\n exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]\n \n # Create some files to move\n to_copy = self._create_test_files(root)\n \n # Run rsync\n with open(os.devnull, 'w') as f:\n old_stdout = sys.stdout\n sys.stdout = f\n rsync_files(to_copy,sys.stdout,exp_group,False)\n sys.stdout = old_stdout\n \n # Verify the copy process set the correct group on created directories\n for ddir in set([d[1] for d in to_copy]):\n gid = os.stat(ddir).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on directory. Group is {}\".format(exp_group,\n obs_group))\n \n # Verify the copy process set the correct group\n for src, ddir, dname in to_copy:\n dfile = os.path.join(ddir,dname)\n gid = os.stat(dfile).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on file. 
Group is {}\".format(exp_group,\n obs_group))", "def test_drag_into_different_group(self):\r\n expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},\r\n {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]},\r\n {self.group_b: [self.group_b_item_2]},\r\n {self.group_empty: []}]\r\n self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering)", "def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))", "def _internal_copy(source, source_path, target, target_path, maintain_flag):\n if maintain_flag:\n try:\n target.create_group(target_path)\n except ValueError:\n pass # In case the copy_to() function failed previously and the group already exists.\n\n if target_path == \"/\":\n source.copy(target_path, \"/\") if source == target else source.copy(\n target_path, target\n )\n else:\n if maintain_flag:\n if dest_path != \"\":\n source.copy(source_path, target[dest_path])\n else:\n source.copy(source_path, target)\n else:\n group_name_old = source_path.split(\"/\")[-1]\n try:\n target.create_group(\"/tmp\")\n except ValueError:\n pass\n source.copy(source_path, target[\"/tmp\"])\n try:\n target.move(\"/tmp/\" + group_name_old, target_path)\n except ValueError:\n del target[dest_path]\n target.move(\"/tmp/\" + group_name_old, target_path)\n del target[\"/tmp\"]" ]
[ "0.5898711", "0.5874123", "0.5813369", "0.57389224", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400297", "0.5225952", "0.5201779", "0.5191769", "0.5140609", "0.50881827", "0.50875765", "0.50842255", "0.50807875", "0.5079402", "0.5074019", "0.50512403", "0.5050658", "0.50449747", "0.50314426", "0.501575", "0.5015151", "0.5008633", "0.5008148", "0.50077254", "0.49875265", "0.49526322", "0.49441025", "0.49416566", "0.49363503", "0.49342582", "0.49342582", "0.4924192", "0.4912886", "0.49026668", "0.49015865", "0.49010867", "0.48911935", "0.4890151", "0.48841816", "0.48499066", "0.4844977", "0.4841554", "0.48306534", "0.48294988", "0.4825032", "0.48163125", "0.48045567", "0.4800212", "0.47978723", "0.47894487", "0.47880772", "0.4760176", "0.47558516", "0.47499806", "0.47430947", "0.47366083", "0.4732884", "0.47253084", "0.47247282", "0.47220522", "0.4721433", "0.47198492", "0.47170034", "0.47099406", "0.470273", "0.46990228", "0.46927205", "0.46876273", "0.46869034", "0.46855444", "0.46782595", "0.46760097", "0.4666705", "0.46601993", "0.4642347", "0.46380064", "0.46338528", "0.46334374", "0.46315676", "0.4629545", "0.46279424", "0.46268773", "0.4621315", "0.46150658" ]
0.5706337
4
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):
    # POST to the account-scoped outcome-group import endpoint; the server
    # creates a new subgroup under outcome group `id` mirroring the source
    # group's structure and outcome links (see the description above).
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'
    payload = {
        'source_outcome_group_id' : source_outcome_group_id,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
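A minimal usage sketch for import_outcome_group_accounts, assuming request_ctx carries base_api_url and authentication as in the surrounding snippets; all ids below are hypothetical placeholders:

# Hypothetical ids; the source group must not be the root outcome group of its context.
response = import_outcome_group_accounts(
    request_ctx,
    account_id=1,                # destination account (placeholder)
    id=10,                       # destination outcome group within that account
    source_outcome_group_id=42,  # group whose structure is copied as new links
)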
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : 
outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_convert_to_existing_group2(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance_all')\n inventoryloader.convert_group('glance_registry', 'glance_all')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance_registry' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert not inventoryloader.groups['glance_all'].has_group('glance_registry')\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.groups['glance_all'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_all'].vars", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def test_convert_to_existing_group(self, inventoryloader):\n 
inventoryloader.convert_group('glance_api', 'glance_registry')\n assert 'glance_api' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance_registry'].has_host('localhost')\n assert inventoryloader.groups['glance_registry'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_registry'].vars", "def addgroup(self, abspath=None, sourcetree=pbxconsts.SOURCE_TREE.group, name=None, move=True):\n group_name = os.path.basename(abspath) if name is None or len(name) == 0 else name\n abspath = abspath if not abspath is None else self.realpath()\n subgroup = func.get_list_item(func.take(\\\n lambda o: o.isa == u'PBXGroup' and o.realpath() == abspath \\\n and o.displayname() == group_name, self.pbx_children), 0)\n if subgroup is None:\n subgroup = self.project().new_object(u'PBXGroup')\n pbxpath.set_path_with_source_tree(subgroup, abspath, source_tree=sourcetree, \\\n parent_group=self)\n if not name is None:\n subgroup.pbx_name = name\n self.addchild(subgroup, move=move)\n return subgroup", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def clone(self):\n return _libsbml.Group_clone(self)", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, 
**request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_from_src(self, cgsnapshot_id, source_cgid, name=None,\n description=None, user_id=None,\n project_id=None):\n body = {'consistencygroup-from-src': {'name': name,\n 'description': description,\n 'cgsnapshot_id': cgsnapshot_id,\n 'source_cgid': source_cgid,\n 'user_id': user_id,\n 'project_id': project_id,\n 'status': \"creating\",\n }}\n\n self.run_hooks('modify_body_for_update', body,\n 'consistencygroup-from-src')\n resp, body = self.api.client.post(\n \"/consistencygroups/create_from_src\", body=body)\n return common_base.DictWithMeta(body['consistencygroup'], resp)", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n 
node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def copyGroupFrom(self, groupName, sourceDesign, sourceProject=None, sourceProjectPath=None):\n oName = self.project_name\n if sourceProject == oName or sourceProject is None:\n oSrcProject = self._desktop.GetActiveProject()\n else:\n self._desktop.OpenProject(sourceProjectPath)\n oSrcProject = self._desktop.SetActiveProject(sourceProject)\n\n oDesign = oSrcProject.SetActiveDesign(sourceDesign)\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\n oEditor.Copy([\"NAME:Selections\", \"Selections:=\", groupName])\n\n self.modeler.oeditor.Paste()\n self.modeler.primitives.refresh_all_ids()\n self.materials._load_from_project()\n return True", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def test_does_not_return_duplicate_groups(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n 
organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n GroupResolution.objects.create(\n group=self.group,\n release=self.release,\n type=GroupResolution.Type.in_release,\n )\n\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def test_mergeGroups(self):\n tabs = [\n widgets.Tab(u'id1', u'Title 1', None),\n widgets.Tab(u'id2', u'Title 2', None)]\n tabGroup1 = widgets.TabGroup(u'id', u'Title', tabs=tabs)\n tabs = [\n widgets.Tab(u'id3', u'Title 3', None)]\n tabGroup2 = widgets.TabGroup(u'id', u'Hello', tabs=tabs)\n\n newGroup = widgets.TabGroup.mergeGroups(tabGroup1, tabGroup2)\n self.assertEquals(newGroup.id, u'id')\n self.assertEquals(newGroup.title, u'Hello')\n self.assertEquals(newGroup.tabs, tabGroup1.tabs + tabGroup2.tabs)", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def CopyAllSubElementsTo(self, other_group, ignore):\n # pylint: disable=protected-access\n collections_to_update = [\n (self._groups_to_load, other_group._groups_to_load),\n (self._commands_to_load, other_group._commands_to_load)]\n\n for src, dst in collections_to_update:\n for name, info in src.iteritems():\n if name in ignore:\n continue\n (module_dir, module_path, name, unused_track) = info\n dst[name] = (module_dir, module_path, name,\n other_group.ReleaseTrack())", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_drag_group_into_group(self):\r\n expected_ordering = 
[{self.container_title: [self.group_a, self.group_empty]},\r\n {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]},\r\n {self.group_b: [self.group_b_item_1, self.group_b_item_2]},\r\n {self.group_empty: []}]\r\n self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering)", "def test_list_role_assignment_using_inherited_sourced_groups(self):\n test_plan = {\n # A domain with 3 users, 3 groups, 3 projects, a second domain,\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},\n 1],\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'domain': 0,\n 'inherited_to_projects': True},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1,\n 'inherited_to_projects': True},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1.\n # We should see the inherited group assigned on the 3 projects\n # from domain 0, as well as the direct assignments.\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 2,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)", "def _copy_from_template(\n self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation\n ):\n new_blocks = set()\n\n new_children = list() # ordered list of the new children of new_parent_block_key\n\n for usage_key in source_keys:\n src_course_key = usage_key.course_key\n hashable_source_id = src_course_key.for_version(None)\n block_key = BlockKey(usage_key.block_type, usage_key.block_id)\n source_structure = source_structures[src_course_key]\n\n if block_key not in source_structure['blocks']:\n raise ItemNotFoundError(usage_key)\n source_block_info = source_structure['blocks'][block_key]\n\n # Compute a new block ID. 
This new block ID must be consistent when this\n # method is called with the same (source_key, dest_structure) pair\n unique_data = \"{}:{}:{}\".format(\n str(hashable_source_id).encode(\"utf-8\"),\n block_key.id,\n new_parent_block_key.id,\n )\n new_block_id = hashlib.sha1(unique_data.encode('utf-8')).hexdigest()[:20]\n new_block_key = BlockKey(block_key.type, new_block_id)\n\n # Now clone block_key to new_block_key:\n new_block_info = copy.deepcopy(source_block_info)\n # Note that new_block_info now points to the same definition ID entry as source_block_info did\n existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())\n # Inherit the Scope.settings values from 'fields' to 'defaults'\n new_block_info.defaults = new_block_info.fields\n\n # <workaround>\n # CAPA modules store their 'markdown' value (an alternate representation of their content)\n # in Scope.settings rather than Scope.content :-/\n # markdown is a field that really should not be overridable - it fundamentally changes the content.\n # capa modules also use a custom editor that always saves their markdown field to the metadata,\n # even if it hasn't changed, which breaks our override system.\n # So until capa modules are fixed, we special-case them and remove their markdown fields,\n # forcing the inherited version to use XML only.\n if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:\n del new_block_info.defaults['markdown']\n # </workaround>\n\n # Preserve any existing overrides\n new_block_info.fields = existing_block_info.fields\n\n if 'children' in new_block_info.defaults:\n del new_block_info.defaults['children'] # Will be set later\n\n new_block_info.edit_info = existing_block_info.edit_info\n new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version\n new_block_info.edit_info.update_version = dest_structure['_id']\n # Note we do not set 'source_version' - it's only used for copying identical blocks\n # from draft to published as part of publishing workflow.\n # Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.\n new_block_info.edit_info.edited_by = user_id\n new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)\n new_block_info.edit_info.original_usage = str(usage_key.replace(branch=None, version_guid=None))\n new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version\n dest_structure['blocks'][new_block_key] = new_block_info\n\n children = source_block_info.fields.get('children')\n if children:\n children = [src_course_key.make_usage_key(child.type, child.id) for child in children]\n new_blocks |= self._copy_from_template(\n source_structures, children, dest_structure, new_block_key, user_id, head_validation\n )\n\n new_blocks.add(new_block_key)\n # And add new_block_key to the list of new_parent_block_key's new children:\n new_children.append(new_block_key)\n\n # Update the children of new_parent_block_key\n dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children\n\n return new_blocks", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = 
Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 
'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def make(self):\n numberOfGroups = np.random.randint(1, len(self.getFirstParent().getGroups()))\n secParentGroups = np.random.choice(self.secondParent.getGroups(), numberOfGroups, replace=False)\n groups = []\n allSecElements = []\n numberOfElements = 0\n\n for grpSec in secParentGroups:\n allSecElements += grpSec.getElements()\n\n for grpFst in self.getFirstParent().getGroups():\n numberOfElements += len(grpFst.getElements())\n elements = list(set(grpFst.getElements()) - set(allSecElements))\n group = Group(grpFst.getIndex(), grpFst.getMinElements(), grpFst.getMaxElements())\n group.setElements(elements)\n groups.append(group)\n\n for grpSec in secParentGroups:\n for grpFst in groups:\n if grpSec.getIndex() == grpFst.getIndex():\n grpFst.addElements(grpSec.getElements())\n\n child = Individual(np.zeros(numberOfElements))\n child.setGroups(groups)\n\n return child", "def test_add_group(self):\n pass", "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def _resolve_grouping_node(group_node, group_tree, group_graph,\n target_graph):\n\n # Get the predecessors of the grouping node\n preds = nx.dfs_predecessors(group_tree, group_node)\n\n # Get a list of unique node identifiers among predecessors. 
These are\n # the nodes on which a subgraph will be induced.\n preds = list(set(list(preds.keys()) + list(preds.values())))\n\n # Induce a subgraph based on the nodes\n pred_group = group_graph.subgraph(preds).copy()\n\n # Set up edge dictionary\n edge_attrs = {}\n\n # Encode edge type information\n for s, t in pred_group.edges():\n\n # Add edge attributes to the dictionary\n edge_attrs[(s, t)] = {'kind': 'grouping'}\n\n # Set edge attributes\n nx.set_edge_attributes(pred_group, edge_attrs)\n\n # Add the nodes and edges from the subgraph to the connectivity graph\n target_graph.add_nodes_from(pred_group.nodes(data=True))\n target_graph.add_edges_from(pred_group.edges(data=True))", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def setup_group_workspaces(context):\n if context.readDataFile(\"marker.txt\") is None:\n return\n\n portal = context.getSite()\n if \"groups\" not in portal.objectIds():\n\n groups = portal[\n portal.invokeFactory(\"Folder\",id=\"groups\")]\n\n # set default properties\n groups.setTitle(\"groups\")\n groups.setDescription(\"Group workspaces container.\")\n groups._getWorkflowTool().doActionFor(groups, \"publish\" \"\")\n groups.setExcludeFromNav(True)\n groups.update() \n logger.info(\"Groups container created.\")", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def ensure_target_group_created(vpc, environment):\n name = environment + '-web'\n\n # If it already exists, create returns the existing data\n response = ELB.create_target_group(\n Name=name,\n Protocol='HTTP',\n Port=9000,\n VpcId=vpc.id,\n Matcher={\n 'HttpCode': '200,301'\n }\n )\n\n arn = response['TargetGroups'][0]['TargetGroupArn']\n\n return arn", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def add_source(self, group_source):\n if group_source.name in self._sources:\n raise ValueError(\"GroupSource '%s': name collision\" % \\\n group_source.name)\n self._sources[group_source.name] = group_source", "def test_list_role_assignment_using_sourced_groups(self):\n test_plan = {\n # The default domain with 3 users, 3 groups, 3 projects,\n # plus 3 roles.\n 'entities': {'domains': {'id': CONF.identity.default_domain_id,\n 'users': 3, 'groups': 3, 'projects': 3},\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 2,\n 'effective': True},\n 'results': [{'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)", "def copy_all_except_candidates(self):\n groupcopy = 
AssignmentGroup(parentnode=self.parentnode,\n name=self.name,\n is_open=self.is_open,\n delivery_status=self.delivery_status)\n groupcopy.full_clean()\n groupcopy.save()\n for examiner in self.examiners.all():\n groupcopy.examiners.create(relatedexaminer=examiner.relatedexaminer)\n for tagobj in self.tags.all():\n groupcopy.tags.create(tag=tagobj.tag)\n return groupcopy", "def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):\n orphans = set()\n destination_block = destination_blocks.get(block_key)\n new_block = source_blocks[block_key]\n if destination_block:\n # reorder children to correspond to whatever order holds for source.\n # remove any which source no longer claims (put into orphans)\n # add any which are being copied\n source_children = new_block.fields.get('children', [])\n existing_children = destination_block.fields.get('children', [])\n destination_reordered = SparseList()\n for child in existing_children:\n try:\n index = source_children.index(child)\n destination_reordered[index] = child\n except ValueError:\n orphans.add(BlockKey(*child))\n if blacklist != EXCLUDE_ALL:\n for index, child in enumerate(source_children):\n if child not in blacklist:\n destination_reordered[index] = child\n # the history of the published leaps between publications and only points to\n # previously published versions.\n previous_version = destination_block.edit_info.update_version\n destination_block = copy.deepcopy(new_block)\n destination_block.fields['children'] = destination_reordered.compact_list()\n destination_block.edit_info.previous_version = previous_version\n destination_block.edit_info.update_version = destination_version\n destination_block.edit_info.edited_by = user_id\n destination_block.edit_info.edited_on = datetime.datetime.now(UTC)\n else:\n destination_block = self._new_block(\n user_id, new_block.block_type,\n self._filter_blacklist(copy.copy(new_block.fields), blacklist),\n new_block.definition,\n destination_version,\n raw=True,\n asides=new_block.asides,\n block_defaults=new_block.defaults\n )\n # Extend the block's new edit_info with any extra edit_info fields from the source (e.g. 
original_usage):\n for key, val in new_block.edit_info.to_storable().items():\n if getattr(destination_block.edit_info, key) is None:\n setattr(destination_block.edit_info, key, val)\n\n # If the block we are copying from was itself a copy, then just\n # reference the original source, rather than the copy.\n destination_block.edit_info.source_version = (\n new_block.edit_info.source_version or new_block.edit_info.update_version\n )\n\n if blacklist != EXCLUDE_ALL:\n for child in destination_block.fields.get('children', []):\n if child not in blacklist:\n orphans.update(\n self._copy_subdag(\n user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist\n )\n )\n destination_blocks[block_key] = destination_block\n return orphans", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def test_taskgroup_shift(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1 >> Label(\"Group label\") >> group >> op4\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this 
case will never run\n pass", "def group_by_source(self, group_by_source):\n\n self._group_by_source = group_by_source", "async def add_parent_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n stmt = select(Group).filter(Group.parent_id is None).options(selectinload(Group.parent))\n result = await dbsession.execute(stmt)\n stmt = select(func.count(Group.id)).filter(Group.parent_id is None)\n result_count = await dbsession.execute(stmt)\n with click.progressbar(\n result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n ) as progress:\n for group in progress:\n if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n categories = apply_aat(group.value, merge=False)\n if categories:\n for category_list in categories:\n mapped = False\n for category in category_list:\n stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if not parent_group:\n parent_group = Group(\n value=category, label=category[0].upper() + category[1:], split=\"parent\"\n )\n dbsession.add(group)\n group.parent = parent_group\n mapped = True\n group = parent_group # noqa: PLW2901\n if group.parent_id:\n break\n if mapped:\n break\n else:\n mapped = False\n for category in apply_nlp(group.value):\n stmt = select(Group).filter(\n or_(Group.value == category, Group.value == inflection.pluralize(category))\n )\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if parent_group:\n group.parent = parent_group\n await dbsession.commit()\n mapped = True\n break\n if not mapped:\n if group.value not in [\"styles and periods\"]:\n for category in apply_nlp(group.value):\n hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()", "def _duplicate_item(parent_usage_key, duplicate_source_usage_key, display_name=None, user=None):\r\n store = get_modulestore(duplicate_source_usage_key)\r\n source_item = store.get_item(duplicate_source_usage_key)\r\n # Change the blockID to be unique.\r\n dest_usage_key = duplicate_source_usage_key.replace(name=uuid4().hex)\r\n category = dest_usage_key.category\r\n\r\n # Update the display name to indicate this is a duplicate (unless display name provided).\r\n duplicate_metadata = own_metadata(source_item)\r\n if display_name is not None:\r\n duplicate_metadata['display_name'] = display_name\r\n else:\r\n if source_item.display_name is None:\r\n duplicate_metadata['display_name'] = _(\"Duplicate of {0}\").format(source_item.category)\r\n else:\r\n duplicate_metadata['display_name'] = _(\"Duplicate of '{0}'\").format(source_item.display_name)\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=source_item.data if hasattr(source_item, 'data') else None,\r\n metadata=duplicate_metadata,\r\n system=source_item.runtime,\r\n )\r\n\r\n dest_module = get_modulestore(category).get_item(dest_usage_key)\r\n # Children are not automatically 
copied over (and not all xblocks have a 'children' attribute).\r\n # Because DAGs are not fully supported, we need to actually duplicate each child as well.\r\n if source_item.has_children:\r\n dest_module.children = []\r\n for child in source_item.children:\r\n dupe = _duplicate_item(dest_usage_key, child, user=user)\r\n dest_module.children.append(dupe)\r\n get_modulestore(dest_usage_key).update_item(dest_module, user.id if user else None)\r\n\r\n if not 'detached' in source_item.runtime.load_block_type(category)._class_tags:\r\n parent = get_modulestore(parent_usage_key).get_item(parent_usage_key)\r\n # If source was already a child of the parent, add duplicate immediately afterward.\r\n # Otherwise, add child to end.\r\n if duplicate_source_usage_key in parent.children:\r\n source_index = parent.children.index(duplicate_source_usage_key)\r\n parent.children.insert(source_index + 1, dest_usage_key)\r\n else:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent_usage_key).update_item(parent, user.id if user else None)\r\n\r\n return dest_usage_key", "def _duplicate_as_linked_tree(self, source_root):\n logging.debug(\"Started traversing %s \\'s tree for file linkage and directory duplication.\" % self.directory)\n # Create the containing directory that resides within the share\n within_share_dir_path = os.path.join(self.directory, os.path.basename(source_root))\n self._makedir(within_share_dir_path)\n for root, subdirectories, files in os.walk(source_root, followlinks=True):\n share_root = root.replace(str(source_root), within_share_dir_path, 1)\n for subdir in subdirectories:\n target = os.path.join(share_root, subdir)\n self._makedir(target)\n for file in files:\n source = os.path.join(root, file)\n target = os.path.join(share_root, file)\n self._link_files(source, target)", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def test_add_parent(self):\n _, _, groupa, groupb = create_objects()\n groupa.add_parent(groupb)\n assert groupb in groupa.parents\n assert groupa in groupb.children\n return (groupa, groupb)", "def add_subgroup(self, new_subgroup):\n self.subgroups[new_subgroup.get_title()] = new_subgroup", "def add_group(self):\n items = self.group_list.selectedItems()\n for item in items:\n self.parent.add_group_data(item.text())", "def clone(self):\n return _libsbml.GroupsExtension_clone(self)", "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def group_image(directory, image, group):\r\n\tif os.path.exists(directory + \"\\\\\" + group):\r\n\t\tpass\r\n\telse:\r\n\t\ttry:\r\n\t\t\tos.mkdir(directory + '\\\\' + group)\r\n\t\t\tprint(\"Successfully created directory\", group)\r\n\t\texcept OSError:\r\n\t\t\tprint(\"Creation of directory failed.\")\r\n\ttry:\r\n\t\tshutil.copy(str(directory + '\\\\' + image), str(directory + \"\\\\\" + group + \"\\\\\" + 
image))\r\n\texcept OSError as OSe:\r\n\t\tprint(OSe)", "def generate_website_group_edges(website_group_json, dst):\n with open(website_group_json) as f_h:\n with gremlin_writer(GremlinEdgeCSV, dst, attributes=[]) as writer:\n for data in json_lines_file(f_h):\n root_id = data[\"id\"]\n websites = data[\"websites\"]\n for website in websites:\n writer.add(\n _id=get_id(root_id, website, {}),\n _from=root_id,\n to=website,\n label=WEBISTE_GROUP_EDGE_LABEL,\n attribute_map={}\n )", "def test_list_role_assignment_fails_with_userid_and_source_groups(self):\n group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)\n group = self.identity_api.create_group(group)\n self.assertRaises(exception.UnexpectedError,\n self.assignment_api.list_role_assignments,\n effective=True,\n user_id=self.user_foo['id'],\n source_from_group_ids=[group['id']])", "def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()", "def merge_groups(self, groups):\n if len(groups) < 2:\n raise ValidationError(gettext_lazy('Cannot merge less than 2 groups'))\n\n from devilry.apps.core.models import AssignmentGroupHistory\n\n target_group = groups.pop(0)\n # Check if we can merge\n for group in groups:\n group.can_merge(target_group)\n\n # Create or get target group history\n try:\n grouphistory = target_group.assignmentgrouphistory\n except AssignmentGroupHistory.DoesNotExist:\n grouphistory = AssignmentGroupHistory(assignment_group=target_group)\n # Insert groups in history\n grouphistory.merge_assignment_group_history(groups)\n\n # Merge groups\n with transaction.atomic():\n for group in groups:\n group.merge_into(target=target_group)\n group.set_all_target_feedbacksets_to_merge_type(target=target_group)\n group.create_new_first_attempt_for_target_group(target=target_group)\n grouphistory.save()", "def write_copy(file_source_path, original_file_name, **kwargs):\r\n\r\n group_dir, subject_dir, year_session_dir = None, None, None\r\n\r\n matched_groups = kwargs.get(\"matched_groups\")\r\n if matched_groups is not None:\r\n \"\"\"\r\n matched_groups[0] = year\r\n matched_groups[1] = session\r\n matched_groups[2] = subject group number with hyphen (if applicable)\r\n matched_groups[3] = subject group number (if applicable)\r\n matched_groups[4] = subject group name\r\n matched_groups[5] = subject name\r\n matched_groups[8] = paper number\r\n matched_groups[9] = further info\r\n \"\"\"\r\n\r\n # Handling computer science's change of group\r\n if \"Computer_science\" in matched_groups[5] and \"Mathematics\" in matched_groups[4]:\r\n group_dir = \"Group 4 - Sciences\"\r\n # Continuing regular group handing\r\n elif matched_groups[3] is None:\r\n group_dir = format_group_name(matched_groups[4])\r\n else:\r\n group_dir = matched_groups[2] + matched_groups[4]\r\n\r\n # Handling difficulty. 
Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n # This is where we handle deprecated/changed subject names\r\n subject = matched_groups[5]\r\n if \"Business_and_management\" in subject:\r\n subject = subject.replace(\"Business_and_management\", \"Business_management\")\r\n elif \"Belarussian\" in subject:\r\n subject = subject.replace(\"Belarussian\", \"Belarusian\")\r\n elif \"Biology_HL\" in subject:\r\n subject = subject.replace(\"Biology_HL\", \"Biology\")\r\n elif \"Biology_SL\" in subject:\r\n subject = subject.replace(\"Biology_SL\", \"Biology\")\r\n elif \"Ecosystems_and_societies_SL\" in subject:\r\n subject = subject.replace(\"Ecosystems_and_societies_SL\", \"Ecosystems_and_societies\")\r\n elif \"Environmental_systems_SL\" in subject:\r\n subject = subject.replace(\"Environmental_systems_SL\", \"Environmental_systems\")\r\n elif \"History_route_1\" in subject:\r\n subject = subject.replace(\"History_route_1\", \"History\")\r\n elif \"History_route_2\" in subject:\r\n subject = subject.replace(\"History_route_2\", \"History\")\r\n elif \"History_of_the_Islamic_World\" in subject:\r\n subject = subject.replace(\"History_of_the_Islamic_World\", \"Islamic_history\")\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{matched_groups[0]} {matched_groups[1]} Examination Session\"\r\n\r\n music_groups = kwargs.get(\"music_groups\")\r\n if music_groups is not None:\r\n \"\"\"\r\n music_groups[0] = year\r\n music_groups[1] = session\r\n music_groups[2] = subject group number with hyphen (if applicable)\r\n music_groups[3] = subject group number (if applicable)\r\n music_groups[4] = subject group name\r\n music_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n subject = \"Music\"\r\n\r\n # Handling difficulty. 
Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{music_groups[0]} {music_groups[1]} Examination Session\"\r\n\r\n audio_groups = kwargs.get(\"audio_groups\")\r\n if audio_groups is not None:\r\n \"\"\"\r\n audio_groups[0] = year\r\n audio_groups[1] = session\r\n audio_groups[2] = subject group number with hyphen (if applicable)\r\n audio_groups[3] = subject group number (if applicable)\r\n audio_groups[4] = subject group name (contains '\\\\audio' in some instances)\r\n audio_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n else:\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"Music_{difficulty}\"\r\n year_session_dir = f\"{audio_groups[0]} {audio_groups[1]} Examination Session\"\r\n year_session_dir = os.path.join(year_session_dir, \"audio\")\r\n\r\n if None not in [group_dir, subject_dir, year_session_dir]:\r\n new_filepath = os.path.join(abs_destination_directory, group_dir, subject_dir, year_session_dir,\r\n original_file_name)\r\n os.makedirs(os.path.dirname(new_filepath), exist_ok=True)\r\n shutil.copy(file_source_path, new_filepath)\r\n else:\r\n print(f\"CRITICAL ERROR: File had 'None' path attributes: {file_source_path}\")", "def test_clone_scenario(self):\n pass", "def _create_group_rules(self, group_object):\n\n for rule in ctx.node.properties['rules']:\n\n if 'src_group_id' in rule:\n\n if 'cidr_ip' in rule:\n raise NonRecoverableError(\n 'You need to pass either src_group_id OR cidr_ip.')\n\n if not group_object.vpc_id:\n src_group_object = self.get_resource()\n else:\n src_group_object = self._get_vpc_security_group_from_name(\n rule['src_group_id'])\n\n if not src_group_object:\n raise NonRecoverableError(\n 'Supplied src_group_id {0} doesn ot exist in '\n 'the given account.'.format(rule['src_group_id']))\n\n del rule['src_group_id']\n rule['src_group'] = src_group_object\n\n elif 'cidr_ip' not in rule:\n raise NonRecoverableError(\n 'You need to pass either src_group_id OR cidr_ip.')\n\n try:\n group_object.authorize(**rule)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n except Exception as e:\n self._delete_security_group(group_object.id)\n raise", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in 
res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def add_to_group(self, group):\n\n if not self.in_group(group):\n self.secondary_groups.append(group)\n return self", "def create_subgroup_accounts(request_ctx, account_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode", "def construct_relation_group(\n self,\n group_name,\n domain_type,\n range_type,\n group_members = None):\n if not group_members:\n group_members = sorted([\n rel for rel in self.get_relation_names()\n if self.get_domain(rel) == domain_type and\n self.get_range(rel) == range_type\n ])\n if self.is_type(group_name):\n raise RelationNameError(group_name, 'Group already exists.')\n\n self.declare_entity_type(\n group_name, fixed_vocab=group_members, unknown_marker=None)\n\n for r in group_members:\n if self.is_dense(r):\n raise ValueError('Dense relation %r is unsupported.' % r)\n\n group = RelationGroup(group_name, group_members)\n self._group[group_name] = group\n # declare the schema for the necessary extension to the KG\n self.declare_relation(group.relation_rel, group.triple_type, group_name)\n self.declare_relation(group.subject_rel, group.triple_type, domain_type)\n self.declare_relation(group.object_rel, group.triple_type, range_type)\n self.declare_relation(group.weight_rel, group.triple_type,\n group.triple_type)\n # relation i in this group has num_rows[i] rows\n try:\n num_rows = [self._np_initval[r].data.shape[0] for r in group.members]\n except KeyError as err:\n raise RelationNameError(\n str(err), 'An undefined relation was encountered. 
'\n 'All relations in a relation group must be defined before calling '\n 'construct_relation_group.')\n total_num_rows = sum(num_rows)\n # names of all those triples\n self.extend_type(\n group.triple_type,\n [group.triple_prefix + str(i) for i in range(total_num_rows)])\n # now populate the sparse matrixes\n triple_indices = np.arange(total_num_rows, dtype='int32')\n rel_indices = np.hstack([\n np.ones(num_rows[i], dtype='int32') * i\n for i in range(len(group.members))\n ])\n subj_indices = np.hstack([self._np_initval[r].col for r in group.members])\n obj_indices = np.hstack([self._np_initval[r].row for r in group.members])\n weight_data = np.hstack([self._np_initval[r].data for r in group.members])\n ones_data = np.ones_like(weight_data)\n # weights are in a diagonal matrix\n self._np_initval[group.weight_rel] = scipy.sparse.coo_matrix(\n (weight_data, (triple_indices, triple_indices)),\n shape=(total_num_rows, total_num_rows),\n dtype='float32')\n self._np_initval[group.relation_rel] = scipy.sparse.coo_matrix(\n (weight_data, (rel_indices, triple_indices)),\n shape=(len(group.members), total_num_rows),\n dtype='float32')\n self._np_initval[group.subject_rel] = scipy.sparse.coo_matrix(\n (ones_data, (subj_indices, triple_indices)),\n shape=(self.get_max_id(domain_type), total_num_rows),\n dtype='float32')\n self._np_initval[group.object_rel] = scipy.sparse.coo_matrix(\n (ones_data, (obj_indices, triple_indices)),\n shape=(self.get_max_id(range_type), total_num_rows),\n dtype='float32')\n self.freeze(group.triple_type, unknown_marker=None)\n return group", "def test_merge_nooverlap(self):\n self.open_url('/group/list')\n \n # Sanity check\n el = self.wd.find_element(By.LINK_TEXT, \"Second Group\")\n el.click()\n time.sleep(0.5)\n \n self.assert_num_rows(1)\n \n self.open_url('/group/list')\n self.wd.find_element(By.ID, \"subnav-merge\").click()\n time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...\n self.assertEquals('Merge Group', self.wd.title)\n \n sel = Select(self.wd.find_element(By.ID, \"from_group_id\"))\n sel.select_by_visible_text(\"6th group\")\n \n sel = Select(self.wd.find_element(By.ID, \"to_group_id\"))\n sel.select_by_visible_text(\"Second Group\")\n \n self.submit_form(\"merge_form\")\n \n self.open_url('/group/list')\n self.assert_not_in_list_table(\"6th group\")\n \n el = self.wd.find_element(By.LINK_TEXT, \"Second Group\")\n el.click()\n \n self.assert_num_rows(3)", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def interactive_insert(group_name=None, package_name=None):\n file_name = \"\"\n file_destination = \"\"\n file_source = None\n file_create_link = False\n file_sudo = False\n file_comments = []\n\n def ask_file_source():\n return message.question(\"What is the full source file path?\")\n\n def ask_sudo():\n return message.question(\"Is sudo needed for this operation?\", \"boolean\")\n\n while True:\n file_source = None\n file_create_link = False\n file_sudo = False\n\n message.heading(\"Creating a new file. 
(${vars} is supported, '~' is not)\")\n if group_name is not None:\n message.info(f\"Current group: {group_name}\")\n if package_name is not None:\n message.info(f\"Current package: {package_name}\")\n\n file_destination = message.question(\n \"Where will this file be (created/linked/copied) to? (no basename)\"\n )\n\n if message.question(\n \"Will this file be linked to [destination]?\", \"boolean\"\n ):\n file_create_link = True\n file_source = ask_file_source()\n file_sudo = ask_sudo()\n elif message.question(\n \"Will this file be copied to [destination]?\", \"boolean\"\n ):\n file_source = ask_file_source()\n file_sudo = ask_sudo()\n\n if file_source is not None:\n [_, file_name] = os.path.split(os.path.expandvars(file_source))\n else:\n file_name = message.question(\"What will be the file's name?\")\n\n if message.question(\n \"Will the file have comments to aid the user?\", \"boolean\"\n ):\n while True:\n comment = message.question(\"New comment:\")\n file_comments.append(comment)\n if not message.question(\"Add another comment?\", \"boolean\"):\n break\n\n new_file = File(\n file_name,\n file_destination,\n os.path.split(file_source)[0] if file_source is not None else None,\n \"\",\n file_create_link,\n file_sudo,\n file_comments,\n )\n\n new_file.evaluate()\n\n message.info(\n f\"\"\"File info:\n [Name]: '{new_file.name}'\n [Destination]: '{new_file.path_destination}'\n [Source]: '{new_file.path_source}'\n [Link?]: '{'Yes' if new_file.create_link else 'No'}'\n [Need superuser?]: '{'Yes' if new_file.sudo else 'No'}'\n [Comments]: {new_file.comments}\"\"\"\n )\n if message.question(\"Confirm?\", \"boolean\"):\n break\n\n return new_file", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def do_push_group(dbsync, group):\n pass", "def test_taskgroup_set(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1.set_downstream(group, Label(\"Group label\"))\n group.set_downstream(op4)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def clone(self):\n return _libsbml.ListOfGroups_clone(self)", "def add_group(self, groupname, grouptitle, path_to_group='/'):\n self.open_db()\n group = self.group_exists(path_to_group, groupname)\n if group is False:\n group = self.h5file.create_group(path_to_group, groupname,\n grouptitle)\n return group", "def _merge_feedbackset_into(self, target):\n from devilry.devilry_group.models import FeedbackSet\n\n # Map feedbackset_type to merge prefix\n feedbackset_type_merge_map = {\n FeedbackSet.FEEDBACKSET_TYPE_FIRST_ATTEMPT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_FIRST_ATTEMPT,\n FeedbackSet.FEEDBACKSET_TYPE_NEW_ATTEMPT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_NEW_ATTEMPT,\n FeedbackSet.FEEDBACKSET_TYPE_RE_EDIT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_RE_EDIT\n }\n\n feedbacksets = 
self.feedbackset_set.order_by_deadline_datetime()\\\n .select_related('group__parentnode')\n\n for feedbackset in feedbacksets:\n # change feedbackset_type to merge prefix\n if feedbackset.feedbackset_type in list(feedbackset_type_merge_map.keys()):\n feedbackset.feedbackset_type = feedbackset_type_merge_map[feedbackset.feedbackset_type]\n feedbackset.group = target\n feedbackset.save()", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def test_groups_group_ref_put(self):\n pass", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def test_duplicate_groups(self):\n\n auag = UsersAndGroups()\n\n # create a duplicate with default flag to raise an error.\n auag.add_group(Group(name=\"group1\"))\n with self.assertRaises(Exception):\n auag.add_group(Group(name=\"group1\"))\n\n # create with overwrite.\n auag.add_group(\n Group(name=\"group2\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group2\")\n self.assertEqual(u.name, \"group2\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_group(\n Group(name=\"group2\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group2\")\n self.assertEqual(u.name, \"group2\")\n self.assertEqual(u.groupNames, [\"group3\"])\n\n # create with update.\n auag.add_group(\n Group(name=\"group3\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group3\")\n self.assertEqual(u.name, \"group3\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_group(\n Group(name=\"group3\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.UPDATE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group3\")\n self.assertEqual(u.groupNames, [\"group2\", \"group3\"])", "def test_rsync_set_group(self):\n \n root = tempfile.mkdtemp(prefix=\"rsync_test_set_group_\")\n avail_groups = os.getgroups()\n exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]\n \n # Create some files to move\n to_copy = self._create_test_files(root)\n \n # Run rsync\n with open(os.devnull, 'w') as f:\n old_stdout = sys.stdout\n sys.stdout = f\n rsync_files(to_copy,sys.stdout,exp_group,False)\n sys.stdout = old_stdout\n \n # Verify the copy process set the correct group on created directories\n for ddir in set([d[1] for d in to_copy]):\n gid = os.stat(ddir).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on directory. Group is {}\".format(exp_group,\n obs_group))\n \n # Verify the copy process set the correct group\n for src, ddir, dname in to_copy:\n dfile = os.path.join(ddir,dname)\n gid = os.stat(dfile).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on file. 
Group is {}\".format(exp_group,\n obs_group))", "def test_drag_into_different_group(self):\r\n expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},\r\n {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]},\r\n {self.group_b: [self.group_b_item_2]},\r\n {self.group_empty: []}]\r\n self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering)", "def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))", "def _internal_copy(source, source_path, target, target_path, maintain_flag):\n if maintain_flag:\n try:\n target.create_group(target_path)\n except ValueError:\n pass # In case the copy_to() function failed previously and the group already exists.\n\n if target_path == \"/\":\n source.copy(target_path, \"/\") if source == target else source.copy(\n target_path, target\n )\n else:\n if maintain_flag:\n if dest_path != \"\":\n source.copy(source_path, target[dest_path])\n else:\n source.copy(source_path, target)\n else:\n group_name_old = source_path.split(\"/\")[-1]\n try:\n target.create_group(\"/tmp\")\n except ValueError:\n pass\n source.copy(source_path, target[\"/tmp\"])\n try:\n target.move(\"/tmp/\" + group_name_old, target_path)\n except ValueError:\n del target[dest_path]\n target.move(\"/tmp/\" + group_name_old, target_path)\n del target[\"/tmp\"]" ]
[ "0.5898711", "0.5813369", "0.57389224", "0.5706337", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400297", "0.5225952", "0.5201779", "0.5191769", "0.5140609", "0.50881827", "0.50875765", "0.50842255", "0.50807875", "0.5079402", "0.5074019", "0.50512403", "0.5050658", "0.50449747", "0.50314426", "0.501575", "0.5015151", "0.5008633", "0.5008148", "0.50077254", "0.49875265", "0.49526322", "0.49441025", "0.49416566", "0.49363503", "0.49342582", "0.49342582", "0.4924192", "0.4912886", "0.49026668", "0.49015865", "0.49010867", "0.48911935", "0.4890151", "0.48841816", "0.48499066", "0.4844977", "0.4841554", "0.48306534", "0.48294988", "0.4825032", "0.48163125", "0.48045567", "0.4800212", "0.47978723", "0.47894487", "0.47880772", "0.4760176", "0.47558516", "0.47499806", "0.47430947", "0.47366083", "0.4732884", "0.47253084", "0.47247282", "0.47220522", "0.4721433", "0.47198492", "0.47170034", "0.47099406", "0.470273", "0.46990228", "0.46927205", "0.46876273", "0.46869034", "0.46855444", "0.46782595", "0.46760097", "0.4666705", "0.46601993", "0.4642347", "0.46380064", "0.46338528", "0.46334374", "0.46315676", "0.4629545", "0.46279424", "0.46268773", "0.4621315", "0.46150658" ]
0.5874123
1
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):

    path = '/v1/courses/{course_id}/outcome_groups/{id}/import'
    payload = {
        'source_outcome_group_id' : source_outcome_group_id,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
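A minimal usage sketch of the function above, for illustration only. The import path, the RequestContext arguments, and every ID below are invented assumptions; only import_outcome_group_courses itself comes from the record, and client.post is assumed to return a requests.Response.

# Minimal usage sketch -- not part of the dataset record.
# Assumptions: the import path, the RequestContext signature, and all IDs
# below are hypothetical placeholders.
from canvas_sdk.client import RequestContext  # assumed import path

request_ctx = RequestContext(
    auth_token='<token>',                                # hypothetical token
    base_api_url='https://example.instructure.com/api',  # hypothetical host
)

# Copy the structure of outcome group 17 into outcome group 42 of course 101,
# creating links (not copies) to the same outcomes, as described above.
response = import_outcome_group_courses(
    request_ctx,
    course_id=101,
    id=42,
    source_outcome_group_id=17,
)
print(response.json())  # assuming client.post returns a requests.Response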
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
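The objective metadata above lists a single triplet over the query, document, and negatives fields, presumably marking them as (anchor, positive, negatives) for contrastive training. Below is a hypothetical sketch of how a loader might expand one record accordingly; the function and iteration are assumptions, only the field names come from the record.

# Hypothetical sketch of consuming the triplet objective above.
# All names are invented; only the field names appear in the record.
def to_triplets(record):
    anchor = record['query']        # the API description
    positive = record['document']   # the matching implementation
    for negative in record['negatives']:
        yield anchor, positive, negative  # one training triplet per negative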
[ "def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : 
outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_convert_to_existing_group2(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance_all')\n inventoryloader.convert_group('glance_registry', 'glance_all')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance_registry' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert not inventoryloader.groups['glance_all'].has_group('glance_registry')\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.groups['glance_all'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_all'].vars", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def test_convert_to_existing_group(self, inventoryloader):\n 
inventoryloader.convert_group('glance_api', 'glance_registry')\n assert 'glance_api' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance_registry'].has_host('localhost')\n assert inventoryloader.groups['glance_registry'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_registry'].vars", "def addgroup(self, abspath=None, sourcetree=pbxconsts.SOURCE_TREE.group, name=None, move=True):\n group_name = os.path.basename(abspath) if name is None or len(name) == 0 else name\n abspath = abspath if not abspath is None else self.realpath()\n subgroup = func.get_list_item(func.take(\\\n lambda o: o.isa == u'PBXGroup' and o.realpath() == abspath \\\n and o.displayname() == group_name, self.pbx_children), 0)\n if subgroup is None:\n subgroup = self.project().new_object(u'PBXGroup')\n pbxpath.set_path_with_source_tree(subgroup, abspath, source_tree=sourcetree, \\\n parent_group=self)\n if not name is None:\n subgroup.pbx_name = name\n self.addchild(subgroup, move=move)\n return subgroup", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def clone(self):\n return _libsbml.Group_clone(self)", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, 
**request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_from_src(self, cgsnapshot_id, source_cgid, name=None,\n description=None, user_id=None,\n project_id=None):\n body = {'consistencygroup-from-src': {'name': name,\n 'description': description,\n 'cgsnapshot_id': cgsnapshot_id,\n 'source_cgid': source_cgid,\n 'user_id': user_id,\n 'project_id': project_id,\n 'status': \"creating\",\n }}\n\n self.run_hooks('modify_body_for_update', body,\n 'consistencygroup-from-src')\n resp, body = self.api.client.post(\n \"/consistencygroups/create_from_src\", body=body)\n return common_base.DictWithMeta(body['consistencygroup'], resp)", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n 
node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def copyGroupFrom(self, groupName, sourceDesign, sourceProject=None, sourceProjectPath=None):\n oName = self.project_name\n if sourceProject == oName or sourceProject is None:\n oSrcProject = self._desktop.GetActiveProject()\n else:\n self._desktop.OpenProject(sourceProjectPath)\n oSrcProject = self._desktop.SetActiveProject(sourceProject)\n\n oDesign = oSrcProject.SetActiveDesign(sourceDesign)\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\n oEditor.Copy([\"NAME:Selections\", \"Selections:=\", groupName])\n\n self.modeler.oeditor.Paste()\n self.modeler.primitives.refresh_all_ids()\n self.materials._load_from_project()\n return True", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def test_does_not_return_duplicate_groups(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n 
organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n GroupResolution.objects.create(\n group=self.group,\n release=self.release,\n type=GroupResolution.Type.in_release,\n )\n\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def test_mergeGroups(self):\n tabs = [\n widgets.Tab(u'id1', u'Title 1', None),\n widgets.Tab(u'id2', u'Title 2', None)]\n tabGroup1 = widgets.TabGroup(u'id', u'Title', tabs=tabs)\n tabs = [\n widgets.Tab(u'id3', u'Title 3', None)]\n tabGroup2 = widgets.TabGroup(u'id', u'Hello', tabs=tabs)\n\n newGroup = widgets.TabGroup.mergeGroups(tabGroup1, tabGroup2)\n self.assertEquals(newGroup.id, u'id')\n self.assertEquals(newGroup.title, u'Hello')\n self.assertEquals(newGroup.tabs, tabGroup1.tabs + tabGroup2.tabs)", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def CopyAllSubElementsTo(self, other_group, ignore):\n # pylint: disable=protected-access\n collections_to_update = [\n (self._groups_to_load, other_group._groups_to_load),\n (self._commands_to_load, other_group._commands_to_load)]\n\n for src, dst in collections_to_update:\n for name, info in src.iteritems():\n if name in ignore:\n continue\n (module_dir, module_path, name, unused_track) = info\n dst[name] = (module_dir, module_path, name,\n other_group.ReleaseTrack())", "def test_add_existing_group(self, inventoryloader):\n grp_cnt = inventoryloader.count_groups()\n grp_vars = inventoryloader.groups['glance_api'].vars\n inventoryloader.add_group(u'glance_api')\n assert inventoryloader.count_groups() == grp_cnt\n assert inventoryloader.groups['glance_api'].vars == grp_vars\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n inventoryloader.add_group(u'glance_api', {\"vars\": { u'external_bridge': u'br-ext'}})\n assert 'br-mgmt' == inventoryloader.groups['glance_api'].vars['management_bridge']\n assert 'br-ext' == inventoryloader.groups['glance_api'].vars['external_bridge']", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_drag_group_into_group(self):\r\n expected_ordering = 
[{self.container_title: [self.group_a, self.group_empty]},\r\n {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]},\r\n {self.group_b: [self.group_b_item_1, self.group_b_item_2]},\r\n {self.group_empty: []}]\r\n self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering)", "def test_list_role_assignment_using_inherited_sourced_groups(self):\n test_plan = {\n # A domain with 3 users, 3 groups, 3 projects, a second domain,\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},\n 1],\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'domain': 0,\n 'inherited_to_projects': True},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1,\n 'inherited_to_projects': True},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1.\n # We should see the inherited group assigned on the 3 projects\n # from domain 0, as well as the direct assignments.\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 2,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)", "def _copy_from_template(\n self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation\n ):\n new_blocks = set()\n\n new_children = list() # ordered list of the new children of new_parent_block_key\n\n for usage_key in source_keys:\n src_course_key = usage_key.course_key\n hashable_source_id = src_course_key.for_version(None)\n block_key = BlockKey(usage_key.block_type, usage_key.block_id)\n source_structure = source_structures[src_course_key]\n\n if block_key not in source_structure['blocks']:\n raise ItemNotFoundError(usage_key)\n source_block_info = source_structure['blocks'][block_key]\n\n # Compute a new block ID. 
This new block ID must be consistent when this\n # method is called with the same (source_key, dest_structure) pair\n unique_data = \"{}:{}:{}\".format(\n str(hashable_source_id).encode(\"utf-8\"),\n block_key.id,\n new_parent_block_key.id,\n )\n new_block_id = hashlib.sha1(unique_data.encode('utf-8')).hexdigest()[:20]\n new_block_key = BlockKey(block_key.type, new_block_id)\n\n # Now clone block_key to new_block_key:\n new_block_info = copy.deepcopy(source_block_info)\n # Note that new_block_info now points to the same definition ID entry as source_block_info did\n existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())\n # Inherit the Scope.settings values from 'fields' to 'defaults'\n new_block_info.defaults = new_block_info.fields\n\n # <workaround>\n # CAPA modules store their 'markdown' value (an alternate representation of their content)\n # in Scope.settings rather than Scope.content :-/\n # markdown is a field that really should not be overridable - it fundamentally changes the content.\n # capa modules also use a custom editor that always saves their markdown field to the metadata,\n # even if it hasn't changed, which breaks our override system.\n # So until capa modules are fixed, we special-case them and remove their markdown fields,\n # forcing the inherited version to use XML only.\n if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:\n del new_block_info.defaults['markdown']\n # </workaround>\n\n # Preserve any existing overrides\n new_block_info.fields = existing_block_info.fields\n\n if 'children' in new_block_info.defaults:\n del new_block_info.defaults['children'] # Will be set later\n\n new_block_info.edit_info = existing_block_info.edit_info\n new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version\n new_block_info.edit_info.update_version = dest_structure['_id']\n # Note we do not set 'source_version' - it's only used for copying identical blocks\n # from draft to published as part of publishing workflow.\n # Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.\n new_block_info.edit_info.edited_by = user_id\n new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)\n new_block_info.edit_info.original_usage = str(usage_key.replace(branch=None, version_guid=None))\n new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version\n dest_structure['blocks'][new_block_key] = new_block_info\n\n children = source_block_info.fields.get('children')\n if children:\n children = [src_course_key.make_usage_key(child.type, child.id) for child in children]\n new_blocks |= self._copy_from_template(\n source_structures, children, dest_structure, new_block_key, user_id, head_validation\n )\n\n new_blocks.add(new_block_key)\n # And add new_block_key to the list of new_parent_block_key's new children:\n new_children.append(new_block_key)\n\n # Update the children of new_parent_block_key\n dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children\n\n return new_blocks", "def test_grouping(self):\n n = self.create(NodeItem, UML.Node)\n a = self.create(ArtifactItem, UML.Artifact)\n\n self.group(n, a)\n\n assert 1 == len(n.subject.deployment)\n assert n.subject.deployment[0].deployedArtifact[0] is a.subject", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = 
Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 
'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors",
 "def make(self):\n numberOfGroups = np.random.randint(1, len(self.getFirstParent().getGroups()))\n secParentGroups = np.random.choice(self.secondParent.getGroups(), numberOfGroups, replace=False)\n groups = []\n allSecElements = []\n numberOfElements = 0\n\n for grpSec in secParentGroups:\n allSecElements += grpSec.getElements()\n\n for grpFst in self.getFirstParent().getGroups():\n numberOfElements += len(grpFst.getElements())\n elements = list(set(grpFst.getElements()) - set(allSecElements))\n group = Group(grpFst.getIndex(), grpFst.getMinElements(), grpFst.getMaxElements())\n group.setElements(elements)\n groups.append(group)\n\n for grpSec in secParentGroups:\n for grpFst in groups:\n if grpSec.getIndex() == grpFst.getIndex():\n grpFst.addElements(grpSec.getElements())\n\n child = Individual(np.zeros(numberOfElements))\n child.setGroups(groups)\n\n return child",
 "def test_add_group(self):\n pass",
 "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response",
 "def _resolve_grouping_node(group_node, group_tree, group_graph,\n target_graph):\n\n # Get the predecessors of the grouping node\n preds = nx.dfs_predecessors(group_tree, group_node)\n\n # Get a list of unique node identifiers among predecessors.
These are\n # the nodes on which a subgraph will be induced.\n preds = list(set(list(preds.keys()) + list(preds.values())))\n\n # Induce a subgraph based on the nodes\n pred_group = group_graph.subgraph(preds).copy()\n\n # Set up edge dictionary\n edge_attrs = {}\n\n # Encode edge type information\n for s, t in pred_group.edges():\n\n # Add edge attributes to the dictionary\n edge_attrs[(s, t)] = {'kind': 'grouping'}\n\n # Set edge attributes\n nx.set_edge_attributes(pred_group, edge_attrs)\n\n # Add the nodes and edges from the subgraph to the connectivity graph\n target_graph.add_nodes_from(pred_group.nodes(data=True))\n target_graph.add_edges_from(pred_group.edges(data=True))",
 "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups",
 "def setup_group_workspaces(context):\n if context.readDataFile(\"marker.txt\") is None:\n return\n\n portal = context.getSite()\n if \"groups\" not in portal.objectIds():\n\n groups = portal[\n portal.invokeFactory(\"Folder\",id=\"groups\")]\n\n # set default properties\n groups.setTitle(\"groups\")\n groups.setDescription(\"Group workspaces container.\")\n groups._getWorkflowTool().doActionFor(groups, \"publish\" \"\")\n groups.setExcludeFromNav(True)\n groups.update() \n logger.info(\"Groups container created.\")",
 "def test_create_group(self):\n pass",
 "def test_create_group(self):\n pass",
 "def ensure_target_group_created(vpc, environment):\n name = environment + '-web'\n\n # If it already exists, create returns the existing data\n response = ELB.create_target_group(\n Name=name,\n Protocol='HTTP',\n Port=9000,\n VpcId=vpc.id,\n Matcher={\n 'HttpCode': '200,301'\n }\n )\n\n arn = response['TargetGroups'][0]['TargetGroupArn']\n\n return arn",
 "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))",
 "def add_source(self, group_source):\n if group_source.name in self._sources:\n raise ValueError(\"GroupSource '%s': name collision\" % \\\n group_source.name)\n self._sources[group_source.name] = group_source",
 "def test_list_role_assignment_using_sourced_groups(self):\n test_plan = {\n # The default domain with 3 users, 3 groups, 3 projects,\n # plus 3 roles.\n 'entities': {'domains': {'id': CONF.identity.default_domain_id,\n 'users': 3, 'groups': 3, 'projects': 3},\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 2,\n 'effective': True},\n 'results': [{'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)",
 "def copy_all_except_candidates(self):\n groupcopy =
AssignmentGroup(parentnode=self.parentnode,\n name=self.name,\n is_open=self.is_open,\n delivery_status=self.delivery_status)\n groupcopy.full_clean()\n groupcopy.save()\n for examiner in self.examiners.all():\n groupcopy.examiners.create(relatedexaminer=examiner.relatedexaminer)\n for tagobj in self.tags.all():\n groupcopy.tags.create(tag=tagobj.tag)\n return groupcopy",
 "def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):\n orphans = set()\n destination_block = destination_blocks.get(block_key)\n new_block = source_blocks[block_key]\n if destination_block:\n # reorder children to correspond to whatever order holds for source.\n # remove any which source no longer claims (put into orphans)\n # add any which are being copied\n source_children = new_block.fields.get('children', [])\n existing_children = destination_block.fields.get('children', [])\n destination_reordered = SparseList()\n for child in existing_children:\n try:\n index = source_children.index(child)\n destination_reordered[index] = child\n except ValueError:\n orphans.add(BlockKey(*child))\n if blacklist != EXCLUDE_ALL:\n for index, child in enumerate(source_children):\n if child not in blacklist:\n destination_reordered[index] = child\n # the history of the published leaps between publications and only points to\n # previously published versions.\n previous_version = destination_block.edit_info.update_version\n destination_block = copy.deepcopy(new_block)\n destination_block.fields['children'] = destination_reordered.compact_list()\n destination_block.edit_info.previous_version = previous_version\n destination_block.edit_info.update_version = destination_version\n destination_block.edit_info.edited_by = user_id\n destination_block.edit_info.edited_on = datetime.datetime.now(UTC)\n else:\n destination_block = self._new_block(\n user_id, new_block.block_type,\n self._filter_blacklist(copy.copy(new_block.fields), blacklist),\n new_block.definition,\n destination_version,\n raw=True,\n asides=new_block.asides,\n block_defaults=new_block.defaults\n )\n # Extend the block's new edit_info with any extra edit_info fields from the source (e.g.
original_usage):\n for key, val in new_block.edit_info.to_storable().items():\n if getattr(destination_block.edit_info, key) is None:\n setattr(destination_block.edit_info, key, val)\n\n # If the block we are copying from was itself a copy, then just\n # reference the original source, rather than the copy.\n destination_block.edit_info.source_version = (\n new_block.edit_info.source_version or new_block.edit_info.update_version\n )\n\n if blacklist != EXCLUDE_ALL:\n for child in destination_block.fields.get('children', []):\n if child not in blacklist:\n orphans.update(\n self._copy_subdag(\n user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist\n )\n )\n destination_blocks[block_key] = destination_block\n return orphans",
 "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409",
 "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)",
 "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)",
 "def test_taskgroup_shift(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1 >> Label(\"Group label\") >> group >> op4\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}",
 "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this
case will never run\n pass",
 "def group_by_source(self, group_by_source):\n\n self._group_by_source = group_by_source",
 "async def add_parent_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n stmt = select(Group).filter(Group.parent_id is None).options(selectinload(Group.parent))\n result = await dbsession.execute(stmt)\n stmt = select(func.count(Group.id)).filter(Group.parent_id is None)\n result_count = await dbsession.execute(stmt)\n with click.progressbar(\n result.scalars(), length=result_count.scalar_one(), label=\"Adding parent groups\"\n ) as progress:\n for group in progress:\n if \"aat\" in config[\"data\"][\"hierarchy\"][\"expansions\"]:\n categories = apply_aat(group.value, merge=False)\n if categories:\n for category_list in categories:\n mapped = False\n for category in category_list:\n stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if not parent_group:\n parent_group = Group(\n value=category, label=category[0].upper() + category[1:], split=\"parent\"\n )\n dbsession.add(group)\n group.parent = parent_group\n mapped = True\n group = parent_group # noqa: PLW2901\n if group.parent_id:\n break\n if mapped:\n break\n else:\n mapped = False\n for category in apply_nlp(group.value):\n stmt = select(Group).filter(\n or_(Group.value == category, Group.value == inflection.pluralize(category))\n )\n result = await dbsession.execute(stmt)\n parent_group = result.scalars().first()\n if parent_group:\n group.parent = parent_group\n await dbsession.commit()\n mapped = True\n break\n if not mapped:\n if group.value not in [\"styles and periods\"]:\n for category in apply_nlp(group.value):\n hierarchies = apply_aat(category, merge=False)\n groups = []\n for hierarchy in hierarchies:\n if group.value not in hierarchy:\n stmt = (\n select(Group)\n .filter(Group.value.in_(hierarchy))\n .options(selectinload(Group.items))\n )\n result = await dbsession.execute(stmt)\n for potential_group in result.scalars():\n depth = 0\n tmp = potential_group\n while tmp:\n depth = depth + 1\n tmp = tmp.parent\n groups.append((potential_group, depth, len(potential_group.items)))\n if groups:\n groups.sort(key=lambda g: (g[1], g[2]), reverse=True)\n group.parent = groups[0][0]\n break\n await dbsession.commit()",
 "def _duplicate_item(parent_usage_key, duplicate_source_usage_key, display_name=None, user=None):\r\n store = get_modulestore(duplicate_source_usage_key)\r\n source_item = store.get_item(duplicate_source_usage_key)\r\n # Change the blockID to be unique.\r\n dest_usage_key = duplicate_source_usage_key.replace(name=uuid4().hex)\r\n category = dest_usage_key.category\r\n\r\n # Update the display name to indicate this is a duplicate (unless display name provided).\r\n duplicate_metadata = own_metadata(source_item)\r\n if display_name is not None:\r\n duplicate_metadata['display_name'] = display_name\r\n else:\r\n if source_item.display_name is None:\r\n duplicate_metadata['display_name'] = _(\"Duplicate of {0}\").format(source_item.category)\r\n else:\r\n duplicate_metadata['display_name'] = _(\"Duplicate of '{0}'\").format(source_item.display_name)\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=source_item.data if hasattr(source_item, 'data') else None,\r\n metadata=duplicate_metadata,\r\n system=source_item.runtime,\r\n )\r\n\r\n dest_module = get_modulestore(category).get_item(dest_usage_key)\r\n # Children are not automatically
copied over (and not all xblocks have a 'children' attribute).\r\n # Because DAGs are not fully supported, we need to actually duplicate each child as well.\r\n if source_item.has_children:\r\n dest_module.children = []\r\n for child in source_item.children:\r\n dupe = _duplicate_item(dest_usage_key, child, user=user)\r\n dest_module.children.append(dupe)\r\n get_modulestore(dest_usage_key).update_item(dest_module, user.id if user else None)\r\n\r\n if not 'detached' in source_item.runtime.load_block_type(category)._class_tags:\r\n parent = get_modulestore(parent_usage_key).get_item(parent_usage_key)\r\n # If source was already a child of the parent, add duplicate immediately afterward.\r\n # Otherwise, add child to end.\r\n if duplicate_source_usage_key in parent.children:\r\n source_index = parent.children.index(duplicate_source_usage_key)\r\n parent.children.insert(source_index + 1, dest_usage_key)\r\n else:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent_usage_key).update_item(parent, user.id if user else None)\r\n\r\n return dest_usage_key",
 "def _duplicate_as_linked_tree(self, source_root):\n logging.debug(\"Started traversing %s \\'s tree for file linkage and directory duplication.\" % self.directory)\n # Create the containing directory that resides within the share\n within_share_dir_path = os.path.join(self.directory, os.path.basename(source_root))\n self._makedir(within_share_dir_path)\n for root, subdirectories, files in os.walk(source_root, followlinks=True):\n share_root = root.replace(str(source_root), within_share_dir_path, 1)\n for subdir in subdirectories:\n target = os.path.join(share_root, subdir)\n self._makedir(target)\n for file in files:\n source = os.path.join(root, file)\n target = os.path.join(share_root, file)\n self._link_files(source, target)",
 "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')",
 "def test_add_parent(self):\n _, _, groupa, groupb = create_objects()\n groupa.add_parent(groupb)\n assert groupb in groupa.parents\n assert groupa in groupb.children\n return (groupa, groupb)",
 "def add_subgroup(self, new_subgroup):\n self.subgroups[new_subgroup.get_title()] = new_subgroup",
 "def add_group(self):\n items = self.group_list.selectedItems()\n for item in items:\n self.parent.add_group_data(item.text())",
 "def clone(self):\n return _libsbml.GroupsExtension_clone(self)",
 "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response",
 "def group_image(directory, image, group):\r\n\tif os.path.exists(directory + \"\\\\\" + group):\r\n\t\tpass\r\n\telse:\r\n\t\ttry:\r\n\t\t\tos.mkdir(directory + '\\\\' + group)\r\n\t\t\tprint(\"Successfully created directory\", group)\r\n\t\texcept OSError:\r\n\t\t\tprint(\"Creation of directory failed.\")\r\n\ttry:\r\n\t\tshutil.copy(str(directory + '\\\\' + image), str(directory + \"\\\\\" + group + \"\\\\\" +
image))\r\n\texcept OSError as OSe:\r\n\t\tprint(OSe)",
 "def generate_website_group_edges(website_group_json, dst):\n with open(website_group_json) as f_h:\n with gremlin_writer(GremlinEdgeCSV, dst, attributes=[]) as writer:\n for data in json_lines_file(f_h):\n root_id = data[\"id\"]\n websites = data[\"websites\"]\n for website in websites:\n writer.add(\n _id=get_id(root_id, website, {}),\n _from=root_id,\n to=website,\n label=WEBISTE_GROUP_EDGE_LABEL,\n attribute_map={}\n )",
 "def test_list_role_assignment_fails_with_userid_and_source_groups(self):\n group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)\n group = self.identity_api.create_group(group)\n self.assertRaises(exception.UnexpectedError,\n self.assignment_api.list_role_assignments,\n effective=True,\n user_id=self.user_foo['id'],\n source_from_group_ids=[group['id']])",
 "def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()",
 "def merge_groups(self, groups):\n if len(groups) < 2:\n raise ValidationError(gettext_lazy('Cannot merge less than 2 groups'))\n\n from devilry.apps.core.models import AssignmentGroupHistory\n\n target_group = groups.pop(0)\n # Check if we can merge\n for group in groups:\n group.can_merge(target_group)\n\n # Create or get target group history\n try:\n grouphistory = target_group.assignmentgrouphistory\n except AssignmentGroupHistory.DoesNotExist:\n grouphistory = AssignmentGroupHistory(assignment_group=target_group)\n # Insert groups in history\n grouphistory.merge_assignment_group_history(groups)\n\n # Merge groups\n with transaction.atomic():\n for group in groups:\n group.merge_into(target=target_group)\n group.set_all_target_feedbacksets_to_merge_type(target=target_group)\n group.create_new_first_attempt_for_target_group(target=target_group)\n grouphistory.save()",
 "def write_copy(file_source_path, original_file_name, **kwargs):\r\n\r\n group_dir, subject_dir, year_session_dir = None, None, None\r\n\r\n matched_groups = kwargs.get(\"matched_groups\")\r\n if matched_groups is not None:\r\n \"\"\"\r\n matched_groups[0] = year\r\n matched_groups[1] = session\r\n matched_groups[2] = subject group number with hyphen (if applicable)\r\n matched_groups[3] = subject group number (if applicable)\r\n matched_groups[4] = subject group name\r\n matched_groups[5] = subject name\r\n matched_groups[8] = paper number\r\n matched_groups[9] = further info\r\n \"\"\"\r\n\r\n # Handling computer science's change of group\r\n if \"Computer_science\" in matched_groups[5] and \"Mathematics\" in matched_groups[4]:\r\n group_dir = \"Group 4 - Sciences\"\r\n # Continuing regular group handing\r\n elif matched_groups[3] is None:\r\n group_dir = format_group_name(matched_groups[4])\r\n else:\r\n group_dir = matched_groups[2] + matched_groups[4]\r\n\r\n # Handling difficulty.
Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, matched_groups=matched_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n # This is where we handle deprecated/changed subject names\r\n subject = matched_groups[5]\r\n if \"Business_and_management\" in subject:\r\n subject = subject.replace(\"Business_and_management\", \"Business_management\")\r\n elif \"Belarussian\" in subject:\r\n subject = subject.replace(\"Belarussian\", \"Belarusian\")\r\n elif \"Biology_HL\" in subject:\r\n subject = subject.replace(\"Biology_HL\", \"Biology\")\r\n elif \"Biology_SL\" in subject:\r\n subject = subject.replace(\"Biology_SL\", \"Biology\")\r\n elif \"Ecosystems_and_societies_SL\" in subject:\r\n subject = subject.replace(\"Ecosystems_and_societies_SL\", \"Ecosystems_and_societies\")\r\n elif \"Environmental_systems_SL\" in subject:\r\n subject = subject.replace(\"Environmental_systems_SL\", \"Environmental_systems\")\r\n elif \"History_route_1\" in subject:\r\n subject = subject.replace(\"History_route_1\", \"History\")\r\n elif \"History_route_2\" in subject:\r\n subject = subject.replace(\"History_route_2\", \"History\")\r\n elif \"History_of_the_Islamic_World\" in subject:\r\n subject = subject.replace(\"History_of_the_Islamic_World\", \"Islamic_history\")\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{matched_groups[0]} {matched_groups[1]} Examination Session\"\r\n\r\n music_groups = kwargs.get(\"music_groups\")\r\n if music_groups is not None:\r\n \"\"\"\r\n music_groups[0] = year\r\n music_groups[1] = session\r\n music_groups[2] = subject group number with hyphen (if applicable)\r\n music_groups[3] = subject group number (if applicable)\r\n music_groups[4] = subject group name\r\n music_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n subject = \"Music\"\r\n\r\n # Handling difficulty.
Bulk of it is handling HLSL files and files with no difficulty stated.\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n elif \"HLSL\" in original_file_name:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n elif \"HL\" in original_file_name:\r\n difficulty = \"HL\"\r\n elif \"SL\" in original_file_name:\r\n difficulty = \"SL\"\r\n else:\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, music_groups=music_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"{subject}_{difficulty}\"\r\n year_session_dir = f\"{music_groups[0]} {music_groups[1]} Examination Session\"\r\n\r\n audio_groups = kwargs.get(\"audio_groups\")\r\n if audio_groups is not None:\r\n \"\"\"\r\n audio_groups[0] = year\r\n audio_groups[1] = session\r\n audio_groups[2] = subject group number with hyphen (if applicable)\r\n audio_groups[3] = subject group number (if applicable)\r\n audio_groups[4] = subject group name (contains '\\\\audio' in some instances)\r\n audio_groups[5] = file name\r\n \"\"\"\r\n\r\n group_dir = \"Group 6 - The Arts\"\r\n\r\n fake_difficulty = kwargs.get(\"fake_difficulty\")\r\n if fake_difficulty is not None:\r\n difficulty = fake_difficulty\r\n else:\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"HL\")\r\n write_copy(file_source_path, original_file_name, audio_groups=audio_groups, fake_difficulty=\"SL\")\r\n return\r\n\r\n subject_dir = f\"Music_{difficulty}\"\r\n year_session_dir = f\"{audio_groups[0]} {audio_groups[1]} Examination Session\"\r\n year_session_dir = os.path.join(year_session_dir, \"audio\")\r\n\r\n if None not in [group_dir, subject_dir, year_session_dir]:\r\n new_filepath = os.path.join(abs_destination_directory, group_dir, subject_dir, year_session_dir,\r\n original_file_name)\r\n os.makedirs(os.path.dirname(new_filepath), exist_ok=True)\r\n shutil.copy(file_source_path, new_filepath)\r\n else:\r\n print(f\"CRITICAL ERROR: File had 'None' path attributes: {file_source_path}\")",
 "def test_clone_scenario(self):\n pass",
 "def _create_group_rules(self, group_object):\n\n for rule in ctx.node.properties['rules']:\n\n if 'src_group_id' in rule:\n\n if 'cidr_ip' in rule:\n raise NonRecoverableError(\n 'You need to pass either src_group_id OR cidr_ip.')\n\n if not group_object.vpc_id:\n src_group_object = self.get_resource()\n else:\n src_group_object = self._get_vpc_security_group_from_name(\n rule['src_group_id'])\n\n if not src_group_object:\n raise NonRecoverableError(\n 'Supplied src_group_id {0} doesn ot exist in '\n 'the given account.'.format(rule['src_group_id']))\n\n del rule['src_group_id']\n rule['src_group'] = src_group_object\n\n elif 'cidr_ip' not in rule:\n raise NonRecoverableError(\n 'You need to pass either src_group_id OR cidr_ip.')\n\n try:\n group_object.authorize(**rule)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n except Exception as e:\n self._delete_security_group(group_object.id)\n raise",
 "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in
res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group",
 "def add_to_group(self, group):\n\n if not self.in_group(group):\n self.secondary_groups.append(group)\n return self",
 "def create_subgroup_accounts(request_ctx, account_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response",
 "def test_grouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n\n assert n2.subject in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode",
 "def construct_relation_group(\n self,\n group_name,\n domain_type,\n range_type,\n group_members = None):\n if not group_members:\n group_members = sorted([\n rel for rel in self.get_relation_names()\n if self.get_domain(rel) == domain_type and\n self.get_range(rel) == range_type\n ])\n if self.is_type(group_name):\n raise RelationNameError(group_name, 'Group already exists.')\n\n self.declare_entity_type(\n group_name, fixed_vocab=group_members, unknown_marker=None)\n\n for r in group_members:\n if self.is_dense(r):\n raise ValueError('Dense relation %r is unsupported.' % r)\n\n group = RelationGroup(group_name, group_members)\n self._group[group_name] = group\n # declare the schema for the necessary extension to the KG\n self.declare_relation(group.relation_rel, group.triple_type, group_name)\n self.declare_relation(group.subject_rel, group.triple_type, domain_type)\n self.declare_relation(group.object_rel, group.triple_type, range_type)\n self.declare_relation(group.weight_rel, group.triple_type,\n group.triple_type)\n # relation i in this group has num_rows[i] rows\n try:\n num_rows = [self._np_initval[r].data.shape[0] for r in group.members]\n except KeyError as err:\n raise RelationNameError(\n str(err), 'An undefined relation was encountered.
'\n 'All relations in a relation group must be defined before calling '\n 'construct_relation_group.')\n total_num_rows = sum(num_rows)\n # names of all those triples\n self.extend_type(\n group.triple_type,\n [group.triple_prefix + str(i) for i in range(total_num_rows)])\n # now populate the sparse matrixes\n triple_indices = np.arange(total_num_rows, dtype='int32')\n rel_indices = np.hstack([\n np.ones(num_rows[i], dtype='int32') * i\n for i in range(len(group.members))\n ])\n subj_indices = np.hstack([self._np_initval[r].col for r in group.members])\n obj_indices = np.hstack([self._np_initval[r].row for r in group.members])\n weight_data = np.hstack([self._np_initval[r].data for r in group.members])\n ones_data = np.ones_like(weight_data)\n # weights are in a diagonal matrix\n self._np_initval[group.weight_rel] = scipy.sparse.coo_matrix(\n (weight_data, (triple_indices, triple_indices)),\n shape=(total_num_rows, total_num_rows),\n dtype='float32')\n self._np_initval[group.relation_rel] = scipy.sparse.coo_matrix(\n (weight_data, (rel_indices, triple_indices)),\n shape=(len(group.members), total_num_rows),\n dtype='float32')\n self._np_initval[group.subject_rel] = scipy.sparse.coo_matrix(\n (ones_data, (subj_indices, triple_indices)),\n shape=(self.get_max_id(domain_type), total_num_rows),\n dtype='float32')\n self._np_initval[group.object_rel] = scipy.sparse.coo_matrix(\n (ones_data, (obj_indices, triple_indices)),\n shape=(self.get_max_id(range_type), total_num_rows),\n dtype='float32')\n self.freeze(group.triple_type, unknown_marker=None)\n return group",
 "def test_merge_nooverlap(self):\n self.open_url('/group/list')\n \n # Sanity check\n el = self.wd.find_element(By.LINK_TEXT, \"Second Group\")\n el.click()\n time.sleep(0.5)\n \n self.assert_num_rows(1)\n \n self.open_url('/group/list')\n self.wd.find_element(By.ID, \"subnav-merge\").click()\n time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...\n self.assertEquals('Merge Group', self.wd.title)\n \n sel = Select(self.wd.find_element(By.ID, \"from_group_id\"))\n sel.select_by_visible_text(\"6th group\")\n \n sel = Select(self.wd.find_element(By.ID, \"to_group_id\"))\n sel.select_by_visible_text(\"Second Group\")\n \n self.submit_form(\"merge_form\")\n \n self.open_url('/group/list')\n self.assert_not_in_list_table(\"6th group\")\n \n el = self.wd.find_element(By.LINK_TEXT, \"Second Group\")\n el.click()\n \n self.assert_num_rows(3)",
 "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)",
 "def interactive_insert(group_name=None, package_name=None):\n file_name = \"\"\n file_destination = \"\"\n file_source = None\n file_create_link = False\n file_sudo = False\n file_comments = []\n\n def ask_file_source():\n return message.question(\"What is the full source file path?\")\n\n def ask_sudo():\n return message.question(\"Is sudo needed for this operation?\", \"boolean\")\n\n while True:\n file_source = None\n file_create_link = False\n file_sudo = False\n\n message.heading(\"Creating a new file.
(${vars} is supported, '~' is not)\")\n if group_name is not None:\n message.info(f\"Current group: {group_name}\")\n if package_name is not None:\n message.info(f\"Current package: {package_name}\")\n\n file_destination = message.question(\n \"Where will this file be (created/linked/copied) to? (no basename)\"\n )\n\n if message.question(\n \"Will this file be linked to [destination]?\", \"boolean\"\n ):\n file_create_link = True\n file_source = ask_file_source()\n file_sudo = ask_sudo()\n elif message.question(\n \"Will this file be copied to [destination]?\", \"boolean\"\n ):\n file_source = ask_file_source()\n file_sudo = ask_sudo()\n\n if file_source is not None:\n [_, file_name] = os.path.split(os.path.expandvars(file_source))\n else:\n file_name = message.question(\"What will be the file's name?\")\n\n if message.question(\n \"Will the file have comments to aid the user?\", \"boolean\"\n ):\n while True:\n comment = message.question(\"New comment:\")\n file_comments.append(comment)\n if not message.question(\"Add another comment?\", \"boolean\"):\n break\n\n new_file = File(\n file_name,\n file_destination,\n os.path.split(file_source)[0] if file_source is not None else None,\n \"\",\n file_create_link,\n file_sudo,\n file_comments,\n )\n\n new_file.evaluate()\n\n message.info(\n f\"\"\"File info:\n [Name]: '{new_file.name}'\n [Destination]: '{new_file.path_destination}'\n [Source]: '{new_file.path_source}'\n [Link?]: '{'Yes' if new_file.create_link else 'No'}'\n [Need superuser?]: '{'Yes' if new_file.sudo else 'No'}'\n [Comments]: {new_file.comments}\"\"\"\n )\n if message.question(\"Confirm?\", \"boolean\"):\n break\n\n return new_file",
 "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response",
 "def do_push_group(dbsync, group):\n pass",
 "def test_taskgroup_set(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1.set_downstream(group, Label(\"Group label\"))\n group.set_downstream(op4)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}",
 "def clone(self):\n return _libsbml.ListOfGroups_clone(self)",
 "def add_group(self, groupname, grouptitle, path_to_group='/'):\n self.open_db()\n group = self.group_exists(path_to_group, groupname)\n if group is False:\n group = self.h5file.create_group(path_to_group, groupname,\n grouptitle)\n return group",
 "def _merge_feedbackset_into(self, target):\n from devilry.devilry_group.models import FeedbackSet\n\n # Map feedbackset_type to merge prefix\n feedbackset_type_merge_map = {\n FeedbackSet.FEEDBACKSET_TYPE_FIRST_ATTEMPT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_FIRST_ATTEMPT,\n FeedbackSet.FEEDBACKSET_TYPE_NEW_ATTEMPT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_NEW_ATTEMPT,\n FeedbackSet.FEEDBACKSET_TYPE_RE_EDIT: FeedbackSet.FEEDBACKSET_TYPE_MERGE_RE_EDIT\n }\n\n feedbacksets =
self.feedbackset_set.order_by_deadline_datetime()\\\n .select_related('group__parentnode')\n\n for feedbackset in feedbacksets:\n # change feedbackset_type to merge prefix\n if feedbackset.feedbackset_type in list(feedbackset_type_merge_map.keys()):\n feedbackset.feedbackset_type = feedbackset_type_merge_map[feedbackset.feedbackset_type]\n feedbackset.group = target\n feedbackset.save()",
 "def grp(self, grpNode):\n\t\tself._grp = grpNode",
 "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()",
 "def test_groups_group_ref_put(self):\n pass",
 "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)",
 "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response",
 "def test_duplicate_groups(self):\n\n auag = UsersAndGroups()\n\n # create a duplicate with default flag to raise an error.\n auag.add_group(Group(name=\"group1\"))\n with self.assertRaises(Exception):\n auag.add_group(Group(name=\"group1\"))\n\n # create with overwrite.\n auag.add_group(\n Group(name=\"group2\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group2\")\n self.assertEqual(u.name, \"group2\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_group(\n Group(name=\"group2\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group2\")\n self.assertEqual(u.name, \"group2\")\n self.assertEqual(u.groupNames, [\"group3\"])\n\n # create with update.\n auag.add_group(\n Group(name=\"group3\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group3\")\n self.assertEqual(u.name, \"group3\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_group(\n Group(name=\"group3\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.UPDATE_ON_DUPLICATE,\n )\n u = auag.get_group(\"group3\")\n self.assertEqual(u.groupNames, [\"group2\", \"group3\"])",
 "def test_rsync_set_group(self):\n \n root = tempfile.mkdtemp(prefix=\"rsync_test_set_group_\")\n avail_groups = os.getgroups()\n exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]\n \n # Create some files to move\n to_copy = self._create_test_files(root)\n \n # Run rsync\n with open(os.devnull, 'w') as f:\n old_stdout = sys.stdout\n sys.stdout = f\n rsync_files(to_copy,sys.stdout,exp_group,False)\n sys.stdout = old_stdout\n \n # Verify the copy process set the correct group on created directories\n for ddir in set([d[1] for d in to_copy]):\n gid = os.stat(ddir).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on directory. Group is {}\".format(exp_group,\n obs_group))\n \n # Verify the copy process set the correct group\n for src, ddir, dname in to_copy:\n dfile = os.path.join(ddir,dname)\n gid = os.stat(dfile).st_gid\n obs_group = grp.getgrgid(gid)[0]\n self.assertEqual(obs_group,\n exp_group,\n \"Failed to set group '{}' on file.
Group is {}\".format(exp_group,\n obs_group))", "def test_drag_into_different_group(self):\r\n expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},\r\n {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]},\r\n {self.group_b: [self.group_b_item_2]},\r\n {self.group_empty: []}]\r\n self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering)", "def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))", "def _internal_copy(source, source_path, target, target_path, maintain_flag):\n if maintain_flag:\n try:\n target.create_group(target_path)\n except ValueError:\n pass # In case the copy_to() function failed previously and the group already exists.\n\n if target_path == \"/\":\n source.copy(target_path, \"/\") if source == target else source.copy(\n target_path, target\n )\n else:\n if maintain_flag:\n if dest_path != \"\":\n source.copy(source_path, target[dest_path])\n else:\n source.copy(source_path, target)\n else:\n group_name_old = source_path.split(\"/\")[-1]\n try:\n target.create_group(\"/tmp\")\n except ValueError:\n pass\n source.copy(source_path, target[\"/tmp\"])\n try:\n target.move(\"/tmp/\" + group_name_old, target_path)\n except ValueError:\n del target[dest_path]\n target.move(\"/tmp/\" + group_name_old, target_path)\n del target[\"/tmp\"]" ]
[ "0.5874123", "0.5813369", "0.57389224", "0.5706337", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400297", "0.5225952", "0.5201779", "0.5191769", "0.5140609", "0.50881827", "0.50875765", "0.50842255", "0.50807875", "0.5079402", "0.5074019", "0.50512403", "0.5050658", "0.50449747", "0.50314426", "0.501575", "0.5015151", "0.5008633", "0.5008148", "0.50077254", "0.49875265", "0.49526322", "0.49441025", "0.49416566", "0.49363503", "0.49342582", "0.49342582", "0.4924192", "0.4912886", "0.49026668", "0.49015865", "0.49010867", "0.48911935", "0.4890151", "0.48841816", "0.48499066", "0.4844977", "0.4841554", "0.48306534", "0.48294988", "0.4825032", "0.48163125", "0.48045567", "0.4800212", "0.47978723", "0.47894487", "0.47880772", "0.4760176", "0.47558516", "0.47499806", "0.47430947", "0.47366083", "0.4732884", "0.47253084", "0.47247282", "0.47220522", "0.4721433", "0.47198492", "0.47170034", "0.47099406", "0.470273", "0.46990228", "0.46927205", "0.46876273", "0.46869034", "0.46855444", "0.46782595", "0.46760097", "0.4666705", "0.46601993", "0.4642347", "0.46380064", "0.46338528", "0.46334374", "0.46315676", "0.4629545", "0.46279424", "0.46268773", "0.4621315", "0.46150658" ]
0.5898711
0
Parse challenge from a challenge response, cache it, and return it.
def _update_challenge(request: PipelineRequest, challenger: "PipelineResponse") -> HttpChallenge:
    challenge = HttpChallenge(
        request.http_request.url,
        challenger.http_response.headers.get("WWW-Authenticate"),
        response_headers=challenger.http_response.headers,
    )
    ChallengeCache.set_challenge_for_url(request.http_request.url, challenge)
    return challenge
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_challenge(cls, response):\n links = _parse_header_links(response)\n try:\n authzr_uri = links['up']['url']\n except KeyError:\n raise errors.ClientError('\"up\" link missing')\n return (\n response.json()\n .addCallback(\n lambda body: messages.ChallengeResource(\n authzr_uri=authzr_uri,\n body=messages.ChallengeBody.from_json(body)))\n )", "def solve_challenge():\n\treturn (challenge[0]*challenge[1]-challenge[2]) * challenge[3] - challenge[4]", "def _parse_challenge(header):\n # type: (str) -> Dict[str, str]\n ret = {}\n if header.startswith(BEARER):\n challenge_params = header[len(BEARER) + 1 :]\n\n matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)\n _clean(matches)\n ret = {}\n for i in range(0, len(matches), 2):\n ret[matches[i]] = matches[i + 1]\n\n return ret", "def create_challenge_response(\n self,\n room_code: str,\n challenge_response: ChallengeResponse,\n ) -> GameInfo:\n game = self.read_game(room_code)\n\n if game.challenge is None:\n msg = f\"No challenge exists on game {room_code!r}\"\n raise InvalidMove(msg)\n if game.challenge.state != ChallengeState.AWAITING_RESPONSE:\n state = game.challenge.state.value\n msg = f\"Challenge is in {state!r} state, not 'AWAITING_RESPONSE'\"\n raise InvalidMove(msg)\n\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set challenge.#chalresp=:r, challenge.#chalstate=:s\"),\n ExpressionAttributeValues={\n \":r\": challenge_response.dict(),\n \":s\": ChallengeState.VOTING,\n },\n ExpressionAttributeNames={\n # \"response\" and \"state\" are reserved words\n \"#chalstate\": \"state\",\n \"#chalresp\": \"response\",\n },\n ConditionExpression=Attr(\"challenge\").eq(game.dict()[\"challenge\"]),\n )\n\n return self.read_game(room_code)", "def get_challenge(self, obj):\n return obj.challenge_phase.challenge", "def parse(self, response):\n if self._has_captcha(response):\n result = self._handle_captcha(response, self.parse)\n else:\n result = super(AmazonBaseClass, self).parse(response)\n\n return result", "def read(challenge):\n\n data = {\n 'id': challenge.id,\n 'name': challenge.name,\n 'value': challenge.value,\n 'description': \"This challenge has not been unlocked yet. 
You need at least {} points to play.\".format(challenge.unlock_at),\n 'category': challenge.category,\n 'hidden': challenge.hidden,\n 'max_attempts': challenge.max_attempts,\n 'unlock_at': challenge.unlock_at,\n 'locked': True,\n 'type': challenge.type,\n 'type_data': {\n 'id': CTFdLockingChallenge.id,\n 'name': CTFdLockingChallenge.name,\n 'templates': CTFdLockingChallenge.templates,\n 'scripts': CTFdLockingChallenge.scripts,\n },\n }\n\n if session.get('admin') or not locked(challenge):\n data['locked'] = False\n data['description'] = str(challenge.description)\n\n return challenge, data",
 "def decode(self, response, request):\n log.debug(\"Decoding authorization.\")\n auth = self._parseAuth(response)\n try:\n self._verifyChallenge(auth[\"challenge\"], request)\n creds = self.buildCredentials(auth, request)\n except KeyError, ke:\n raise LoginFailed(\"{0!r} not in authorization\".format(*ke.args))\n except LoginFailed, lf:\n log.warn(lf)\n raise\n log.debug(\"Decoded credentials: {0}\".format(creds))\n return creds",
 "def get(self):\n try:\n imageFilename = random.choice(os.listdir(self.cacheDir))\n imagePath = os.path.join(self.cacheDir, imageFilename)\n with open(imagePath) as imageFile:\n self.image = imageFile.read()\n except IndexError:\n raise GimpCaptchaError(\"CAPTCHA cache dir appears empty: %r\"\n % self.cacheDir)\n except (OSError, IOError):\n raise GimpCaptchaError(\"Could not read Gimp captcha image file: %r\"\n % imageFilename)\n\n self.answer = imageFilename.rsplit(os.path.extsep, 1)[0]\n self.challenge = self.createChallenge(self.answer)\n\n return (self.image, self.challenge)",
 "def get_challenge(self, obj):\n return obj.challenge_phase_split.challenge_phase.challenge",
 "def create_challenge(\n self,\n room_code: str,\n challenge: NewChallenge,\n ) -> GameInfo:\n game = self.read_game(room_code)\n\n if game.challenge is not None:\n raise InvalidMove(f\"Game {room_code!r} already has an open challenge\")\n\n if challenge.challenger_name not in [player.name for player in game.players]:\n msg = f\"Player {challenge.challenger_name!r} not in game {room_code!r}\"\n raise InvalidMove(msg)\n\n if (len(game.moves) == 0) or (challenge.move != game.moves[-1]):\n raise InvalidMove(\"Can only challenge the most recent move\")\n\n initial_state = (\n ChallengeState.AWAITING_RESPONSE\n if challenge.type is ChallengeType.NO_VALID_WORDS\n else ChallengeState.VOTING\n )\n\n game_challenge = Challenge(\n challenger_name=challenge.challenger_name,\n move=challenge.move,\n type=challenge.type,\n state=initial_state,\n response=None,\n votes=[],\n )\n\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set challenge=:c\"),\n ExpressionAttributeValues={\":c\": game_challenge.dict()},\n ConditionExpression=Attr(\"challenge\").eq(None),\n )\n\n self._advance_turn(game)\n\n return self.read_game(room_code)",
 "def _parse_response(response):\n m = re.match(r\"^(?P<alias>[^\\s]*)\\s+(?P<resp>.*)$\", response)\n return m.group('alias'), m.group('resp')",
 "def get_response_from_cache(responsefile):\n global __response_cache\n\n if responsefile not in __response_cache:\n return\n\n if not goodfile(responsefile):\n try:\n del __response_cache[responsefile]\n except KeyError: # pragma: no cover\n pass\n return\n\n modtime = str(os.path.getmtime(responsefile))\n if modtime not in __response_cache.get(responsefile, {}):\n return\n\n log.debug(\"Retrieving data from response file (%s) in cache\" %\n responsefile)\n return __response_cache.get(responsefile,
{}).get(modtime)",
 "def generate_response(self, challenge, name):\n response_plain = challenge.identifier + self.secret + challenge.value\n response_hashed = hashlib.sha1(response_plain)\n response_obj = Response(challenge.identifier, response_hashed, name)\n return response_obj",
 "def parse_response(self, buffer):\n # Begin by copying the data out of the buffer. This is necessary\n # because as much as possible we want to use the built-in bytestring\n # methods, rather than looping over the data in Python.\n temp_buffer = buffer.tobytes()\n\n index = temp_buffer.find(b'\\n')\n if index == -1:\n return None\n\n version, status, reason = (\n temp_buffer[0:index].split(None, 2) + [b''])[:3]\n if not version.startswith(b'HTTP/1.'):\n raise ParseError(\"Not HTTP/1.X!\")\n\n minor_version = int(version[7:])\n status = int(status)\n reason = memoryview(reason.strip())\n\n # Chomp the newline.\n index += 1\n\n # Now, parse the headers out.\n end_index = index\n headers = []\n\n while True:\n end_index = temp_buffer.find(b'\\n', index)\n if end_index == -1:\n return None\n elif (end_index - index) <= 1:\n # Chomp the newline\n end_index += 1\n break\n\n name, value = temp_buffer[index:end_index].split(b':', 1)\n value = value.strip()\n headers.append((memoryview(name), memoryview(value)))\n index = end_index + 1\n\n resp = Response(status, reason, minor_version, headers, end_index)\n return resp",
 "async def get_response(self, key: str) -> Optional[CachedResponse]:\n # Attempt to fetch response from the cache\n logger.debug(f'Attempting to get cached response for key: {key}')\n try:\n if not await self.responses.contains(key):\n key = str(await self.redirects.read(key))\n response = await self.responses.read(key)\n except (KeyError, TypeError):\n logger.debug('No cached response found')\n return None\n if not isinstance(response, CachedResponse):\n logger.debug('Cached response is invalid')\n return None\n # If the item is expired or filtered out, delete it from the cache\n if not self.is_cacheable(response):\n logger.info('Cached response expired; deleting')\n await self.delete(key)\n return None\n\n # Optionally update last_used time\n if self.lru:\n response.last_used = datetime.utcnow()\n await self.responses.write(key, response)\n\n logger.info(f'Cached response found for key: {key}')\n return response",
 "def _parse_response(self, response):\n if response is not None:\n return response.string\n return response",
 "def createChallenge(self, answer):\n timestamp = str(int(time.time())).zfill(12)\n blob = timestamp + answer\n encBlob = self.publicKey.encrypt(blob)\n hmac = crypto.getHMAC(self.hmacKey, encBlob)\n challenge = urlsafe_b64encode(hmac + encBlob)\n return challenge",
 "def answer_challenge(self, challenge_body, response):\n action = LOG_ACME_ANSWER_CHALLENGE(\n challenge_body=challenge_body, response=response)\n with action.context():\n return (\n DeferredContext(\n self._client.post(challenge_body.uri, response))\n .addCallback(self._parse_challenge)\n .addCallback(self._check_challenge, challenge_body)\n .addCallback(\n tap(lambda c:\n action.add_success_fields(challenge_resource=c)))\n .addActionFinish())",
 "def check_response(self, challenge, response):\n if challenge is not None:\n expected_response = challenge.identifier + self.secret + challenge.challenge\n expected_response_hashed = hashlib.sha1(expected_response)\n if expected_response_hashed == response.response_hash:\n return True\n else:\n return False\n else:\n raise Exception",
 "async def on_challenge_update(self,
challenge_data):\n pass",
 "async def parse_handle_response(self, json_response):\n try:\n vasp = self.vasp\n other_key = vasp.info_context.get_peer_compliance_verification_key(\n self.other_address_str\n )\n message = await other_key.verify_message(json_response)\n response = json.loads(message)\n response = CommandResponseObject.from_json_data_dict(\n response, JSONFlag.NET\n )\n\n return self.handle_response(response)\n\n except OffChainInvalidSignature as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'Signature verification failed. OffChainInvalidSignature: {e}'\n )\n raise e\n except JSONParsingError as e:\n logger.warning(\n f'(other:{self.other_address_str}) JSONParsingError: {e}'\n )\n raise e\n except OffChainException or OffChainProtocolError as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'OffChainException/OffChainProtocolError: {e}',\n )\n raise e",
 "def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response",
 "def ping_challenge(self):\n return self._ping_data_raw['challenge']",
 "def challenge_response(self, challenge_response):\n\n self._challenge_response = challenge_response",
 "def parse_answers(dns_resp: str, session_cache):\n\n ID = dns_resp[:4]\n other_flags = dns_resp[4:8]\n questions_count = dns_resp[8:12]\n answers_count = dns_resp[12:16]\n auth_serv_info = dns_resp[16:20]\n additional_info = dns_resp[20:24]\n offset = 0\n ip = \"0.0.0.0\"\n\n # several answers may arrive; extract the needed records from each\n for i in range(int(answers_count, 16)):\n try:\n ip, offset = DNSHandler.parse_answer(dns_resp, session_cache, offset=offset * i)\n except ValueError:\n print(\"url does not exist\")\n sys.exit(0)\n return ip",
 "def _process_response(self, response):\n\n self.log.debug(\"Received Response: %r\", response)\n\n return self.token_manager.process_response(response)",
 "def parse_line(cls, line):\n assert isinstance(line, bytes)\n\n match = HEADER_FIELD_REGEX.match(line)\n\n if not match:\n raise HeaderParseError(line)\n\n name, content = (s.decode(\"ascii\").strip() for s in match.groups(b\"\"))\n name = name.lower()\n\n if name != \"set-cookie\" or is_rfc1123_datetime(content):\n content =
cls.split_field_content(content)\n\n return (name, content)", "def get_h_parser(*, allow_cache=True):\n\n # Singleton pattern\n global _parser\n if _parser and allow_cache:\n return _parser\n\n source = _get_wgpu_header(\n os.path.join(lib_dir, \"resources\", \"webgpu.h\"),\n os.path.join(lib_dir, \"resources\", \"wgpu.h\"),\n )\n\n # Create parser\n hp = HParser(source)\n hp.parse()\n _parser = hp\n return hp", "async def _handle_challenge_request(self, split_message: List[str]) -> None:\n challenging_player = split_message[2].strip()\n\n if challenging_player != self.username:\n if len(split_message) >= 6:\n if split_message[5] == self._format:\n await self._challenge_queue.put(challenging_player)", "def _parse_cookie(self, request, response, cookie_header_value):\n try:\n # Note to self: This line may print some chars to the console\n return parse_cookie(cookie_header_value)\n except Cookie.CookieError:\n desc = 'The remote Web application sent a cookie with an' \\\n ' incorrect format: \"%s\" that does NOT respect the RFC.'\n desc = desc % cookie_header_value\n\n i = CookieInfo('Invalid cookie', desc, response.id, self.get_name())\n i.set_url(response.get_url())\n i.set_cookie_string(cookie_header_value)\n\n # The cookie is invalid, this is worth mentioning ;)\n kb.kb.append(self, 'invalid-cookies', i)\n return None", "def parse(self, response=None):\n if response:\n return ParseResponse(response)\n else:\n return ParseResponse(self.last_response)", "def cn_unpack_rsp(cls, rsp_str):\n return cls.hk_unpack_rsp(rsp_str)", "def cn_unpack_rsp(cls, rsp_str):\n return cls.hk_unpack_rsp(rsp_str)", "def cn_unpack_rsp(cls, rsp_str):\n return cls.hk_unpack_rsp(rsp_str)", "def cn_unpack_rsp(cls, rsp_str):\n return cls.hk_unpack_rsp(rsp_str)", "def cn_unpack_rsp(cls, rsp_str):\n return cls.hk_unpack_rsp(rsp_str)", "def cn_unpack_rsp(cls, rsp_str):\n return cls.hk_unpack_rsp(rsp_str)", "def cn_unpack_rsp(cls, rsp_str):\n return cls.hk_unpack_rsp(rsp_str)", "def _login_challenge(self):\n headers, items = self._get('/login', {\n 'dbus': 'AUTH DBUS_COOKIE_SHA1 %s' % self.username\n })\n\n if headers.get('request_result') != 'success':\n raise ApiException(\"Failed receiving challenge\")\n\n return items[0].get('dbus').split(' ')[-1]", "def parse_response(response):\n return json.loads(response.read()[MAGIC_PREFIX_OFFSET:])", "def challenge(self, challenge, mode='HMAC', slot=1, variable=True, may_block=True):\n pass", "def cache_response(hash_v, response, show_progress=True):\n f_name = os.path.join(CACHE_DIR, hash_v)\n if os.path.isfile(f_name):\n response = open(f_name).read()\n return response\n elif response is not None:\n with open(f_name, \"w+\") as out_f:\n if show_progress:\n response = tqdm.tqdm(response)\n out_f.write(\"\".join(response))\n return open(f_name).read()", "def challenge(self, challenge):\n\n self._challenge = challenge", "def generate_challenge(self):\n return None", "def _get_options(self, *, cache_key: str) -> Union[PublicKeyCredentialRequestOptions, None]:\n options: str = self.redis.retrieve(key=cache_key)\n if options is None:\n return options\n\n # We can't use PublicKeyCredentialRequestOptions.parse_raw() because\n # json_loads_base64url_to_bytes() doesn't know to convert these few values to bytes, so we\n # have to do it manually\n options_json: dict = json_loads_base64url_to_bytes(options)\n options_json[\"challenge\"] = base64url_to_bytes(options_json[\"challenge\"])\n options_json[\"allowCredentials\"] = [\n {**cred, \"id\": base64url_to_bytes(cred[\"id\"])}\n for 
cred in options_json[\"allowCredentials\"]\n ]\n\n return PublicKeyCredentialRequestOptions.parse_obj(options_json)", "def parse_response(self, raw_response):\n \n parsed_response = {\n 'success': False,\n 'raw_response': raw_response,\n }\n \n # Try to make sense of the response status\n try:\n status, msg = raw_response.split('\\r\\n')\n parsed_response['success'] = status == 'OK'\n parsed_response['message'] = msg\n except:\n msg = None\n \n # Try to parse the message ID\n try:\n key, val = msg.split('=')\n parsed_response[key] = val\n except:\n pass\n \n return parsed_response", "def __parse_response(source: str, response_data):\n lower_source = source.lower()\n if lower_source == 'cortx':\n return json.loads(response_data['Body'].read().decode('utf-8'))\n elif lower_source == 'fhir':\n return response_data.as_json()\n else:\n abort(400, 'Unknown source - only CORTX and FHIR available')", "def handle_response(response: Response) -> dict:\n try:\n response_data = response.json()\n except json.decoder.JSONDecodeError as err:\n raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=err)\n\n if response.status_code in SUCCESS_RESPONSE_CODES:\n return clean(response_data)\n\n if response.status_code == 401:\n raise ConnectionTestException(preset=PluginException.Preset.USERNAME_PASSWORD)\n if response.status_code == 403:\n raise PluginException(preset=PluginException.Preset.API_KEY, data=response.text)\n if response.status_code == 429:\n raise PluginException(preset=PluginException.Preset.RATE_LIMIT, data=response.text)\n if response.status_code >= 500:\n raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=response.text)\n\n raise PluginException(cause=response_data[\"description\"], assistance=response_data[\"message\"])", "def _verifyChallenge(self, challenge, request):\n log.debug(\"Verifying challenge: {0}\".format(challenge))\n try:\n signature, encoded = challenge.split(self.sep)\n raw = encoded.decode(\"base64\")\n realm, clientIP, sigTime, seed = raw.split(self.sep)\n except ValueError:\n raise LoginFailed(\"Invalid challenge value\")\n if not self._verify(signature, raw):\n raise LoginFailed(\"Invalid signature\")\n if realm != self.realm:\n raise LoginFailed(\"Incorrect realm\")\n if self._timeExpired(sigTime):\n raise LoginFailed(\"Session expired\")\n if clientIP != (request.getClientIP() or \"0.0.0.0\"):\n raise LoginFailed(\"Incorrect client\")\n return True", "def get_challenge(email, sid):\n params = {'email_address': email, 'assignment_part_sid': sid, 'response_encoding': 'delim'}\n\n challenge_url = '%s%schallenge' % (protocol, base_url)\n data = urllib.parse.urlencode(params).encode('utf-8')\n req = urllib.request.Request(challenge_url, data)\n resp = urllib.request.urlopen(req)\n text = resp.readall().decode('utf-8').strip().split('|')\n\n if len(text) != 9:\n print(' !! 
%s' % '|'.join(text))\n sys.exit(1)\n \n return tuple(text[x] for x in [2,4,6,8])", "def parse_response(response):\n LOGGER.debug('Parsing WSAPI response')\n if isinstance(response, basestring):\n response = response.splitlines()\n\n data = {}\n for line in response:\n try:\n key, value = line.split('=', 1)\n data[key] = value.strip()\n except ValueError:\n # Skip empty lines and lines that aren't valid results\n pass\n\n LOGGER.debug('Parser got ' + str(data))\n return data", "def parse_cookies( headers ):", "def get_captcha_challenge(http_body, \n captcha_base_url='http://www.google.com/accounts/'):\n contains_captcha_challenge = False\n captcha_parameters = {}\n for response_line in http_body.splitlines():\n if response_line.startswith('Error=CaptchaRequired'):\n contains_captcha_challenge = True\n elif response_line.startswith('CaptchaToken='):\n # Strip off the leading CaptchaToken=\n captcha_parameters['token'] = response_line[13:]\n elif response_line.startswith('CaptchaUrl='):\n captcha_parameters['url'] = '%s%s' % (captcha_base_url,\n response_line[11:])\n if contains_captcha_challenge:\n return captcha_parameters\n else:\n return None", "def _handle_response(response: str) -> Result:\n logger.debug('SoapService - _handle_response(response: {})'.format(response))\n result = Result()\n parsed_response = xmltodict.parse(response)['xmlreply']\n response_result = parsed_response['messages']['result']\n\n if response_result == 'OK':\n refno = parsed_response['apmdata']['prospect']['p.cm']['refno']\n result.data = {'Refno': refno}\n result.status = True\n elif response_result == 'Error':\n errors = parsed_response['messages']['error'] if 'error' in parsed_response['messages'] else None # type: list\n result.status = False\n\n return result", "def _parse_response(request_state, response):\n # type: (str, Mapping[str, Any]) -> List[str]\n\n if \"error\" in response:\n message = \"Authentication failed: {}\".format(response.get(\"error_description\") or response[\"error\"])\n raise ClientAuthenticationError(message=message)\n if \"code\" not in response:\n # a response with no error or code is malformed; we don't know what to do with it\n message = \"Authentication server didn't send an authorization code\"\n raise ClientAuthenticationError(message=message)\n\n # response must include the state sent in the auth request\n if \"state\" not in response:\n raise ClientAuthenticationError(message=\"Authentication response doesn't include OAuth state\")\n if response[\"state\"][0] != request_state:\n raise ClientAuthenticationError(message=\"Authentication response's OAuth state doesn't match the request's\")\n\n return response[\"code\"]", "def challenge_view(self, request):\n headerlist = [(\"Content-Type\", \"text/plain\")]\n headerlist.extend(self._get_challenge_headers(request))\n return Response(\"Unauthorized\", status=\"401 Unauthorized\",\n headerlist=headerlist)", "def fetch_har(self):\n har = ''\n retries = 30\n time.sleep(5)\n if self.remote is True:\n har = self._fetch_remote_har()\n else:\n self.log_output('Retrieving Local HAR file')\n for _ in range(retries):\n if os.path.exists(self.har_path):\n break\n time.sleep(1)\n har = open(self.har_path, 'r').read()\n return json.loads(har)", "def _decode_ocsp_response_cache(ocsp_response_cache_json, ocsp_response_cache):\n current_time = int(time.time())\n for cert_id_base64, (ts, ocsp_response) in ocsp_response_cache_json.items():\n cert_id, _ = der_decoder.decode(b64decode(cert_id_base64), CertID())\n hkey = 
_decode_cert_id_key(cert_id)\n if ts - CACHE_EXPIRATION <= current_time <= ts + CACHE_EXPIRATION:\n ocsp_response_cache[hkey] = (ts, b64decode(ocsp_response))\n elif hkey in ocsp_response_cache:\n # invalidate the cache if exists\n del ocsp_response_cache[hkey]", "def _parse_take_response(self, response: str) -> Optional[Package]:\n for source_package, descriptor in yaml.safe_load(response)[0].items():\n data = {} # type: Dict[str, str]\n for elem in descriptor:\n for k, v in elem.items():\n data[k] = v\n break\n if data['status'] != 'ok':\n return None\n return Package(self, source_package, data)", "def solvePasswordChallenge(password,challenge,nonce):\n\t\n\tdata = (password +str(challenge) + str(nonce)).encode(\"utf8\")\n\treturn hash(data= data)", "def parse_line(cls, line):\n regex = re.compile(cls.pattern)\n m = regex.search(line)\n if m:\n data = m.groupdict()\n data = cls.post_process(data)\n if cls.date_format:\n data['time'] = cls.convert_time(data['time'])\n else:\n data['time'] = datetime.now()\n return data\n else:\n return {}", "def parse_response(self, response):\n try:\n response = json.loads(response)\n if 'error' in response:\n if 'message' in response['error']:\n raise self.CMoreError(response['error']['message'])\n elif 'description' in response['error']:\n raise self.CMoreError(response['error']['description'])\n elif 'code' in response['error']:\n raise self.CMoreError(response['error']['error'])\n\n except ValueError: # when response is not in json\n pass\n\n return response", "def _parse_certificate(cls, response):\n links = _parse_header_links(response)\n try:\n cert_chain_uri = links[u'up'][u'url']\n except KeyError:\n cert_chain_uri = None\n return (\n response.content()\n .addCallback(\n lambda body: messages.CertificateResource(\n uri=cls._maybe_location(response),\n cert_chain_uri=cert_chain_uri,\n body=body))\n )", "def _parse_result(self, result, *, verbose=False, **kwargs):\n return get_fermilat_datafile(result)", "def _check_challenge(cls, challenge, challenge_body):\n if challenge.uri != challenge_body.uri:\n raise errors.UnexpectedUpdate(challenge.uri)\n return challenge", "def _get_challenge_headers(self, request, check_stale=True):\n params = {}\n params[\"realm\"] = self.realm\n if self.domain is not None:\n params[\"domain\"] = self.domain\n # Escape any special characters in those values, so we can send\n # them as quoted-strings. 
The extra values added below are under\n # our control so we know they don't contain quotes.\n for key, value in params.iteritems():\n params[key] = value.replace('\"', '\\\\\"')\n # Add a fresh set of challenge parameters.\n params.update(self._get_challenge_params(request))\n # Mark the nonce as stale if told so by the environment.\n if check_stale and request.environ.get(_ENVKEY_STALE_NONCE):\n params[\"stale\"] = \"TRUE\"\n # Construct the final header as quoted-string k/v pairs.\n value = \", \".join('%s=\"%s\"' % itm for itm in params.iteritems())\n return [(\"WWW-Authenticate\", \"SRP-HMAC \" + value)]", "def get_data(request_meta, hash_result=True):\n\n hash_table_ref = read_pickle_data()\n\n # Traverse the hash key structure to find data\n # @TODO rather than iterate through REQUEST_META_BASE &\n # REQUEST_META_QUERY_STR look only at existing attributes\n\n logging.debug(__name__ + \" - Attempting to pull data for request \" \\\n \"COHORT {0}, METRIC {1}\".\n format(request_meta.cohort_expr, request_meta.metric))\n\n key_sig = build_key_signature(request_meta, hash_result=hash_result)\n item = find_item(hash_table_ref, key_sig)\n\n if item:\n # item[0] will be a stringified structure that\n # is initialized, see set_data.\n try:\n return eval(item[0])\n except SyntaxError:\n logging.error(__name__ + ' :: Failed to retrieve {0}'.\n format(key_sig))\n return None\n else:\n return None", "async def parse(self, raw: str) -> dict:", "def parse_response(response):\n # a result should always have a status\n status = response['status']\n\n # a result _may_ have a results or a reason\n result = response.get('results', [])\n reason = response.get('reason', None)\n\n return status, result, reason", "def process_request(self, request):\n match = self._match_request(request)\n try:\n (version, method, subject_id) = match\n except TypeError:\n # Trying to unpack None raises this exception\n return None\n\n self._stash_request_info(request, subject_id, method, version)\n\n if request.method != 'GET' or not self.cache.is_cached(subject_id):\n return None\n method = getattr(self, '_get_%s_subject_metadata' % version)\n subject_metadata = method(request, subject_id)\n\n # Deactivated subjects shall not be served from cache\n if subject_metadata['status'] == 'deactivated':\n return None\n\n try:\n self._enforce(request, 'download_subject', target=subject_metadata)\n except exception.Forbidden:\n return None\n\n LOG.debug(\"Cache hit for subject '%s'\", subject_id)\n subject_iterator = self.get_from_cache(subject_id)\n method = getattr(self, '_process_%s_request' % version)\n\n try:\n return method(request, subject_id, subject_iterator, subject_metadata)\n except exception.SubjectNotFound:\n msg = _LE(\"Subject cache contained subject file for subject '%s', \"\n \"however the registry did not contain metadata for \"\n \"that subject!\") % subject_id\n LOG.error(msg)\n self.cache.delete_cached_subject(subject_id)", "def parse_response(self, response, case):\n request = response.request\n parsed = {\n 'request': {\n 'method': request.method,\n 'url': request.url,\n 'body': request.body,\n },\n 'response': {\n 'headers': OrderedDict(),\n 'status_code': response.status_code,\n 'reason': response.reason,\n }\n }\n\n # Re-assemble request line\n url_parts = urlparse(request.url)\n parsed['request']['request_line'] = '%s %s%s%s HTTP/1.1' % (\n request.method, url_parts.path, '?' 
if url_parts.query else '',\n url_parts.query)\n\n # Process request headers\n if self.mode == 'display':\n hostname = url_parts.hostname\n else:\n hostname = self.doc_hostname\n parsed['request']['headers'] = OrderedDict((('Host', hostname),))\n for header in sorted([h.title() for h in request.headers]):\n raw_value = request.headers[header]\n value = self.parse_header(header, raw_value, 'request')\n if value:\n parsed['request']['headers'][header.title()] = value\n\n # Re-assemble response line\n parsed['response']['response_line'] = 'HTTP/1.1 %s %s' % (\n response.status_code, response.reason)\n\n # Process response headers\n for header in sorted([h.title() for h in response.headers]):\n raw_value = response.headers[header]\n value = self.parse_header(header, raw_value, 'response')\n if value:\n fixed_header = header.title().replace('Www', 'WWW')\n parsed['response']['headers'][fixed_header] = value\n\n # Process response body\n response.encoding = 'utf-8'\n body = response.text\n if self.standardize:\n body = body.replace(api, self.doc_base_url)\n for key, value in case.get('standardize', {}).items():\n assert key in ('created', 'modified', 'date')\n pattern = r\"\"\"(?x)(?s) # Be verbose, . include newlines\n \"%s\":\\s\" # Key and quote\n \\d{4}-\\d{2}-\\d{2} # Date\n T\\d{2}:\\d{2}:\\d{2} # Time\n \\.\\d{0,6}Z # Microseconds and UTC timezone\n \", # End quote and comma\n \"\"\" % key\n replace = '\"%s\": \"%s\",' % (key, value)\n body = re.sub(pattern, replace, body)\n parsed['response']['body'] = body\n\n return parsed", "def process_response(self, response):\n return response", "def check_detailed(secret,\n response,\n remote_ip=None,\n check_url=DEFAULT_RECAPTCHA_CHECK_URL):\n check_data = {\n 'secret': secret,\n 'response': response}\n if remote_ip:\n check_data['remoteip'] = remote_ip\n reply = requests.post(check_url, check_data).json()\n result = {\n 'success': reply['success'],\n 'timestamp': parse_date(reply['challenge_ts']),\n 'hostname': reply['hostname'],\n }\n if 'error-codes' in reply:\n result['error'] = reply['error-codes']\n return result", "def parse_cache_2_get_result_info(self, cropped_imgs):\n res_info = copy.deepcopy(self.res_info_dict)\n\n # +++ students names\n _students = self.img_to_text(cropped_imgs[\"students\"])\n students = _students.split(\"\\n\")\n st_cnt = len(students)\n if st_cnt > 3 or st_cnt < 1:\n # print(_students, students)\n _error_msg = \"Parsing Students Failed: \" \\\n \"too Many/Few Items (expected 0~3, got %d)\" % st_cnt\n raise ParsingError(_error_msg)\n try:\n res_info[\"student1\"] = students[0]\n res_info[\"student2\"] = students[1]\n res_info[\"student3\"] = students[2]\n except IndexError:\n pass\n\n # +++ advisor name\n advisor = self.img_to_text(cropped_imgs[\"advisor\"]).replace(\"\\n\", \" \")\n advisor = re.sub(\" {2,}\", \" \", advisor)\n res_info[\"advisor\"] = advisor\n\n # +++ advisor type\n _advisor_type = self.img_to_text(cropped_imgs[\"advisor_type\"])\n if not _advisor_type:\n advisor_type = None\n elif \"Faculty\" in _advisor_type:\n advisor_type = \"Faculty\"\n elif \"Student\" in _advisor_type:\n advisor_type = \"Faculty\"\n else:\n try:\n advisor_type = _advisor_type.split()[1]\n except IndexError as err:\n self.logger.warning(\"[ERROR] Parsing Advisor Type Failed: %s\" % err)\n advisor_type = None\n res_info[\"advisor_type\"] = advisor_type\n\n # +++ school\n school = self.img_to_text(cropped_imgs[\"school\"]).replace(\"\\n\", \" \")\n school = re.sub(\" {2,}\", \" \", school)\n res_info[\"school\"] = school\n\n # 
+++ prize\n prize = self.img_to_text(cropped_imgs[\"prize\"])\n res_info[\"prize\"] = prize\n\n self.logger.debug(\"\\tResult Information Parsed\")\n return res_info", "def challenge_get(self, environ, start_response):\n environ['wsgiorg.routing_args'][1]['recipe_name'] = self.CHALLENGER_RECIPE\n environ['tiddlyweb.type'] = 'text/x-tiddlywiki'\n return get_tiddlers(environ, start_response)", "def handle_response(self, response):\n\n self._tmp_request_args = {}\n self.cache_response(response)", "def tc_request(resource, *, md5_reg=re.compile(\"[0-9a-f]{32}\")):\n resource = resource.lower()\n\n if md5_reg.fullmatch(resource) is None:\n sys.exit(\"Incorrect hash\" + resource)\n\n return tc_parse(getoutput(\"whois -h hash.cymru.com \" + resource))", "def parsePkt(pkt):\r\n meta = dict()\r\n headers = dict()\r\n for h,pattern in SIP_PKT_PATTERNS.iteritems():\r\n if h in ['reqfirstline', 'respfirstline']:\r\n continue\r\n headers[h] = None\r\n match = pattern.search(pkt)\r\n if match:\r\n headers[h] = re.sub(h + ': ', '', match.group()).rstrip('\\r\\n')\r\n if h == 'User-Agent' and headers[h]:\r\n headers[h] = re.sub(\"Server: \", \"\", headers[h])\r\n match_1 = SIP_PKT_PATTERNS['respfirstline'].search(pkt)\r\n match_2 = SIP_PKT_PATTERNS['reqfirstline'].search(pkt)\r\n if match_1:\r\n meta['respfirstline'] = match_1.group().rstrip(' \\r\\n')\r\n meta['code'] = int(match_1.group('code'))\r\n elif match_2:\r\n meta['reqfirstline'] = match_2.group().rstrip(' \\r\\n')\r\n meta['code'] = None\r\n else:\r\n print \"can't parse rotten SIP pkt:\\r\\n%s\" %(pkt)\r\n return \r\n if meta['code'] == AUTHREQ \\\r\n or meta['code'] == PROXYAUTHREQ:\r\n meta['auth-header'] = dict()\r\n auth_match = re.search('(?P<www_or_proxy>(?:WWW|Proxy)-Authenticate): Digest (?P<other_meta>.*)\\r\\n', pkt)\r\n if auth_match:\r\n meta['auth-header']['type'] = auth_match.group('www_or_proxy')\r\n if meta['auth-header']['type'] == 'WWW-Auth-Header':\r\n meta['auth-header']['domain'] = re.search('domain=\"([-\\/\\\\:\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n meta['auth-header']['qop'] = re.search('qop=\"([-\\/\\\\:\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n meta['auth-header']['stale'] = re.search('stale=(?:True|False)', other_meta).group(1)\r\n meta['auth-header']['opaque'] = re.search('opaque=\"([-\\/\\\\:\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n other_meta = auth_match.group('other_meta')\r\n algo_match = re.search('algorithm=([a-zA-Z0-9]+)', other_meta)\r\n meta['auth-header']['realm'] = re.search('realm=\"([-\\/\\\\:_\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n meta['auth-header']['nonce'] = re.search('nonce=\"([-\\/\\\\+:_\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n if algo_match:\r\n meta['auth-header']['algorithm'] = algo_match.group(1)\r\n else:\r\n meta['auth-header']['algorithm'] = 'MD5' \r\n else:\r\n del meta['auth-header']\r\n meta['headers'] = headers\r\n return meta", "def _parse_response(self, future, response):\n if response.error:\n logging.warning(\"HTTP error from Github get user: %s\", response.error)\n future.set_exception(AuthError('Github auth get user info error: %s' % str(response)))\n return\n try:\n json = tornado.escape.json_decode(response.body)\n except Exception:\n logging.warning(\"Invalid JSON from Github: %r\", response.body)\n future.set_exception(AuthError('Invalid JSON from Github: %s' % str(response)))\n return\n\n if isinstance(json, dict) and json.get(\"error_code\"):\n logging.warning(\"Github error: %d: %r\", json[\"error_code\"],\n json.get(\"error_msg\"))\n 
future.set_exception(AuthError(\"Github error: %d: %r\" % ( json[\"error_code\"],\n json.get(\"error_msg\")) ) )\n return\n future.set_result(json)", "def acme_challenge(self, domain):\n return self.network.send_and_receive_expected(\n messages.ChallengeRequest(identifier=domain),\n messages.Challenge)", "def test_authenticatorChallengeResponse(self):\n username = b'testuser'\n secret = b'secret'\n chal = b'challenge'\n cAuth = imap4.PLAINAuthenticator(username)\n response = cAuth.challengeResponse(secret, chal)\n self.assertEqual(response, b'\\0' + username + b'\\0' + secret)", "def _parse_results(self):\n for line in self.file_dic['output'].splitlines():\n if line.startswith(' * GAMESS VERSION = '):\n temp = line.split('=')[1]\n temp = temp.split('*')[0]\n self.version = temp.strip()\n\n if line[1:25] == 'FREE ENERGY OF SOLVATION' and line.find('1 ATM') == -1:\n temp = line.split()\n #Take the next number after =\n #In KCAL/MOL\n self.solvation_energy = float(temp[temp.index(\"=\") + 1])", "def _ParseCacheEntry(\n self, parser_mediator, file_object, display_name, block_size):\n file_offset = file_object.get_offset()\n\n # Seeing that this parser tries to read each block for a possible\n # cache entry, we read the fixed-size values first.\n cache_entry_header_map = self._GetDataTypeMap('firefox_cache1_entry_header')\n\n try:\n cache_entry_header, header_data_size = self._ReadStructureFromFileObject(\n file_object, file_offset, cache_entry_header_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n 'Unable to parse Firefox cache entry header with error: {0!s}'.format(\n exception))\n\n if not self._ValidateCacheEntryHeader(cache_entry_header):\n # Skip to the next block potentially containing a cache entry.\n file_offset = block_size - header_data_size\n file_object.seek(file_offset, os.SEEK_CUR)\n raise IOError('Not a valid Firefox cache record.')\n\n file_offset += header_data_size\n body_data_size = (\n cache_entry_header.request_size + cache_entry_header.information_size)\n\n cache_entry_body_data = self._ReadData(\n file_object, file_offset, body_data_size)\n\n context = dtfabric_data_maps.DataTypeMapContext(values={\n 'firefox_cache1_entry_header': cache_entry_header})\n\n cache_entry_body_map = self._GetDataTypeMap('firefox_cache1_entry_body')\n\n try:\n cache_entry_body = self._ReadStructureFromByteStream(\n cache_entry_body_data, file_offset, cache_entry_body_map,\n context=context)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError((\n 'Unable to map cache entry body data at offset: 0x{0:08x} with '\n 'error: {1!s}').format(file_offset, exception))\n\n file_offset += cache_entry_header.request_size\n\n request_method, response_code = self._ParseHTTPHeaders(\n cache_entry_body.information, file_offset, display_name)\n\n # A request can span multiple blocks, so we use modulo.\n cache_entry_data_size = header_data_size + body_data_size\n _, remaining_data_size = divmod(cache_entry_data_size, block_size)\n if remaining_data_size > 0:\n file_object.seek(block_size - remaining_data_size, os.SEEK_CUR)\n\n if parser_mediator:\n event_data = FirefoxCacheEventData()\n event_data.data_size = cache_entry_header.cached_data_size\n event_data.fetch_count = cache_entry_header.fetch_count\n event_data.info_size = cache_entry_header.information_size\n event_data.last_fetched_time = dfdatetime_posix_time.PosixTime(\n timestamp=cache_entry_header.last_fetched_time)\n event_data.location = cache_entry_header.location\n 
event_data.request_method = request_method\n event_data.request_size = cache_entry_header.request_size\n event_data.response_code = response_code\n event_data.url = cache_entry_body.request\n event_data.version = '{0:d}.{1:d}'.format(\n cache_entry_header.major_format_version,\n cache_entry_header.minor_format_version)\n\n if cache_entry_header.last_modified_time:\n event_data.last_modified_time = dfdatetime_posix_time.PosixTime(\n timestamp=cache_entry_header.last_modified_time)\n\n if cache_entry_header.expiration_time:\n event_data.expiration_time = dfdatetime_posix_time.PosixTime(\n timestamp=cache_entry_header.expiration_time)\n\n parser_mediator.ProduceEventData(event_data)\n\n return cache_entry_header", "def parse_git_response(utf8_response):\n response_dict = collect_response(utf8_response.strip().split('\\n'))\n\n # Branch-related meta data\n if '#' not in response_dict:\n raise KeyError('git did not return branch information')\n branch_info = parse_branches(response_dict['#'])\n branch = get_branch(branch_info)\n n_ahead, n_behind = parse_ab(branch_info)\n\n n_untracked = len(response_dict['?']) if '?' in response_dict else 0\n n_modified, n_staged = parse_modified(response_dict)\n\n return [branch, n_untracked, n_staged, n_modified, n_ahead, n_behind]", "def decodeline(self, line):\n result = ApacheLogLine()\n result.full_line = line\n linepatternmatch = self._linepattern.match(line)\n if linepatternmatch:\n result.hostname = linepatternmatch.group(1)\n result.user = linepatternmatch.group(2)\n if result.user == '-':\n result.user = ''\n (result.accesstime_seconds, result.serveroffset) = self.parsedate(linepatternmatch.group(3))\n result.accesstime_string = stringdate(result.accesstime_seconds, offset=result.serveroffset)\n result.file = linepatternmatch.group(4)\n result.code = linepatternmatch.group(5)\n result.code_description = self._codetranslator.get_description(result.code)\n result.size = linepatternmatch.group(6)\n if result.size == '-':\n result.size = 0\n result.referer = linepatternmatch.group(7)\n if result.referer == '-':\n result.referer = ''\n result.browser = linepatternmatch.group(8)\n else:\n self._notparsable += 1\n warn(\"The line '%s' could not be parsed\" % line)\n return None\n if self._line_fits_pattern(result):\n self._acceptedlines += 1\n return result\n else:\n self._rejectedlines += 1\n return None", "def cert_challenge_http(self) -> 'outputs.CertHttpChallengeResponse':\n return pulumi.get(self, \"cert_challenge_http\")", "def get_cached_token(self):\n token_info = None\n try:\n token_info_string = get_spotify_token_info(self.discord_uid)\n token_info = json.loads(token_info_string)\n\n # if scopes don't match, then bail\n if \"scope\" not in token_info or not self._is_scope_subset(\n self.scope, token_info[\"scope\"]\n ):\n return None\n\n if self.is_token_expired(token_info):\n token_info = self.refresh_access_token(\n token_info[\"refresh_token\"]\n )\n except Exception as e:\n logger.warning(f\"Couldn't read cache: {e}\")\n\n return token_info", "def parse_response(response):\n return json.loads(response.text)", "def _parse_content(response):\n if response.status_code != 200:\n raise ApiError(f'unknown error: {response.content.decode()}')\n result = json.loads(response.content)\n if not result['ok']:\n raise ApiError(f'{result[\"error\"]}: {result.get(\"detail\")}')\n return result", "def decode_response(enc_response, task):\n\n if enc_response == 'NVC':\n return [['NVC']]\n if enc_response == ['NVC']:\n return [enc_response]\n if enc_response 
== [['NVC']]:\n return enc_response\n\n obj_a = set(task[0][1:]) - set(task[1][1:])\n obj_c = set(task[1][1:]) - set(task[0][1:])\n\n # Determine quantifier\n quant = None\n for resp, enc in QUANTIFIERS_SYLLOGISTIC_GENERALIZED_ENCODING.items():\n if enc == enc_response[0]:\n quant = resp\n break\n\n if quant is None:\n raise ValueError('Invalid Quantifier in response encoding: {}'.format(enc_response))\n\n # Handle response direction\n if enc_response[1:] == 'ac':\n return [[quant, list(obj_a)[0], list(obj_c)[0]]]\n return [[quant, list(obj_c)[0], list(obj_a)[0]]]", "def parse_auth_response(text: str) -> dict[str, str]:\n response_data = {}\n for line in text.split(\"\\n\"):\n if not line:\n continue\n\n key, _, val = line.partition(\"=\")\n response_data[key] = val\n\n return response_data", "async def complete_challenge(\n self,\n key: josepy.jwk.JWK,\n identifier: acme.messages.Identifier,\n challenge: acme.messages.ChallengeBody,\n ):\n logger.debug(\n f\"(not) solving challenge {challenge.uri}, type {challenge.chall.typ}, identifier {identifier}\"\n )\n # await asyncio.sleep(1)", "def _deserialize_response(self, response):\n text = response.content.decode(errors='replace')\n text = _remove_control_characters(text)\n doc = json.loads(text, cls=_TransmissionJSONDecoder)\n\n if doc['result'] != 'success':\n raise TransmissionError(\"Request failed: '%s'\" % doc['result'])\n\n if doc['tag'] != self.tag:\n raise TransmissionError(\"Tag mismatch: (got %d, expected %d)\" % (doc['tag'], self.tag))\n else:\n self.tag += 1\n\n if 'arguments' in doc:\n return doc['arguments'] or None\n\n return None", "def process_response(self, result):\r\n if len(result) == 3:\r\n data = result[0]\r\n headers = result[2]\r\n if self.HEADER_API_VERSION in headers:\r\n api_version = headers[self.HEADER_API_VERSION]\r\n if (not self.already_printed_version_warning\r\n and not self.is_up_to_date(api_version)):\r\n print('Warning: Looks like you\\'re using an outdated API '\r\n 'Version, please consider updating (server ' +\r\n api_version + ' / client ' + self.__version__ + ')')\r\n self.already_printed_version_warning = True\r\n return data\r\n return result", "def process_response(self, req, resp, resource, req_succeeded):\n\n # Step 1: for 'rest-based' and 'rest&time-based' eviction strategies the\n # POST/PATCH/PUT/DELETE calls are never cached and even more they\n # invalidate the record cached by the GET method\n if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based,\n CacheEvictionStrategy.rest_and_time_based] \\\n and req.method.upper() in [HttpMethods.POST,\n HttpMethods.PATCH,\n HttpMethods.PUT,\n HttpMethods.DELETE]:\n # get the cache key created by the GET method (assuming there was one)\n key = self.generate_cache_key(req, method='GET')\n self.cache.delete(key)\n return\n\n # Step 2: if it is marked to be cached, but has not yet been cached\n # then we cache it\n if hasattr(req.context, 'cache') and req.context.cache \\\n and (not hasattr(req.context, 'cached') or not req.context.cached):\n key = self.generate_cache_key(req)\n value = self.serialize(req, resp, resource)\n\n # for the REST-based strategy there is no timeout, the cached record never expires\n if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based]:\n # timeout 0 - never expires\n timeout = 0\n else:\n # for the time-based and rest-and-time-based eviction strategy the\n # cached record expires\n timeout = req.context.cache_timeout if hasattr(req.context, 'cache_timeout') else 
600\n\n self.cache.set(key, value, timeout=timeout)", "def update_cached_response(self, request, response):\r\n cache_url = self.cache_url(request.url)\r\n\r\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\r\n\r\n if not cached_response:\r\n # we didn't have a cached response\r\n return response\r\n\r\n # Lets update our headers with the headers from the new request:\r\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\r\n #\r\n # The server isn't supposed to send headers that would make\r\n # the cached body invalid. But... just in case, we'll be sure\r\n # to strip out ones we know that might be problmatic due to\r\n # typical assumptions.\r\n excluded_headers = [\r\n \"content-length\",\r\n ]\r\n\r\n cached_response.headers.update(\r\n dict((k, v) for k, v in response.headers.items()\r\n if k.lower() not in excluded_headers)\r\n )\r\n\r\n # we want a 200 b/c we have content via the cache\r\n cached_response.status = 200\r\n\r\n # update our cache\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, cached_response),\r\n )\r\n\r\n return cached_response", "async def get_challenges(user: str, request: web.Request):\n r8.log(request, \"get-challenges\", request.headers.get(\"User-Agent\"), uid=user)\n challenges = await r8.util.get_challenges(user)\n return web.json_response(\n {\n \"user\": user,\n \"team\": r8.util.get_team(user),\n \"challenges\": challenges,\n }\n )", "def challenge_id_to_captcha(self, challenge_id):\n try:\n secret = self._store.get(challenge_id).decode('utf-8')\n except redis.exceptions.ConnectionError as e:\n self.app.logger.error(\"Unable to connect to Redis database: '{}'.\".format(self.db_url))\n raise RuntimeError(\"Unable to connect to Redis database\")\n except redis.exceptions.ResponseError as e:\n self.app.logger.error(\"Unable to get challenge from Redis database: {}.\".format(e))\n raise RuntimeError(\"Unable to get challenge.\")\n\n if not secret:\n raise ValueError(\"No such challenge\")\n\n image_bytes = self._imageCaptcha.generate(secret)\n return image_bytes", "def answer_challenge(authzr, client, responders):\n responder, challb = _find_supported_challenge(authzr, responders)\n response = challb.response(client.key)\n\n def _stop_responding():\n return maybeDeferred(\n responder.stop_responding,\n authzr.body.identifier.value,\n challb.chall,\n response)\n return (\n maybeDeferred(\n responder.start_responding,\n authzr.body.identifier.value,\n challb.chall,\n response)\n .addCallback(lambda _: client.answer_challenge(challb, response))\n .addCallback(lambda _: _stop_responding)\n )" ]
[ "0.69334584", "0.57358587", "0.56640327", "0.56572354", "0.5530632", "0.54902357", "0.54403126", "0.54084736", "0.54046005", "0.54018176", "0.5199438", "0.5182611", "0.517387", "0.5165005", "0.512281", "0.5000999", "0.49986807", "0.4956543", "0.4911077", "0.49107736", "0.4908334", "0.4896391", "0.48870412", "0.4884257", "0.48749492", "0.4874675", "0.48739746", "0.48563156", "0.48544717", "0.4850815", "0.48457637", "0.4836415", "0.48249125", "0.48249125", "0.48249125", "0.48249125", "0.48249125", "0.48249125", "0.48249125", "0.48231548", "0.4810197", "0.48055348", "0.47807965", "0.47294146", "0.47215217", "0.47084352", "0.46931526", "0.4692176", "0.46726653", "0.46675688", "0.46523866", "0.46491584", "0.46446213", "0.46430877", "0.46193895", "0.46086413", "0.4606111", "0.45921052", "0.45893607", "0.45881408", "0.45814177", "0.45734864", "0.45725882", "0.4565051", "0.4563879", "0.455734", "0.4550483", "0.45475012", "0.45433256", "0.4543194", "0.45348147", "0.45232275", "0.45232093", "0.4522626", "0.45132717", "0.4503003", "0.4496034", "0.44881958", "0.44860107", "0.4484537", "0.446823", "0.4462269", "0.44494826", "0.44474077", "0.4445938", "0.4433081", "0.4430716", "0.44290766", "0.44290242", "0.44288704", "0.44254866", "0.44251084", "0.44168428", "0.44124523", "0.44061875", "0.44045788", "0.44044045", "0.4401948", "0.43932134", "0.43878692" ]
0.6099478
1
check if the reference folder is in place and all attributes are ready
def check_reference_ready():
    # check to see if there is a manifest file in the default reference path
    manifest_file = os.path.join(settings.DEFAULT_REFERENCE_PATH, 'manifest.json')
    if not os.path.isfile(manifest_file):
        _log("manifest.json file cannot be found in the reference folder; simulation will NOT work!")
        return
    _log("reading manifest.json ..")
    # read the manifest file
    with open(manifest_file, 'r') as manifest:
        data = json.load(manifest)
    reference_fasta = os.path.join(settings.DEFAULT_REFERENCE_PATH, data["reference"])
    if not os.path.isfile(reference_fasta):
        _log("genome reference file (.fasta | .fa) cannot be found in the reference folder; simulation will NOT work!")
        return
    _log("found all required simulation files in place; simulation is READY!")
    settings.REFERENCE_READY = True
    settings.INPUT_FILES = {"reference": data['reference'], "targets": 'dummy'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))", "def copy_file_check(self):\n pass", "def _before_reference_check(self, maya_file, client_data=None):\n\n if self.is_artella_path():\n self.validate_environment_for_callback('BeforeReferenceCheck')\n\n raw_full_name = maya_file.rawFullName()\n if not dccplugin.DccPlugin().is_path_translated(\n raw_full_name) and dccplugin.DccPlugin().is_artella_path(raw_full_name):\n convert_path = dccplugin.DccPlugin().convert_path(raw_full_name)\n maya_file.setRawFullName(convert_path)\n\n return True", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def checkRefs(self, export_refs):\r\n return True", "def init_check(self):\n for required_file in self._required_files:\n # Check if required files are there\n # FIXME Sometimes it doesn't work :?\n if required_file not in self.files:\n self.valid = False", "def _check_before_run(self):\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def needs_sync(self):\n\n affected_attributes = [\n 'css_files', 'js_files',\n 'scss_files', 'widgets']\n\n for attr in affected_attributes:\n if len(getattr(self, attr)) > 0:\n return True\n return False", "def __checkDestination(self):\n return os.path.exists(self.__targetPath)", "def verify_attrs(self):\n self.verify_namespace_attrs(self.newlibrary.wrap_namespace)", "def base_data_check_shot(self):\n\n #alembic_dir\n alembic_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_dir')\n \n #is False\n if not (alembic_dir):\n #log\n self.logger.debug('Parameter alembic dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_dir)):\n #log\n self.logger.debug('Alembic dir {0} does not exist.'.format(alembic_dir))\n return False\n\n\n #alembic_path_list\n alembic_path_list = [os.path.join(alembic_dir, file).replace('\\\\', '/') for \n file in \n os.listdir(alembic_dir) if \n (os.path.isfile(os.path.join(alembic_dir, file)) and file.split('.')[-1] == 'abc')]\n #alembic_path_list empty\n if not (alembic_path_list):\n #log\n self.logger.debug('alembic_path_list empty. 
Alembic dir {0} does not seem to contain alembic files.'.format(alembic_dir))\n return False\n\n\n #checked_alembic_path_list\n checked_alembic_path_list = []\n\n #iterate\n for alembic_path in alembic_path_list:\n\n #object_path_list\n object_path_list = self.alembic_functionality.get_alembic_object_path_list(alembic_path)\n #object_path_list empty\n if not (object_path_list):\n #log\n self.logger.debug('Object path list for alembic {0} empty. Continuing'.format(alembic_path))\n continue\n\n #iterate, check and create\n for object_path in object_path_list:\n\n #helga_locator_attr_exists\n helga_locator_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_locator')\n\n #helga_highpoly_rendergeo_attr_exists\n helga_highpoly_rendergeo_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_highpoly_rendergeo')\n\n #if attr exists append and break\n if (helga_locator_attr_exists and helga_highpoly_rendergeo_attr_exists):\n\n #append\n checked_alembic_path_list.append(alembic_path)\n break\n\n #checked_alembic_path_list empty\n if not (checked_alembic_path_list):\n #log\n self.logger.debug('checked_alembic_path_list empty. Alembic dir {0} does not seem to contain alembic files with helga_highpoly_rendergeo attribute.'.format(alembic_dir))\n return False\n\n\n #alembic_highpoly_rendergeo_dir\n alembic_highpoly_rendergeo_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_highpoly_rendergeo_dir')\n \n #is False\n if not (alembic_highpoly_rendergeo_dir):\n #log\n self.logger.debug('Parameter alembic highpoly rendergeo dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_highpoly_rendergeo_dir)):\n #log\n self.logger.debug('Alembic highpoly rendergeo dir {0} does not exist.'.format(alembic_highpoly_rendergeo_dir))\n return False\n\n\n #return\n return [checked_alembic_path_list, alembic_highpoly_rendergeo_dir]", "def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'relu5-3/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'relu5-3/test.pkl')))", "def check_folder_state(self):\n while self:\n diff = self.get_diff()\n print(diff or 'No changes detected')\n if diff:\n self.parent.send_diff_data(diff)\n time.sleep(1)", "def _verify(self) -> None:\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.data_dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.data_dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()", "def _check_before_run(self):\n\t\tif not osp.exists(self.dataset_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n\t\tif not osp.exists(self.train_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n\t\tif not osp.exists(self.query_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n\t\tif not osp.exists(self.gallery_dir):\n\t\t\traise 
RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n 
raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.probe_gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.probe_gallery_dir))", "def check_structure_is_modified(self):\n if not self.structure_has_been_modified: \n print('NEED TO MODIFY STRUCTURE BEFORE PROCEEDING FURTHER!')\n sys.exit()", "def ensure_loaded(self):\n if not (Asset.list_all(self)):\n self.update_list()", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))", "def check(self):\r\n self._check_object(self._config.name)", "def already_processed(self):\n # If the flag file has been created by a previous run\n # or if any of the rules have already been re-ordered\n # then we shouldn't make any more changes and instead\n # the system needs to be rebooted.\n return self.syspaths.flag_exists", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.split_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.split_dir))", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def valid(self):\r\n if self.dir_exists and self.files_exist:\r\n return True\r\n else:\r\n return False", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def checkProperUnpack(self):\n print(\"Checking Randomizer files\")\n\n self.folderStatus = False\n self.aiRefStatus = False\n self.ffxRefStatus = True\n self.validNewStatus = False\n self.validReplaceStatus = False\n self.originalRefMissing = 0\n\n if (os.path.isdir(\"enemyRandomizerData/\")):\n self.folderStatus = True\n\n if (os.path.isfile(\"enemyRandomizerData/airef.csv\")):\n self.aiRefStatus = True\n\n if (os.path.isfile(\"enemyRandomizerData/replacement_ref/valid_new.txt\")):\n self.validNewStatus = True\n\n if (os.path.isfile(\"enemyRandomizerData/replacement_ref/valid_replacements.txt\")):\n self.validReplaceStatus = True\n\n for iFile in [\"m10_00_00_00\", \"m10_01_00_00\", \"m10_02_00_00\", \"m11_00_00_00\", \"m12_00_00_00\", \"m12_01_00_00.ptde\", \"m12_01_00_00.remaster\", \"m12_00_00_01\", \"m13_00_00_00.remaster\", \"m13_01_00_00\", \"m13_02_00_00\", \"m14_00_00_00\", \"m14_01_00_00\", \"m15_00_00_00\", \"m15_01_00_00\", \"m16_00_00_00\", \"m17_00_00_00\", \"m18_00_00_00\", \"m18_01_00_00\"]:\n if not (os.path.isfile('enemyRandomizerData/original_enemies_ref/' + iFile + '.txt')):\n self.originalRefMissing += 1", "def _after_load_reference(self, *args):\n\n if not self.is_artella_path():\n return\n\n self.validate_environment_for_callback('AfterLoadReference')", "def _verify(self) -> None:\n # Check if the files already exist\n if os.path.exists(os.path.join(self.root, self.image_root)):\n 
return\n\n # Check if .zip files already exists (if so extract)\n exists = []\n for filename, md5 in zip(self.filenames, self.md5s):\n filepath = os.path.join(self.root, filename)\n if os.path.isfile(filepath):\n if self.checksum and not check_integrity(filepath, md5):\n raise RuntimeError(\"Dataset found, but corrupted.\")\n exists.append(True)\n extract_archive(filepath)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n raise RuntimeError(\n \"Dataset not found in `root` directory, either specify a different\"\n + \" `root` directory or manually download the dataset to this directory.\"\n )", "def is_ref_known(self):\r\n \r\n if \"N\" in self.ref:\r\n return False\r\n else:\r\n return True", "def check_paths(self):\r\n\t\tself.check_line_edits_and_refresh_filestate()\r\n\t\t# paths\r\n\t\tsource_img_filename = self.source_img_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_dir_name = self.sink_dir_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_db_name_entry_text = self.sink_db_name_entry.text()\r\n\t\tdb_ext = \".db\" if not sink_db_name_entry_text.lower().endswith(\".db\") else \"\"\r\n\t\tsink_db_filename = os.path.join(sink_dir_name, sink_db_name_entry_text + db_ext).replace(\"\\\\\", \"/\")\r\n\t\tsource_db_filename = \"\"\r\n\r\n\t\t# check validity\r\n\t\tsource_img_filename_valid = self.filestate.is_valid(source_img_filename, SOURCE_IMG)\r\n\t\tsink_dir_name_valid = self.filestate.is_valid(sink_dir_name, SINK_DIR)\r\n\t\tsink_db_filename_valid = self.filestate.is_valid(sink_db_filename, SINK_DB)\r\n\t\tsource_db_filename_valid = True\r\n\r\n\t\tall_paths_valid = source_img_filename_valid and sink_dir_name_valid and sink_db_filename_valid\r\n\r\n\t\tif self.existing_case:\r\n\t\t\tsource_db_filename = self.source_db_entry.text()\r\n\t\t\tsource_db_filename_valid = self.filestate.is_valid(source_db_filename, SOURCE_DB)\r\n\t\t\tall_paths_valid = all_paths_valid and source_db_filename_valid\r\n\r\n\t\tif all_paths_valid:\r\n\t\t\tself.filestate.set_source_img_filename(source_img_filename)\r\n\t\t\tself.filestate.set_sink_dir_name(sink_dir_name)\r\n\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\tif self.existing_case:\r\n\t\t\t\tself.filestate.set_source_db_filename(source_db_filename)\r\n\t\t\tself.refresh_UI()\r\n\t\t\treturn True\r\n\r\n\t\t# in the case of invalidity\r\n\t\tif not source_img_filename_valid:\r\n\t\t\tif not self.filestate.source_img_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file at does not exist.\")\r\n\t\t\telif not self.filestate.source_img_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file type is invalid (must be .npy).\")\r\n\t\t\tself.filestate.set_source_img_filename(\"\")\r\n\t\tif not source_db_filename_valid: # only if existing case\r\n\t\t\tif not self.source_db_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file does not exist.\")\r\n\t\t\telif not self.filestate.source_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file type is invalid (must be .db)\")\r\n\t\t\tself.filestate.set_source_db_filename(\"\")\r\n\t\tif not sink_dir_name_valid:\r\n\t\t\tif not self.filestate.sink_dir_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory does not exist.\")\r\n\t\t\telif not self.sink_dir_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory format is 
invalid.\")\r\n\t\t\tself.filestate.set_sink_dir_name(\"\")\r\n\t\tif not sink_db_filename_valid:\r\n\t\t\tif sink_dir_name_valid and not self.filestate.sink_db_file_preexists and \\\r\n\t\t\t\t\tself.filestate.sink_db_file_format_valid and \\\r\n\t\t\t\t\tdisplay_yes_no_message(self, \"Create file at \" + sink_db_filename + \"?\"):\r\n\t\t\t\t# create file with read write permissions\r\n\t\t\t\t###########################################\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsink_db_file = open(sink_db_filename, \"w+\")\r\n\t\t\t\t\tsink_db_file.close()\r\n\t\t\t\texcept IOError as error:\r\n\t\t\t\t\tdisplay_warning_message(self, \"Failed to create provided sink database file: \" + error)\r\n\t\t\t\t###########################################\r\n\t\t\t\t# set sink db filename\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\t\t\tself.refresh_UI()\r\n\t\t\t\t\treturn True\r\n\t\t\telif not self.filestate.sink_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Be sure to specify a name for the sink database.\")\r\n\t\t\tself.filestate.set_sink_db_filename(\"\")\r\n\r\n\t\t# print(\"paths invalid\")\r\n\t\tself.refresh_UI()\r\n\t\treturn False", "def check_file_exist(self):\n return False", "def missingOrStale(target, reference=None):\n if not os.path.isfile(target):\n return True\n if reference:\n return os.path.getmtime(target) < os.path.getmtime(reference)\n else:\n return False", "def before_update(mapper, conn, target):\n\n assert bool(target.ref), \"File.ref can't be null (before_update)\"", "def exists_attrs(proj):\n if not os.path.exists(proj.ballot_attributesfile):\n return False\n ballot_attributesfile = pickle.load(open(proj.ballot_attributesfile, 'rb'))\n if not ballot_attributesfile:\n return False\n else:\n return True", "def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.raw_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.raw_mat_path))\n if not osp.exists(self.split_new_det_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_det_mat_path))\n if not osp.exists(self.split_new_lab_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_lab_mat_path))", "def check_for_setup_error(self):\n if self.share2nms:\n for nfs_share in self.share2nms:\n nms = self.share2nms[nfs_share]\n volume_name, dataset = self._get_share_datasets(nfs_share)\n if not nms.volume.object_exists(volume_name):\n raise LookupError(_(\"Volume %s does not exist in Nexenta \"\n \"Store appliance\"), volume_name)\n folder = '%s/%s' % (volume_name, dataset)\n if not nms.folder.object_exists(folder):\n raise LookupError(_(\"Folder %s does not exist in Nexenta \"\n \"Store appliance\"), folder)\n if (folder not in nms.netstorsvc.get_shared_folders(\n 'svc:/network/nfs/server:default', '')):\n self._share_folder(nms, volume_name, dataset)\n self._get_capacity_info(nfs_share)", "def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = 
set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files", "def verify(self):\n\t\t\n\t\tif not os.path.exists(self.objects_root):\n\t\t\tself.error = \"no such directory: %s\" % \\\n\t\t\t\tself.objects_root\n\t\t\treturn False\n\t\t\n\t\tif not os.path.isdir(self.objects_root):\n\t\t\tself.error = \"not a directory: %s\" % \\\n\t\t\t\tself.objects_root\n\t\t\treturn False\n\t\t\n\t\treturn True", "def __is_complete__(self,configs,*args,**kwargs):\n current_dir = self.output_dir\n if GenericProcess.__is_complete__(self,*args,**kwargs):\n return True\n elif not os.path.isfile(self.complete_file):\n if hasattr(self,\"upload_dir\"):\n current_dir = self.upload_dir\n if not os.path.isfile(self.complete_file.replace(self.output_dir,self.upload_dir)): #If the output directory has already been cleaned, check the upload dir.\n return False\n else: \n return False\n if hasattr(self, \"snp_path\") and not self.snp_path is None and hasattr(self,\"analysis_ready_bam_path\") and not self.analysis_ready_bam_path is None:\n if not os.path.isdir(os.path.dirname(self.snp_path)) or not os.path.dirname(os.path.isfile(self.analysis_ready_bam_path)):\n return False\n if not os.path.isfile(self.snp_path) or not os.path.isfile(self.analysis_ready_bam_path):\n snp_file = False\n bam_file = False\n return False\n if not self.upload_dir is None:\n for file in os.listdir(os.path.join(self.upload_dir,self.description)):\n if file.endswith('.vcf'):\n snp_file = True \n if file.endswith('.bam'):\n bam_file = True \n if not snp_file or not bam_file:\n if configs[\"system\"].get(\"Logging\",\"debug\") is \"True\":\n print \"At least one of the output files is missing for sample \" + str(self.sample_key) + \":\"\n if not os.path.isfile(self.snp_path):\n print \"Missing \"+ self.snp_path\n if not os.path.isfile(self.analysis_ready_bam_path):\n print \"Missing \"+ self.analysis_ready_bam_path\n #os.remove(self.complete_file)\n #template_dir = configs['system'].get('Common_directories','template')\n #qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio_no_postprocess'))\n #self.__fill_template__(qsub_template,os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n #self.__launch__(configs['system'],os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n return False\n else:\n check_file = os.path.join(current_dir,'project-summary.csv')\n #If the process is complete, check to make sure that the check file is created. 
If not, send email once.\n if not os.path.isfile(check_file) and configs['pipeline'].has_option('Template_files','bcbio_no_postprocess') and current_dir==self.output_dir:\n #subject, body = self.__generate_general_error_text__(config)\n #send_email(subject,body)\n #self.fail_reported = True\n os.remove(self.complete_file)\n template_dir = configs['system'].get('Common_directories','template')\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio_no_postprocess'))\n self.__fill_template__(qsub_template,os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n self.__launch__(configs['system'],os.path.join(self.output_dir,\"bcbio_no_postprocess.sh\"))\n return False\n #store_stats_in_db(self)\n self.__finish__(*args,**kwargs)\n return True", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.test_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.test_dir))", "def exist(self):", "def check_reference(ref):\n obj_ref_regex = re.compile(\"^(?P<wsid>\\d+)\\/(?P<objid>\\d+)(\\/(?P<ver>\\d+))?$\")\n ref_path = ref.strip().split(\";\")\n for step in ref_path:\n if not obj_ref_regex.match(step):\n return False\n return True", "def testRef(self):\n self.assertEqual(\n self.ref,\n self.mr.ref\n )", "def data_loaded_check(self):\n return True", "def CheckPrerequisites(_):\n _LocalDataPath(RUN_SCRIPT)\n _LocalDataPath(CONFIG_FILE)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def check_paths(self):\n self.data[\"app_path\"] = list(map(\n self.replace_vars_path, self.data[\"app_path\"]))\n self.data[\"icons_path\"] = list(map(\n self.replace_vars_path, self.data[\"icons_path\"]))\n new_app_path = []\n for app_path in self.data[\"app_path\"]:\n if path.isdir(app_path) or path.isfile(app_path):\n new_app_path.append(app_path)\n self.data[\"app_path\"] = new_app_path\n if not len(self.data[\"app_path\"]) == 0:\n new_icons_path = []\n for icon_path in self.data[\"icons_path\"]:\n if (self.data[\"force_create_folder\"] and\n not path.exists(icon_path)):\n log(\"Creating application folder for {0}\".format(self.data[\"name\"]))\n create_dir(icon_path)\n if path.isdir(icon_path):\n if (\"binary\" in self.data.keys()\n and path.isfile(icon_path + self.data[\"binary\"])):\n new_icons_path.append(icon_path)\n elif \"binary\" not in self.data.keys():\n new_icons_path.append(icon_path)\n self.data[\"icons_path\"] = new_icons_path", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))", "def check_is_event_valid(self, event):\n if event.src_path == 
template_file_path:\n self.__init__()\n if not hasattr(event, 'dest_path'):\n event.dest_path = None\n for path in [event.src_path, event.dest_path]:\n if path is not None:\n dir, name = self.__parse_full_path(path)\n if dir.find(controlled_path\n ) >= 0 and not self._check_is_name_valid(name):\n if path == event.dest_path:\n os.system('cp {dest} {src}'.format(\n dest=event.dest_path, src=event.src_path))\n os.system('rm -rf {dir}{name}'.format(dir=dir, name=name))", "def checkConflicts(self):\n\t\treturn", "def __validate_location(self):\n if not os.path.exists(self._file_path):\n raise FileNotFoundError(\"Directory does not exist\")\n if not os.path.isfile(self._path_name):\n raise FileNotFoundError('File does not exist')", "def _setJob_checkShot(shotPath):\n\tvalid = True\n\n\tjobPath = os.path.split(shotPath)[0]\n\t#jobDataDir = os.path.join(jobPath, os.environ['IC_METADATA'])\n\tshotDataDir = os.path.join(shotPath, os.environ['IC_METADATA'])\n\n\t# if not os.path.isdir(jobDataDir):\n\t# \tvalid = False\n\n\tif not os.path.isdir(shotDataDir):\n\t\tvalid = False\n\n\treturn valid", "def check_loader(self, dt):\n if EVENTS['FILE_PATH'] and EVENTS['CAN_WRITE']:\n self.editor.load_file(EVENTS['FILE_PATH'])\n EVENTS['CAN_WRITE'] = False", "def _does_not_exist_or_forced(self) -> bool:\n if os.path.exists(self.extracted_path) and self.force:\n logger.debug(f\"'-f/--force' flag set, deleting directory: '{self.extracted_path}'\")\n shutil.rmtree(self.extracted_path)\n logger.debug(f\"Deletion successful.\")\n elif os.path.exists(self.extracted_path) and not self.force:\n logger.warning(f\"{self.dataset_name} already exists at the destination directory '{self.extracted_path}'\")\n logger.warning(f\"If you wish to re-download the dataset, try 'sla-cli download -f/--force <DATASET>'\")\n logger.warning(f\"Skipping...\")\n return False\n\n return True", "def sanity_check(self):\n return True", "def sanity_check(self):\n pass", "def loadSuccessful(self):\r\n\r\n return (self.config != None)", "def found_empty_file(self):\n self.is_empty = True", "def fileIsComplete(self):\n return True", "def missing_references(self):\n return [ ref for ref in self.reference_names() if not config.file_in_cache(self.name, self.observatory) ]", "def test_attribute(self):\n\n new_jawn = Amenity()\n self.assertTrue(\"name\" in new_jawn.__dir__())", "def syncfolder():", "def sceneRefCheck(silent=False):\n uptodate = True\n logger.debug('init sceneChecking...')\n currentProject = database.getCurrentProject()\n projName = pm.fileInfo.get('projectName')\n\n if currentProject != projName:\n logger.error('This file is from a project different from the current project')\n return\n\n item = Item(fromScene=True) # get current scene metadata\n\n\n # compare references and metadata and create lists of references to add, delete, update and replace\n logger.debug('creating lists of changes...')\n refOnSceneList = pm.getReferences()\n toDelete = [x for x in refOnSceneList if x not in item.components]\n toAdd = [x for x in item.components if x not in refOnSceneList and x != 'cam']\n toReplace = [x for x in item.components if item.components[x]['task'] != item.components[x]['proxyMode']]\n refToCheckUpdate = [x for x in refOnSceneList if x not in toDelete and x not in toReplace]\n toUpdate = {}\n\n # create the list of references to update depending on the assemble mode\n logger.debug('check update...')\n for ns in refToCheckUpdate:\n logger.info('updating ns:%s' % ns)\n if item.components[ns]['assembleMode'] == 'camera':\n 
continue\n\n if item.components[ns]['assembleMode'] == 'reference':\n logger.debug('reference')\n start_time = time.time()\n component = ReferenceComponent(ns, item.components[ns], parent=item)\n toUpdate[ns] = component.updateVersion(refOnSceneList[ns])\n elapsed_time = time.time () - start_time\n logger.debug ('%s Total info' % elapsed_time)\n\n if item.components[ns]['assembleMode'] == 'xlo':\n component = XloComponent(ns, item.components[ns], parent=item)\n toUpdate[ns] = component.updateVersion(refOnSceneList[ns])\n\n if item.components[ns]['assembleMode'] == 'cache':\n cache = CacheComponent(ns, item.components[ns], parent=item)\n toUpdate[ns] = cache.updateVersion(refOnSceneList[ns])\n\n # If not in silent mode, show dialogs to the user choose which references should be processed\n logger.debug('prompt if needed')\n if not silent:\n if toDelete:\n uptodate = False\n toDelete = pm.layoutDialog(ui=lambda: refCheckPrompt(toDelete, 'delete')).split(',')\n\n if toAdd:\n uptodate = False\n toAdd = pm.layoutDialog(ui=lambda: refCheckPrompt(toAdd, 'add')).split(',')\n\n if toReplace:\n uptodate = False\n toReplace = pm.layoutDialog(ui=lambda: refCheckPrompt(toReplace, 'replace')).split(',')\n\n upDateList = [x for x, y in toUpdate.iteritems() if y]\n if upDateList:\n uptodate = False\n upDateList = pm.layoutDialog(ui=lambda: refCheckPrompt(upDateList, 'update')).split(',')\n toUpdate = {x: y for x, y in toUpdate.iteritems() if x in upDateList}\n else:\n toUpdate = {}\n\n if uptodate:\n pm.confirmDialog(title='Scene Check', ma='center',\n message='Versions ok!',\n button=['OK'], defaultButton='OK', dismissString='OK')\n\n\n logger.debug('processing...')\n # Do the processing\n # delete\n logger.debug('toDelete:%s' % toDelete)\n for ns in toDelete:\n refOnSceneList[ns].remove()\n\n # add\n logger.debug('toAdd:%s' % toAdd)\n for ns in toAdd:\n if item.components[ns]['assembleMode'] == 'camera':\n continue\n\n if item.components[ns]['assembleMode'] == 'reference':\n component = ReferenceComponent(ns, item.components[ns], parent=item)\n component.addToScene()\n\n elif item.components[ns]['assembleMode'] == 'xlo':\n component = XloComponent(ns, item.components[ns], parent=item)\n component.addToScene()\n\n cache = CacheComponent(ns, item.components[ns], parent=item)\n cache.importCache()\n\n elif item.components[ns]['assembleMode'] == 'cache':\n cache = CacheComponent(ns, item.components[ns], parent=item)\n cache.addToScene()\n\n #update versions\n for ns, versions in toUpdate.iteritems():\n if item.components[ns]['assembleMode'] == 'camera':\n continue\n\n if item.components[ns]['assembleMode'] == 'reference':\n component = ReferenceComponent(ns, item.components[ns], parent=item)\n componentPath = component.getPublishPath()\n refOnSceneList[ns].replaceWith(componentPath)\n\n if item.components[ns]['assembleMode'] == 'xlo':\n if 'ver' in versions:\n component = XloComponent(ns, item.components[ns], parent=item)\n componentPath = component.getPublishPath()\n refOnSceneList[ns].replaceWith(componentPath)\n\n if 'cacheVer' in versions:\n #todo check if need to delete old cache node\n cache = CacheComponent(ns, item.components[ns], parent=item)\n cache.importCache()\n\n if item.components[ns]['assembleMode'] == 'cache':\n component = CacheComponent(ns, item.components[ns], parent=item)\n componentPath = component.getPublishPath()\n refOnSceneList[ns].replaceWith(componentPath)\n\n # Replace\n for ns in toReplace:\n if item.components[ns]['assembleMode'] == 'reference':\n oldProxyMode = 
item.components[ns]['task']\n\n item.components[ns]['task'] = item.components[ns]['proxyMode']\n component = ReferenceComponent(ns, item.components[ns], parent=item)\n componentItem = component.getItem()\n if componentItem.publishVer > 0:\n refOnSceneList[ns].replaceWith(component.getPublishPath())\n else:\n item.components[ns]['task'] = oldProxyMode\n\n item.putDataToDB()\n\n logger.info('done sceneChecking!')", "def complete(self):\n if bool(self.namespace) and bool(self.kind) and bool(self.id):\n return True\n else:\n return False", "def _check_config(self):", "def check(self, evidence, path_on_disk):\n return True", "def valid(self):\n return (self.get(\"~#mtime\", 0) and\n self[\"~#mtime\"] == util.mtime(self[\"~filename\"]))", "def check_for_new_data(self):\n return", "def test_fileinuse (self):\t\t\n\t\t# open a file:\n\t\ttestfilepath = os.path.join (self.testfolder, \"fileinuse.txt\")\n\t\tf = open(testfilepath,\"a\") #opens file with name of \"test.txt\"\n\t\tf.write(\"This file is now opened and i'm writting on it \\n\")\n\t\tself.assertEqual (MD.fileinuse(testfilepath), True) # Checks a file that it is beign written.\n\t\tself.assertEqual (MD.folderinuse(self.testfolder), True) # Checks if any file inside the folder is beign used.\n\t\tf.close()\n\t\tself.assertEqual (MD.fileinuse(testfilepath), False) # Cheks a file that it is closed.\n\t\tself.assertEqual (MD.folderinuse(self.testfolder), False)", "def exists(self):\r\n return os.path.exists(self.full_path)", "def check(self):\n\n self.check_auto_update()\n assert not self.empty()", "def is_up(self):\n self.loop = file_to_loop(self.loopFile)\n if len(self.loop) == 0:\n return False\n return True", "def resources():\n check_resources()", "def __check_exist_path(self):\n if 'path_out' not in self.params:\n raise ValueError('missing \"path_out\" among parameters')\n self.params['path_out'] = update_path(self.params.get('path_out'))\n list_names = [n for n in self.params if any(m in n.lower() for m in ['path', 'dir', 'file'])]\n for n in list_names:\n p = os.path.abspath(os.path.expanduser(self.params[n]))\n if not os.path.exists(p):\n raise FileNotFoundError('given path/file/dir \"%s\" does not exist!' 
% p)\n self.params[n] = p\n for n in [n for n in self.params if 'exec' in n]:\n # in case you define executable in your home\n if os.path.expanduser(self.params[n]) != self.params[n]:\n self.params[n] = os.path.expanduser(self.params[n])", "def initial_check(experiment, ln):\n # Start sharing\n SHARING.Start(experiment)\n # This is the name of the folder that we are searching for\n folder = \"library\" + str(ln)\n # If the results are already completed\n if folder in os.listdir(experiment[\"Folder\"] + \"results/\"):\n SHARING.End(experiment)\n return False, False\n # Or if the affinity maturation is ongoing\n elif folder in os.listdir(experiment[\"Folder\"]):\n SHARING.End(experiment)\n return False, True\n # If the initial folder for that library does not yet exist, construct it\n folder = \"initial_\" + folder\n if folder not in os.listdir(experiment[\"Folder\"]):\n os.mkdir(folder)\n os.mkdir(folder + \"/Current\")\n # Otherwise, another processor is performing the initialization\n else:\n SHARING.End(experiment)\n return False, False\n # Remove any existing design molecule information\n molecules = []\n for mol in experiment[\"Molecules\"]:\n if mol[0] != None:\n molecules.append(mol)\n # Recreate the initial Molecules \n experiment[\"Molecules\"] = molecules\n experiment.make_DesignGroups()\n experiment.finish_creation()\n # Load the unpositioned antigen information and scoring information\n SHARING.update_Current(experiment, experiment[\"Folder\"] + \\\n \"results/initial/\")\n SHARING.load_scores(experiment, experiment[\"Folder\"] + \\\n \"results/initial/\")\n # End sharing\n SHARING.End(experiment)\n return True, True", "def verify_paths(self) -> None:\n bad_keys = []\n for key, path in self.paths.items():\n # we only check specified paths, and drop them otherwise\n if path is not None:\n if isinstance(path, str):\n path = Path(path)\n if not path.exists():\n warn(\n f\"A path for {key} dataset was specified but unresolvable, please check {path.absolute()} exists and contains *.lmdb files.\"\n )\n bad_keys.append(key)\n else:\n bad_keys.append(key)\n for key in bad_keys:\n del self.paths[key]", "def is_prepared(self):\n return os.path.exists(os.path.join(self.location, INFO_NM))", "def verify(self):\r\n self.title = self.title and self.title or '' \r\n self.descr = self.descr and self.descr or '' \r\n self.link = self.link and self.link or ''\r\n self.channelURL = self.channelURL and self.channelURL or ''", "def _check_before_run(self):\r\n if not os.path.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def _source_filename_field_was_properly_initialized(self):\n if not Rule.sources_list_is_initialized:\n Rule.sources_list.append(self.source)\n Rule.sources_list_is_initialized = True\n # print(f\"if {self.source} not in {Rule.sources_list}\")\n if self.source not in Rule.sources_list:\n # print(f\"In rule: {self}\")\n # print(f\"Rule.sources_list = {Rule.sources_list}\")\n raise UninitializedSourceError(f\"{repr(self.source)} not initialized.\")\n if self.target not in Rule.sources_list:\n Rule.sources_list.append(self.target)\n return True", "def check_reference_open(refpath):\n if refpath != \"N/A\" and refpath.strip() != \"\":\n if s3_utils.is_s3_uri(refpath):\n if not s3_utils.object_exists(refpath):\n raise RuntimeError(\"S3 object does not exist: \" + refpath)\n else:\n with open(refpath, \"rb\"):\n pass\n return 
refpath", "def ensure_file(self):\n if not self.has_file():\n raise AttributeError(\"No file set\")", "def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty", "def exists(self):\n return (bolt.PickleDict.exists(self) or self.oldPath.exists())", "def test_object_has_no_copy_uploaded_marker(self):\n self.assertTrue('copy_uploaded' not in self.eightythreeb.data['markers']) # should not be present" ]
[ "0.6192987", "0.60495603", "0.5934274", "0.59128857", "0.58630824", "0.5849084", "0.5817839", "0.5815572", "0.5815572", "0.57971984", "0.57678586", "0.5761644", "0.57452965", "0.5725083", "0.5716483", "0.5684965", "0.5675776", "0.5654527", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5642257", "0.56371033", "0.55947846", "0.5581424", "0.55641794", "0.55628455", "0.55617857", "0.55512583", "0.55497503", "0.55491984", "0.5548772", "0.55251026", "0.55175996", "0.54946166", "0.5492309", "0.5469659", "0.5463717", "0.545703", "0.5453513", "0.54535013", "0.5434158", "0.54214054", "0.54208845", "0.54088", "0.5396638", "0.53955185", "0.5393579", "0.53842396", "0.5382941", "0.5382387", "0.53736335", "0.537071", "0.5357348", "0.5341355", "0.5335965", "0.5327755", "0.53259736", "0.531701", "0.5305413", "0.5300688", "0.5295427", "0.52939206", "0.5292993", "0.52856517", "0.52847177", "0.5281584", "0.5281098", "0.5277121", "0.5263199", "0.52630776", "0.52630746", "0.5255011", "0.5254474", "0.5244584", "0.52408046", "0.5240607", "0.52316546", "0.5226874", "0.52181154", "0.5212732", "0.5197472", "0.5194497", "0.5179839", "0.51746565", "0.5174497", "0.5173594", "0.5173594", "0.5173594", "0.5173594", "0.51721406", "0.51616067", "0.5160602", "0.5146964", "0.5143735", "0.51325375" ]
0.7016824
0
Callback to be called whenever the system state has changed. Checks whether or not the step has to be advanced
def updateState(self):
    if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):
        self.step_ops['cutting'] = True
    if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):
        self.step_ops['cooking'] = True
    # TODO: add the rest of the operations

    advance = True
    # Check if ALL operations are complete
    for op in self.step_ops:
        if self.step_ops[op] == False:
            advance = False
            break
    if advance:
        self.nextStep()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()", "def has_state_changed(self) -> bool:\r\n ...", "def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)\n # # what timestep a2c or ppo2 learn() is on \n # print(\"a2c/ppo2 num timestep\",self.num_timesteps)\n \n # TODO: add flag to save screenshots or not\n subfolder = os.path.join(self.directory, 'screen/')\n filepath = os.path.join(subfolder)\n img_name = '_screenshot_' + str(self.num_timesteps)\n \n if(self.algo == \"A2C\" or self.algo == \"PPO2\"):\n # self.locals['obs'] gives black and white imgs\n obs = self.env.get_images()\n for i in range(self.num_envs):\n mpl.image.imsave(subfolder+\"env_\" + str(i) + img_name + \"_.png\", obs[i])\n elif (self.algo == \"DQN\"):\n self.env.ale.saveScreenPNG(subfolder+\"env_\" + str(0) + img_name + \"_.png\")\n\n step_stats = {self.num_timesteps: {\n 'num_timesteps': self.num_timesteps,\n 'state': self.num_timesteps/self.num_envs,\n }\n }\n # add step to dict\n CustomCallback.main_data_dict.update(step_stats)\n key = self.num_timesteps\n\n # collection of minimum data: action, reward, lives\n if(self.algo == \"DQN\"):\n CustomCallback.main_data_dict[key]['action_env_0'] = self.locals['action']\n CustomCallback.main_data_dict[key]['action_name_env_0'] = self.actions[self.locals['env_action']]\n if(self.game == \"Pong\"):\n CustomCallback.main_data_dict[key]['curr_score_env_0'] = self.locals['episode_rewards'][-1]\n else:\n CustomCallback.main_data_dict[key]['cumulative_life_reward'] = self.locals['episode_rewards'][-1]\n if(self.isLives == True):\n CustomCallback.main_data_dict[CustomCallback.step]['lives'] = self.locals['info']['ale.lives']\n else:\n for i in range(self.num_envs):\n CustomCallback.main_data_dict[key]['action_env_'+str(i)] = self.locals['actions'][i]\n CustomCallback.main_data_dict[key]['action_name_env_'+str(i)] = self.actions[self.locals['actions'][i]]\n CustomCallback.main_data_dict[key]['step_reward_env_'+str(i)] = self.locals['rewards'][i]\n if(self.isLives == True):\n if(CustomCallback.step == 1):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = 3\n if(CustomCallback.step >= 2):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = self.locals['infos'][i]['ale.lives']\n\n if(self.game == \"Pong\" and self.algo != \"DQN\"):\n # extra processing for Pong scores\n self.find_life_game_info_a2c_ppo2_pong()\n\n # at the last step, write data into csv files\n if(CustomCallback.step == (self.num_steps/self.num_envs)):\n self.make_dataframes(self.df_list)\n # save minimal data\n self.df_to_csv(\"df_og.csv\", self.df_list)\n self.df_to_parquet()\n CustomCallback.step = CustomCallback.step + 1\n return True", "def _on_step(self) -> bool:\n\t\t#self.model.get_env().env_method(\"set_model_reference\", self.model.get_parameters())\n\t\tself.env.set_model_reference(self.model.get_parameters())\n\t\tprint(\"current timestep\", self.num_timesteps)\n\t\treturn True", "def assumed_state(self):\n # Progtime Blue does NOT update the handles when the manual\n # switch button is pressed, so the state may be wrong!\n return True", "def segmentNeedle(self):\r\n # productive #event\r\n profprint()\r\n if self.fiducialButton.isEnabled():\r\n print \"new checked state: \", not self.fiducialButton.checked\r\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def update(self):\n\t\t# If being controlled by COM\n\t\tif self.controled_by_com :\n\t\t\t# 
Substract 1 from the update counter\n\t\t\tself.update_counter -= 1\n\t\t\t# If the update counter reaches zero\n\t\t\tif self.update_counter == 0. :\n\t\t\t\t# then ask for an action \n\t\t\t\tif self.intermediate_phase is False :\n\t\t\t\t\tself.action_required = True \n\t\t\t\t\t\t\n\t\t\t\t# if during a change\n\t\t\t\t# then make the change\n\t\t\t\tif self.intermediate_phase is True : \n\t\t\t\t\tself.action_required = False\n\t\t\t\t\tself._color_changer() #Make the change in the Simulator\n\t\telse :\n\t\t\tpass", "def segmentNeedle(self):\n #productive #event\n profprint()\n if self.fiducialButton.isEnabled():\n print \"new checked state: \",not self.fiducialButton.checked\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def state_wait_validate(cfg, app, win, events):", "def step(self):\n self.state_estimator.step()", "def step(self, state):", "def check_state(self):\n pass", "def stepText2Changed(build, step, text2):", "def update(self, elapsed):\n delta = 10 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('desired-stimulus-releaser')\n solo = self.behavior_system.robot.drive_system.solo_drive\n joy = self.behavior_system.robot.emotion_system.emotion_joy\n\n if rel.is_active() and self.behavior_system.robot.drive_system.active_drive == solo and self.behavior_system.robot.emotion_system.active_emotion == joy:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def state_chosen_validate(cfg, app, win, events):", "def state_processing_validate(cfg, app, win, events):", "def update(self, elapsed):\n delta = 10 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('desired-stimulus-releaser')\n social = self.behavior_system.robot.drive_system.social_drive\n joy = self.behavior_system.robot.emotion_system.emotion_joy\n\n if rel.is_active() and self.behavior_system.robot.drive_system.active_drive == social and self.behavior_system.robot.emotion_system.active_emotion == joy:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def update(self, elapsed):\n delta = 18 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('undesired-stimulus-releaser')\n sorry = self.behavior_system.robot.emotion_system.emotion_sorrow\n\n if rel.is_active() and self.behavior_system.robot.emotion_system.active_emotion == sorry:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def state_finish_validate(cfg, app, win, events):", "def state_wait_do(cfg, app, win, events):", "def do_step(self) -> None:", "def try_advance(self):\n if not self.step.toclick:\n self.step.finished = True\n return True\n return False", "def notify_wizard(self):\n self.emit_datachanged()\n #self.emit(SIG(\"condition_update\"), self._conds or None)", "def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", 
self.exploration_rate)", "def state_chosen_do(cfg, app, win, events):", "def update_shuttle_state(self):\n if len(self.steps) > self.current_step >= 0:\n step = self.steps[self.current_step]\n if step.is_fulfilled():\n step.end(True)", "def onTimeStep(self, timeStep):\n pass", "def _update_status(self):\n if any([abs(v) > LIMITS[i] for i, v in enumerate(self.state)]):\n self.terminal = True\n elif abs(self.q[3]) < LIMITS[9]:\n self.terminal = True\n elif self.steps + 1 >= self.max_steps:\n self.terminal = True", "def _autooff_changed(hass, entity_id=None, old_state=None, new_state=None):\n PERSIST['states'][0] = new_state.state == 'on'\n _eval_state(hass)", "def handle_robot_step_changed(self, step):\n\n #Save the last step if some lost\n last_known_step = self.step\n super(WeldTask, self).handle_robot_step_changed(step)\n\n if step < 0 or step >= len(self.welding_parameters):\n # invalid step\n return\n\n if self.job is None:\n # no jobs\n return\n\n if self.welding_parameters[step] == WeldingState():\n # default state, skip\n return\n\n if last_known_step > step:\n # moving to the other direction\n return\n\n if self.welding_parameters[last_known_step] != self.welding_parameters[step]:\n # if there is a difference, send the new params\n RosProxy().call_service(\n '/welding_driver/set_params',\n SetWeldingParameters,\n self.welding_parameters[step])", "def _check_ready(self, _widget, __event=None, __page=0):\r\n\r\n if self.cmbHardware.get_active() > 0:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n\r\n return False", "def _step(self):\n pass", "def event_m20_11_5000():\n \"\"\"State 0,2: [Preset] Living Altar_SubState\"\"\"\n assert event_m20_11_x82()\n \"\"\"State 1: Rerun\"\"\"\n RestartMachine()", "def state_changed(self, oldstate, newstate, event, *args, **kwargs):", "def state_changed(self, oldstate, newstate, event, *args, **kwargs):", "def after_step():\n raise NotImplementedError", "def state_wait_enter(cfg, app, win):", "def StatusChanged(self, state, info):\n pass", "def _foyermot_changed(hass, entity_id=None, old_state=None, new_state=None):\n PERSIST['states'][4] = new_state.state == 'on'\n _eval_state(hass)", "def _system_changed_phase(self, prev_comp, comp):\n return np.abs(prev_comp - comp) > self._max_singlet_change", "def update(self, elapsed):\n delta = 10 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('absence-of-desired-stimulus-releaser')\n sorrow = self.behavior_system.robot.emotion_system.emotion_sorrow\n\n if rel.is_active() and self.behavior_system.robot.emotion_system.active_emotion == sorrow:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = 0", "def _sun_chaged(hass, entity_id=None, old_state=None, new_state=None):\n PERSIST['states'][1] = new_state.state == 'off'\n _eval_state(hass)", "def update_state(self, act):\n\n # check the checkbox logic\n if act in ['follow', 'not_follow', 'locate', 'not_locate']:\n self.check_locate_follow_logic(act)\n # test/record logic\n print(\"update function not implemented\")", "def update(self, elapsed):\n delta = 8 * elapsed\n rest = self.behavior_system.robot.drive_system.rest_drive\n\n if self.behavior_system.robot.drive_system.active_drive == rest:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def state_processing_do(cfg, app, win, events):", "def update_action(self):\n 
self.action = self.automata > self.states\n self.inv_action = self.inv_automata > self.states", "def opt_statechange(self, opt, new_state):\n self.opt_dict[opt]['enabled'] = new_state", "def step(self) -> bool:\n raise NotImplementedError()", "def take_action(self, state):", "def notify_wizard(self):\n if (self._wfield != None):\n self._wfield.update(self._conds or None)", "def state_processing_enter(cfg, app, win):", "def check_device_state(self):", "def _check_ready(self, _widget, __event=None, page=0):\r\n# WARNING: Refactor _check_ready; current McCabe Complexity metric = 12.\r\n if self.cmbSoftware.get_active() > 0:\r\n self.cmbDetectMethod.set_sensitive(True)\r\n self.txtTestProcedure.set_sensitive(True)\r\n self.txtTestCase.set_sensitive(True)\r\n self.txtExecutionTime.set_sensitive(True)\r\n else:\r\n self.cmbDetectMethod.set_sensitive(False)\r\n self.txtTestProcedure.set_sensitive(False)\r\n self.txtTestCase.set_sensitive(False)\r\n self.txtExecutionTime.set_sensitive(False)\r\n\r\n if page == 2 and self.cmbSoftware.get_active() <= 0:\r\n if(self.txtIncidentDate.get_text() != '' and\r\n self.cmbReportedBy.get_active_text() != '' and\r\n self.cmbCategory.get_active() > 0 and\r\n self.cmbHardware.get_active() > 0):\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n elif page == 2 and self.cmbSoftware.get_active() > 0:\r\n if(self.txtIncidentDate.get_text() != '' and\r\n self.cmbReportedBy.get_active_text() != '' and\r\n self.cmbCategory.get_active() > 0 and\r\n self.cmbHardware.get_active() > 0 and\r\n self.cmbDetectMethod.get_active() > 0 and\r\n self.txtExecutionTime.get_text() != ''):\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n elif page == 3:\r\n if(self.txtDescription.get_text() != '' and\r\n self.txtDetails.get_text(*self.txtDetails.get_bounds()) != ''):\r\n self.assistant.set_page_complete(self.fxdPageDescription, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageDescription,\r\n False)\r\n\r\n return False", "def _on_step(self):\n # self.logger.record(\"current_reward\")\n # self.n_calls is automatically updated because\n # we derive from BaseCallback\n if self.n_calls % self.eval_freq == 0:\n # === YOUR CODE HERE ===#\n # Evaluate the agent:\n # you need to do self.n_eval_episodes loop using self.eval_env\n # hint: you can use self.model.predict(obs, deterministic=True)\n mean_reward, std_reward = evaluate_policy(self.model, self.eval_env, n_eval_episodes=self.n_eval_episodes)\n # Save the latest agent\n self.logger.record(\"eval_mean_reward\", mean_reward)\n self.model.save(self.save_latest)\n # and update self.best_mean_reward\n if mean_reward > self.best_mean_reward:\n self.best_mean_reward = mean_reward\n self.model.save(self.save_path)\n if self.verbose > 0:\n print(\"Saving new best model at {} timesteps\".format(self.n_calls))\n print(\"Saving new best model to {}.zip\".format(self.save_best))\n \n print(\"Best mean reward: {:.2f}\".format(self.best_mean_reward))\n \n\n # ====================== # \n return True", "def state_choose_validate(cfg, app, win, events):", "def step(self):\n\n pass", "def the_changed_brightness_should_be_reflected_in_the_state_5():\n assert web_app.get_state()\n assert web_app.check_value_in_state(\"brightness\",\"5\")", "def _step(self) -> None:", "def getCurrentStep():", "def check_for_energy_tax_update(self):\n\t\tisChanged = 
False\n\t\t# TODO: discuss the validity of such approach when checking on the global optimality\n\t\t# if self.currentEnergy < self._globalMinimumEnergy and self.currentTax == 0:\n\t\tif self.currentEnergy < self._globalMinimumEnergy and self.contains_several_vertices(self.currentState):\n\t\t\toutput(\"\\t New global optimum registered: old value = {}, new value = {}, state = {}\"\\\n\t\t\t\t\t .format(str(self._globalMinimumEnergy),str(self.currentEnergy),self.getCurrentState()),isDebug=False)\n\t\t\tself._globalMinimumEnergy = self.currentEnergy\n\t\t\tself._globalMinimumState = copy(self.currentState)\n\t\t\tisChanged = True\n\t\tif self.currentEnergy < self.localMinimumEnergy and not self.isAllZeros(self.currentState): #should we update global as well? Now I do it\n\t\t\toutput(\"\\t New local optimum registered: old value = {}, new value = {}, state = {}\"\\\n\t\t\t\t\t .format(self.localMinimumEnergy,self.currentEnergy,self.getCurrentState()),isDebug=False)\n\t\t\tself.localMinimumEnergy = self.currentEnergy\n\t\t\tself.localMinimumState = copy(self.currentState)\n\t\t\tself.localMinimumTax = self.currentTax\n\t\t\tisChanged = True\n\t\treturn isChanged", "def on_step(self) -> None:\r\n\r\n if self.board == None:\r\n return\r\n\r\n TkState.disable(self.edit_menu.winfo_children())\r\n TkState.enable([self.reset_button])\r\n self.anim_board.next_gen()\r\n self.on_new_generation()\r\n self.painter.draw_board()", "def the_changed_brightness_should_be_reflected_in_the_state_10():\n assert web_app.get_state()\n assert web_app.check_value_in_state(\"brightness\",\"10\")", "def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return", "def game_control_updates(self):\r\n if self.game_control is not None:\r\n self.speed_step = self.game_control.get_prop_val(\"running.speed_step\", -1)", "def step(self):\n self.driver.step()", "def state_print_validate(cfg, app, win, events):", "def on_checkBox_kongtoukai_stateChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def on_checkBox_duotouping_stateChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def observe_step(self, state1, action1, reward2, state2, terminal=False):\n pass", "def unit_state_change_cb (unit, state) :\n\n print \"[Callback]: ComputeUnit '%s' state: %s.\" % (unit.uid, state)\n\n if state == rp.FAILED :\n sys.exit (1)", "def check_status(self, base):\n change = False\n # Trigger intensification\n if self.curr_i == self.I:\n self.curr_i = 0\n base = self.search_intensification()\n change = True\n # Trigger diversification\n elif self.curr_d == self.D:\n self.curr_d = 0\n base = self.search_diversification()\n change = True\n # Trigger step reduction\n elif self.curr_r == self.R:\n self.curr_r = 0\n # Start from best point found so far\n base = self.MTM[[-1], :-1].T\n self.update_STM(base)\n self.update_LTM(base)\n self.step = self.step_red * self.step\n\n if change:\n curr_obj = self.obj_wrap(base)\n self.update_MTM(base, curr_obj)\n self.update_STM(base)\n self.update_LTM(base)\n self.bases = np.block([[self.bases],[base.T, curr_obj]])\n\n return base", "def on_checkBox_duotoukai_stateChanged(self, p0):\n # TODO: not implemented yet\n raise NotImplementedError", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def perform_step(self) -> None:\n 
pass", "def on_step_clicked(self):\n self.start_threading()\n self.stepping = True\n self.step_event.set()", "def event_m20_11_x78():\n \"\"\"State 0,1: State\"\"\"\n ChangeOwnObjState(30)\n \"\"\"State 2: End state\"\"\"\n return 0", "def __handle_view_win_condition(self, gamestate_component):", "def __bool__(self) -> bool:\n if self.initial_value == 1 and self.number_of_steps == 0:\n return True\n return False", "def state_changed(self, old_state, new_state, target_state):\n pass", "def should_trigger(self, previous_result, *_args, **_kwargs):\n return self.extension.config.get('enabled', True)", "def func(self):\n if (not self.switches or \"online\" in self.switches) and not self.args:\n self.display_lists()\n return\n if \"claim\" in self.switches or (not self.switches and self.args):\n self.claim_scene()\n return\n if \"validate\" in self.switches:\n self.validate_scene()\n return\n if \"viewrequests\" in self.switches:\n self.view_requests()\n return\n self.msg(\"Invalid switch.\")", "def _state_update(self):\n self.state = self.mesh.state()\n if self.state < 0:\n self.state = self.STATE_DISABLED\n return self.state", "def state_chosen_enter(cfg, app, win):", "def nextCheckState(self):\n\n self._explicit = not self._explicit\n self._update_state()\n\n # Qt (on Linux) can insert shortcut characters into the text by itself.\n module = self.text().replace('&', '')\n\n self.explicitly_required_changed.emit(module, self._explicit)", "def goal_test(self, state):\n #return state == self.goal", "def stepStarted(build, step):", "def _ryanloc_changed(hass, entity_id=None, old_state=None, new_state=None):\n PERSIST['states'][3] = new_state.state == 'home'\n _eval_state(hass)", "def state_preview_validate(cfg, app, win, events):", "def update_waiting(self):\n if not self.inputs[0]:\n self.set_value(False, 0)\n if self.desc_value in self.struct_variables:\n struct = self.struct_variables[self.desc_value]\n if struct[\"structure\"] in (\"list\", \"array\"):\n values = self.struct_variables[self.desc_value][\"values\"]\n if self.get_value(1) in values:\n self.set_value(True, 0)\n elif struct[\"structure\"] in (\"dict\", ):\n values = self.struct_variables[self.desc_value][\"values\"].keys()\n if self.get_value(1) in values:\n self.set_value(True, 0)\n self.state = ACTIVE\n elif self.variant(map(lambda x: x is not None, self.get_value(0, True))):\n self.set_value(False, 0)\n for value in self.get_value(0, True):\n if self.get_value(1) == value:\n self.set_value(True, 0)\n break\n self.state = ACTIVE", "def _on_power_change(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n power = float(new)\n if (\n self.app.state != self.app.States.running\n and power >= self.args[CONF_RUNNING_THRESHOLD]\n ):\n self.log('Setting dishwasher to \"Running\"')\n self.app.state = self.app.States.running\n elif (\n self.app.state == self.app.States.running\n and power <= self.args[CONF_DRYING_THRESHOLD]\n ):\n self.log('Setting dishwasher to \"Drying\"')\n self.app.state = self.app.States.drying\n elif (\n self.app.state == self.app.States.drying\n and power == self.args[CONF_CLEAN_THRESHOLD]\n ):\n self.log('Setting dishwasher to \"Clean\"')\n self.app.state = self.app.States.clean", "def onTimeStepEnd(self, timeStep):\n pass", "def event_m20_11_4010():\n \"\"\"State 0,2: [Lib] [Preset] Elevator lever_SubState\"\"\"\n assert event_m20_11_x17(z113=20111400, z114=20111410, z115=10)\n \"\"\"State 1: Rerun\"\"\"\n RestartMachine()", "def _propertyStateChangedSlot(self):\r\n 
\r\n self._updateButtonStates()", "def testoptstatus(self): \r\n assert len(self.data.optstatus) == len(self.data.geovalues)\r\n # We only have the final energy available, so there's no point looking for OPT_NEW.\r\n for i in range(1, len(self.data.optstatus)-1):\r\n assert self.data.optstatus[i] == self.data.OPT_UNKNOWN\r\n assert self.data.optstatus[-1] == self.data.OPT_DONE", "def onTimeStepStart(self, timeStep):\n pass", "def changed(self):\n\t\tpass", "def _timestep_before_hook(self, *args, **kwargs):\n pass", "def changed_event(self):\n return True", "def stats_change(self):\n return True if self.board.prev_state != self.board.shot_count else False", "def curr_state(self, in_call):\n self.error_code = self.cnd['err_code']\n val_1 = self.values[self.dname + '.' + self.cnd['chans'][0]]\n val_2 = self.values[self.dname + '.' + self.cnd['chans'][1]]\n # print('curr_state', self.dname, in_call, val_1, val_2)\n if not in_call: # Non-timer called curr_state\n if val_1 and val_2:\n if self.cnd['up_lim'] > abs(val_1) >= self.cnd['down_lim'] and \\\n self.cnd['up_lim'] > abs(val_2) >= self.cnd['down_lim']:\n if abs(val_2 - val_1) > 0.05 * abs(val_1):\n if not self.tout_run:\n self.tout_run = True\n QTimer().singleShot(self.cnd['wait_time'], functools.partial(self.curr_state, True))\n else:\n self.fail_count['curr_state'] = 0\n self.log_manager('curr_state')\n else: # Timer called curr_state\n if self.cnd['up_lim'] > abs(val_1) >= self.cnd['down_lim'] and \\\n self.cnd['up_lim'] > abs(val_2) >= self.cnd['down_lim']:\n if abs(val_2 - val_1) > 0.05 * abs(val_1):\n self.fail_count['curr_state'] = 1\n self.log_manager('curr_state')\n self.tout_run = False" ]
[ "0.6339768", "0.6194187", "0.6136971", "0.5964047", "0.5904102", "0.5903155", "0.5893415", "0.5889973", "0.58527935", "0.58454347", "0.5844172", "0.58329093", "0.57805914", "0.57780147", "0.5772509", "0.5751888", "0.573898", "0.57344913", "0.5711796", "0.5696087", "0.56890595", "0.56870806", "0.568033", "0.566892", "0.5665165", "0.56595486", "0.5652686", "0.56413996", "0.5639269", "0.56331325", "0.5616539", "0.55990183", "0.5596667", "0.55790305", "0.55790305", "0.5578851", "0.5571813", "0.5571793", "0.5568129", "0.5557887", "0.55541956", "0.55448925", "0.55315006", "0.55311537", "0.55299157", "0.55178773", "0.5509131", "0.5498417", "0.5495485", "0.5490001", "0.5485721", "0.54856277", "0.54681784", "0.5442904", "0.5442313", "0.5436992", "0.54147804", "0.5411969", "0.54058146", "0.5405797", "0.53972673", "0.5393999", "0.53932244", "0.5390205", "0.53836226", "0.53706735", "0.5361762", "0.53616494", "0.5348104", "0.53395826", "0.5329849", "0.5327228", "0.5316444", "0.5314298", "0.5310603", "0.530462", "0.5298377", "0.5297286", "0.5288774", "0.52833915", "0.52824867", "0.5282178", "0.52800494", "0.5279399", "0.5278939", "0.52785397", "0.5277922", "0.5277565", "0.5277057", "0.5274775", "0.5265023", "0.52541894", "0.52472854", "0.5246845", "0.5244539", "0.5238851", "0.5235009", "0.52342325", "0.52327716", "0.523063" ]
0.6468291
0
Roll a 6-sided die.
def roll_dice():
    numbers = ['1', '2', '3', '4', '5', '6']
    return random.choice(numbers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self.roll", "def roll_dice():\n roll = random.randint(1, 6)\n return roll", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n num_roll = 0\n sum = 0\n pig_out = False # Pig Out rule\n while num_roll < num_rolls:\n roll = dice()\n if roll == 1:\n pig_out = True\n sum += roll\n num_roll += 1\n if pig_out: return 1\n else: return sum\n # END PROBLEM 1", "def two_d6_plus_6() -> list:\n rolls: List[int] = []\n for i in range(1, 7):\n roll: int = multi_die(2, 6) + 6\n rolls.append(roll)\n return rolls", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n \"*** YOUR CODE HERE ***\"\n count, return_sum = 0, 0\n while count < num_rolls:\n roll = dice()\n if roll == 1:\n count += 1\n while count < num_rolls:\n dice()\n count += 1\n return 1\n return_sum += roll\n count += 1\n return return_sum\n # END PROBLEM 1", "def roll_die(number_of_rolls, number_of_sides):\n\n roll = random.randint(1, number_of_sides) # Used recursion for this\n if number_of_rolls == 0:\n return 0 # Base case is 0. If it's 1, then I can roll a 7 with 6 sides\n else:\n return roll + roll_die(number_of_rolls - 1, number_of_sides) # Subtract 1 roll and keep calling function", "def diceRoll():\n return random.randint(1, 6) # generates a random integer between 1 and 6 (inclusive) and returns it.", "def roll_die(sides = 6, maxi = 6):\n d = 1000\n # discard highest roll(s)\n while d > maxi:\n d = random.randint(1,sides)\n return d", "def diceRoll():\n return randint(1,6)", "def roll_die(self, number_of_rolls):\n\t\tfor roll in range(0, number_of_rolls):\n\t\t\tprint(str(randint(1, self.sides)), end = \", \")\n\t\tprint()", "def sixes(dice):\n return sum([x for x in dice if x == 6])", "def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\r\n if number_of_rolls <= 0 or number_of_sides <= 0:\r\n return 0\r\n\r\n max_total = number_of_sides * number_of_rolls\r\n\r\n return random.randint(number_of_rolls, max_total)", "def roll_dice(self):\r\n return randint(1,self.sides)", "def roll_dice(num_of_dice=1):\r\n sides = 6\r\n return [random.randrange(1, sides+1) for _ in xrange(num_of_dice)]", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n roll_sum = 0 # sums values of rolled dice\n ones_total = 0 # counts number of times the value 1 is rolled\n while num_rolls>0:\n current_roll = dice()\n if current_roll==1:\n ones_total += 1\n roll_sum += current_roll\n num_rolls -= 1\n if ones_total > 0:\n return ones_total\n else:\n return roll_sum\n # END PROBLEM 1", "def roll_dice():\n die1 = random.randrange(1, 7)\n die2 = random.randrange(1, 7)\n return (die1, die2) # pack die face values into a tuple", "def rollDices():\n for i in range(5):\n dices[i] = randint(1, 6)", "def roll_dice(num_rolls, dice=six_sided_dice, who='Boss Hogg'):\r\n assert type(num_rolls) == int, 
'num_rolls must be an integer.'\r\n assert num_rolls > 0, 'Must roll at least once.'\r\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\r\n total, is_one = 0, False\r\n while num_rolls >= 1:\r\n x = dice()\r\n if commentary:\r\n announce(x, who)\r\n if x==1:\r\n is_one = True\r\n total = total + x\r\n num_rolls = num_rolls - 1\r\n if is_one: return 1\r\n else: return total", "def _rollOneDie(self):\n return random.randint(1, 6)", "def roll(self):\n return randint(1,6)", "def die_roll():\n roll = random.randint(1,6)\n return roll", "def rolldie():\n return int(random.random()*6)+1 # or use randrange()", "def throw_dice():\n return randint(1, 6) + randint(1, 6)", "def roll(dice):\n\n dice = str(dice).upper().strip()\n dice_mod = 0\n if dice == 'FLUX':\n return randint(1, 6) - randint(1, 6)\n else:\n if dice == 'GOODFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 < flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n else:\n if dice == 'BADFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 > flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n \n ichar1 = dice.find('DD')\n if ichar1 == -1:\n ichar1 = dice.find('D')\n if ichar1 == 0:\n num_dice = 1\n\n if ichar1 <> -1:\n if ichar1 <> 0:\n num_dice = int(dice[0:ichar1])\n# print 'Number of dice =', num_dice\n ichar2 = dice.find('+')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n else:\n ichar2 = dice.find('-')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n\n if ichar2 <> -1:\n dice_type = dice[ichar1: ichar2]\n dice_type = dice_type.rstrip()\n else:\n dice_type = dice[ichar1: len(dice)]\n# print 'dice type =', dice_type, 'Len = ', len(dice_type)\n\n if dice_type == 'D6': \n return die_rolls(6, num_dice) + dice_mod\n else:\n if dice_type == 'D66' and num_dice == 1 and dice_mod == 0:\n return randint(1, 6) * 10 + randint(1, 6)\n else:\n if dice_type == 'D100' and num_dice == 1: \n return (randint(1, 10) - 1) * 10 + randint(1, 10) + dice_mod \n else:\n if dice_type == 'D10': \n return die_rolls(10, num_dice) + dice_mod\n else: \n if dice_type == 'D20': \n return die_rolls(20, num_dice) + dice_mod\n else:\n if dice_type == 'D30': \n return die_rolls(30, num_dice) + dice_mod\n else:\n if dice_type == 'D12': \n return die_rolls(12, num_dice) + dice_mod\n else:\n if dice_type == 'D8': \n return die_rolls(8, num_dice) + dice_mod\n else:\n if dice_type == 'D4': \n return die_rolls(4, num_dice) + dice_mod\n else:\n if dice_type == 'D9': \n return die_rolls(9, num_dice) + dice_mod\n else:\n if dice_type == 'D3': \n return die_rolls(3, num_dice) + dice_mod\n else:\n if dice_type == 'DD':\n return (die_rolls(6, num_dice) + dice_mod) * 10\n \n print\n print \"** DICE ERROR! 
'%s' is unknown **\" % dice\n print \n print \"roll() is a dice rolling program.\"\n print\n print \"The types of dice to roll are (in string values):\"\n print \"roll('D6') -- roll one 6-sided die\"\n print \"roll('1D6') -- roll one 6-sided die\"\n print \"roll('2D6') -- roll two 6-sided dice\"\n print \"roll('D10') -- roll a 10-sided die\"\n print \"roll('D100') -- roll a 100-sided die (1 - 100)\"\n print \"roll('D66') -- roll for a D66 chart\"\n print \"roll('2DD+3') -- roll (2D6+3) x 10\"\n print\n print \"-/+ DMs can be added to rolls:\"\n print \"roll('3D6+6') -- add +6 DM to roll\"\n print \"roll('4D4-4') -- add -4 DM to roll\"\n print\n return 0", "def roll(self):\n\t\trnd = random.randint(1, 6)\n\t\tself.draw_number(rnd)\n\t\treturn rnd", "def roll(self):\n\n # Return a random integer between 1 and 6\n return random.randint(1, 6)", "def rollDie(self):\n return random.randint(1, self.sides)", "def roll(dice):\n rolled_dice = []\n for die in dice[1]:\n rolled_dice.append(randint(1, CUBE_DICE_MAX_VALUE()))\n dice[1] = rolled_dice\n return dice", "async def dice(self, ctx, diceroll: str = '1d6'):\n times, num = diceroll.split('d')\n times = int(times) if times else 1\n num = int(num) if num else 6\n maxscore = times*num\n score = random.randint(times, maxscore)\n await ctx.send(ctx._(\"roll_result\").format(score=score, maxscore=maxscore))", "def roll_die(self):\n number = randint(1, self.sides) \n print(number)", "def roll(self):\n self.rolled = random.randint(1, 6)\n return self.rolled", "def roll_dices(self):\n dice1 = random.randint(1, 6)\n dice2 = random.randint(1, 6)\n\n self.client.send_player_end_dices()\n self.game.player_rolled_dices([dice1, dice2])\n asyncio.ensure_future(self.move(dice1 + dice2))", "def dice():\n return random.randrange(1, 7)", "async def roll_l5r(ctx, dice):\n await ctx.send(display_counts(resolve_l5r(roll_many(dice, L5R_DICE))))", "def roll_cheating_dice(number,faces):\n \n dice_number = 0\n cheating_list = list(range(1,faces+1))\n cheating_list.append(3)\n for i in range(number):\n dice_number += random.choice(cheating_list)\n return dice_number", "def roll(self) -> int:\n return self.rand.randint(1, self.sides)", "def roll_dice(num_dice, die_type):\n result = 0\n for i in range(num_dice):\n result += random.randint(1, die_type)\n\n return result", "def roll_dice(player: int) -> int:\n sides = 6\n roll_again = input(\"Player {}: Press ENTER to roll your dice...\".format(player))\n num_rolled = roll(sides)\n print(\"You rolled {}.\".format(num_rolled))\n return num_rolled", "def roll_2_dice():\n return random.randint(2, 13)", "def roll_dice():\n print(colored(\"Lanzando tu dado...\", \"green\", attrs=['bold']))\n while True:\n dice = random.randint(1, 6)\n if dice != 3:\n return dice\n else:\n print(colored(\"Tu dado es 3, lancemos de nuevo\", \"green\", attrs=['bold']))\n continue", "def q6(array):\n a = array[2]\n b = array[6]\n c = array[10]\n d = array[14]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 
8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], array[1], a1, array[3], array[4], array[5], b1, array[7], array[8], array[9], c1, array[11], array[\n 12], array[13], d1, array[15]", "def roll(self, *diceNums):\n if not diceNums:\n diceNums = list(range(0,5))\n for i in diceNums:\n self._dice[i - 1] = random.randint(1,6);\n self._dice = Dice.normalize(self._dice)", "def roll_the_dice(self, dice):\n if type(dice) == list:\n for die in dice:\n die.roll()", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format(name=name, roll=roll)\n return roll", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format( name=name, roll=roll)\n return roll", "def simple_roll(dice):\n return roll(dice).total", "def roll(self):\n return randint(1, self.sides)", "def roll(self):\n return randint(1, self.sides)", "def rolldice(self):\n raise NotImplementedError()", "def roll(self, dice_num, modifier, sides=6) -> int:\n result = 0\n for i in range(dice_num):\n result += r.randint(1, sides)\n result += modifier\n return result", "def three_d6() -> list:\n rolls: List[int] = []\n for i in range(1, 7):\n roll: int = multi_die(3, 6)\n rolls.append(roll)\n return rolls", "def roll(self):\n\t\treturn randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll(self):\n return randint(1, self.num_sides)", "def roll_the_dice(self, index):\n # first roll\n first_roll_result = self._rolls_list[index].roll_dice()\n print(f'FIRST ROLL: {first_roll_result}\\n')\n\n # first roll: prompt player to keep, reroll, or select dice\n keep_first_roll = self._rolls_list[index].keep_dice(\n self._players_list[index].name.upper())\n\n # second roll\n second_roll_result = self._rolls_list[index].reroll_dice(\n keep_first_roll)\n print(f'\\nSECOND ROLL: {second_roll_result}\\n')\n\n # second roll: prompt player to keep, reroll, or select dice\n keep_second_roll = self._rolls_list[index].keep_dice(\n self._players_list[index].name.upper())\n\n # third roll\n self.final_roll = self._rolls_list[index].reroll_dice(\n keep_second_roll)\n print(f'\\nFINAL ROLL: {self.final_roll}\\n')", "def roll_dice(number,faces):\n \n dice_number = 0\n for i in range(number):\n dice_number += random.randint(1, faces)\n return dice_number", "def select_dice(score, opponent_score):\r\n if (score+opponent_score)%7 == 0:\r\n return four_sided\r\n return six_sided", "def roll(self):\n return random.randint(1,self.sides)\n #return int(self.sides*random.random() + 1.0)", "def rollDie():\n return random.choice([1, 2, 3, 4, 5, 6])", "def roll(number, sides):\n total = 0\n for _ in range(number):\n total += random.randint(1, sides + 1)\n return total", "def roll_dice(check_double=True):\n\n roll = np.random.choice(np.arange(1, 7), 2)\n\n if check_double:\n return roll.sum(), roll[0] == roll[1]\n else:\n return roll.sum()", "def roll(d=20):\n\treturn random.randint(1, d)", "def roll(self):\n return random.randrange(1, sides + 1)", "def __dice_generator(self):\n self.current_dice = np.random.randint(1, 6 + 1)", "def four_d6_drop_lowest() -> list:\n rolls: List[int] = []\n for x in range(1, 7):\n new_val: int = 0\n i: int = 0\n while i < 7:\n roll: int = multi_die(3, 6)\n if roll >= new_val:\n new_val = roll\n i += 1\n 
rolls.append(new_val)\n return rolls", "def sixes_points(dice_list):\n return dice_list.count(6) * 6", "def roll(self):\r\n import random as _random\r\n return _random.randint(1, self.__sides_count)", "async def roll_sw(ctx, dice):\n await ctx.send(display_counts(resolve_sw(roll_many(dice, SW_DICE))))", "def roll_the_dices(num_of_iterations: int) -> None:\n # initial variables\n player_wins: int = 0\n theoretical_win_chance: float = round(15/36, 4)\n\n # main loop\n for _ in range(num_of_iterations):\n croupier_roll = random.randint(1, 6)\n player_roll = random.randint(1, 6)\n if player_roll < croupier_roll:\n player_wins += 1\n\n experimental_win_chance = round(player_wins / num_of_iterations, 4)\n print(f\"Results: \\n\"\n f\"Theoretical probability of winning a single game: {theoretical_win_chance:.2%}\\n\"\n f\"Experimental probability of winning a single game: {experimental_win_chance:.2%}\")", "def dice(name):", "def roll(self):\n rolls = []\n if self.dice_array is not None:\n for dice in self.dice_array:\n rolls.append(np.random.randint(1, dice+1))\n else:\n for _ in range(0,self.number):\n rolls.append(np.random.randint(1, self.sides+1))\n #Fast way from stack overflow to determine if all\n #entries in \"rolls\" are equal, i.e. when doubles are rolled\n #but for arbitrary number of dice\n doubles = not rolls or [rolls[0]]*len(rolls) == rolls\n return np.sum(rolls), rolls, doubles", "def roll_1d10() -> int:\n ten_percent = Die(10)\n ten_percent.roll_die()\n chance = ten_percent.get_value()\n return chance", "def take_turn(num_rolls, opponent_score, dice=six_sided):\r\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\r\n assert num_rolls >= 0, 'Cannot roll a negative number of dice.'\r\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\r\n #assert opponent_score > 100, 'The game should be over.'\r\n if num_rolls == 0:\r\n if opponent_score//10 > opponent_score%10:\r\n return opponent_score//10 + 1\r\n else:\r\n return opponent_score%10 + 1\r\n return roll_dice(num_rolls, dice)", "def roll(self):\n return cbrandom.throwDices(\"1d20\")", "def throw_dice(self):\n self.dice = []\n for i in range (6):\n die = random.randint(1, 6)\n self.dice.append(die)\n self.num_throws += 1", "def throw_dice(N: int, faces: int, total: int) -> int:\n if total == 0:\n return 1\n\n # dp[i][j] returns the number of ways to get to the sum `i` using `j` dice\n dp = [[0 for _ in range(total + 1)] for _ in range(N)]\n\n # Initialize the array for the first die, which can only achieve the total for each face it\n # rolls\n for curr_roll in range(1, min(faces + 1, total + 1)):\n dp[0][curr_roll] = 1\n\n # For each die, iterate through each potential total and simulate a roll from the die. 
We can\n # add the number of ways to reach `current_total - current_roll` using n - 1 die (if we are\n # currently using n die).\n for die in range(1, N):\n for curr_total in range(1, total + 1):\n for curr_roll in range(1, min(curr_total, faces + 1)):\n dp[die][curr_total] += dp[die - 1][curr_total - curr_roll]\n return dp[-1][-1]", "def select_dice(score, opponent_score, dice_swapped):\n # BEGIN PROBLEM 4\n dice = six_sided\n if dice_swapped == True:\n dice = four_sided\n # END PROBLEM 3\n if (score + opponent_score) % 7 == 0:\n dice = reroll(dice)\n return dice", "def roll_dice():\n result = random.randint(1, 101)\n if result <= 5:\n return True\n else:\n return False", "def fours(dice):\n for i, j, k, l in combinations(dice, 4):\n if i == j == k == l:\n return 4 * i\n\n return 0", "def roll(*args):\n try:\n sides = int(args[0])\n except:\n return \"I need an integer if you want me to roll these dice.\"\n return \"The result is...{0}!\".format(randint(1, sides))", "def take_turn(num_rolls, opponent_score, dice=six_sided_dice, who='Boss Hogg'):\r\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\r\n assert num_rolls >= 0, 'Cannot roll a negative number of dice.'\r\n if commentary:\r\n print(who, 'is going to roll', num_rolls, 'dice')\r\n if num_rolls == 0:\r\n return (opponent_score // 10) + 1\r\n return roll_dice(num_rolls, dice, who)", "async def roll(ctx, num_dice=0, num_sides=0):\n\n if num_sides == 0 or num_dice == 0:\n await ctx.send(\"Usage: `{}roll [num dice] [num sides]`\".format(PREFIX))\n return\n\n if num_sides > 1_000_000:\n num_sides = 1_000_000\n if num_dice > 1_000_000:\n num_dice = 1_000_000\n\n dice = [random.randint(1, num_sides) for _ in range(num_dice)]\n total = 0\n for die in dice:\n total += die\n\n message = \"**Total:** \" + str(total) + \"\\n**Rolls:** \" + str(dice)\n if len(message) >= 2000:\n message = \"**Total:** \" + str(total) + \"\\nRolls emitted due to length\"\n await ctx.send(message)", "def roll(self):\n return tuple(d.roll() for d in self.dice) ## note: len(result) == 2 always", "def exercise9():\n#seed here is 8\n np.random.seed(seed=8)\n#Generate random numbers from 2 dice for 1000o trials. 
ranges from [0,5]\n#diceThrows = np.random.randint(6, size=(1000,2))\n #print(diceThrows.shape)\n for i in range(1,11):\n count=0\n diceThrows = np.random.randint(6, size=(1000, 2))\n for x,y in diceThrows:\n if x == 5 and y == 5: #double sixes\n count = count + 1\n\n print(\"Trial \", i, \"= \", count/1000)", "def roll(count=2):\n # Generate a list of dice\n try:\n count = int(count)\n except ValueError:\n print(\"%s is not an integer\" % count)\n return -1\n if count < 1:\n raise ValueError(\"You must have at least one die\")\n print(\"Rolling {0} dice\".format(count))\n dice = [Die() for i in range(count)]\n total = 0\n idx = 0\n for d in dice:\n d.throw()\n idx += 1\n d.log.info(\"Die {} was {}\".format(idx, d.get_value()))\n total += d.get_value()\n if count == 2 and dice[0].get_value() == dice[1].get_value():\n print(\"Doubles!!\")\n print(\"Total number rolled is {}\".format(total))\n return tuple([d.get_value() for d in dice])", "async def roll(self, dice: str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await self.bot.say('Format has to be in NdN!')\n return\n\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await self.bot.say(result)", "def select_dice(score, opponent_score):\r\n k = score + opponent_score\r\n if k%7 == 0:\r\n return four_sided_dice\r\n else:\r\n return six_sided_dice", "def roll(self):\n return random.choice(self.sides)", "def q5(array):\n a = array[3]\n b = array[7]\n c = array[11]\n d = array[15]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], array[1], array[2], a1, array[4], array[5], array[6], b1, array[8], array[9], array[10], c1, array[\n 12], array[13], array[14], d1", "def fours(dice):\n return sum([x for x in dice if x == 4])", "def roll(self, mask, target, args):\n d = re.match(r'(?P<rolls>\\d#)?(?P<dice>\\d+)d(?P<sides>\\d+)(?P<math>[\\+\\-\\*]\\d+)?', args['<dice>'])\n count = int(d.group('dice'))\n sides = int(d.group('sides'))\n\n rolls = int(d.group('rolls')) if d.group('rolls') else None\n math = d.group('math') if d.group('math') else None\n\n if rolls and rolls > 10:\n self.bot.privmsg(target, \"That's way too many rolls.\")\n irc3.base.logging.log(irc3.base.logging.WARN,\n \"%s in %s tried to roll %d sets of dice\" % (mask.nick, target, rolls))\n if sides > 100:\n self.bot.privmsg(target, \"That's an absurd number of sides.\")\n irc3.base.logging.log(irc3.base.logging.WARN,\n \"%s in %s tried to roll a %d sided dice\" % (mask.nick, target, sides))\n return\n if count > 100:\n self.bot.privmsg(target, \"That's too many dice!\")\n irc3.base.logging.log(irc3.base.logging.WARN,\n \"%s in %s tried to roll %d dice\" % (mask.nick, target, count))\n return\n\n dice = []\n result = \"\"\n\n for n in range(0, count):\n dice.append(self.rng.randint(1, sides))\n\n # Sum the results\n result += (\"=> %s\" % 
sum(dice))\n\n # Apply any math transforms\n if math:\n result += \"%s ==> %s\" % (d.group('math'), int(eval(\"%s %s\" % (sum(dice), math))))\n\n # Concatenate the description test\n if args[\"<description_text>\"]:\n result += (\": %s\" % ' '.join(args[\"<description_text>\"]))\n\n # Shell out to shadowrun if necessary\n if args[\"-s\"]:\n result += (\" %s\" % count_shadowrun(dice))\n\n self.msg(mask, target, str(dice) + result)", "async def roll(self, ctx, sides: int = None):\n if sides is None:\n await ctx.send(\"You have to tell me how many sides the die has!\")\n return\n roll = random.randint(1, sides)\n await ctx.send(\"You rolled: \" + str(roll))", "def take_turn(num_rolls, opponent_score, dice=six_sided):\n # Leave these assert statements here; they help check for errors.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.'\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\n assert opponent_score < 100, 'The game should be over.'\n # BEGIN PROBLEM 2\n score = 0\n # free bacon rule implementation\n if num_rolls == 0:\n score = free_bacon(opponent_score)\n else:\n score = roll_dice(num_rolls, dice)\n # hogtimus prime rule implementation\n if score == 19:\n score = 23\n if score == 17:\n score = 19\n if score == 13:\n score = 17\n if score == 11:\n score = 13\n if score == 7:\n score = 11\n if score == 5:\n score = 7\n if score == 3:\n score = 5\n if score == 2:\n score = 3\n # when pigs fly rule implementation\n if score > 25 - num_rolls:\n score = 25\n score -= num_rolls\n return score\n\n # END PROBLEM 2", "def roll_dices():\n dices = []\n\n for i in range(DICE_COUNT):\n dice = random.randrange(MIN_DICE, MAX_DICE + 1)\n dices.append(dice)\n\n return dices", "def roll_dice(self, number_of_dice, size_of_dice):\n # makes a list of random numbers based on the information\n # that was put in\n dice = []\n for roll in range(int(number_of_dice)):\n roll = random.randint(1, int(size_of_dice))\n dice.append(roll)\n\n # Checks wether the result needs to be sorted or not\n if self.sort is True:\n dice.sort()\n # Turns ints into strings after sorting\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n # Sets the last roll flag and returns to sort flag to false\n self.last_roll = converted_dice\n self.sort = False\n # Sets the last roll flag for easy cross function use.\n else:\n # Turns Ints into strings incase it had to be sorted\n converted_dice = []\n for i in range(len(dice)):\n roll_to_convert = dice[i]\n roll_to_convert = str(roll_to_convert)\n converted_dice.append(roll_to_convert)\n self.last_roll = converted_dice", "def testRoll(self):\n \n nsides=3\n die = BaseDie(nsides)\n lighted_die = LightedDie(nsides,colors={1:'blue',2:'yellow',3:'gold'})\n\n self.assertEqual(die.last_roll,None)\n\n die.roll()\n lighted_die.roll()\n\n for d in [die,lighted_die]:\n self.assertTrue(d.last_roll>0 and d.last_roll <= nsides)", "async def rolldice(self, ctx, ndm):\n\n output = '__Dice roll:__\\n'\n\n if ndm.lower() == 'dnd' or ndm.lower() == 'd&d':\n for d in (4, 6, 8, 10, 10, 12, 20):\n output += f'd{d}: {random.randint(1, int(d))}\\n'\n else:\n n, d = ndm.lower().split('d')\n\n for i in range(1, int(n) + 1):\n output += f'd{d}: {random.randint(1, int(d))}\\n'\n\n await ctx.send(output)" ]
[ "0.7968151", "0.7608066", "0.7531591", "0.7409087", "0.73609334", "0.72943085", "0.7153998", "0.7130241", "0.71107805", "0.7101462", "0.70783186", "0.7048907", "0.7019933", "0.70183617", "0.70042896", "0.6980534", "0.695242", "0.69513273", "0.69413614", "0.69020563", "0.68830836", "0.6879309", "0.68481946", "0.68404114", "0.6818825", "0.68029165", "0.67846316", "0.67430085", "0.67363703", "0.6660018", "0.6655006", "0.66489184", "0.66386294", "0.6615616", "0.6601279", "0.6559032", "0.6558753", "0.6558604", "0.65585864", "0.655132", "0.6549716", "0.6528109", "0.6487111", "0.6475718", "0.6472705", "0.646448", "0.6459245", "0.6451324", "0.6451324", "0.6450676", "0.64404553", "0.64309627", "0.64143556", "0.6414117", "0.6414117", "0.6414117", "0.6414117", "0.6382783", "0.6379531", "0.63658017", "0.6326513", "0.6324421", "0.6313751", "0.63093686", "0.6287955", "0.62838054", "0.62770766", "0.6262429", "0.62534475", "0.62217224", "0.620924", "0.6200591", "0.6200026", "0.61954546", "0.616202", "0.61575496", "0.6154245", "0.6142151", "0.6131661", "0.61124516", "0.6096537", "0.60883576", "0.6075171", "0.60700625", "0.6043588", "0.6041401", "0.6022673", "0.6021222", "0.60172105", "0.6014682", "0.60011315", "0.599895", "0.5998324", "0.5993698", "0.5993317", "0.5989951", "0.59868145", "0.5972785", "0.5960922", "0.5948878" ]
0.6365586
60
Constructor for thread that will request the RSS of a particular podcast series, parse the series details and episode information, and save the information w/`storer`
def __init__(self, storer, series, i):
    super(EpisodeWorker, self).__init__()
    self.storer = storer
    self.series = series  # All series
    self.i = i
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n while self.i < len(self.series):\n # Grab line + RSS\n s = self.series[self.i]\n rss = self.request_rss(s.feedUrl)\n\n # Compose Episodes\n ep_dicts = []\n for entry in rss['entries']:\n ep_dicts.append(Episode(s, entry).__dict__)\n\n # Build result dict\n result_dict = dict()\n result_dict['series'] = deepcopy(s.__dict__)\n result_dict['series']['genres'] = \\\n result_dict['series']['genres'].split(';')\n result_dict['series']['type'] = 'series'\n result_dict['episodes'] = ep_dicts\n\n # Store podcast\n self.storer.store(result_dict)\n\n # Move onto the next one\n self.i += 20\n print(\"Retrieved \" + str(s.id))", "def __init__(self, **kwargs):\n self.identifier = kwargs.get(\"identifier\")\n self.playback_state = kwargs.get(\"playback_state\")\n self.title = kwargs.get(\"title\")\n self.series_name = kwargs.get(\"series_name\")\n self.artist = kwargs.get(\"artist\")\n self.album = kwargs.get(\"album\")\n self.genre = kwargs.get(\"genre\")\n self.total_time = kwargs.get(\"total_time\")\n self.position = kwargs.get(\"position\")\n self.season_number = kwargs.get(\"season_number\")\n self.episode_number = kwargs.get(\"episode_number\")\n self.repeat = kwargs.get(\"repeat\")\n self.shuffle = kwargs.get(\"shuffle\")\n self.media_type = kwargs.get(\"media_type\")\n self.playback_rate = kwargs.get(\"playback_rate\")\n self.supported_commands = kwargs.get(\"supported_commands\")\n self.artwork = kwargs.get(\"artwork\")\n self.artwork_identifier = kwargs.get(\"artwork_identifier\")\n self.artwork_mimetype = kwargs.get(\"artwork_mimetype\")\n self.artwork_width = kwargs.get(\"artwork_width\")\n self.artwork_height = kwargs.get(\"artwork_height\")\n self.skip_time = kwargs.get(\"skip_time\")\n self.app_name = kwargs.get(\"app_name\")\n self.content_identifier = kwargs.get(\"content_identifier\")", "def __init__(self):\n\t\t\n\t\tprint \"Getting latest stories from Hacker News...\"\n\t\t#try:\n\t\tself.stories = self.h.getLatestStories(self.newestOrTop, self.alreadyReadList)\n\t\t\n\t\tfor i in range(1,self.h.numberOfStoriesOnFrontPage+1):\n\t\t\tself.oneToThirty.append(str(i))\n\t\t\tself.oneToThirtyComments.append(\"c\" + str(i))\n\t\t\tself.oneToThirtyPlusComments.append(str(i) + \"+\")\n\t\t\tself.oneToThirtySubmitters.append(\"s\" + str(i))\n\t\t\tself.oneToThirtyInstapaper.append(\"i\" + str(i))\n\t\t\t\n\t\tself.setPreferencesAtStartup()\n\n\t\tif self.hnUserName != \"\":\n\t\t\tprint \"Getting \" + self.hnUserName + \"'s karma from HN...\"\n\t\t\tuser = HackerNewsUser(self.hnUserName)\n\t\t\tself.karma = user.karma\n\n\t\tself.printStories()\n\t\t\n\t\t#except:\n\t\t#\tprint \"error\"\n\t\t#\tself.quit = 1\n\n\t\tself.loop()", "def __init__(self, json):\n\n self.id = json[\"id\"]\n self.alternateId = json[\"alternateId\"]\n\n if \"airDate\" in json:\n self.airDate = datetime.strptime(json[\"airDate\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"name\" in json:\n self.name = json[\"name\"]\n\n if \"title\" in json:\n self.title = json[\"title\"]\n\n if \"description\" in json:\n self.description = json[\"description\"]\n\n if \"episode\" in json:\n self.episode = json[\"episode\"]\n\n if \"episodeNumber\" in json:\n self.episodeNumber = json[\"episodeNumber\"]\n else:\n self.episodeNumber = None\n\n if \"season\" in json:\n self.season = json[\"season\"]\n\n if \"seasonNumber\" in json:\n self.seasonNumber = json[\"seasonNumber\"]\n else:\n self.seasonNumber = None\n\n if \"publishStart\" in json:\n self.publishStart = datetime.strptime(json[\"publishStart\"], 
'%Y-%m-%dT%H:%M:%SZ')\n\n if \"publishEnd\" in json:\n self.publishEnd = datetime.strptime(json[\"publishEnd\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"videoDuration\" in json:\n self.videoDuration = timedelta(milliseconds=json[\"videoDuration\"])\n\n if \"isFreePlayable\" in json:\n self.isFreePlayable = json[\"isFreePlayable\"]\n\n if \"isPlayable\" in json:\n self.isPlayable = json[\"isPlayable\"]\n\n if \"isNew\" in json:\n self.isNew = json[\"isNew\"]\n\n if \"image\" in json:\n self.image = Image(json[\"image\"])", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def __init__(self, data, feed_repo):\n super(Feed, self).__init__()\n self.url = data['url']\n self.name = data.get('name')\n read_stamp = data.get(\"last_read\")\n if read_stamp:\n self.last_read = datetime.datetime.fromtimestamp(read_stamp, tz=datetime.timezone.utc)\n else:\n self.last_read = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)\n self._repo = feed_repo", "def __init__(self, thoonk, feed):\n Queue.__init__(self, thoonk, feed)\n\n self.feed_publishes = 'feed.publishes:%s' % feed\n self.feed_published = 'feed.published:%s' % feed\n self.feed_cancelled = 'feed.cancelled:%s' % feed\n self.feed_retried = 'feed.retried:%s' % feed\n self.feed_finishes = 'feed.finishes:%s' % feed\n self.feed_claimed = 'feed.claimed:%s' % feed\n self.feed_stalled = 'feed.stalled:%s' % feed\n self.feed_running = 'feed.running:%s' % feed\n \n self.job_finish = 'job.finish:%s' % feed", "def __init__(self, number, json):\n\n self.number = number\n self.episodes = []\n for episode in json:\n self.episodes.append(Episode(episode))", "def __init__(self: object) -> None:\n self.empty: bool = True\n self.episode_broadcast: str = \"\"\n self.episode_id: int = 0\n self.episode_inspectors: str = \"\"\n self.episode_name: str = \"\"\n self.episode_sequence: str = \"\"\n self.episode_url: str = \"\"\n self.episode_year: int = 0", "def get_podcast_episodes(url):\n\n def parse_pubdate(date_string):\n \"\"\"\n Change pubdate string to datetime object. 
Tries a bunch of\n possible formats, but if none of them is a match, it will\n return a epoch = 0 datetime object\n\n :param date_string: A string representing a date\n :return: datetime object\n \"\"\"\n date_formats = (\n '%a, %d %b %Y %H:%M:%S +0000',\n '%a, %d %b %Y',\n '%a, %d %b %Y%H:%M:%S +0000',\n '%a, %d %b %Y %H:%M',\n '%a, %d %b %Y %H.%M'\n )\n df_generator = (format for format in date_formats)\n\n date = None\n while date is None:\n try:\n date = datetime.strptime(date_string, next(df_generator))\n except ValueError:\n pass\n except StopIteration:\n date = datetime.fromtimestamp(0)\n\n return date\n\n doc = get_document(url)\n\n return (\n {\n 'url': item.select('guid')[0].text,\n 'Premiered': parse_pubdate(\n item.select('pubdate')[0].text\n ).strftime(\"%d.%m.%Y\"),\n # 'Duration': duration_to_seconds(item.find('itunes:duration').text),\n 'title': item.title.text,\n 'Plot': item.description.text\n }\n for item in doc.find_all(\"item\")\n )", "def __init__(self, rss_url=None, cell_num=None, cache_filename=None):\n if not any([rss_url, cell_num]):\n raise Exception('Must pass rss url and cell number.')\n self.rss_url = str(rss_url)\n self.cell_num = cell_num\n self.cache_filename = str(cache_filename)\n self.from_num = '4088685453'\n\n # Set up twilio client for sending text messages\n account = os.environ.get('TWILIO_ACCT')\n token = os.environ.get('TWILIO_TOKEN')\n self.twilio_client = TwilioRestClient(account, token)\n\n self.load_last_post()", "def __init__(self, url, epRange):\n self.driver = webdriver.PhantomJS()\n self.downloads = OrderedDict() # sort episodes in asending order\n self.pbar = \"\" # Download Progressbar\n self.Main(url, epRange)", "def run_rss(self):\n\n pass", "def __init__( self ):\n\n self.log = gLogger.getSubLogger( self.__class__.__name__ )\n self.rssConfig = RssConfiguration()\n self.__opHelper = Operations()\n self.rssClient = None\n\n # We can set CacheLifetime and CacheHistory from CS, so that we can tune them.\n cacheLifeTime = int( self.rssConfig.getConfigCache() )\n\n # RSSCache only affects the calls directed to RSS, if using the CS it is not\n # used.\n self.seCache = RSSCache( 'StorageElement', cacheLifeTime, self.__updateSECache )", "def __init__(self, url, start_pos, end_pos, f):\n\n super(DownloadThread, self).__init__()\n self.url = url\n self.start_pos = start_pos\n self.end_pos = end_pos\n self.fd = f", "def __init__(self, SONG):\n self.track_name = SONG['name']\n self.artist_name = SONG['artist']\n self.provider = 'lastfm'\n self.track_number = \"1\"\n self.collection_name = \"\"\n self.release_date = \"\"\n self.artwork_url_100 = SONG[\"image\"][-1][\"#text\"]\n self.track_time = \"\"\n self.primary_genre_name = \"N/A\"", "def __init__(self, name, storyline, trailer, poster):\n # Assigning the values of the instances to the class variables\n self.title = name\n self.mov_story = storyline\n self.trailer_youtube_url = trailer\n self.poster_image_url = poster", "def __init__(self):\n self.reddit = praw.Reddit('bot1')\n self.thread = None\n self.handle = None\n self.refresh_delay = None\n # Manage the checked comments\n if not os.path.isfile(\"reddit_comments_replied_to.txt\"):\n self.checked_comments = []\n else:\n # Read the file into a list and remove any empty values\n with open(\"reddit_comments_replied_to.txt\", \"r\") as f:\n self.checked_comments = f.read()\n self.checked_comments = self.checked_comments.split(\"\\n\")\n self.checked_comments = list(filter(None, self.checked_comments))\n # Manage the checked posts\n if not 
os.path.isfile(\"reddit_posts_replied_to.txt\"):\n self.checked_posts = []\n else:\n with open(\"reddit_posts_replied_to.txt\", \"r\") as f:\n self.checked_posts = f.read()\n self.checked_posts = self.checked_posts.split(\"\\n\")\n self.checked_posts = list(filter(None, self.checked_posts))", "def __init__(self):\n self.site = ('http://vortex.plymouth.edu/cgi-bin/gen_statlog-u.cgi')\n \"\"\"Root of URL to query for data.\"\"\"\n yesterday = datetime.today() - timedelta(days=1)\n self.year = yesterday.year\n \"\"\"Year to get data for.\"\"\"\n self.month = yesterday.month\n \"\"\"Month to get data for.\"\"\"\n self.day = yesterday.day\n \"\"\"Day to get data for.\"\"\"\n self.stns = dict(yvr=\"CYVR\",\n sandheads=\"CWVF\")\n \"\"\"Mapping of common station names to official station IDs.\"\"\"", "def __init__(self, title, year,story, poster_url, trailer_url):\n self.title = title\n self.year = year\n self.story = story\n self.poster_url = poster_url\n self.trailer_url = trailer_url", "def __init__(self):\n self.score = None\n self.avg_score = None\n self.std_dev = None\n self.scores = [] # list containing scores from each episode\n self.avg_scores = [] # list containing average scores after each episode\n self.scores_window = deque(maxlen=100) # last 100 scores\n self.best_avg_score = -np.Inf # best score for a single episode\n self.time_start = time.time() # track cumulative wall time\n self.total_steps = 0 # track cumulative steps taken\n self.writer = SummaryWriter(\"../results/\") # this is where tensorboard results are stored", "def __init__(self, seriesDir, parQ, interval=.2):\n # start the thread upon completion\n Thread.__init__(self)\n\n # set up logger\n self.logger = logging.getLogger(__name__)\n\n # initialize class parameters\n self.interval = interval # interval for polling for new files\n self.seriesDir = seriesDir # full path to series directory\n self.parQ = parQ # queue to store par header files\n self.alive = True # thread status\n self.numParsAdded = 0 # counter to keep track of # mosaics\n self.queued_par_files = set() # empty set to store names of queued mosaic\n self.par_pattern = re.compile(fnmatch.translate('*.par'), re.IGNORECASE)", "def __init__(self, title):\n # will hit the TMDB API on every instantiation\n search = tmdb.Search()\n response = search.movie({'query': title})\n\n # if there are any results to querying for the title, take the first result\n if len(search.results) > 0:\n self.ID = uuid.uuid4()\n self.TMDB_ID = search.results[0]['id']\n movie = tmdb.Movies(self.TMDB_ID).info() # get all the information available\n\n # save off a few interesting attributes\n self.title = movie['title']\n self.release_date = movie['release_date']\n self.popularity = movie['popularity']\n self.overview = movie['overview']\n else:\n self.initialize()\n print \" ##### Warning: could not find any matches for %s\" % title", "def __init__(self, URL):\n\n # add to topics list to retreive different topics from CBC RSS feed\n base = \"/cmlink/rss-\"\n topics = [\"politics\", \"technology\", \"sports\"]\n article_id = 1\n self.articles = []\n\n for topic in topics:\n\n # build our url string to make it dynamic\n full_url = URL + base + topic\n # gives us all article urls\n urls = getArticleURLs(full_url)\n\n for url in urls:\n new_article = Article(url, topic, article_id)\n\n # is it a valid article url?\n if new_article.article_id != -1:\n article_id += 1\n self.articles.append(new_article)\n \n # break # remove this to get all articles", "def __init__(self, API, 
playlist_uri):\n\n self.API = API\n self.playlist_uri = playlist_uri\n self.metadata = None", "def __init__(self, movie_title, release_date, movie_storyline, poster_image,\n trailer_youtube, more_link):\n\n self.title = movie_title\n self.date = release_date\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.more_url = more_link", "def __init__(self, ticker):\n EClient.__init__(self, self)\n self.previous_ts = None\n # Collect 1 min of data with 5 sec frequency\n self.data_collection = list()", "def __init__(self, server_id, subsection_id, output_queue):\n self.online = False\n self.id = server_id\n self.type = subsection_id\n self.queue = deque()\n self.is_serving = False\n self.current_passenger = None\n self.output_queue = output_queue\n self.max_queue_size = 1\n self.utilization = 0.0\n self.utilization_anchor = 0\n self.utilization_series = pd.Series(np.nan,\n index=hourly_timestamps,\n name=self.id)", "def __init__(self, _id, a_cookie, a_user_agent):\n self._id = _id\n self.ticker = ''\n self.pub_date = '0001-01-01'\n self.author = ''\n self.title = ''\n self.text = ''\n self.includes = ''\n\n self.comments = []\n self.valid = True\n self._parse_article(a_cookie, a_user_agent)", "def __init__(self, series):\n if series < 0 or series > 11:\n raise ValueError('Series has to be between 1 and 11')\n self._series = series\n self._list = []\n self._dict = {}\n self._dictID = 0\n\n self.hero_series()", "def __init__(self, id, show_name, author, description=None,episodes=None):\n self.id = id\n self.show_name = show_name\n self.author = author\n self.description = description\n self.episodes = episodes", "def __init__(self, movie_title, movie_storyline, poster_image, trailer):\r\n self.title = movie_title\r\n self.storyline = movie_storyline\r\n self.poster_image_url = poster_image\r\n self.trailer_youtube_url = trailer", "def __init__(\r\n self, movie_title, movie_storyline, poster_image, \r\n trailer_youtube): \r\n self.title = movie_title\r\n self.storyline = movie_storyline\r\n self.poster_image_url = poster_image\r\n self.trailer_youtube_url = trailer_youtube", "def start(self, ev_channel=None, rss_url=None, rss_rate=None):\n global _SESSIONS\n if not _SESSIONS:\n from evennia.server.sessionhandler import SESSIONS as _SESSIONS\n\n if ev_channel:\n # connect to Evennia channel\n channel = search.channel_search(ev_channel)\n if not channel:\n raise RuntimeError(\"Evennia Channel '%s' not found.\" % ev_channel)\n channel = channel[0]\n self.db.ev_channel = channel\n if rss_url:\n self.db.rss_url = rss_url\n if rss_rate:\n self.db.rss_rate = rss_rate\n # instruct the server and portal to create a new session with\n # the stored configuration\n configdict = {\"uid\": self.dbid,\n \"url\": self.db.rss_url,\n \"rate\": self.db.rss_rate}\n _SESSIONS.start_bot_session(\"evennia.server.portal.rss.RSSBotFactory\", configdict)", "def __init__(self, title, storyline, poster_image_url, trailer_youtube_url):\n self.title = title\n self.storyline = storyline\n self.poster_image_url = poster_image_url\n self.trailer_youtube_url = trailer_youtube_url", "def __init__(self, thoonk):\n self._feeds = {}\n self.thoonk = thoonk\n self.lock = threading.Lock()\n self.instance = uuid.uuid4().hex", "def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):\n\t\tself.title = movie_title\n\t\tself.storyline = movie_storyline\n \t\tself.poster_image_url = poster_image\n\t\tself.trailer_youtube_url = trailer_youtube", 
"def __init__(self,movie_title,movie_storyline,poster_image,youtube_trailer,release_date):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = youtube_trailer\n self.release_date = release_date", "def __init__(self, movie_title, movie_storyline,\n poster_image, trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube", "def parse_series(self, response):\n\n articles = response.css(\"div.blog.list.taglist > div.inner > div#secondlist > article\")\n\n for raw_post in articles:\n onclick_attr = raw_post.css(\"::attr(onclick)\").extract_first()\n serie_link_match_group = re.search('location.href=\\'(.+?)\\';', onclick_attr)\n if serie_link_match_group:\n serie_link = serie_link_match_group.group(1)\n else:\n bad_serie_id = raw_post.css(\"::attr(id)\").extract_first()\n raise ValueError(\"Series url not found ! Series ID: {}\".format(bad_serie_id))\n\n yield scrapy.Request(url=\"https://blog.griddynamics.com{}\".format(serie_link), callback=self.parse_article)", "def __init__(self, main_url, year, folder):\n\n # logger setting\n logging.basicConfig(\n filename='crawler.log',\n level=logging.INFO,\n format='[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s'\n '- %(message)s',\n datefmt='%H:%M:%S'\n )\n\n # set up logging to console\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s '\n '%(message)s')\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('crawler_app').addHandler(console)\n\n self.logger = logging.getLogger('crawler_app')\n\n # configuration / init\n\n self.shelve_obj = None\n self.maven_url = main_url\n self.year = year\n self.file_ext = '.txt'\n self.counter = 0\n self.url_to_parse = list()\n self.list_year_month_url = list()\n self.folder = 'mailbox/'\n self.meta_file_name = self.folder + 'meta.shelve'\n self.process_folder(folder)\n\n list_url = self.parse_main_page()\n msg_year_month = self.parse_year_month_link()\n self.parse_raw_msg()", "def scan_thread(self, response):\n story_item = response.meta.get(\"story_item\")\n print(\"\\nscraping thread {0}\\n\".format(response.url))\n\n # div_tmarks is a list of all threadmarked posts on this story thread\n # ...at least on this PAGE of the story.\n div_tmarks = response.xpath(\"//li[contains(@class, 'hasThreadmark')]\")\n \n if div_tmarks is not None and len(div_tmarks) > 0:\n\n for div_tmark in div_tmarks:\n # story_seg = StorySegment()\n\n author = div_tmark.xpath(\"@data-author\").extract_first()\n\n author_seg, created = Author.objects.get_or_create(name=author)\n\n title = \"\".join(div_tmark.xpath(\"div/span/text()\").extract()).encode('utf-8')\n title = \" \".join(title.split())\n\n # Get the Date and clean it up/format it ======================================\n date = div_tmark.xpath(\".//span[@class='DateTime' and ../@class!='editDate']/@title\").extract_first()\n if date is None:\n date = div_tmark.xpath(\".//abbr[@class='DateTime']/text()\").extract_first()\n date_obj = datetime.strptime(date, \"%b %d, %Y at %I:%M %p\")\n date_obj = date_obj.replace(tzinfo=utc)\n # story_seg.published = date_obj\n # =============================================================================\n\n story_seg, seg_created = StorySegment.objects.get_or_create(story=story_item,\n 
title=title,\n published=date_obj)\n\n # If you want to include the formatting of the original page, change the following\n # line to ..... .//blockquote/node()\").extract()\n # As it stands, we don't necessarily need the <br /> tags and such.\n content = \"\".join(div_tmark.xpath(\".//blockquote//text()\").extract())\n story_seg.contents = content\n\n story_item.authors.add(author_seg)\n\n print(\"Title: {0} Author: {1}\".format(story_seg.title, author))\n print(\"date_time: {0}\".format(date_obj))\n print(\"content length: {0}\".format(len(content)))\n\n story_seg.save()\n story_item.save()\n\n div_next_tmark = div_tmarks[-1].xpath(\".//span[@class='next']\")\n\n # navigate to the next threadmark.\n if div_next_tmark is not None:\n next_mark = div_next_tmark.xpath(\"a/@href\").extract_first() \n print(\"Next url: {0}\".format(next_mark))\n next_mark_url = response.urljoin(next_mark)\n yield scrapy.Request(\n next_mark_url,\n callback=self.scan_thread,\n priority=2,\n meta={\"story_item\": story_item}\n )", "def download_podcasts(podcast_rss, write_folder='/', use_episode_titles_as_filename=False):\n\n # create folder if doesn't exist\n Path(write_folder).mkdir(parents=True, exist_ok=True)\n\n feed = urllib.request.urlopen(podcast_rss)\n\n tree = ET.parse(feed)\n root = tree.getroot()\n channel = root[0]\n\n total_eps = len(channel.findall('item'))\n\n print ('*** Starting downloads *** ')\n print ('break (ctrl-C or stop button) to stop and continue later')\n\n for (idx,episode) in enumerate(channel.findall('item')):\n title = episode.find('title').text\n url = episode.find('enclosure').get('url')\n fname = title+Path(url).suffix if use_episode_titles_as_filename == True else Path(url).name\n write_path = Path(write_folder).joinpath(fname)\n \n if write_path.is_file():\n #don't attempt to redownload existing files\n print(f'Episode {idx+1} of {total_eps}: {title} already downloaded!')\n else:\n #download episode\n print(f'Downloading episode {idx+1} of {total_eps}: {title}', end ='...')\n try:\n urllib.request.urlretrieve(url, write_path)\n except KeyboardInterrupt:\n print ('Cancelled!')\n # Delete partial file and stop downloading\n if write_path.is_file():\n Path(write_path).unlink()\n print('Downloads stopped and partially downloaded file removed.')\n break\n else:\n print('Done')", "def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline,\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube", "def __init__(self, stream):\n self.stream = stream\n self.queue = Queue()\n self.start_thread()", "def __init__(self, movie_title, movie_storyline, poster_image,\r\n trailer_youtube):\r\n self.title = movie_title\r\n self.storyline = movie_storyline\r\n self.poster_image_url = poster_image\r\n self.trailer_youtube_url = trailer_youtube", "def __init__(self, title, poster, trailer):\n\t\tself.title = title\n\t\tself.poster_image_url = poster\n\t\tself.trailer_youtube_url = trailer", "def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube", "def __init__(self, id, movie_data):\n self.id = id\n self.info = movie_data['film']\n self.casts = movie_data['cast']", "def __init__(self, name, trackstr, db=None, callbacks=None, **kwargs):\n Thread.__init__(self)\n TwythonStreamer.__init__(self, **kwargs)\n self.db = 
db\n self.name = name\n self.trackstr = trackstr\n self.running = True\n\n if not type(callbacks) == list:\n self.callbacks = [callbacks]\n else:\n self.callbacks = callbacks", "def __init__(self, json):\n\n if \"show\" not in json or \"videos\" not in json:\n raise Exception(\"Invalid JSON.\")\n\n self.show = Show(json[\"show\"])\n self.seasons = []\n for seasonNumber in self.show.seasonNumbers:\n try:\n season_json = json[\"videos\"][\"episode\"][str(seasonNumber)]\n except KeyError:\n continue\n self.seasons.append(Season(seasonNumber, season_json))\n\n self.specials = []\n if \"standalone\" in json[\"videos\"]:\n for special in json[\"videos\"][\"standalone\"]:\n self.specials.append(Episode(special))", "def __init__(self, scraper_xml, logger = logging.getLogger()):\n self.logger = logging.getLogger(logger.name + \".Scraper\")\n self.m_result = \"\"\n self.s_xml = scraper_xml\n self.parser = ScraperParser(scraper_xml, self.logger)\n self.written_data = { }\n\n # As we fetch data from various web resources we store\n # the results in a cache because later fetches may refer to\n # cached items to parse additional data out. No need to re-fetch\n # this from the network if we already have fetched a copy.\n #\n self.cache = { }\n\n # We need the settings parsed before the user does any lookups\n #\n try:\n settings_xml = self.parser.parse(FN_GET_SETTINGS)\n except BadXML:\n settings_xml = None\n self.settings = Settings(settings_xml)\n\n return", "def __init__(self, count=10000, producer=None, verbose_=True, topic='trump'):\n # instantiating the super class StreamListener\n StreamListener.__init__(self)\n self.producer = producer # adding a producer to the attributes of the StreamListener\n self.verbose = verbose # boolean to print out the tweets in the terminal\n self.topic = topic # topic into which we want to publish the tweets\n self.max_count = count\n self.counter = 0", "def __init__(self):\n super().__init__()\n\n # Will only reply to every 3rd or so tweet, defined in settings\n self.received_tweet_count = 0\n\n # Twitter api init\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n self.twitter_api = tweepy.API(self.auth)\n\n print('Authenticated, creating stream...')\n\n self._init_stream()", "def __init__(self, query, time, filters):\n\t\t# Setup output vars\n\t\tself.stations = None\n\t\tself.data = None\n\t\tself.xml = u''\n\t\tself.xmlinfo = u''\n\t\tself.error = None\n\t\tself.xmlerror = u''\n\t\t\n\t\t# Setup privates\n\t\tself._time = time\n\t\tself._query = query\n\t\tself._filters = filters\n\t\tself._backendtime = 0\n\t\t\n\t\tself.location = None\n\t\tself.cachekey = query['value'] + query['type'] + str(time)\n\t\t\n\t\ttry:\n\t\t\tself._setup_xmlinfo()\n\t\texcept Exception:\n\t\t\tself.error = {'message':u'Internal error', 'id':105}\n\t\t\tself._setup_xmlerror()\n\t\t\treturn\n\t\t\n\t\t# Try to get the stations\n\t\ttry:\n\t\t\tself._setup_stations()\n\t\texcept Exception:\n\t\t\traise\n\t\t\tself.error = {'message':u'Malformed input','id':101}\n\t\t\tself._setup_xmlerror()\n\t\t\treturn\n\t\t\n\t\t\n\t\tif len(self.stations) == 0:\n\t\t\tself.error = {'message':u'Could not find any Stations','id':102}\n\t\t\tself._setup_xmlerror()\n\t\t\treturn\n\t\t\n\t\t# Try to fetch the data via StationThread\n\t\ttry:\n\t\t\tself._load_data()\n\t\texcept Exception:\n\t\t\traise\n\t\t\tself.error = {'message':u'Error retrieving data', 'id':103}\n\t\t\tself._setup_xmlerror()\n\t\t\treturn\n\t\t\n\t\t\n\t\t# 
Try to setup the xml\n\t\ttry:\n\t\t\tself._setup_xml()\n\t\texcept Exception:\n\t\t\tself.error = {'message':u'Internal error', 'id':104}\n\t\t\tself._setup_xmlerror()\n\t\t\treturn\n\t\t\n\t\tif self.xml == '':\n\t\t\tself.error = {'message':u'guru meditation - unexpected error occured', 'id':1099}\n\t\t\tself._setup_xmlerror()\n\t\t\treturn", "def __init__(self, rss_targets, limit=None):\n self._rss_targets = rss_targets\n self._limit = limit", "def __init__(self, movie_title, movie_storyline, movie_poster_image_url, movie_trailer):\n\t\tself.title = movie_title\n\t\tself.storyline = movie_storyline\n\t\tself.poster_image_url = movie_poster_image_url\n\t\tself.trailer_youtube_url = movie_trailer", "def __init__(self, title):\n\n self.__recent = []\n self.__lasturl = ''", "def __init__(self, params={}):\n self.lt_ses = lt.session() # pylint: disable=no-member\n self.lt_ses.listen_on(6881, 6891)\n\n self.params = params\n self.queue = deque()\n self.stream_thread = None\n self.handle = None", "def addEpisode(config, title, desc, mp3File, duration):\n logger.info(\"Adding episode to the RSS feed...\")\n\n # Create the item for the new episode\n item = ET.Element('item')\n\n # Add the sub elements from the config file\n addSubElementFromConfig(item, 'itunes:author', config, 'episodeAuthor')\n # addSubElementFromConfig(item, 'itunes:subtitle', config)\n addSubElementFromConfig(item, 'itunes:explicit', config, 'episodeExplicit')\n addSubElementFromConfig(item, 'itunes:image', config, 'episodeImage')\n\n # Add the remaining sub elements\n addSubElement(item, 'title', title)\n addSubElement(item, 'description', desc)\n addSubElement(item, 'itunes:summary', desc)\n addSubElement(item, 'pubDate', getFormattedUtcTime())\n\n # Format the duration\n durationStr = \"%0.f:%02.f:%02.f\" % duration\n addSubElement(item, 'itunes:duration', durationStr)\n\n # Create the public link to the episode\n episodeDir = config['episodeDir']\n if not episodeDir.endswith('/'):\n episodeDir += \"/\"\n\n episodeLink = generateLink(config['episodeDir'], os.path.basename(mp3File))\n\n addSubElement(item, 'guid', episodeLink)\n\n # Create the enclosure tag\n byteLength = os.path.getsize(mp3File)\n ET.SubElement(item, 'enclosure',\n url=episodeLink,\n length=str(byteLength),\n type=\"audio/mpeg3\")\n\n # Now generate the XML\n generateXml(config, item)", "def __init__(self, title, storyline, poster_image, trailer_youtube):\n\n self.title = title\n self.storyine = storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube", "def podcast(user_uid, podcast_id):\n try:\n podcast = Podcast.load(user_uid, podcast_id)\n podcast.last_accessed = datetime.datetime.utcnow()\n podcast.save()\n except Exception:\n abort(404)\n return Response(podcast.feed.to_rss(), mimetype=\"text/xml\")", "def OnEpisodeStart(self):\n pass", "def __init__(self, \n movie_title,\n movie_storyline,\n movie_poster,\n movie_trailer):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = movie_poster\n self.trailer_youtube_url = movie_trailer", "def __init__(self, main_feed):\n self.main_feed = main_feed", "def __init__(self, reddit):\n self._reddit = reddit", "def __init__(self, name):\r\n self.scraper_name = name\r\n self.stream = None\r\n \r\n params = {\r\n \"name\": self.scraper_name,\r\n \"format\": \"csv\"\r\n }\r\n\r\n params_str = urllib.urlencode(params)\r\n data_url = SWIKI_BASEURL + \"?\" + params_str\r\n\r\n super(ScraperWikiDataSource, self).__init__(data_url, read_header = 
True, \r\n encoding = \"utf-8\")", "def _get_feed_episodes(self, show_key, **kwargs):\n\t\tinfo(\"Getting episodes for Nyaa/{}\".format(show_key))\n\t\tif \"domain\" not in self.config or not self.config[\"domain\"]:\n\t\t\terror(\" Domain not specified in config\")\n\t\t\treturn list()\n\t\t\n\t\t# Send request\n\t\tquery = re.sub(\"[`~!@#$%^&*()+=:;,.<>?/|\\\"]+\", \" \", show_key)\n\t\tquery = re.sub(\"season\", \" \", query, flags=re.I)\n\t\tquery = re.sub(\" +\", \" \", query)\n\t\tquery = re.sub(\"(?:[^ ])-\", \" \", query) # do not ignore the NOT operator\n\t\tdebug(\" query={}\".format(query))\n\t\tquery = url_quote(query, safe=\"\", errors=\"ignore\")\n\t\t\n\t\tdomain = self.config.get(\"domain\", \"nyaa.si\")\n\t\tfilter_ = self.config.get(\"filter\", \"2\")\n\t\texcludes = self.config.get(\"excluded_users\", \"\").replace(\" \", \"\")\n\t\turl = self._search_base.format(domain=domain, filter=filter_, excludes=excludes, q=query)\n\t\tresponse = self.request(url, rss=True, **kwargs)\n\t\tif response is None:\n\t\t\terror(\"Cannot get latest show for Nyaa/{}\".format(show_key))\n\t\t\treturn list()\n\t\t\n\t\t# Parse RSS feed\n\t\tif not _verify_feed(response):\n\t\t\twarning(\"Parsed feed could not be verified, may have unexpected results\")\n\t\treturn response.get(\"entries\", list())", "def __init__(self, movie_title, movie_storyline, poster_img, trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_img\n self.trailer_youtube_url = trailer_youtube", "def __init__(\n self,\n movie_title,\n movie_storyLine,\n movie_posterImage,\n movie_trailerlink,\n movie_actor,\n movie_director):\n self.title = movie_title\n self.storyLine = movie_storyLine\n self.poster_image_url = movie_posterImage\n self.trailer_youtube_url = movie_trailerlink\n self.actor=movie_actor\n self.director=movie_director", "def __init__(self, testing=False):\n\t\tthreading.Thread.__init__(self)\n\t\tself.sources = []\n\t\tself._testing_cache = None if not testing else []\n\t\tself._c_lock = threading.Lock()\n\t\tself._total_count = 0\n\t\tself._queue = queue.Queue(maxsize=1000)\n\t\tself._keep_running = False\n\t\tself.daemon = True\n\t\tself.name = 'RedditElementLoader'", "def __init__(self, twitter_consumer_key, twitter_consumer_secret,\n twitter_access_key, twitter_access_secret,\n search_terms, search_on='news',\n bitly_access_token='',\n news_api_key=''):\n\n # Access Keys and Secrets for Twitter API obtained at: https://developer.twitter.com/\n auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)\n auth.set_access_token(twitter_access_key, twitter_access_secret)\n\n # Store API object for access to Twitter REST API\n self.__api = tweepy.API(auth)\n\n # Term(s) to search news feeds or Twitter on\n self.search_terms = search_terms\n\n # Method TwitterBot will use to search on. 
Current options are 'news' or 'twitter'\n self.search_on = search_on\n\n # Access token for optional Bitly API: https://dev.bitly.com/\n self.__bitly_access_token = bitly_access_token\n\n # Access token for optional News API: https://newsapi.org/\n self.__news_api_key = news_api_key\n\n # Will store list of items scraped from news or Twitter\n self.list = []", "def __init__(self, movie_title, poster_image_url, trailer_youtube_url):", "def download_episode(self, filmid):\n self.logger.debug('download_episode')\n if not self._test_download_path(self.settings.getDownloadPathEpisode()):\n return\n film = self.database.retrieve_film_info(filmid)\n if film is None:\n return\n\n (filmurl, extension,) = self._get_film_url_and_extension(film)\n\n # detect season and episode\n (season, episode, fninfo,) = self._season_and_episode_detect(film)\n\n # determine names\n showname = mvutils.cleanup_filename(film.show)[:64]\n namestem = mvutils.cleanup_filename(film.title)[:80]\n if not namestem:\n namestem = u'Episode-{}'.format(film.filmid)\n if not showname:\n showname = namestem\n\n # review name\n if self.settings.getReviewName():\n (namestem, confirmed) = self.notifier.get_entered_text(namestem, 30986)\n namestem = mvutils.cleanup_filename(namestem)\n if len(namestem) < 1 or confirmed is False:\n return\n\n # prepare download directory and determine sequence number\n pathname = self.settings.getDownloadPathEpisode() + showname + '/'\n sequence = 1\n if xbmcvfs.exists(pathname):\n (_, epfiles,) = xbmcvfs.listdir(pathname)\n for epfile in epfiles:\n match = re.search(r'^.* - \\(([0-9]*)\\)\\.[^/]*$', epfile)\n if match and match.groups():\n if sequence <= int(match.group(1)):\n sequence = int(match.group(1)) + 1\n else:\n xbmcvfs.mkdir(pathname)\n\n filename = showname + ' - ' + fninfo + \\\n namestem + (u' - (%04d)' % sequence)\n # download the stuff\n if self._download_files(film, filmurl, pathname, filename, extension):\n self._make_series_nfo_files(\n film, filmurl, pathname, filename, season, episode, sequence)", "def __init__(self, api_key, season, week):\n\n self._ak = api_key\n self._base_url = 'https://api.sportsdata.io/v3/nfl/'\n self.season = season\n self.week = week\n self._player_dict = filter_players(load_players_file(), position='QB')", "def __init__(\n self,\n app, # SlackApp\n response_url: Optional[str] = None,\n channel: Optional[str] = None,\n thread_ts: Optional[str] = None,\n ):\n super(Messenger, self).__init__()\n self.app = app\n self.response_url = response_url\n self.channel = channel\n\n if thread_ts:\n self[\"thread_ts\"] = thread_ts\n\n self.client = AsyncWebClient(self.app.config.token)", "def __init__(self, node):\r\n super(_StreamNodeThread, self).__init__()\r\n self.node = node\r\n self.exception = None\r\n self.traceback = None\r\n self.logger = get_logger()", "def __init__(self, movie_title, poster_image, trailer_youtube):\n self.title = movie_title\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube", "def update_stories(self, response):\n\n print(\"Scraping threadmarks at: {0}\".format(response.url))\n story_item = response.meta.get(\"story_item\")\n\n threadmarks_list = response.xpath(\"//li[contains(@class, 'threadmarkItem')]\")\n\n url = None\n\n # scan the threadmarks until we reach one that **WAS NOT ALREADY CREATED***\n # get_or_create will return created to be TRUE, meaning this threadmark wasn't\n # already in the database. 
Set the url to that threadmark, and scan that thread.\n for tmark in threadmarks_list:\n chapter_title = \"\".join(tmark.xpath(\"./a/text()\").extract()).encode('utf-8')\n chapter_title = \" \".join(chapter_title.split())\n published_date = tmark.xpath(\".//span[@class='DateTime']/@title\").extract_first()\n if published_date is None:\n published_date = tmark.xpath(\".//abbr[@class='DateTime']/text()\").extract_first()\n published_date = datetime.strptime(published_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n\n print(\"Trying to update story: {0}\".format(story_item.title))\n print(\"\\tchapter title: {0} date: {1}\".format(chapter_title, published_date))\n\n story_seg, created = StorySegment.objects.get_or_create(story=story_item,\n title=chapter_title,\n published=published_date)\n if created:\n url = tmark.xpath(\"./a/@href\").extract_first()\n url = response.urljoin(url)\n break\n\n if url is not None:\n # Set priority to slightly higher, and make sure to pass in the corresponding story_item.\n yield scrapy.Request(url=url, callback=self.scan_thread, priority=2, meta={\"story_item\": story_item})", "def get_details(self):\n # The basic details are put sussed out by our super class\n # method and put in 'self.xml_details'\n #\n super(Series, self).get_details()\n # And now we get the rest of the details\n #\n self.premiered = None\n self.rating = None\n self.plot = ''\n self.genres = []\n self.thumbs = []\n self.fanart = []\n self.episode_guide_urls = []\n self.episodes = None\n\n # Further lookups for this item may only give us partial URL's\n # We take the first lookup detail link's url and use that as a\n # base url for further lookups.\n #\n self.base_url = self.links[0].url\n\n dom = parseString(self.xml_details)\n ep = dom.firstChild\n\n self.title = get_child_data(ep, \"title\", self.title)\n self.plot = get_child_data(ep, \"plot\", \"\")\n self.premiered = get_child_data(ep, \"premiered\")\n self.rating = try_float(get_child_data(ep, \"rating\"))\n\n genre = first_child(ep, \"genre\")\n while genre:\n if genre.firstChild and len(genre.firstChild.data) > 0:\n self.genres.append(genre.firstChild.data)\n genre = next_sibling(genre, \"genre\")\n\n # Thumbs have not only url's, but they can have informative attributes\n # so we store this data all as a Dict.. it will always at least have\n # the 'url' key.\n #\n thumbs = first_child(ep, \"thumbs\")\n if thumbs:\n thumb = first_child(thumbs, \"thumb\")\n while thumb:\n td = { \"url\" : thumb.firstChild.data }\n attrs = thumb.attributes\n for i in range(0,attrs.length):\n attr = attrs.item(i)\n td[attr.name] = attr.value\n self.thumbs.append(td)\n thumb = next_sibling(thumb, \"thumb\")\n\n fanart = first_child(ep, \"fanart\")\n if fanart:\n # The 'url' attribute of the <fanart> tag is the base url for the\n # poster images and their previews. 
We do not store that, we just\n # construct the full urls.\n #\n url_base = fanart.getAttribute(\"url\")\n\n self.fanart = []\n\n thumb = first_child(fanart, \"thumb\")\n while thumb:\n self.fanart.append(url_base + thumb.firstChild.data)\n thumb = next_sibling(thumb, \"thumb\")\n\n episodeguide = first_child(ep, \"episodeguide\")\n if episodeguide:\n url = first_child(episodeguide, \"url\")\n while url:\n self.episode_guide_urls.append(\\\n ScrapeURL(url,cache = self.scraper.cache,\n base_url = self.base_url))\n url = next_sibling(url, \"url\")\n\n # And at this point we have parsed out all of the series specific\n # data from our XML response, and also got a handle on where to get\n # the episode information.\n #\n dom.unlink()\n dom = None\n return", "def __init__(self,streamDAO):\n self._streamDAO = streamDAO\n self._streams = {stream.name : stream for\n stream in streamDAO.getStreams()}\n serverStreamTpls = [ (svr,stream) for stream in self.streams \\\n for svr in stream.servers ]\n self._streamsByServer = {}\n for key, val in serverStreamTpls:\n self._streamsByServer.setdefault(key, []).append(val)\n\n # Create thread status update Thread\n self._t = threading.Thread(target=self.__updateStreamStatus)\n self._t.daemon = True\n self._t.start()", "def __init__(self, title, poster_image_url, trailer_youtube_id):\n\t\tself.title = title\n\t\tself.poster_image_url = poster_image_url\n\t\tself.trailer_youtube_url = trailer_youtube_id", "def __init__(self):\n self._data = None\n self._forecast_data = None\n self._today_data = None\n self.last_updated = None", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self, **kwargs):\n self.paused = self._get('paused', **kwargs)\n self.title = self._get('title', **kwargs)\n self.artist = self._get('artist', **kwargs)\n self.album = self._get('album', **kwargs)\n self.total_time = self._get('total_time', **kwargs)\n self.position = self._get('position', **kwargs)\n self.mediakind = self._get('mediakind', **kwargs)\n self.playstatus = self._get('playstatus', **kwargs)", "def __init__(self, fetcher, sender, sleep_time=1, command_interpreter=None, port=16981):\n self.__fetcher = fetcher\n self.__sender = sender\n self.sleep_time = sleep_time\n #if port:\n # self.__external_server = StreamServer((\"127.0.0.0\", port), self.__listen_external)\n #else:\n # self.__external_server = None\n\n self.__ticket_counter = 1\n #self.__ticket_counter_lock = Semaphore()\n\n self.interpreter = command_interpreter\n #self.__default_interpreter = PytheasCommandInterpreter(self)", "def parse(self):\n \n r = requests.get(self.url)\n if r:\n self.title = fetch_title(self.url)\n self.domain = self.fetch_domain()\n self.favicon = self.fetch_favicon()\n self.topics = self.classify_topics()\n self.description = self.fetch_description()\n return self", "def get_thread_urls(self, response):\n\n print(\"scraping {0}\".format(response.url))\n url_stories = []\n\n # <li_tags> is a list of all the <li> tags in the html doc with a certain class value.\n # This corresponds to all threads that are NOT sticky.\n li_tags = response.xpath(\"//li[@class='discussionListItem visible ']\")\n\n for thread_tag in li_tags:\n\n author_name = thread_tag.xpath('@data-author').extract_first()\n\n # Get the last post date for a thread ========================================================\n last_post_date = thread_tag.xpath(\".//dl[@class='lastPostInfo']//abbr/text()\").extract_first()\n if 
last_post_date is not None:\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n else:\n # fix with line continuation.\n last_post_date = thread_tag.xpath(\".//span[@class='DateTime']/@title\").extract_first()\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n\n # ============================================================================================\n\n author, created = Author.objects.get_or_create(name=author_name)\n if created:\n author.save()\n\n title = thread_tag.xpath(\".//h3[@class='title']/a/text()\").extract_first().encode('utf-8')\n story, created = Story.objects.get_or_create(title=title)\n\n # if created is true, then it's a brand new story, so make sure to save it.\n if created:\n story.save()\n story.authors.add(author)\n\n a_node = thread_tag.xpath(\"div/div/h3/a\")\n thread_url = a_node.xpath(\"@href\").extract_first()\n\n cur_date = datetime.now(tz=utc)\n oldest_date = datetime.min.replace(tzinfo=utc)\n\n created = False\n \"\"\"\n Over here, I am attempting to either update an existing storyhost\n object, OR I am creating a new one. It looks redundant, but I found that\n if I just used get_or_create, I was forced to set last_date automatically.\n\n I didn't always want to create a brand new object, so this verbose code\n was necessary.\n \"\"\"\n try:\n # TRY TO UPDATE EXISTING object\n storyhost = StoryHost.objects.get(host=self.HOST, story=story, url=thread_url)\n storyhost.save()\n except StoryHost.DoesNotExist:\n\n # CREATE BRAND NEW STORYHOST OBJECT\n storyhost, created = StoryHost.objects.get_or_create(host=self.HOST,\n story=story,\n url=thread_url,\n last_scraped=oldest_date)\n\n storyhost.save()\n\n \"\"\"\n Check if the last post date is more recent than the\n storyhost's last scraped date. If it's not, skip it.\n\n If it is, update the last scraped date, and add it to the\n list of url_stories to be returned at the end of this function.\n \"\"\"\n\n last_seg_date = self.get_last_seg_date(story)\n if thread_url is not None:\n if last_post_date > storyhost.last_scraped or last_seg_date < last_post_date:\n storyhost.last_scraped = cur_date\n storyhost.save()\n thread_link = response.urljoin(thread_url)\n\n # Add this story to two separate lists, one for updating, one for just\n # scraping.\n if created:\n url_stories.append((thread_link, story))\n else:\n self.update_list.append((\"{0}threadmarks\".format(thread_link), story))\n else:\n print(\"Skipping {0}\".format(storyhost.url))\n\n return url_stories", "def __init__(self, launcher, pid_folder, frequency, fields_specs):\n self.launcher = launcher\n self.pid_file = os.path.join(pid_folder, 'proc.pid')\n self.frequency = frequency\n self.queue = None\n self.monitor_process = None\n # Parse fields specs\n # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8:\n self.fields = {}\n raw_fields = fields_specs.split(',')\n for raw_field in raw_fields:\n fields_split = raw_field.split(':')\n assert len(fields_split) in (3, 4),\\\n \"Invalid format of field specification (%s). Must be name:type:index, name:type:index: or name:type:index:count\" % raw_field\n field_name = fields_split[0]\n assert field_name not in self.fields,\\\n \"Found duplicate timeseries field (%s)\" % field_name\n field_type = fields_split[1]\n assert field_type in ('str', 'int', 'float', 'bool'),\\\n \"Invalid field type (%s). 
Must be one of ('str', 'int', 'float', 'bool')\" % field_type\n index = int(fields_split[2])\n if len(fields_split) == 3:\n count = -1\n elif fields_split[3] == '':\n count = 0\n else:\n count = int(fields_split[3])\n self.fields[field_name] = {\n 'type': field_type,\n 'index': index,\n 'count': count\n }", "def scrape_series_summaries(target_url, create_summary_master_df=False):\n if create_summary_master_df == True:\n summary_master_df = pd.DataFrame(columns=[\"ep_title\", \"summary\"])\n\n print(\"URL of the show we'll be scraping:\", target_url)\n logger.info(f\"URL of the show we'll be scraping: {target_url}\")\n\n # Make a request for the series in question, so we can grab links to each of its episodes:\n series_response = requests.get(target_url)\n logger.info(f\"Scraping episode list from {target_url}\")\n\n # Checks to see if scraping goes okay. 200 means all systems are go!\n if series_response.status_code != 200:\n warn(f\"Warning: status code {series_response.status_code}\")\n logger.warning(f\"Warning: status code {series_response.status_code}\")\n else:\n print(\"Status code 200; all good in the hood\")\n logger.info(f\"Status code: {series_response.status_code}\")\n\n # If the status code is all good, we request on!\n series_soup = BeautifulSoup(series_response.content, \"html.parser\")\n\n # Pulling out all the td tags, where our episode links live:\n td = series_soup.find_all(\"td\")\n\n # Storing episode links to a list (episode links occur every 6 entries):\n series_link_lst = []\n for idx, title in enumerate(td):\n if title.a != None:\n if \"(episode)\" in title.a[\"href\"]:\n series_link_lst.append(f'https://memory-alpha.fandom.com{title.a[\"href\"]}')\n\n # And from here, we start scraping episode summary content from our list of links:\n for link in series_link_lst:\n\n ep_target = link\n\n # Sleep for a moment, so that we don't get IP banned:\n sleep(randint(3,5))\n\n ep_resp = requests.get(ep_target)\n\n # Checks to see if scraping goes okay. 
200 means all systems are go!\n if ep_resp.status_code != 200:\n warn(f\"Warning: status code {ep_resp.status_code}\")\n logger.warning(f\"Warning: status code {ep_resp.status_code}\")\n else:\n print(\"Status code 200; all good in the hood\")\n logger.info(f\"Status code: {ep_resp.status_code}\")\n logger.info(f\"Scraping episode summary from {ep_target}\")\n\n # Creating our episode soup object:\n ep_soup = BeautifulSoup(ep_resp.content, \"html.parser\")\n\n # Stripping away the extraneous junk from the page's episode title and then saving it for later:\n # ep_title_until_idx = ep_soup.title.text.index(\"(episode)\")\n # ep_title = ep_soup.title.text[:ep_title_until_idx].rstrip()\n\n # Saving our episode content div:\n ep_content_div = ep_soup.find(\"div\", {\"class\": \"mw-content-ltr mw-content-text\"})\n\n # Setting the indices from and to the point that we want to capture in the next step (e.g., we want all text from \"Summary\" until \"Memorable Quotes\" on the page):\n # from_summary = ep_content_div.text.index(\"Summary\")\n # until_memorable_quotes = ep_content_div.text.lower().index(\"memorable quotes\")\n\n # The aforementioned \"next step\" where we save title and summary to a dictionary:\n # summary_content = {\"ep_title\": [ep_soup.title.text[:ep_soup.title.text.index(\"(episode)\")].rstrip()],\n # \"summary\": [ep_content_div.text[from_summary+13:until_memorable_quotes].lstrip()]}\n\n # Now, we're just scraping ALL the text on the episode page, not just the summary:\n summary_content = {\"ep_title\": [ep_soup.title.text[:ep_soup.title.text.index(\"(episode)\")].rstrip()],\n \"summary\": [ep_content_div.text]}\n\n # Turning dictionary into a DF, and then appending it to the master DF:\n ep_summary_df = pd.DataFrame.from_dict(summary_content)\n summary_master_df = pd.concat([summary_master_df, ep_summary_df])\n print(\"Shape of Master DF:\", summary_master_df.shape)\n\n # Once the episode summary scraping loop completes, we pickle our master DF:\n title_until = series_soup.title.text.index(\" |\")\n summary_master_df.to_pickle(f\"data/summary{series_soup.title.text[:title_until]}.pkl.bz2\", compression=\"bz2\")\n logger.info(\"Saved Summary Master DF to pickle object\")\n\n # And we save it as a CSV too:\n summary_master_df.to_csv(f\"data/summary{series_soup.title.text[:title_until]}.csv\")\n logger.info(\"Saved Summary Master DF to CSV\")\n\n return summary_master_df", "def __init__(self, dirname, sites, news_types):\n self.dirname = dirname\n self.sites = []\n self.news_types = []\n if type(sites) == str:\n self.sites.append(sites)\n if type(news_types) == str:\n self.news_types.append(news_types)\n else:\n self.sites = sites\n self.news_types = news_types\n self.list_news_path = list(self.get_list_news_files())\n # self.list_news_path = Parallel(n_jobs=-1)(delayed(list(self.get_list_news_files())))\n # self.feature_type = feature_type", "def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = 
str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None", "def __init__(self, title, author):\n self._title = title\n self._author = author\n self._patron = None\n self._waitList = []", "def __init__(self):\n # FIXME: IS this needed?\n super(ArduinoStation, self).__init__()\n\n self.serial_port_pattern = '/dev/ttyACM{port_num}'\n self.serial_port_num = None\n self.baudrate = 9600\n self.ser = self._setup_serial_connection()\n\n\n # Sensor 1 (DHT11) has 2 readings, Sensor 2 has 1\n ## FIXME: Should look for key pairs in list and submit when no more unique readings are coming through\n if config.SCB_CONFIGURATION == 'standard':\n self.lines_per_observation = 3\n else:\n self.lines_per_observation = 7 # Allows for up to 5 DS18B20 along w/ DHT-11.", "def __init__(self, streams):\r\n streams = list(streams)\r\n self._labels = dict(streams)\r\n self._refresh = set(stream for (stream, _) in streams)\r\n self._heads = set()", "def __init__(self, title, image_url, trailer_url):\n self.title = title\n self.trailer_youtube_url = trailer_url\n self.poster_image_url = image_url", "def request_rss(self, url):\n return feedparser.parse(url)", "def qa_series_feed(self, feed_class=AcquisitionFeed):\n def factory(library, facets):\n wl = WorkList()\n wl.initialize(library)\n return wl\n\n return self._qa_feed(\n feed_factory=feed_class.page,\n feed_title=\"QA series test feed\",\n controller_name=\"qa_series_feed\",\n facet_class=HasSeriesFacets,\n worklist_factory=factory\n )", "def __init__(self, station_definition=None, number_of_packets_in_record=None, packet_number_in_record=None):", "def __init__(self, station_definition=None, number_of_packets_in_record=None, packet_number_in_record=None):" ]
[ "0.666735", "0.582353", "0.5599041", "0.5515614", "0.5502834", "0.54765725", "0.54175603", "0.5364501", "0.533091", "0.53269607", "0.5278374", "0.5247649", "0.5233488", "0.5222864", "0.5213202", "0.5204529", "0.51684767", "0.516591", "0.51639456", "0.5141017", "0.513316", "0.51284343", "0.50994885", "0.5091677", "0.50807124", "0.5075149", "0.506984", "0.5063355", "0.50569385", "0.5055379", "0.5048896", "0.5032452", "0.5018004", "0.50127316", "0.500825", "0.50063443", "0.4987976", "0.49836308", "0.4966925", "0.49646336", "0.495975", "0.49574277", "0.49557355", "0.49525344", "0.49520588", "0.49519947", "0.49499905", "0.49459016", "0.49456954", "0.4942256", "0.4937608", "0.49327648", "0.49294314", "0.49282524", "0.49280044", "0.49194646", "0.49142188", "0.49110353", "0.49101007", "0.4909491", "0.49062234", "0.49055567", "0.489848", "0.4894729", "0.48941797", "0.48889303", "0.48871225", "0.4885433", "0.4881081", "0.48650214", "0.48616597", "0.48582733", "0.4858085", "0.48579612", "0.48548412", "0.48520523", "0.4843032", "0.48340434", "0.4830844", "0.48300886", "0.48267922", "0.48262045", "0.48220143", "0.48212478", "0.4817542", "0.48022202", "0.47989997", "0.4796239", "0.4790945", "0.47855085", "0.47809654", "0.47800168", "0.47799706", "0.4776485", "0.4772604", "0.477084", "0.47679755", "0.47663105", "0.4765807", "0.4765807" ]
0.6067558
1
Uses information in `line` to request and return the RSS feed
def request_rss(self, url):
    return feedparser.parse(url)
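For context on the document above: `feedparser.parse` accepts either a feed URL or a raw XML string and returns a `FeedParserDict`, with the channel exposed under `.feed` and the items under `.entries`. A minimal sketch of that behavior, where the sample XML is invented purely for illustration:

import feedparser

# A tiny RSS 2.0 document; real callers would pass a feed URL instead.
sample = (
    '<rss version="2.0"><channel>'
    '<title>Example feed</title>'
    '<item><title>Hello</title><link>http://example.com/1</link></item>'
    '</channel></rss>'
)

d = feedparser.parse(sample)
print(d.feed.title)                 # -> Example feed
for entry in d.entries:
    print(entry.title, entry.link)  # -> Hello http://example.com/1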
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_rss(url):", "def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n i = 0\r\n while True:\r\n if i == len(feed['entries']) or i > 30:\r\n break\r\n \r\n try:\r\n # get link to article\r\n link = feed[\"entries\"][i][\"link\"]\r\n\r\n # get title of article\r\n title = feed[\"entries\"][i][\"title\"]\r\n \r\n try:\r\n # get raw summary of article\r\n summary_raw = feed[\"entries\"][i][\"summary\"]\r\n \r\n # format summary\r\n summary = \"\"\r\n for c in summary_raw:\r\n if c == \"<\":\r\n summary += \"...\"\r\n break\r\n summary += c\r\n except KeyError as e:\r\n logging.error(\"no summary for RSS feed article: {}\".format(link))\r\n summary = \"read more here...\"\r\n \r\n # get raw date \r\n date_raw = feed[\"entries\"][i][\"published_parsed\"]\r\n \r\n if date_raw is None:\r\n date = feed[\"entries\"][i][\"published\"]\r\n \r\n else:\r\n # format date\r\n year = str(date_raw.tm_year)\r\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n month = months[date_raw.tm_mon - 1]\r\n day = str(date_raw.tm_mday)\r\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\r\n wday = weekdays[date_raw.tm_wday]\r\n hour = str(date_raw.tm_hour)\r\n hour = \"{:2}\".format(hour).format(' ','0')\r\n min = str(date_raw.tm_min)\r\n min = \"{:2}\".format(min).replace(' ','0')\r\n date = hour + \":\" + min + \" - \" + wday + \" \" + month + \" \" + day + \", \" + year\r\n \r\n # compile entry and append to news list\r\n entry = {\"link\":link, \"title\":title, \"date\":date, \"summary\":summary}\r\n \r\n # sanitize entry\r\n for key in entry:\r\n # apostrophe\r\n entry[key] = entry[key].replace(\"&#39;\", \"'\")\r\n # right single quotation mark\r\n entry[key] = entry[key].replace(\"’\", \"&#8217;\")\r\n # left single quotation mark\r\n entry[key] = entry[key].replace('\"', \"&#8216;\")\r\n # right double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8221;\")\r\n # left double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8220;\")\r\n # Weird ampersand formatting\r\n entry[key] = entry[key].replace(\"&amp;\", \"&\")\r\n \r\n # prepare entry for sqlite queries\r\n entry[key] = surround(entry[key])\r\n \r\n # add entry to news list\r\n news.append(entry)\r\n \r\n # max 10 entries\r\n if len(news) == 10:\r\n break\r\n i += 1\r\n \r\n except Exception as e:\r\n logging.error(e)\r\n i += 1\r\n pass\r\n \r\n # success\r\n return news", "def get_feed(self):\n possible_endings = ('rss', 'rss/')\n if not self.url or not self.url.endswith(possible_endings):\n print('Please check URL(is RSS?) 
and Internet connection')\n sys.exit()\n try:\n data = feedparser.parse(self.url)\n except urllib.error.URLError:\n print('Please input correct URL')\n sys.exit()\n self.get_content(data)\n return self.items", "def rss_fetch():\n items = {}\n\n def add_item(pubDate, title, link):\n nonlocal items\n idx = float(parsedate_to_datetime(pubDate).timestamp())\n while idx in items:\n idx = idx + 0.1\n dbg(\"Adding item: %11.1f \\\"%s\\\" %s\" % (idx, title, link))\n items[idx] = {}\n items[idx]['title'] = title\n items[idx]['link'] = link\n\n state = \"\" # state parser is in (\"\", \"item\", \"title\", \"link\", \"pubDate\")\n title = \"\" # Currently parsing this title.\n link = \"\" # \" \" \" link\n pubDate = \"\" # \" \" \" pubDate (index)\n\n def start_element(name, attrs):\n nonlocal state\n nonlocal title\n nonlocal link\n nonlocal pubDate\n dbg(\"Start: %s %s %s\" %(name, str(attrs), str((state, title, link, pubDate))))\n if state == \"\":\n if name == \"item\":\n state = \"item\"\n elif state == \"item\":\n if name == \"title\":\n state = \"title\"\n if title:\n prn(\"Two titles?\")\n sys.exit(1)\n elif name == \"link\":\n state = \"link\"\n if link:\n prn(\"Two links?\")\n sys.exit(1)\n elif name == \"pubDate\":\n state = \"pubDate\"\n if pubDate:\n prn(\"Two pubDates?\")\n sys.exit(1)\n\n\n def end_element(name):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"End: %s %s\" % (name, str((state, title, link, pubDate))))\n if state == \"item\":\n if name == \"item\":\n if title == \"\":\n prn(\"No title at end item.\")\n sys.exit(1)\n if link == \"\":\n prn(\"No link at end item.\")\n sys.exit(1)\n if pubDate == \"\":\n prn(\"No pubDate at end item.\")\n sys.exit(1)\n else:\n add_item(pubDate, title, link)\n state = \"\"\n title = \"\"\n link = \"\"\n pubDate = \"\"\n elif state == \"title\":\n if name == \"title\":\n state = \"item\"\n elif state == \"link\":\n if name == \"link\":\n state = \"item\"\n elif state == \"pubDate\":\n if name == \"pubDate\":\n state = \"item\"\n\n def char_data(data):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"Data: %s %s)\" % (str(data), str((state, title, link, pubDate))))\n if state == \"title\":\n title = title + data\n elif state == \"link\":\n link = link + data\n elif state == \"pubDate\":\n pubDate = pubDate + data\n\n\n p = xml.parsers.expat.ParserCreate(\"UTF-8\")\n\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n\n with urllib.request.urlopen('https://news.ycombinator.com/rss') as f:\n xml_file = b\"\"\n while True:\n r = f.read(255)\n if r:\n xml_file = xml_file + r\n else:\n break\n\n try:\n p.Parse(xml_file.decode(\"UTF-8\"), True)\n except:\n dbg(\"Writing fetched RSS feed to file...\")\n err_f = open(parse_error_output_file, \"ab\")\n err_f.write(b\"GET URL: \")\n err_f.write(f.geturl().encode(\"UTF-8\"))\n err_f.write(b\"\\nReturn Code: \")\n err_f.write((\"%d\\n\" % (f.getcode(), )).encode(\"UTF-8\"))\n err_f.write(b\"Meta Info:\\n\")\n err_f.write(f.info().as_bytes(unixfrom=True))\n err_f.write(b\"XML output:\\n\")\n err_f.write(xml_file)\n err_f.close()\n dbg(\"Done.\")\n raise\n\n return items", "def get_rss(limit):\n rss_data = feedparser.parse(URL)\n if limit == 1:\n title = rss_data.entries[0].title\n link = rss_data.entries[0].link\n rss_print(title, link)\n else:\n for i in range(0, limit):\n title = rss_data.entries[i].title\n link = rss_data.entries[i].link\n\n print(Back.CYAN + str(i + 1) + \"\\t\")\n 
rss_print(title, link)", "def getFeedFromXXX(RSSlink):\n summary =\"\"\n link =\"\"\n if \"packetstormsecurity\" in RSSlink:\n link =\"link\"\n summary=\"summary_detail\"\n elif \"jetlib\" in RSSlink:\n link=\"id\"\n summary=\"summary\"\n myFeed=\"\"\n try:\n myFeed = feedparser.parse(RSSlink)\n except:\n print(\"problem with the db website.try to change the source db in option !\")\n return None\n entries = [item for item in myFeed.items() if \"entries\" in item]\n tupleInsideEntries =entries[0]\n #print len(tupleInsideEntries[1])#show the number of result founded\n for dicItem in tupleInsideEntries[1]:\n if dicItem.get(\"title\")==\"No Results Found\":\n return False #break from this loop if theres no result\n print (\"Title : \"+dicItem.get(\"title\"))#title\n if summary ==\"summary_detail\": #packetstormsecurity\n print (\"Description : \"+str(dicItem.get(summary).get(\"value\")))#description\n else:\n print (\"Description : \"+str(dicItem.get(summary)))\n print (\"Date : \"+dicItem.get(\"published\"))#date\n print (\"Link : \"+dicItem.get(link)) #link\n print (\"#################################################################################\")\n return True", "def get_feed(self):\n\t\turl=\"http://news.google.com/news?ned=%s&topic=%s&output=rss\"\n\t\tlinks=[{\"ned\":\"us\", \"type\":\"h\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"w\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"nz\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"sa\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"b\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"t\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"m\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"s\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"e\"},\n\t\t\t ]\n\t\tfeed = links[self.get_input()]\n\t\treturn url%(feed[\"ned\"],feed[\"type\"])", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. 
\n updates[str(item.link)] = a_time \n return (total, updates)", "def process(url):\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n print entry\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n summary = translate_html(entry.summary)\n try:\n subject = translate_html(entry.tags[0]['term'])\n except AttributeError:\n subject = \"\"\n newsStory = NewsStory(guid, title, subject, summary, link)\n ret.append(newsStory)\n return ret", "def parse_rss(link, mode):\n\n one_feed = []\n news_counter = 0\n app.logger.info(f'Parsing feed: {link}')\n # Get file from internet, open it with xml-parser\n rss = feedparser.parse(link)\n\n for entry in rss.entries:\n\n if mode == 'latest':\n news_item_date = get_timestamp(entry.published)\n\n # Stop reading RSS if current news is already older than time\n # when user last got the news feed\n if news_item_date < last_time_user_got_news:\n return one_feed\n\n post = {'title': entry.title,\n 'published': get_timestamp(entry.published)}\n\n # Try to get link to image from one of a place where it can be\n try:\n pic = entry.enclosures[0].href\n except(IndexError, AttributeError):\n pic = get_img_source(entry.summary)\n\n post['image'] = pic if pic else url_for('static',\n filename=\"400x400.jpg\")\n\n link = entry.link\n post['link'] = link\n domain_name = re.search(r'://(.+?)/', link).group(1)\n post['domain_name'] = domain_name if domain_name else 'unknown'\n\n one_feed.append(post)\n\n if mode != 'latest':\n return one_feed\n else:\n print('There are no new news at all.')\n return []", "def cli():\n fire.Fire(fetch_rss_file)", "def RSS2format(inputfile):\n print \"START: FEED GENERATOR[ITEM OBJECT CREATOR]: \", time.time()\n xmldocument = parse(inputfile)\n feed_title = \"\"\n try:\n feed_title = xmldocument.getElementsByTagName('dc:title')[0].firstChild.data\n except IndexError as details:\n print \"Handling IndexError: \", details\n feed_title = \"Handling IndexError...\"\n except AttributeError as details:\n print \"Handling AttributeError: \", details\n feed_title = \"Handling AttributeError...\"\n # only get first 100 characters.. 
RSS\n feed_description = \"\"\n try:\n feed_description = xmldocument.getElementsByTagName('dc:description')[0].firstChild.data[:100]\n except IndexError as details:\n print \"Handling IndexError: \"\n feed_description = \"Handling IndexError\"\n except AttributeError as details:\n\tfeed_description = \"Handling AttributeError\"\n feed_link = xmldocument.getElementsByTagName('identifier')[0].firstChild.data # get header identifier for link value\n feed_pubDate = xmldocument.getElementsByTagName('datestamp')[0].firstChild.data # get header datestamp for pubDate value\n feed_guid = xmldocument.getElementsByTagName('identifier')[0].firstChild.data # get header identifier for guid value\\\n # return a PyRSS2Gen object\n return PyRSS2Gen.RSSItem(\n title = feed_title,\n link = feed_link,\n description = feed_description,\n guid = feed_guid,\n pubDate = datetime.strptime(feed_pubDate.replace(\"T\", \" \").replace(\"Z\", \"\"), '%Y-%m-%d %H:%M:%S')\n )", "def get_rss_item(self) -> str:\n base_item = '''\n<item>\n <title>{display_name} tweeted {id}</title>\n <link>{url}</link>\n <pubDate>{pub_date}</pubDate>\n <dc:creator>{display_name}</dc:creator>\n <category>Tweets</category>\n <guid isPermaLink=\"false\">{url}</guid>\n <description />\n <content:encoded><![CDATA[\n RSS_ITEM_PLACE_HOLDER\n ]]></content:encoded>\n</item>'''.format(\n display_name=self.display_name,\n id=self.id,\n url=self.url,\n pub_date=_rss_time_format(self.inner.created_at_in_seconds),\n )\n try:\n return base_item.replace('RSS_ITEM_PLACE_HOLDER', self.get_content())\n except:\n logging.exception('Failed to create RSS item for %s.', self.url)\n return base_item.replace('RSS_ITEM_PLACE_HOLDER', 'RSS Error. Please read {} directly.'.format(self.url))", "def retrieveFeed(self, rss_url):\n url = 'http://{}'.format(rss_url)\n result = feedparser.parse(url)\n if result.status != 200:\n sys.stdout.write('request failed for retrieve this RSS ({})\\n'.format(url))\n else:\n self.storeFeeds(url, result['items'])", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def process_line(self, line):\n find_result = re.findall(LINE_REGEX, line)\n line_data = {r[0]: r[1] for r in find_result}\n self.process_url(line_data.get('request_to'))\n self.process_status_code(line_data.get('response_status'))", "def parse_shaarli_rss_export(rss_file):\n\n rss_file.seek(0)\n entries = rss_file.read().split('<entry>')[1:]\n for entry in entries:\n # example entry:\n # <entry>\n # <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title>\n # <link href=\"https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html\" />\n # <id>https://demo.shaarli.org/?cEV4vw</id>\n # <published>2019-01-30T06:06:01+00:00</published>\n # <updated>2019-01-30T06:06:01+00:00</updated>\n # <content type=\"html\" xml:lang=\"en\"><![CDATA[<div class=\"markdown\"><p>&#8212; <a href=\"https://demo.shaarli.org/?cEV4vw\">Permalink</a></p></div>]]></content>\n # </entry>\n\n trailing_removed = entry.split('</entry>', 1)[0]\n leading_removed = trailing_removed.strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]\n\n title = str_between(get_row('title'), '<title>', '</title>').strip()\n url = str_between(get_row('link'), '<link href=\"', '\" />')\n ts_str = str_between(get_row('published'), '<published>', '</published>')\n time = datetime.strptime(ts_str, 
\"%Y-%m-%dT%H:%M:%S%z\")\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title or None,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def feed(self, entry):\r\n pass", "def run_rss(self):\n\n pass", "def article_extractor(rss_feed_link):\n user_agent = {\"user-agent\": \"Mozilla/5.0 (Windows NT 6.2; Win64;\\\n x64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1\"}\n try:\n feed = requests.get(rss_feed_link, headers=user_agent)\n except requests.exceptions.ConnectionError:\n print(\"No internet connection\")\n exit()\n\n dirty_content = BeautifulSoup(feed.text, \"xml\")\n return dirty_content", "def workAFeed(feed):\n print(\"::working \",feed)\n\n # add http\n if feed.find(\"http\") == -1:\n feed = \"http://\" + feed\n print (\"::feed=\",feed)\n\n return feed", "def get_news(rss_feed):\r\n\r\n class _CurrentData(object):\r\n \"\"\"Class holding a set of current attributes.\"\"\"\r\n item = None\r\n text = None\r\n\r\n def _start_element_handler(name, attrs):\r\n \"\"\"Handle XML start-elements.\"\"\"\r\n if name == 'item':\r\n # Allocate a new item.\r\n current.item = NewsItem()\r\n\r\n def _end_element_handler(name):\r\n \"\"\"Handle XML end-elements.\"\"\"\r\n if name == 'item':\r\n news_items.append(current.item)\r\n elif name in ('title', 'description', 'link', 'category'):\r\n try:\r\n setattr(current.item, name, current.text)\r\n except AttributeError:\r\n # The parser has run into a non-news item.\r\n pass\r\n\r\n def _char_data_handler(data):\r\n \"\"\"Handle XML element character data.\"\"\"\r\n current.text = data\r\n\r\n news_items = list()\r\n current = _CurrentData()\r\n\r\n parser = expat.ParserCreate()\r\n parser.StartElementHandler = _start_element_handler\r\n parser.EndElementHandler = _end_element_handler\r\n parser.CharacterDataHandler = _char_data_handler\r\n\r\n news_handle = urllib2.urlopen(rss_feed)\r\n xml_data = news_handle.read()\r\n \r\n parser.Parse(xml_data)\r\n\r\n return news_items", "def getFeed(self):\n\n entries_xml = []\n\n for entry in self.middleware.entries:\n request = entry['request']\n response = entry.get('response')\n begin = time.localtime(request['begin'])\n entry_id = self._generateEntryTagURI(entry)\n entry_title = '%s %s ' % (request['method'], request['url'])\n\n short_url = request['url']\n max_url_len = 40\n if len(short_url) > max_url_len:\n prefix = short_url[:9]\n suffix = short_url[-max_url_len+9:]\n short_url = prefix + '...' 
+ suffix\n entry_title = '%s %s ' % (request['method'], short_url)\n\n # Make the <rz:cgi_variable> nodes into a string\n cgivars = \"\"\n for k,v in request['cgi_variables']:\n newv = escape(str(v))\n s = cgi_variable_fmt % (k, newv)\n cgivars = cgivars + s\n\n # Make the <rz:cgi_variable> nodes into a string\n wsgivars = \"\"\n for k,v in request['wsgi_variables']:\n newv = escape(str(v))\n s = wsgi_variable_fmt % (k, newv)\n wsgivars = wsgivars + s\n\n # Make the <rz:request> node\n rzrequest = rzrequest_fmt % {\n 'begin': request['begin'],\n 'cgi_variables': cgivars,\n 'wsgi_variables': wsgivars,\n 'method': request['method'],\n 'url': request['url'],\n 'body': escape(request['body']),\n }\n\n if response is not None:\n # Make the <rz:request> node\n headers = ''\n for k,v in response['headers']:\n newv = escape(str(v))\n s = header_fmt % (k, newv)\n headers = headers + s\n\n rzresponse = rzresponse_fmt % {\n 'begin': response['begin'],\n 'end': response['end'],\n 'content-length': response['content-length'],\n 'headers': headers,\n 'status': response['status'],\n 'body': escape(response['body']),\n }\n else:\n rzresponse = ''\n\n\n # Make the atom:entry/atom:content node\n content = contentfmt % {\n 'logentry_id': entry_id,\n 'rzrequest': rzrequest,\n 'rzresponse': rzresponse,\n }\n\n entry_xml = entryfmt % {\n 'entry_id':entry_id,\n 'entry_title':escape(entry_title),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', begin),\n 'summary':escape(pprint.pformat(entry)),\n 'content':content,\n }\n entries_xml.append(entry_xml)\n\n now = time.time()\n\n body = feedfmt % {\n 'title':'repoze.debug feed for pid %s' % self.middleware.pid,\n 'entries':'\\n'.join(entries_xml),\n 'feed_id':self._generateFeedTagURI(now, self.middleware.pid),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(now)),\n }\n\n resp = Response(content_type='application/atom+xml', body=body)\n return resp", "def parse_rss_export(rss_file):\n\n rss_file.seek(0)\n items = rss_file.read().split('<item>')\n items = items[1:] if items else []\n for item in items:\n # example item:\n # <item>\n # <title><![CDATA[How JavaScript works: inside the V8 engine]]></title>\n # <category>Unread</category>\n # <link>https://blog.sessionstack.com/how-javascript-works-inside</link>\n # <guid>https://blog.sessionstack.com/how-javascript-works-inside</guid>\n # <pubDate>Mon, 21 Aug 2017 14:21:58 -0500</pubDate>\n # </item>\n\n trailing_removed = item.split('</item>', 1)[0]\n leading_removed = trailing_removed.split('<item>', 1)[-1].strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r for r in rows if r.strip().startswith('<{}>'.format(key))][0]\n\n url = str_between(get_row('link'), '<link>', '</link>')\n ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')\n time = datetime.strptime(ts_str, \"%a, %d %b %Y %H:%M:%S %z\")\n title = str_between(get_row('title'), '<![CDATA[', ']]').strip() or None\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "async def create_rss(channel_alias: str, request: Request):\r\n global channel_hash, client\r\n channel_alias = channel_alias.lstrip('@')\r\n private_channel = channel_alias[:8] == 'joinchat'\r\n if private_channel:\r\n private_hash = channel_alias[8:]\r\n channel_alias = 't.me/joinchat/' + private_hash\r\n try:\r\n await client.start()\r\n if channel_alias not in channel_hash:\r\n if private_channel:\r\n await client(ImportChatInviteRequest(private_hash))\r\n channel 
= await client.get_entity(channel_alias)\r\n ch_full = await client(GetFullChannelRequest(channel=channel))\r\n username = channel.username or channel.id\r\n channel_hash[channel_alias] = {\r\n 'username': username,\r\n 'title': channel.title,\r\n 'id': channel.id,\r\n 'about': ch_full.full_chat.about or str(username),\r\n }\r\n logging.info(f\"Adding to the hash '{channel_alias}'\")\r\n with open('hash.pickle', 'wb') as f:\r\n pickle.dump(channel_hash, f)\r\n ch = channel_hash[channel_alias]\r\n messages = [m async for m in client.iter_messages(\r\n ch['username'], limit=int(config['RSS']['RECORDS']))]\r\n except Exception as e:\r\n warn = f\"{str(e)}, request: '{channel_alias}'\"\r\n logging.warning(warn)\r\n return warn\r\n\r\n fg = FeedGenerator()\r\n fg.title(f\"{ch['title']} (@{ch['username']}, id:{ch['id']})\")\r\n fg.subtitle(ch['about'])\r\n link = channel_alias if private_channel else f\"t.me/s/{ch['username']}\"\r\n fg.link(href=f'https://{link}', rel='alternate')\r\n fg.generator(config['RSS']['GENERATOR'])\r\n fg.language(config['RSS']['LANGUAGE'])\r\n for m in messages:\r\n if not (config['RSS'].getboolean('SKIP_EMPTY') and not m.text):\r\n fe = fg.add_entry(order='append')\r\n link = 'https://t.me/' + ('c/' if private_channel else '')\r\n fe.guid(guid=f\"{link}{ch['username']}/{m.id}\", permalink=True)\r\n fe.content(markdown(m.text))\r\n fe.published(m.date)\r\n\r\n logging.debug(f\"Successfully requested '{ch['username']}'\")\r\n return Response(content=fg.rss_str(), media_type='application/xml')", "def get_rss_infos():\n\n url_rss_lib = \"http://www.liberation.fr/rss\"\n soup = utils.recovery_flux_url_rss(url_rss_lib)\n\n rss_items = soup.find_all(\"li\")\n\n rss_list = []\n\n link_rss = []\n\n for ri in rss_items:\n if ri.get(\"class\") == ['rss-item']:\n rss_list.append(ri.a.get('href'))\n\n for rl in rss_list:\n soup = utils.recovery_flux_url_rss(rl)\n entre = soup.find_all('entry')\n for e in entre:\n link_rss.append(e.link.get('href'))\n\n return link_rss", "def latestEntriesRss():\n now = datetime.now()\n latestEntries = session.query(Pokemon).order_by(desc(Pokemon.date_entered))\\\n .limit(20)\n rss = render_template('rss.xml', lastBuildDate=now, entries=latestEntries)\n response = make_response(rss)\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def publish_line(self, stream, line):\n pass", "def get_from_url(source):\n try:\n rss_news = feedparser.parse(source)\n result = parse_news(rss_news['entries'])\n except urllib.error.URLError:\n raise SystemExit(\"Source isn't available\")\n else:\n if len(result) == 0:\n raise SystemExit('Please, check if the entered link is correct!')\n else:\n return result", "def gnews(self):\n\t\tfeed_url = self.get_feed()\n\t\tfeed_data = feedparser.parse(feed_url)\n\t\tprint(\"\")\n\t\ttype_tiny = pyshorteners.Shortener()\n\t\tfor data in feed_data[\"items\"]:\n\t\t\ttiny_url = type_tiny.tinyurl.short(data[\"link\"])\n\t\t\t#tiny_url = tinyurl.create_one(data[\"link\"])\n\t\t\tprint('\\033[33m' + data[\"title\"] + \" : \" + Style.RESET_ALL + tiny_url)\n\t\t\tprint(\"\")", "def parse_medium_rss_export(rss_file):\n\n rss_file.seek(0)\n root = etree.parse(rss_file).getroot()\n items = root.find(\"channel\").findall(\"item\")\n for item in items:\n url = item.find(\"link\").text\n title = item.find(\"title\").text.strip()\n ts_str = item.find(\"pubDate\").text\n time = datetime.strptime(ts_str, \"%a, %d %b %Y %H:%M:%S %Z\")\n \n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': 
title or None,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def download_feed_return_objects(rss_url):\r\n try:\r\n feed_obj = rss_exists(rss_url)\r\n except:\r\n yield None\r\n return\r\n\r\n feed_obj_found = False\r\n feed_parser_results, success = get_rss(rss_url)\r\n\r\n if feed_parser_results is None:\r\n error_reporter.captureMessage(u'Feed Parser results is None', **dict(rss_url=rss_url))\r\n yield None\r\n return\r\n\r\n if feed_obj is None:\r\n feed_obj = create_new_feed(feed_parser_results, rss_url)\r\n else:\r\n feed_obj_found = True\r\n\r\n feed_id = feed_obj.id\r\n feed_obj.title = feed_parser_results.get(\"title\", \"\") or \"\"\r\n max_length_field(feed_obj, 'title', 100)\r\n\r\n feed_obj.status_code = feed_parser_results.get(\"status\", \"\") or 200\r\n feed_obj.status = find_feed_status_from_scode(feed_obj)\r\n\r\n feed_obj.etag = cut_clean_etag(feed_parser_results.get(\"etag\", \"\"))\r\n\r\n updated_date = feed_parser_results.get(\"updated_parsed\")\r\n feed_obj.updated = dt.fromtimestamp(mktime(updated_date)) if updated_date is not None else dt.utcnow()\r\n #\tfeed_obj.published = dt.fromtimestamp(mktime(published_date)) if published_date is not None else None\r\n feed_obj.last_check = dt.utcnow()\r\n\r\n # We could be creating a new feed, or updating the existing one.\r\n yield feed_obj\r\n rss_posts = []\r\n\r\n for feed_article in feed_parser_results.get(\"entries\", []):\r\n ptime = feed_article.get(\"published_parsed\", None)\r\n post_date = dt.fromtimestamp(mktime(ptime)) if ptime is not None else dt.utcnow()\r\n #\t\tprint \"%r\" % post\r\n p = Post(\r\n id=uuid.uuid1(),\r\n title=feed_article.get(\"title\", \"\"),\r\n author=feed_article.get(\"author\", \"\"),\r\n href=feed_article.get(\"href\", \"\"),\r\n post_id=feed_article.get(\"id\", \"\"),\r\n published_at=post_date,\r\n feed_id=feed_id\r\n )\r\n\r\n p.original_title = max_length_field(p, 'title', 200)\r\n p.original_author = max_length_field(p, 'author', 200)\r\n\r\n p.content_html = feed_article.get(\"content\", \"\") or \"\"\r\n\r\n if feed_article.has_key(\"media_content\"):\r\n media_contents = feed_article.get(\"media_content\", []) or []\r\n if media_contents is not None and (not isinstance(media_contents, basestring)) and isinstance(\r\n media_contents, collections.Iterable):\r\n p.media = [media.get(\"url\") for media in media_contents]\r\n\r\n hasHash = False\r\n\r\n if feed_article.has_key(\"feedburner_origlink\"):\r\n p.original_link = feed_article.get(\"feedburner_origlink\", \"\")\r\n if non_empty_str(p.original_link):\r\n p.link_hash = url_hash(safe_str(p.original_link))\r\n hasHash = True\r\n\r\n if feed_article.has_key(\"link\"):\r\n p.href = feed_article.get(\"link\", \"\")\r\n if not hasHash and non_empty_str(p.href):\r\n p.link_hash = url_hash(safe_str(p.href))\r\n hasHash = True\r\n\r\n if not hasHash:\r\n print \"Post don't have any hash\"\r\n\r\n p.title_hash = url_hash(safe_str(p.title)) if non_empty_str(p.title) else \"\"\r\n p.post_id_hash = url_hash(safe_str(p.post_id)) if non_empty_str(p.post_id) else \"\"\r\n\r\n if feed_article.has_key(\"tags\"):\r\n if isinstance(feed_article['tags'], collections.Iterable):\r\n p.tags = [pst.get(\"term\") for pst in feed_article['tags']]\r\n\r\n rss_posts.append(p)\r\n\r\n has_posts = len(rss_posts) > 0\r\n post_id_hashes = [p.post_id_hash for p in rss_posts]\r\n #\tpost_title_hashes = [p.title_hash for p in rss_posts]\r\n post_link_hashes = [p.link_hash for p in rss_posts]\r\n\r\n found_posts_id_hashes = []\r\n found_posts_link_hashes 
= []\r\n\r\n if feed_obj_found and has_posts:\r\n existing_posts = find_existing_posts(feed_id, post_id_hashes, post_link_hashes)\r\n\r\n for ex_post_id_hash, ex_link_hash in existing_posts:\r\n found_posts_id_hashes.append(ex_post_id_hash)\r\n found_posts_link_hashes.append(ex_link_hash)\r\n\r\n has_existing_posts = len(found_posts_id_hashes) > 0 or len(found_posts_link_hashes) > 0\r\n\r\n new_post_count = 0\r\n if has_posts:\r\n for rss_post in rss_posts:\r\n should_skip = False\r\n\r\n if has_existing_posts:\r\n if non_empty_str(rss_post.post_id_hash) and rss_post.post_id_hash in found_posts_id_hashes:\r\n should_skip = True\r\n elif rss_post.link_hash in found_posts_link_hashes:\r\n should_skip = True # \"Link Hash found in existing records\"\r\n\r\n if not should_skip:\r\n new_post_count += 1\r\n yield rss_post\r\n\r\n feed_history = FeedHistory(id=uuid.uuid1(),\r\n feed_id=feed_obj.id,\r\n timestamp=dt.utcnow(),\r\n status=feed_obj.status_code,\r\n post_count=new_post_count,\r\n etag=feed_obj.etag)\r\n yield feed_history", "def parse(self, response):\n item = NewsScraperItem()\n containers = response.xpath(\"//div[contains(@class,'largeTitle')]/article[contains(@class,\"\n \"'articleItem')]/div[contains(@class,'textDiv')]\")\n for info in containers:\n\n try:\n date = info.xpath(\".//div[contains(@class,'articleDetails')]/span[contains(@class,'date')]/text()\").extract_first()\n date = re.sub(r'\\xa0-\\xa0', '', date)\n # Convert 'minutes ago' to datetime\n date = datetime.now() - timedelta(minutes=int(re.sub(r'[^0-9]', '', date))) # Regex = Where not numeric\n item['date'] = date.strftime(\"%Y/%m/%d %H:%M:%S\")\n earn_id = re.search(r'[0-9]{4,}', info.xpath(\".//a/@onclick\").extract_first())\n item['id'] = earn_id.group()\n item['title'] = info.xpath(\".//a/text()\").extract_first()\n item['author'] = info.xpath(\".//div[contains(@class,'articleDetails')]/span/text()\").extract_first()\n item['text'] = info.xpath(\".//p/text()\").extract_first()\n item['link'] = info.xpath(\".//a/@href\").extract_first()\n yield item\n\n except:\n print(\"Unusual format detected\")\n logging.warning(\"Item skipped due to unusual format\")", "def stream_rss(request):\n return render_rss(\n request=request,\n annotations=_annotations(request),\n rss_url=request.route_url(\"stream_rss\"),\n html_url=request.route_url(\"stream\"),\n title=request.registry.settings.get(\"h.feed.title\") or _(\"Hypothesis Stream\"),\n description=request.registry.settings.get(\"h.feed.description\")\n or _(\"The Web. 
Annotated\"),\n )", "def fetch_url_feed(self, url, **args):\n return self.fetch(\"/url\", url=url, **args)", "def parse_articles(self, response):\n item = NasdaqcrawlerItem()\n item['date_published'] = response.xpath('//span[@itemprop=\"datePublished\"]/text()').extract()\n item['text'] = \"\".join(self.clean_text(response.xpath('//div[@id=\"articlebody\"]//p//text()').extract()))\n item['title'] = response.xpath('//h1/text()').extract()\n item['stock_ticker'] = response.meta['ticker']\n # captures any text between symbol/ and /\n # this should only return a single item\n \n yield item", "def get_feed(url, force_reload=False):\n\n useragent = \"Answerable RSS v0.1\"\n log(\"Requesting feed {}\", fg(url, yellow))\n cache_file = url.replace(\"/\", \"_\")\n\n # Get the conditions for the GET bandwith reduction\n etag = None\n modified = None\n if not force_reload:\n hit, path = cache.check(\"spider.rss\", cache_file, td(days=999))\n if hit:\n with open(path, \"r\") as fh:\n headers = json.load(fh)\n etag = headers[\"etag\"]\n modified = headers[\"modified\"]\n log(\"with {}: {}\", bold(\"etag\"), fg(etag, yellow))\n log(\"with {}: {}\", bold(\"modified\"), fg(modified, yellow))\n\n # Get the feed\n feed = feedparser.parse(url, agent=useragent, etag=etag, modified=modified)\n\n # Store the etag and/or modified headers\n if feed.status != 304:\n etag = feed.etag if \"etag\" in feed else None\n modified = feed.modified if \"modified\" in feed else None\n new_headers = {\n \"etag\": etag,\n \"modified\": modified,\n }\n cache.update(\"spider.rss\", cache_file, new_headers)\n log(\"Stored new {}: {}\", bold(\"etag\"), fg(etag, green))\n log(\"Stored new {}: {}\", bold(\"modified\"), fg(modified, green))\n\n return feed", "def rss(request, blog):\n\tblog = Blog.objects.get(urlname=blog)\n\tarticles = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:RSS_COUNT]\n\treturn render_to_response('rss/blog.html', {'blog': blog, 'articles': articles}, context_instance=RequestContext(request))", "def get_rss(address, website):\n #print address\n try:\n results = pattern.web.Newsfeed().search(address, count=100,\n cached=False, timeout=30)\n logger.debug('There are {} results from {}'.format(len(results),\n website))\n \n #print \"Results found\"\n except Exception as e:\n print 'There was an error. Check the log file for more information.'\n logger.warning('Problem fetching RSS feed for {}. 
{}'.format(address,\n e))\n results = None\n\n return results", "def feed() -> None:\n ...", "def parse(self, response):\n \n response.selector.register_namespace('n', 'http://www.sitemaps.org/schemas/sitemap/0.9')\n news_urls = response.xpath(\"//n:url/n:loc/text()\").extract()\n for url in news_urls:\n yield Request(url, callback = self.parse_news)", "def _retrieveFeed(self):\n url = self.url\n if url!='':\n self._last_update_time_in_minutes = time.time()/60\n self._last_update_time = DateTime()\n d = feedparser.parse(url)\n if getattr(d, 'bozo', 0) == 1 and not isinstance(d.get('bozo_exception'),\n ACCEPTED_FEEDPARSER_EXCEPTIONS):\n self._loaded = True # we tried at least but have a failed load\n self._failed = True\n return False\n self._title = d.feed.title\n self._siteurl = d.feed.link\n self._items = []\n for item in d['items']:\n try:\n link = item.links[0]['href']\n itemdict = {\n 'title': item.title,\n 'url': link,\n 'summary': item.get('description', ''),\n }\n if hasattr(item, \"updated\"):\n try:\n itemdict['updated'] = DateTime(item.updated)\n except DateTimeError:\n # It's okay to drop it because in the\n # template, this is checked with\n # ``exists:``\n pass\n except AttributeError:\n continue\n self._items.append(itemdict)\n self._loaded = True\n self._failed = False\n return True\n self._loaded = True\n self._failed = True # no url set means failed\n return False # no url set, although that actually should not really happen", "def getLineListing(line_list_url = LINE_LIST_URL, line_list_re = LINE_LIST_RE, base_url = BASE_URL):\n line_list_re = re.compile('<tr.+?<td.+?<a\\shref=\\\"(Sched/\\S+?)\\\".+?<b>(.+?)</b>.+?size=.*?\\\"1\\\">(.+?)</font>.+?size=\\\"2\\\">(.+?)</font>')\n \n html_data = urllib2.urlopen(line_list_url).read().replace('\\n','').replace('\\t','').replace('\\r','') #get rid of unecessary whitespace\n while html_data.find(' ') > -1:\n html_data = html_data.replace(' ', ' ') #HTML doesn't care about multiple spaces, we do\n return data = line_list_re.findall(html_data)", "def guidebook_news_feed(request):\n current_site = Site.objects.get_current()\n\n feed_title = 'DjangoCon US News Updates'\n feed_description = 'The latest updates and additions for DjangoCon US.'\n feed_mimetype = 'application/rss+xml'\n feed_template = 'pinax/blog/rss_feed.xml'\n\n blog_url = 'http://%s%s' % (current_site.domain, reverse('blog'))\n # feed_url = 'http://%s%s' % (current_site.domain, reverse(url_name, kwargs=kwargs))\n\n posts = Post.objects.published().exclude(title__endswith='Sponsor')\\\n .order_by('-published')\n\n if posts:\n feed_updated = posts[0].updated\n else:\n feed_updated = datetime(2009, 8, 1, 0, 0, 0)\n\n feed = render_to_string(feed_template, {\n # 'feed_id': feed_url,\n 'feed_title': feed_title,\n 'feed_description': feed_description,\n 'blog_url': blog_url,\n # 'feed_url': feed_url,\n 'feed_updated': feed_updated,\n 'entries': posts,\n 'current_site': current_site,\n })\n\n return HttpResponse(feed, content_type=feed_mimetype)", "def scrap(date=None):\n \n global htmlclient, bloomberg\n response = requests.get(bloomberg+\"/archive/news/\"+date, headers=htmlclient)\n if response.status_code==200:\n soup = BeautifulSoup(response.text)\n \n # Getting the urls to all the posts from the archive at the given date\n for story_list in soup.findAll(\"ul\", attrs={\"class\":\"stories\"}):\n storylinks = [ story.a.get(\"href\") for story in story_list.findAll(\"li\") ]\n \n # Function call to store the link contents\n if storylinks:\n for story in storylinks:\n 
store_it(date, story) \n return 1\n # Set flag to -1 when there is no posts available(i.e. status code is 408) in the archive or\n # it already stored all the articles from bloomberg\n else: \n return -1", "def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss feed!')\n self.parsed_feed = parsed_feed", "def parse_news(self, response):\n \n loader = NewsLoader(item=NewsItem(), response=response)\n loader.add_xpath('title', '//header//h1/text()')\n author = ''.join(response.xpath('//span[@class=\"byline\"]').extract())\n author = remove_tags(author).replace(\"by\", '').replace(' and ', ', ')\n loader.add_value('author', author)\n timestamp = response.xpath('//meta[@name=\"DC.date.issued\"][1]/@content').extract()[0]\n timestamp = du.normalize_timestamp(timestamp)\n loader.add_value('date', timestamp.split(' ')[0])\n loader.add_value('time', timestamp.split(' ')[1])\n list_of_contents = response.xpath(\n '//div[@id=\"storytext\"]/*[not(@class=\"cnnplayer\") and '\n 'not(@class=\"storytimestamp\")]').extract()\n content = ' '.join(list_of_contents)\n loader.add_value('content', content)\n loader.add_xpath('tags', '//meta[@name=\"keywords\"]/@content')\n return loader.load_item()", "def fetch_host_feed(self, host, **args):\n return self.fetch(\"/url\", host=host, **args)", "def call_feed(url: str) -> dict:\n\n if not url:\n return {}\n feed = feedparser.parse(url)\n return feed", "def parse_rss(database, feed, depth=1):\n # Get the updates article count, and article urls and publish dates.\n rss_a = rss_feed(feed)\n \n # Get all (article urls, publish dates) pairs\n articles = []\n pairs = rss_a[1].items()\n for url, pubdate in pairs: \n articles += crawl_url(database, url, date=pubdate, depth=depth)\n \n return articles", "def get_content(self):\n try:\n self.print_if_verbose(\n f\"Method 'get_content' is working: \\n\"\n f\"Trying to get content from RSS source: {self.source} ...\"\n )\n\n rss_xml = urlopen(self.source).read().decode(\"utf-8\")\n self.news_amount = rss_xml.count(\"<item>\")\n xml_tree = ET.ElementTree(ET.fromstring(rss_xml))\n self.content = xml_tree.find('channel')\n\n self.print_if_verbose(\n f\"Content of the RSS-source has been received successfully. \\n\"\n f\"There are {self.news_amount} news in the feed. \\n\"\n f\"Method 'get_content' is finished. \\n\"\n )\n\n return self.content\n\n except Exception as error:\n print(f\"Exception {error}- wrong URL! 
The program is stopped.\")", "def run(self):\n while self.i < len(self.series):\n # Grab line + RSS\n s = self.series[self.i]\n rss = self.request_rss(s.feedUrl)\n\n # Compose Episodes\n ep_dicts = []\n for entry in rss['entries']:\n ep_dicts.append(Episode(s, entry).__dict__)\n\n # Build result dict\n result_dict = dict()\n result_dict['series'] = deepcopy(s.__dict__)\n result_dict['series']['genres'] = \\\n result_dict['series']['genres'].split(';')\n result_dict['series']['type'] = 'series'\n result_dict['episodes'] = ep_dicts\n\n # Store podcast\n self.storer.store(result_dict)\n\n # Move onto the next one\n self.i += 20\n print(\"Retrieved \" + str(s.id))", "def process_content(self, channel) -> dict:\n\n self.print_if_verbose(f\"Method 'process_content' is working:\")\n\n if self.limit is None or self.limit >= self.news_amount:\n self.limit = self.news_amount\n\n rss_feed = {}\n rss_feed[\"Feed\"] = channel.findtext('title')\n rss_feed[\"Description\"] = channel.findtext('description')\n rss_feed[\"Link\"] = channel.findtext('link')\n rss_feed[\"Language\"] = channel.findtext('language')\n rss_feed[\"News\"] = []\n\n append_news_to_rss_feed = 0\n\n self.print_if_verbose(f\"Adding data to the work dict 'rss_feed'...\")\n\n POSSIBLE_IMAGE_TAGS = (\"content\", \"thumbnail\", \"image\")\n POSSIBLE_IMAGE_ATTR = (\"url\", \"href\")\n\n for item in channel.iterfind(\"item\"):\n child_news = {}\n child_news[\"Title\"] = item.findtext(\"title\")\n child_news[\"Link\"] = item.findtext(\"link\")\n child_news[\"PubDate\"] = self.get_formatted_date(item.findtext(\"pubDate\"))\n child_news[\"Source\"] = item.findtext(\"source\")\n child_news[\"ImageLink\"] = None\n child_news[\"ImageCacheName\"] = None\n\n for tag in POSSIBLE_IMAGE_TAGS:\n for item_field in item:\n if tag in item_field.tag:\n for attr in POSSIBLE_IMAGE_ATTR:\n if attr in item_field.attrib:\n child_news[\"ImageLink\"] = item_field.attrib[attr]\n child_news[\"ImageCacheName\"] = \\\n f\"{''.join(char for char in child_news['Link'] if char.isalnum())}.jpg\"\n break\n if child_news[\"ImageLink\"]:\n break\n if child_news[\"ImageLink\"]:\n break\n\n rss_feed[\"News\"].append(child_news)\n\n append_news_to_rss_feed += 1\n if append_news_to_rss_feed == self.limit:\n break\n\n self.print_if_verbose(\n f\"{append_news_to_rss_feed} news were added. \\n\"\n f\"Method 'process_content' is finished. 
\\n\"\n )\n\n return rss_feed", "def parse(self, response):\n yield{\n 'url': response.url,\n 'title': response.css(\"h1.article-main-title::text\").get(),\n 'sub_title': response.css(\"h2.article-sub-title::text\").get(),\n 'article_image': (response.css(\"div.article-image img::attr(src)\").get()),\n 'body': '\\n\\n'.join(response.css(\"div.article-body-container p::text\").getall()),\n 'published_date': (response.css(\"div.article-credit::text\").get().replace('|','').replace('\\r',''))[1:],\n 'source': 'One'\n }", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "def fetchJournalEntries(date):\n\t\n\tpattern = '%d/%m/%Y'\n\tdatetime_object = datetime.datetime.strptime(date, pattern)\n\t\n\t#Getting the feeds from respective feed functions\n\tslackFeed = getFromSlack(datetime_object)\n\twebServiceFeed = getFromWebService(datetime_object)\n\tgithubFeed = getFromGitService(datetime_object)\n\tdynamoFeed = getFromDynamo(datetime_object)\n\t\n\t#Combining feeds into a single output\n\tentireFeed = reconcileFeed(slackFeed, webServiceFeed, githubFeed, dynamoFeed)\n\t\n\treturn entireFeed", "def get_content(self, data):\n self.name = name = data['feed'].get('title')\n for feed in data['entries']:\n title = feed.get('title', 'Absence of title')\n link = feed.get('link', 'Absence of link')\n date = feed.get('published_parsed', 'Absence of date')\n img = get_img_container(link)\n summary_list = []\n links = []\n if feed.get('summary'):\n summary_list = [feed.get('summary')]\n if feed.get('links'):\n uncleaned_links = feed.get('links')\n links = string_handlers.get_links(uncleaned_links)\n img.extend(if_link_is_image(uncleaned_links))\n fields = 'name, title, link, date, img, content, links'\n item = namedtuple('item', fields)._make((name, title, link, date, img, summary_list, links))\n save_feed_into_cache(item)\n self.items.append(item)", "def update_rss_feed(torrent_dir, suggested_name, url, download_url, tree_size, torrents):\n # Fetching the existing feed, if possible\n filepath = os.path.join(torrent_dir, '{}.rss'.format(suggested_name))\n try:\n with open(filepath, 'rb') as fd:\n doc = xml.dom.minidom.parse(fd)\n\n except IOError:\n # The RSS file does not exist; it is probably a first run\n doc = None\n\n # Fixing download URL, if need be, such that it ends with a slash\n if download_url[-1] != '/':\n download_url += '/'\n\n # Building/Verifying the XML structure\n try:\n chan = check_rss_dom_structure(doc)\n except:\n doc, chan = init_rss_dom_structure(url)\n\n for torrent_data in torrents:\n item = doc.createElement('item')\n chan.appendChild(item)\n\n title_elmt = doc.createElement('title')\n title_txt = doc.createTextNode('Package {} for tree_size {}'.format(torrent_data[2], tree_size))\n title_elmt.appendChild(title_txt)\n item.appendChild(title_elmt)\n\n desc_elmt = doc.createElement('description')\n desc_txt = doc.createTextNode(\n 'Comment: {} Creation Date: {}'.format(torrent_data[0]['comment'], torrent_data[0]['creation date'])\n )\n desc_elmt.appendChild(desc_txt)\n item.appendChild(desc_elmt)\n\n guid_elmt = doc.createElement('guid')\n fp = codecs.getencoder('hex')(torrent_data[1])[0]\n guid_txt = 
doc.createTextNode(fp.decode('UTF-8'))\n guid_elmt.appendChild(guid_txt)\n item.appendChild(guid_elmt)\n\n enclosure_elmt = doc.createElement('enclosure')\n enclosure_elmt.setAttribute('url', download_url + build_torrent_name(url, torrent_data[2], tree_size))\n enclosure_elmt.setAttribute('type', 'application/x-bittorrent')\n enclosure_elmt.setAttribute('len', str(torrent_data[3]))\n item.appendChild(enclosure_elmt)\n\n with open(filepath, 'wb') as fd:\n fd.write(doc.toxml('UTF-8'))", "def generate_feed(results, generator):\n\n for result in results:\n content = FeedContentWrapper(result)\n\n content.add_premium_logo_to_image_url()\n feed_item = generator.add_entry(order='append')\n feed_item.id(content.id)\n feed_item.author(author=content.author)\n feed_item.link(href='%s%s' % (WELT_URL, content.web_url))\n feed_item.catalogue.availability_date(content.publication_date)\n feed_item.title(content.seo_title)\n feed_item.description(content.intro)\n feed_item.content(content.premium_paragraph)\n feed_item.catalogue.id(content.id)\n feed_item.catalogue.brand('WELT Plus')\n feed_item.catalogue.condition('new')\n feed_item.catalogue.google_product_category('Media > Magazines & Newspapers')\n feed_item.catalogue.product_type(content.category)\n feed_item.catalogue.image_link(content.add_premium_logo_to_image_url())\n feed_item.catalogue.additional_image_link(content.add_premium_logo_to_image_url(default_image=False))\n feed_item.catalogue.custom_label_0(content.topic)\n feed_item.catalogue.custom_label_1(content.headline)\n feed_item.catalogue.custom_label_2(str(content.reading_time))\n feed_item.catalogue.custom_label_3(content.age)\n feed_item.catalogue.custom_label_4(content.tags)", "def get_rss_path(request_path):\r\n\r\n # e.g. the wiki homepage (LW front page) in particular needs a sensible RSS link\r\n path = '/' if request_path.startswith(\"/wiki/\") else request_path\r\n\r\n # On user profile pages pulled from the wiki the RSS feed should point to\r\n # the overview page's feed\r\n if old_user_rss_re.match(request_path) or old_user_rss_re2.match(request_path):\r\n path = path + \"overviewrss/\"\r\n\r\n if overview_rss_re.match(request_path):\r\n path = path[:-9] + \"overviewrss/\"\r\n\r\n if comments_rss_re.match(request_path):\r\n path = path[:-9] + \"commentsrss/\"\r\n\r\n return add_sr(join_urls(path, '.rss'))", "def output(self):\n feed = []\n feed.append('''<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <?xml-stylesheet href=\"http://www.blogger.com/styles/atom.css\" type=\"text/css\"?>\n <feed version=\"%(version)s\" xmlns=\"http://purl.org/atom/ns#\" xml:lang=\"%(lang)s\">\n <title mode=\"escaped\" type=\"text/html\">%(title)s</title>\n <link rel=\"alternate\" type=\"text/html\" href=\"%(url)s\" />\n <modified>%(modified)s</modified>\n ''' % self.__dict__\n )\n \n # auto-generate info if none provided\n if self.info:\n feed.append('''<info>%s</info>''' % self.info)\n else:\n feed.append('''<info mode=\"xml\" type=\"text/html\">\n <div xmlns=\"http://www.w3.org/1999/xhtml\">This is an Atom formatted XML site feed. It is intended to be viewed in a Newsreader or syndicated to another site. 
Please visit the <a href=\"http://help.blogger.com/bin/answer.py?answer=697\">Blogger Help</a> for more info.</div>\n </info>\n ''')\n \n if self.author:\n feed.append('''<author>%s</author>''' % self.author.output())\n for person in self.contributors:\n feed.append('''<contributor>%s</contributor>''' % person.output())\n if self.tagline:\n feed.append('''<tagline mode=\"escaped\" type=\"text/html\">%s</tagline>''' % self.tagline)\n if self.id:\n feed.append('''<id>%s</id>''' % self.id)\n if self.generator:\n feed.append('''<generator>%s</generator>''' % self.generator)\n if self.copyright:\n feed.append('''<copyright>%s</copyright>''' % self.copyright)\n for entry in self.entries:\n feed.append(entry.output())\n feed.append('''</feed>\\n''')\n return '\\n'.join(feed)", "def add_item(link, desc, title, date):\n item = rsslib.Item()\n item.link = link\n item.description = 'Keywords: ' + desc\n item.title = title\n item.pubDate = datetime.strptime(date, '%Y-%m-%d, %H:%M%p %Z')\n return item", "def parse_entry(self, response):\n hxs = HtmlXPathSelector(response)\n # crawl entry detail page and mark is_entry=True\n e = self.crawl_version( response )\n e['is_entry'] = True\n yield e\n # iterate version detail page less than ITER_VERSION_LIMIT, not include entry detail page\n cnt = 0\n for v in hxs.select(\"//ul[@class='mod-app-item']/li/p[@class='app-name']/a/@href\").extract():\n detail_url = urlparse.urljoin( response.url , v )\n detail_url = self.refactor_app_url( detail_url )\n\n if cnt > self.ITER_VERSION_MAX: break\n else: cnt+=1\n yield Request( url=detail_url , callback=self.crawl_version, \n meta={\"Referer\":response.url},\n headers={\"Referer\":response.url} )", "def test_rss_is_parseable(self):\r\n [make_bookmark() for i in range(10)]\r\n transaction.commit()\r\n\r\n res = self.app.get('/rss')\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"200 OK\",\r\n msg='recent status is 200, ' + res.status)\r\n\r\n # http://packages.python.org/feedparser/\r\n # introduction.html#parsing-a-feed-from-a-string\r\n parsed = feedparser.parse(res.body)\r\n links = []\r\n for entry in parsed.entries:\r\n links.append({\r\n 'title': entry.title,\r\n 'category': entry.category,\r\n 'date': time.strftime('%d %b %Y', entry.updated_parsed),\r\n 'description': entry.description,\r\n 'link': entry.link,\r\n })\r\n\r\n self.assertTrue(links, 'The feed should have a list of links.')\r\n self.assertEqual(10, len(links), 'There are 10 links in the feed.')\r\n\r\n sample_item = links[0]\r\n self.assertTrue(sample_item['title'], 'Items have a title.')\r\n self.assertTrue(\r\n sample_item['link'],\r\n 'Items have a link to reach things.')\r\n self.assertTrue(\r\n 'description' in sample_item,\r\n 'Items have a description string.')", "def reuters_article(self, response):\n item = NewsScraperItem()\n item['source'] = 'Reuters'\n item[\"category\"] = response.meta.get(\"category\")\n item[\"article_address\"] = response.url\n item[\"heading\"] = response.xpath(\"string(//h1)\").get().strip()\n item[\"snippet\"] = (\n response.xpath(\n 'string(//p[@class=\"Paragraph-paragraph-2Bgue ArticleBody-para-TD_9x\"])'\n )\n .get()\n .strip()\n )\n item[\"image_source\"] = \"https://i1.sndcdn.com/avatars-000334209154-7w0njd-t500x500.jpg\"\n item[\"author\"] = (\n response.xpath(\n 'string(//a[@class=\"TextLabel__text-label___3oCVw TextLabel__black-to-orange___23uc0 TextLabel__serif___3lOpX Byline-author-2BSir\"])'\n )\n .get()\n .strip()\n )\n yield item", "def get_headlines(newssource):\n \n \n newssource_dict = {}\n url = 
'https://newsapi.org/v1/articles?source=' + newssource + '&sortBy=top&apiKey=' + api\n    request = http.request('GET',url,timeout=4.0)\n\n    headline = json.loads(request.data)\n    \n    if not headline['articles']:\n        return \"NewsAPI cannot receive information from \" + newssource + \" right now\"\n    \n    newssource_dict['url'] = headline['articles'][0]['url']\n    newssource_dict['title'] = headline['articles'][0]['title']\n    newssource_dict['description'] = headline['articles'][0]['description']\n    \n    \n    return newssource_dict", "def _create_response_atom_feed(request, plans, feed_title=''):\n    feed = AtomFeed(feed_title, feed_url=request.url, url=request.url_root)\n\n    for p in plans:\n        formatted = _format_plan(p, request.url_root)\n\n        feed.add(\n            title=formatted['title'],\n            content=formatted['content'],\n            content_type='html',\n            author=\"OpenTABA.info\",\n            # id=url + '&status=' + p['status'], \n            # ^^ it seems like the &tblView= value keeps changing in the URL, which causes the ID to change and dlvr.it to republish items.\n            id=\"%s-%s\" % (formatted['title'], p['status']),\n            # this is a unique ID (not real URL) so adding status to ensure uniqueness in TBA stages\n            url=formatted['url'],\n            links=formatted['links'],\n            updated=formatted['last_update']\n        )\n\n    return feed", "def general_news():\n\n    return general_scraper(['http://mesva.univaq.it/'])", "def fetch(feed):\n    # Fetch the feed data.\n    data = feedparser.parse(feed.ext_url)\n    new_articles = []\n\n    # If the `bozo` value is anything\n    # but 0, there was an error parsing (or connecting) to the feed.\n    if data.bozo:\n        # Some errors are ok.\n        if not isinstance(data.bozo_exception, feedparser.CharacterEncodingOverride) and not isinstance(data.bozo_exception, feedparser.NonXMLContentType):\n            raise data.bozo_exception\n\n    for entry in data.entries:\n\n        # URL for this entry.\n        url = entry['links'][0]['href']\n\n        # Check for an existing Article.\n        # If one exists, skip.\n        if Article.objects(ext_url=url).first():\n            continue\n\n        data = extractor.extract(url, existing_data=entry)\n\n        if data is None:\n            continue\n\n        # Secondary check for an existing Article,\n        # by checking the title and source.\n        existing = Article.objects(title=data['title']).first()\n        if existing and existing.feed.source == feed.source:\n            continue\n\n        data['feed'] = feed\n\n        article = Article(**data)\n        article.save()\n        new_articles.append(article)\n\n    return new_articles", "def extract_articles(self, parsed_xml):\n\n        # Iterates over every item (article) in xml\n        for item in parsed_xml.xpath(\"//item\"):\n\n            article = {}\n\n\n            article['title'] = self.get_text_or_attr(item, 'title')\n\n\n            # The article's categories must be always a list, even if it has\n            # only one element.\n            categories = self.get_text_or_attr(item, 'category')\n\n            if isinstance(categories, str):\n                categories = [categories]\n\n            article['categories'] = categories\n\n\n            url = self.get_text_or_attr(item, 'feedburner:origLink')\n            article['url'] = self.remove_query(url)\n\n            self.article_url = article['url']\n\n\n            # If article's URL is already stored, don't parse it again\n            if Article.objects.filter(url=article['url']).count() > 0:\n                continue\n\n\n            # It is interesting to have the publication date as a `dateutil`\n            # object, so we can do whatever manipulation we want.\n            pub_date = self.get_text_or_attr(item, 'pubDate')\n            article['date'] = self.parse_datetime_passing_errors(pub_date)\n\n\n            # Gets the author attribute and tries to fetch information about\n            # him/her. 
An article can have more than one author; on techcrunch's\n # feed, they are separated by a comma.\n author_names = self.get_text_or_attr(item, 'dc:creator').split(',')\n article['authors'] = []\n\n for i, name in enumerate(author_names):\n article['authors'] += [self.get_author(name, i)]\n\n\n # Tries to find the article's thumbnail url\n thumb = self.get_text_or_attr(item, 'media:thumbnail', 'url')\n if thumb and thumb[0]:\n article['thumb'] = self.remove_query(thumb[0])\n\n\n # Gets the article's description and strip all html tags from it\n content = self.clear_text(item.xpath('description'))\n content = content.strip(' Read More').strip('&nbsp;').strip()\n\n\n article['content'] = content\n\n\n yield article", "def news_from_url(url: str, limit_news: int, file_name):\n logger.info('news_from_url start') #Logs a message\n request = connect_url(url)\n if request == None:\n return None\n\n soup = BeautifulSoup(request.content, 'xml')\n feed_title = feed_title_from_soup(soup)\n if feed_title == None:\n print('This is an incorrect RSS url!')\n return None\n\n news_active = news_active_from_url(url, soup, feed_title, limit_news)\n\n if news_active == None:\n print('No news!')\n return None\n\n # read from file to news_all\n news_all = read_storage(file_name)\n\n # update news_all\n news_all = news_update(news_all, news_active)\n\n # write news_all to file\n if write_storage(storage_file_name, news_all) == False:\n print('Error while write to local storage!')\n return news_active", "def rss(self):\n if hasattr(self, \"_rss\"):\n return self._rss\n else:\n return None", "def make_entry(line):\n #focus on relevant parts\n parts = line.split(\" - \")\n visitor_id = parts[0]\n subparts = parts[1].split('\"')\n method_and_uri = subparts[1]\n method_and_uri_parts = method_and_uri.split(\" \")\n method = method_and_uri_parts[0]\n uri = method_and_uri_parts[1]\n d = dict()\n d[\"visitor_id\"] = visitor_id\n d[\"method\"] = method\n d[\"uri\"] = uri\n return d", "def feed(self):\n feed_dict = feedparser.parse(self.URL)\n return [self.entry_dict(entry) for entry in feed_dict['entries']]", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def get_news(news_url):\n news_final = []\n try:\n news_handler = urllib.urlopen(news_url)\n news = news_handler.read()\n news = nl2br(news)\n news = string.split(news, '<br/>')\n\n news_array = {}\n value = {}\n for newsweb in news:\n value = string.split(newsweb, '|')\n if len(value[0]) > 1:\n news_array[value[0]] = value[1]\n\n info = {}\n for k in news_array:\n info = k[0:int(k.find(\"http://\") - 1)]\n info = string.split(k, ' - ')\n news_final.append((info[0], info[1], news_array[k]))\n\n news_handler.close()\n except IndexError:\n pass\n except IOError:\n pass\n\n return news_final", "def get_information(article_link):\n\n if \"video\" in article_link or \"/apps/\" in article_link or \"checknews\" in\\\n article_link or not re.search(r\"\\d\\d\\d\\d/\\d\\d/\\d\\d\", article_link):\n return None\n\n else:\n\n date_article = re.search(r\"\\d{4}/\\d{2}/\\d{2}\", article_link)[0]\n date_article = date.datetime.strptime(date_article, \"%Y/%m/%d\")\n\n diff_date = date.datetime.now() - date_article\n\n if diff_date.days > 7:\n return None\n\n else:\n req = requests.get(article_link)\n req.encoding = \"utf-8\"\n data = req.text\n soup = BeautifulSoup(data, \"lxml\")\n\n if soup.find(\n \"div\",\n class_=\"direct-headband\") or article_link != req.url:\n return None\n else:\n balise_title = soup.find(\"h1\")\n balise_title = 
balise_title.get_text()\n balise_title = re.sub(r\"\\s\\s+\", \"\", balise_title)\n\n newspaper = \"Liberation\"\n title = unidecode.unidecode(balise_title)\n\n author = \"\"\n for span in soup.find_all('span'):\n if span.get(\"class\") == ['author']:\n if(span.a):\n author = span.a.string\n if span.get(\"class\") == ['date']:\n if(span.time):\n date_p = date.datetime.strptime(\n span.time.get(\"datetime\"), \"%Y-%m-%dT\" +\n \"%H:%M:%S\").date()\n date_p = date_p.strftime(\"%Y-%m-%d\")\n print(date_p)\n\n content = \"\"\n for div in soup.find_all('div'):\n for p in div.find_all('p'):\n content += p.get_text() + \" \"\n content = re.sub(\"<>\", \"\", content)\n content = unidecode.unidecode(content)\n\n new_article = utils.recovery_article(\n title, newspaper, [author], date_p, content, \" \")\n\n return new_article", "def __init__(self, googleReader, type):\r\n super(SpecialFeed, self).__init__(\r\n googleReader,\r\n title = type,\r\n id = ReaderUrl.SPECIAL_FEEDS_PART_URL+type,\r\n unread = 0,\r\n categories = [],\r\n )\r\n self.type = type\r\n\r\n self.fetchUrl = ReaderUrl.CONTENT_BASE_URL + Category.urlQuote(self.id)", "def _do_fetch(self, line: str) -> None:\n while True:\n _, sha, value = line.split(\" \")\n self._fetch(sha)\n line = readline()\n if line == \"\":\n break\n _write()", "def rss2(request):\n return {'pastes': previous()}", "def get_news(category):\n get_news_url = base_url.format(category,api_key)\n\n with urllib.request.urlopen(get_news_url) as url:\n get_news_data = url.read()\n get_news_response= json.loads(get_news_data)\n\n news_results = None\n\n if get_news_response['sources']:\n news_results_list = get_news_response['sources']\n news_results = process_results(news_results_list)\n\n return news_results", "def feed(self, line):\n s = line.split()\n atom = []\n self.itype = None\n self.ln += 1\n # No sense in doing anything for an empty line or a comment line.\n if len(s) == 0 or match('^;',line): return None, None\n # Now go through all the cases.\n if match('^\\[.*\\]',line):\n # Makes a word like \"atoms\", \"bonds\" etc.\n self.sec = sub('[\\[\\] \\n]','',line)\n elif self.sec == 'counterpoise':\n self.itype = cptypes[int(s[2])]\n atom = [s[0],s[1]]\n elif self.sec == 'NDDO':\n # NDDO hasn't been tested since the refactoring.\n self.itype = '_'.join(['NDDO', s[0], s[1]])\n else:\n return [],\"Confused\"\n if len(atom) > 1 and atom[0] > atom[-1]:\n # Enforce a canonical ordering of the atom labels in a parameter ID\n atom = atom[::-1]\n self.suffix = ''.join(atom)", "def get_article(item, source, reprocess=False):\n article = dict()\n encoded = item.get('link').encode('utf-8')\n article['feed_source'] = source.replace('www.google.com', 'google.com')\n article['uuid'] = hashlib.sha256(encoded).hexdigest()\n processed = is_found(article['uuid'])\n if processed and not reprocess:\n # logger.debug(\"Skipping %s\", article['uuid'])\n return {'article': processed, 'from_store': True}\n article['title'] = item.get('title', None)\n href = item.get('link', None)\n article['href'] = strip_google(href)\n article['source'] = derive_source(article['href'])\n article['collected'] = now_time()\n article['published'] = item.get('published', None)\n article['summary'] = item.get('summary', None)\n\n page_content = get_page_content(article['href'])\n if not page_content:\n logger.debug(\"No content found: %s\" % article['href'])\n return {'article': None, 'from_store': True}\n paragraphs = justext.justext(page_content,\n justext.get_stoplist(\"English\"),\n no_headings=True,\n 
max_heading_distance=150,\n length_high=140,\n max_link_density=0.4,\n stopwords_low=0.2,\n stopwords_high=0.3)\n text_content = list()\n for paragraph in paragraphs:\n if paragraph.is_boilerplate:\n continue\n text_content.append(paragraph.text)\n text_content = '\\n'.join(text_content)\n tokens = get_tokens(text_content)\n\n article['word_count'] = len(tokens)\n article['read_time'] = round(float(article['word_count'])/250, 2)\n clean = cleaned_tokens(tokens)\n article['tokens'] = [{t[0]:t[1]}\n for t in nltk.FreqDist(clean).most_common(100)]\n article['tags'] = [list(x.keys())[0] for x in article['tokens'][0:7]]\n article['sentiment'] = get_sentiment(text_content)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n if not reprocess or not processed:\n try:\n articles.insert(article)\n except Exception as e:\n pass\n if processed:\n print(processed)\n articles.update({'_id': ObjectId(processed['_id'])}, {'$set': article})\n monitor = get_monitor_obj(article['feed_source'])\n return {'article': article, 'monitor': monitor, 'from_store': False}", "def news()->str:#return array[news desc,news link]\n event_log(\"retrieve news data....\",\"\")\n c = 0\n location = read_json(\"news_api\")[0]\n main_url = \"https://newsapi.org/v2/top-headlines?country=\"+location+\"&apiKey=\"+read_json(\"news_api\")[1]+\"\"#add a country selection optin via json\n page = requests.get(main_url).json()\n article = page[\"articles\"]\n news_result = []\n for data in article:\n news_result.append([data[\"title\"],str(data[\"url\"]).replace('\"',\" \")])#exctracts the wanted data from api\n if c == 5:#add this to json file so scalibility\n break\n c+=1\n return news_result", "def main():\n\n # Title\n st.title(\"Article Clipper Helper\")\n \n # display image\n image = Image.open(\"ab_ws_logo.png\")\n st.image(image, use_column_width=True)\n \n st.subheader(\"Paste URL(s) Here:\")\n\n ####################################################################\n ### User Input Fields ###\n ####################################################################\n\n # User input -- User pastes all links here \n user_url = st.text_input(\"Paste Link(s) Here:\",\"\")\n\n # Convert user input urls to str type\n urls = str(user_url)\n\n ####################################################################\n ### Extract URL Article Info from Google RSS Feed ### \n ####################################################################\n\n # Create a submission button to parse URL information \n if st.button(\"Get URL Info\"):\n \n new_urls = []\n\n # Separate user input URLs \n splited = urls.split(\"http\")\n for each in splited:\n if \"://\" in each:\n new_urls.append(\"http\" + each.strip())\n\n data = []\n # Output column names \n columns = ['Article Title', 'Date', 'Outlet Name', 'Link']\n for s in new_urls:\n # Insert urls into google news url & query RSS feed\n url = \"https://news.google.com/rss/search?q=\" + s \n\n d = feedparser.parse(url)\n if len(d['entries']) == 0:\n data.append(['NaN', 'NaN', 'NaN', s])\n try:\n for i, entry in enumerate(d.entries, 1):\n p = entry.published_parsed\n sortkey = \"%04d%02d%02d%02d%02d%02d\" % (p.tm_year, p.tm_mon, p.tm_mday, p.tm_hour, p.tm_min, p.tm_sec)\n datetime_obj = datetime.strptime(entry.published, \"%a, %d %b %Y %H:%M:%S %Z\")\n tmp = {\n \"no\" : i,\n \"title\" : entry.title,\n \"summery\" : entry.summary,\n \"link\" : entry.link,\n \"published\" : entry.published,\n \"sortkey\" : sortkey,\n \"source\": entry.source\n }\n if tmp['link'] == s:\n src_ttl = 
tmp['source']['title'].split(\".\")[0].strip()\n data.append([tmp['title'], datetime_obj.strftime(\"%m/%d/%y\"), src_ttl, tmp['link']])\n except:\n print(f\"No data returned for {s}\")\n\n # Google RSS Feed Data\n df = pd.DataFrame(data=data, columns=columns)\n\n # Add Additional Media Tracking Columns \n df['Market'] = 'National'\n df['Media Type'] = 'Online'\n \n ####################################################################\n ### Similar Web Data Extraction ### \n #################################################################### \n\n # Automatically update the date to use with API \n lastmonth = int(pd.to_datetime(\"today\").strftime(\"%Y%m\"))-2\n lm = str(lastmonth)\n last_month = lm[:4] + '-' + lm[4:]\n\n # Create a list of domains \n mass_urls = df['Link'].tolist()\n\n # Get Raw Domains \n domains = []\n for x in mass_urls:\n tsd, td, tsu = extract(x) \n\n url = td + '.' + tsu\n domains.append(url)\n\n df['Outlet Domain'] = domains\n\n # Similar Web API Creds\n payload = {'api_key': st.secrets['api_key'], \n 'start_date': last_month, \n 'end_date': last_month, \n 'country': 'US', \n 'granularity': 'monthly', \n 'main_domain_only': 'False', \n 'format': 'json'}\n\n sw_data = []\n # Output column names \n sw_columns = ['Outlet Domain','Outlet Reach (Monthly)']\n \n data_list = []\n\n for visit in domains:\n\n url= 'https://api.similarweb.com/v1/website/{}/total-traffic-and-engagement/visits'.format(visit)\n\n r=requests.get(url,params=payload)\n sw_data = r.json()\n \n url_info = sw_data['meta']['request']['domain']\n\n visit_info = sw_data['visits'][0]['visits']\n #visit_info = my_value(round(visit_info,2))\n\n data_list.append([url_info, visit_info])\n\n sw_df = pd.DataFrame(data=data_list, columns=sw_columns)\n\n sw_df['Outlet Reach (Monthly)'] = pd.to_numeric(sw_df['Outlet Reach (Monthly)'], errors='coerce')\n sw_df['Outlet Reach (Weekly)'] = sw_df['Outlet Reach (Monthly)'] /4\n \n ####################################################################\n ### Merge Google RSS DF & Similar Web DF ### \n #################################################################### \n\n # config path\n cfg_path = \"config.yaml\"\n cfg = yaml.safe_load(open(cfg_path))\n\n # create temp df\n tmp_df = pd.DataFrame(columns=[*range(42)])\n tmp_df = tmp_df.rename(columns=cfg[\"tracker_cols\"])\n\n new_df = tmp_df.append(df)\n new_df[['Outlet Reach (Weekly)','Outlet Reach (Monthly)']] = new_df[['Outlet Reach (Weekly)','Outlet Reach (Monthly)']].apply(pd.to_numeric)\n\n output_df_one = pd.merge(df, sw_df, on=\"Outlet Domain\", how=\"left\")\n output_df = pd.concat([tmp_df, output_df_one])\n\n del output_df['Outlet Domain']\n\n st.dataframe(output_df)\n\n # write to file for download in Tracker Format\n tmp_download_link = download_link(\n output_df,\n \"output.csv\",\n \"Click here to download in Tracker Format!\",\n )\n st.markdown(tmp_download_link, unsafe_allow_html=True)", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n 
embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def rssCheck(podcastName, source, url):\n try:\n headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}\n req = requests.get(url, headers=headers)\n root = etree.fromstring(req.text)\n rssArray = []\n for element in root[0].iter('item'):\n try:\n title = element.find(\"title\").text.replace(\"''\", \"'\")\n description = element.find(\"description\").text.replace(\"<strong>\", \"\").replace(\"</strong>\", \"\").replace(\"&amp;\", \"and\").replace(\"'\",\"''\")\n date = element.find(\"pubDate\").text\n date = date.split(\" \")\n date = datetime.strptime(date[1] + date[2] + date[3], \"%d%b%Y\")\n dateString = str(date.month) + \"-\" + str(date.day) + \"-\" + str(date.year)\n url = ResolveRouter.urlRouter(podcastName, source, element)\n except:\n print(\"error in XMLDetailsDebug parsing issue\")\n if(len(title) > 0 and len(description) > 0 and len(dateString) > 0 and len(url) > 0):\n rssArray.append([title, dateString, url, description])\n else:\n print(\"error in XMLDetailsDebug parsing issue\")\n return rssArray\n except Exception as e:\n print(e)\n Tools.writeException(\"getXMLDetailsDebug\", e)", "def parse_feed(uri):\n\n if OUTPUT:\n print \"parsing \" + uri\n\n feed = urllib2.urlopen(uri)\n xml = minidom.parse(feed)\n \n # look for <enclosure> tags\n enclosures = xml.getElementsByTagName(\"enclosure\")\n\n # extract the url attribute from any <enclosure> tags found\n file_uris = []\n for enclosure in enclosures:\n file_uris.append(enclosure.attributes[\"url\"].value)\n\n download_files(file_uris)", "def entry(request, entry_id):\n\n __time_update(request.user)\n\n try:\n entry = Entry.objects.get(id=entry_id)\n feed = entry.feed\n\n if feed.user == request.user:\n entry = entry.entry.read()\n else:\n return render_to_response('message.html', {'message':\n 'There is no such entry.',\n 'back': '/feeds'})\n except:\n return render_to_response('message.html', {'message':\n 'Error opening entry file! Please, reload feed.',\n 'back': '/feeds'})\n\n return HttpResponse(entry)", "def print_feed(list_with_items):\n result_str = list_with_items[0].name + '\\n'\n for item in list_with_items:\n item_as_str = (f'Title: {item.title}\\nLink: {item.link}\\n'\n f'Date: {time.strftime(\"%y-%m-%d %H:%M\", tuple(item.date))}')\n result_str += item_as_str\n result_str += string_handlers.get_str_content(item.content)\n result_str += string_handlers.get_img_as_str(item.img)\n result_str += string_handlers.get_links_as_str(item.links) + '\\n\\n'\n return result_str", "def GetLine(line):\r\n pass", "def get(self, update, context):\n\n telegram_user = update.message.from_user\n\n if len(context.args) == 0:\n message = \"no argument please put atleast one To get the last news of your subscription please use /get <entryname> [optional: <count 1-10>]. Make sure you first add a feed using the /add command.\"\n update.message.reply_text(message)\n return\n\n if len(context.args) > 2:\n message = \"To get the last news of your subscription please use /get <entryname> [optional: <count 1-10>]. 
Make sure you first add a feed using the /add command.\"\n            update.message.reply_text(message)\n            return\n\n        if len(context.args) == 2:\n            args_entry = context.args[0]\n            args_count = int(context.args[1])\n        else:\n            args_entry = context.args[0]\n            args_count = 4\n\n        url = self.db.get_user_bookmark(telegram_id=telegram_user.id, alias=args_entry)\n        if url is None:\n            message = (\n                \"I cannot find an entry with label \"\n                + args_entry\n                + \" in your subscriptions! Please check your subscriptions using /list and use the delete command \"\n                \"again! \"\n            )\n            update.message.reply_text(message)\n            return\n\n        entries = FeedHandler.parse_feed(url[0], args_count)\n        for entry in entries:\n            soup = BeautifulSoup(entry.summary, features=\"html.parser\")\n            desc = soup.get_text()\n            message = (\n                \"[\" + url[1] + \"] <a href='\" + entry.link + \"'>\" + entry.title + \"</a>\\n\\n\"\n                + desc\n            )\n            print(message)\n\n            try:\n                update.message.reply_text(message, parse_mode=ParseMode.HTML)\n            except Unauthorized:\n                self.db.update_user(telegram_id=telegram_user.id, is_active=0)\n            except TelegramError:\n                # handle all other telegram related errors\n                pass", "def parse(self, response):\n        next_selector = response.xpath('//div//li/a[@id=\"quotes_content_left_lb_NextPage\"]/@href')\n        ticker = re.findall('symbol/(.+?)/', response.url)[0]\n\n        for url in next_selector.extract():\n            yield Request(url, callback = self.parse)\n        \n        links = response.xpath('//div//span[@class=\"fontS14px\"]/a/@href').extract()\n        for link in links:\n            # meta is passed along with the response into the spider\n            # allowing it to access what ticker it's using\n            yield Request(link, callback = self.parse_articles, meta = {'ticker': ticker})", "def get_activity_feed(context, term):\n    if not term:\n        raise ValueError('You have to provide a search term!')\n    url = '{}{}'.format(context.test_url, term)\n    response = requests.get(url, timeout=context.request_timeout)\n    context.response = response\n    logging.debug('Request URL: %s', response.request.url)\n    logging.debug('Request headers:\\n%s', pformat(response.request.headers))\n    logging.debug('Response headers:\\n%s', pformat(response.headers))\n    logging.debug('Response content:\\n%s', pformat(response.json()))", "def test_feed_render_url(self):\n        moksha.feed_cache = FakeCache()\n        feed = Feed()\n        rendered = feed(url='http://lewk.org/rss')\n        assert 'l e w k . 
o r g' in rendered, rendered", "def parse(self, response):\n events = response.css(\n \"div.column.scroll-item.is-one-third-tablet.is-full-mobile\"\n )\n for event in events:\n if \"Friday Nights\" in event.css(\"span.card-title.h4 span::text\").get():\n datetimes = event.css(\"div.card-text.card-subhead span::text\").get()\n start_datetime, end_datetime = self.format_datetimes(datetimes)\n if start_datetime >= datetime.now() and start_datetime < datetime.now() + timedelta(\n weeks=4\n ):\n # the link is relative\n event_link = event.css(\"div.card-image a\").attrib[\"href\"]\n full_url = self.domain + event_link\n yield SplashRequest(\n url=full_url,\n callback=self.parse_event,\n method=\"GET\",\n endpoint=\"execute\",\n args={\"wait\": 15.0, \"lua_source\": self.lua_script},\n cb_kwargs={\n \"start_datetime\": start_datetime,\n \"end_datetime\": end_datetime,\n },\n )", "def for_url(self, url):\n if url is None or url == '':\n raise BadURLException('Did you forget to provide a feed URL?')\n def txn():\n feed = RegisteredFeed.get_by_key_name(url)\n if feed is None:\n u = urlparse( url )\n q = parse_qs( u.query )\n if u.scheme != 'http' or u.netloc != 'rss.netflix.com' or 'id' not in q:\n raise BadURLException('Invalid Netflix feed URL was provided')\n feed = RegisteredFeed(\n key_name = url,\n id = q['id'][0],\n feed_type = u.path,\n rand = random.random()\n )\n feed.put()\n return feed\n feed = db.run_in_transaction(txn)\n if feed.slug is None:\n feed.slug = get_slug()\n feed.put()\n return feed", "def __init__(self, article_xml):\n self.article_xml = article_xml\n self.links = self.grab_links()\n self.first_link = self.parse_first_link()", "def main():\n # Construct the feed generator\n f = LogBufferFeed(FEED_DIR)\n f.MAX_AGE = 24 * 60 * 60 # 1 day\n f.FEED_META['feed.title'] = '%s Referrering Links' % SITE_NAME\n f.FEED_META['feed.tagline'] = \\\n 'New referring links from Apache access.log on %s' % SITE_NAME\n \n # Load up tail of access log, parse, and filter\n new_lines = bookmark_tailgrep(ACCESS_LOG, max_initial_lines=100000)\n all_events = parse_access_log(new_lines)\n events = [ x for x in all_events if event_filter(x) ]\n \n # Scan through latest events for new referrers\n referrers_seen = shelve.open(REFER_SEEN)\n new_referrers = []\n for evt in events:\n k = '%(referrer)s -> %(path)s' % evt\n if not referrers_seen.has_key(k):\n referrers_seen[k] = 1\n new_referrers.append( (evt['referrer'], evt['path']) )\n referrers_seen.close()\n \n # If there were new referrers found, insert a new entry.\n if len(new_referrers) > 0:\n \n # Build a list of hyperlinks for referrers\n links_out = [\n LINK_TMPL % {\n 'SITE_ROOT' : SITE_ROOT,\n 'referrer' : x[0],\n 'path' : x[1],\n }\n for x in new_referrers\n ]\n \n # Build a summary for this entry.\n summary = SUMMARY_TMPL % { \n 'count' : len(new_referrers), \n 'links' : \"\\n\".join(links_out)\n }\n \n # Construct and append a new entry\n entry = FeedEntryDict({\n 'title' : '%s new referrers' % len(new_referrers),\n 'link' : '',\n 'summary' : summary\n })\n f.append_entry(entry)\n\n # Output the current feed entries as both RSS and Atom\n open(FEED_NAME_FN % 'rss', 'w').write(f.scrape_rss())\n open(FEED_NAME_FN % 'atom', 'w').write(f.scrape_atom())" ]
[ "0.665676", "0.63081646", "0.6112597", "0.60895586", "0.60594904", "0.60477144", "0.60260314", "0.598809", "0.5984063", "0.59758997", "0.5936038", "0.5913608", "0.58760685", "0.5829383", "0.58091927", "0.5775324", "0.5773286", "0.5708184", "0.56792194", "0.56642944", "0.56638587", "0.56513196", "0.5647424", "0.56383437", "0.5601557", "0.55917937", "0.55866176", "0.5581136", "0.5549213", "0.5522708", "0.54775596", "0.5467682", "0.5461557", "0.5456277", "0.54296094", "0.5424006", "0.540213", "0.5394766", "0.53758824", "0.5368949", "0.5360918", "0.53481", "0.5303355", "0.5296627", "0.5281264", "0.5276535", "0.527204", "0.5251075", "0.524865", "0.52330863", "0.5225334", "0.5219443", "0.5198751", "0.5198519", "0.5196089", "0.5166735", "0.5150817", "0.51487136", "0.51430106", "0.5138407", "0.5105873", "0.5101956", "0.5101783", "0.5087487", "0.508694", "0.5082095", "0.5081625", "0.50772274", "0.50515246", "0.5048981", "0.50457066", "0.50430274", "0.5039879", "0.50369537", "0.50211155", "0.5017459", "0.50057185", "0.5001379", "0.4991074", "0.49905503", "0.49803102", "0.49797174", "0.4978431", "0.49783856", "0.49744058", "0.49729824", "0.49640775", "0.4947804", "0.49334112", "0.49318576", "0.4931665", "0.4927477", "0.4923748", "0.49127054", "0.49099547", "0.4908753", "0.49041477", "0.4903615", "0.4902919", "0.48923105" ]
0.696534
0