diff --git "a/5037.jsonl" "b/5037.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5037.jsonl"
@@ -0,0 +1,880 @@
+{"seq_id":"9477506955","text":"# Import the required module for text\r\n# to speech conversion\r\nfrom gtts import gTTS\r\n\r\n# This module is imported so that we can\r\n# play the converted audio\r\nimport os\r\n\r\n# The text that you want to convert to audio\r\n# fh = open(\"abc.txt\",\"r\")\r\n# mytext= fh.read().replace(\"\\n\",\" \")\r\n\r\nmytext='welcome to chatbot'\r\n# Language in which you want to convert\r\nlanguage = 'en'\r\nmyobj = gTTS(text=mytext, lang=language, slow=False)\r\n\r\nmyobj.save(\"welcome.mp3\")\r\n# fh.close()\r\nos.system(\"start welcome.mp3\")\r\n","repo_name":"hetvi182/chatbot","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"32295394159","text":"# Rotating Pattern Viewer\n# Circular Animation Display\n\nimport sys\nimport math\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel\nfrom PyQt5.QtGui import QPainter, QColor, QPen\nfrom PyQt5.QtCore import Qt, QTimer\n\nclass RotatingPatternWidget(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.pi = math.pi\n        self.circle = self.pi * 2\n        self.length = 12\n        self.half_length = self.length // 2\n        self.cx = 115\n        self.cy = 115\n        self.w = 90.0\n        self.h = 90.0\n        self.step = 0.0\n\n        self.setWindowTitle(\"Rotating Pattern\")\n        self.resize(240, 240)\n\n        self.timer = QTimer(self)\n        self.timer.timeout.connect(self.updateAnimation)\n        self.timer.start(100)\n\n    def updateAnimation(self):\n        self.step -= 0.09\n        self.update()\n\n    def paintEvent(self, event):\n        painter = QPainter(self)\n        painter.setRenderHint(QPainter.Antialiasing)\n\n        # Clear the background\n        painter.fillRect(self.rect(), Qt.black)\n\n        for i in range(self.length):\n            a = (i / self.length) * self.circle\n            x = self.cx + round(math.cos(a) * self.w)\n            y = self.cy + round(math.sin(a) * self.h)\n            self.drawCircle(painter, x, y, 'white')\n\n            if i < self.half_length:\n                continue\n\n            range_val = math.cos(a + self.step)\n            x = self.cx + round(math.cos(a) * (self.w - 1) * range_val)\n            y = self.cy + round(math.sin(a) * (self.h - 1) * range_val)\n            self.drawCircle(painter, x, y, 'white')\n\n    def drawCircle(self, painter, x, y, color):\n        painter.setPen(QPen(QColor(color)))\n        painter.setBrush(QColor(color))\n        painter.drawEllipse(x - 5, y - 5, 10, 10)\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    window = RotatingPatternWidget()\n    window.show()\n    sys.exit(app.exec_())\n","repo_name":"sksalahuddin2828/Python","sub_path":"01. 
illusion in Python/Circular_Animation_Display_240_pixels.py","file_name":"Circular_Animation_Display_240_pixels.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"78"} +{"seq_id":"20136790532","text":"from DS import TreeNode\n\n\nclass Solution:\n def getTargetCopy(self, original: TreeNode, cloned: TreeNode, target: TreeNode) -> TreeNode:\n \"\"\"\n Idea, record how many steps needed for original to find target, repeat the steps on cloned and return the node\n \"\"\"\n original_stack = [original]\n cloned_stack = [cloned]\n steps_needed = 0\n ori_iter = original\n cloned_iter = cloned\n while original_stack:\n ori_iter = original_stack.pop()\n if ori_iter == target:\n break\n else:\n steps_needed += 1\n if ori_iter.left:\n original_stack.append(ori_iter.left)\n if ori_iter.right:\n original_stack.append(ori_iter.right)\n while cloned_stack and steps_needed:\n cloned_iter = cloned_stack.pop()\n steps_needed -= 1\n if cloned_iter.left:\n cloned_stack.append(cloned_iter.left)\n if cloned_iter.right:\n cloned_stack.append(cloned_iter.right)\n\n return cloned_iter\n\n\n\n\n\n","repo_name":"Rocky-Zhenxiang-Fang/LeetCode","sub_path":"1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree.py","file_name":"1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12838228729","text":"import numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\n\nArea = np.array([5.9, 45, 110, 320, 529, 1386, 3324, 4260, 24520])\nSpecies = np.array([370.0, 640.0, 680.0, 640.0, 1060.0, 1200.0, 1400.0, 1450.0, 2525.0])\n\n\n# Function for fragment equation:\n# used in call to curve_fit below\n\ndef quad(A,c,z):\n f = c * (A**z)\n return f\n\nparams = [10,0.3]\n\nfit,_ = curve_fit(quad, Area, Species, p0 = params)\nprint(fit)\n\nyfit = quad(Area, fit[0], fit[1])\n\nplt.semilogx(Area,Species,'o',Area,yfit)\nplt.xlabel('Area (mi$^2$)')\nplt.ylabel('Species')\nplt.show() \n","repo_name":"jackmcgit/Chemical-and-Biomedical-Engineering-Calculations-Using-Python-Solutions","sub_path":"Chapter 7/Ch7Q6.py","file_name":"Ch7Q6.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71050081851","text":"import matplotlib.pyplot as plt\n\n\nfrom keras.preprocessing.text import Tokenizer\n\nfrom keras.preprocessing.sequence import pad_sequences\n\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\n\nfrom keras.layers import Bidirectional, GlobalMaxPool1D\n\nfrom keras.models import Model\n\nfrom keras import initializers, regularizers, constraints, optimizers, layers\n\nfrom nltk import sent_tokenize\n\nfrom nltk.corpus import stopwords\n\nfrom nltk.stem import WordNetLemmatizer\n\nfrom nltk.stem.porter import PorterStemmer\n\nfrom nltk.tokenize import RegexpTokenizer\n\nimport string\n\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\n\n\n\nimport numpy as np\n\nimport pandas as pd \n\nimport os\n\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n\n for filename in filenames:\n\n print(os.path.join(dirname, filename))\n\n\n\ntrain = pd.read_csv(\"/kaggle/input/jigsaw-toxic-comment-classification-challenge/train.csv.zip\")\n\ntest = 
pd.read_csv(\"/kaggle/input/jigsaw-toxic-comment-classification-challenge/test.csv.zip\")\n\n\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\nmaxlen = 200\n\nmax_features = 20000\n\n\n\ndef remove_punctuation(text):\n\n no_punc = \"\".join([c for c in text if c not in string.punctuation])\n\n return no_punc\n\n\n\nregtok = RegexpTokenizer(r'\\w+')\n\n\n\ndef remove_stop_words(text):\n\n output = [c for c in text if c not in stopwords.words('english')]\n\n return output\n\n\n\nlemmatizer = WordNetLemmatizer()\n\n\n\ndef word_lemmatizer(text):\n\n lem_text = \" \".join([lemmatizer.lemmatize(i) for i in text])\n\n return lem_text\n\n\n\nstemmer = PorterStemmer()\n\n\n\ndef word_stemmer(text):\n\n stem_text = \" \".join([stemmer.stem(i) for i in text])\n\n return stem_text\n\n \n\ndef preprocess_text(data):\n\n data = data.apply(lambda x: remove_punctuation(x))\n\n data = data.apply(lambda x: regtok.tokenize(x.lower()))\n\n data = data.apply(lambda x: remove_stop_words(x))\n\n data = data.apply(lambda x: word_lemmatizer(x))\n\n \n\n return data\n\n\n\ndef prepare_data_for_training(train, test, tok):\n\n train = tok.texts_to_sequences(train)\n\n test = tok.texts_to_sequences(test)\n\n word_index = tok.word_index\n\n print('Found %s unique tokens.' % len(word_index))\n\n \n\n train = pad_sequences(train, maxlen=maxlen)\n\n test = pad_sequences(test, maxlen=maxlen)\n\n\n\n return train, test\n\n\n\ndef estimator(X_t, y):\n\n inp = Input(shape=(maxlen, ))\n\n embed_size = 128\n\n x = Embedding(max_features, embed_size)(inp)\n\n x = LSTM(60, return_sequences=True,name='lstm_layer')(x)\n\n x = GlobalMaxPool1D()(x)\n\n x = Dropout(0.1)(x)\n\n x = Dense(50, activation=\"relu\")(x)\n\n x = Dropout(0.1)(x)\n\n x = Dense(6, activation=\"sigmoid\")(x)\n\n \n\n model = Model(inputs=inp, outputs=x)\n\n model.compile(loss='binary_crossentropy',\n\n optimizer='adam',\n\n metrics=['accuracy', tf.keras.metrics.CategoricalAccuracy(), tf.keras.metrics.AUC()])\n\n batch_size = 32\n\n epochs = 5\n\n history = model.fit(X_t,y, batch_size=batch_size, epochs=epochs)\n\n \n\n return model, history\n\n\n\ndef evaluate_model(model, features, predictions):\n\n results = model.evaluate(features, predictions)\n\n \n\n for i in range(len(model.metrics_names)):\n\n print(model.metrics_names[i], results[i])\n# Without Preprocessing\n\nlist_classes = [\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]\n\ny = train[list_classes].values\n\nlist_sentences_train = train[\"comment_text\"]\n\nlist_sentences_test = test[\"comment_text\"]\n\n\n\n# # SHORT DATASET FOR DEBUGGING\n\n# list_sentences_train = list_sentences_train[0:500]\n\n# y = y[0:500]\n\n\n\n# Train-Test Split\n\nX_train, X_test, y_train, y_test = train_test_split(list_sentences_train, y, test_size=0.2, random_state=42)\n\n\n\ntokenizer = Tokenizer(num_words=max_features)\n\ntokenizer.fit_on_texts(list(X_train))\n\n\n\nsentences_train, sentences_test = prepare_data_for_training(X_train, X_test, tokenizer)\n\nmodel, hist = estimator(sentences_train, y_train)\nprint('\\n# Evaluate on test data')\n\nevaluate_model(model, sentences_test, y_test)\n# With preprocessing.\n\nX_train_pp = preprocess_text(X_train)\n\nX_test_pp = preprocess_text(X_test)\n\n\n\ntokenizer_pp = Tokenizer(num_words=max_features)\n\ntokenizer_pp.fit_on_texts(list(X_train_pp))\n\n\n\nsentences_train_pp, sentences_test_pp = prepare_data_for_training(X_train_pp, X_test_pp, tokenizer_pp)\n\nmodel_pp, hist_pp = estimator(sentences_train_pp, y_train)\nprint('\\n# 
Evaluate on test data')\n\nevaluate_model(model_pp, sentences_test_pp, y_test)\ndef loadEmbeddingMatrix_wv(word_index):\n\n embed_size = 100\n\n embeddings_index = dict()\n\n \n\n for word in wv_model.wv.vocab:\n\n embeddings_index[word] = wv_model.wv[word]\n\n print('Loaded %s word vectors.' % len(embeddings_index))\n\n \n\n gc.collect()\n\n all_embs = np.stack(list(embeddings_index.values()))\n\n emb_mean,emb_std = all_embs.mean(), all_embs.std()\n\n \n\n nb_words = len(word_index) + 1\n\n embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\n\n gc.collect()\n\n \n\n embeddedCount = 0\n\n for word, i in word_index.items():\n\n i-=1\n\n embedding_vector = embeddings_index.get(word)\n\n if embedding_vector is not None: \n\n embedding_matrix[i] = embedding_vector\n\n embeddedCount+=1\n\n print('total embedded:',embeddedCount,'common words')\n\n\n\n del(embeddings_index)\n\n gc.collect()\n\n\n\n return embedding_matrix\n\n\n\ndef estimator_embedding(X_t, y, embedding_matrix, total_words):\n\n inp = Input(shape=(maxlen, )) #maxlen=200 as defined earlier\n\n x = Embedding(total_words, embedding_matrix.shape[1],weights=[embedding_matrix],trainable=False)(inp)\n\n x = Bidirectional(LSTM(60, return_sequences=True,name='lstm_layer',dropout=0.1,recurrent_dropout=0.1))(x)\n\n x = GlobalMaxPool1D()(x)\n\n x = Dropout(0.1)(x)\n\n x = Dense(50, activation=\"relu\")(x)\n\n x = Dropout(0.1)(x)\n\n x = Dense(6, activation=\"sigmoid\")(x)\n\n model = Model(inputs=inp, outputs=x)\n\n model.compile(loss='binary_crossentropy',\n\n optimizer='adam',\n\n metrics=['accuracy', tf.keras.metrics.CategoricalAccuracy(), tf.keras.metrics.AUC()])\n\n\n\n batch_size = 32\n\n epochs = 5\n\n history = model.fit(X_t,y, batch_size=batch_size, epochs=epochs)\n\n \n\n return model, history\n# With word embeddings\n\nimport gc\n\nfrom gensim.models import Word2Vec\n\nfrom nltk.tokenize import RegexpTokenizer\n\n\n\n# Training custom word2vec model\n\nlist_sentences_tok = X_train.apply(lambda x: regtok.tokenize(x))\n\nwv_model = Word2Vec(list_sentences_tok, min_count=1)\n\nembedding_matrix = loadEmbeddingMatrix_wv(tokenizer.word_index)\n\n\n\nmodel_w2v, hist_w2v = estimator_embedding(sentences_train, y_train, embedding_matrix, len(tokenizer.word_index) + 1)\nprint('\\n# Evaluate on test data')\n\nevaluate_model(model_w2v, sentences_test, y_test)\n# Training custom word2vec model - Preprocessed\n\nlist_sentences_tok_pp = X_train_pp.apply(lambda x: regtok.tokenize(x))\n\nwv_model = Word2Vec(list_sentences_tok_pp, min_count=1)\n\nembedding_matrix = loadEmbeddingMatrix_wv(tokenizer_pp.word_index)\n\n\n\nmodel_w2v, hist_w2v = estimator_embedding(sentences_train_pp, y_train, embedding_matrix, len(tokenizer_pp.word_index) + 1)\nprint('\\n# Evaluate on test data')\n\nevaluate_model(model_w2v, sentences_test_pp, y_test)","repo_name":"aorursy/new-nb-1","sub_path":"arunabh98_nlp-project-final.py","file_name":"arunabh98_nlp-project-final.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25163781901","text":"'''\n 알고리즘:\n 1. 체육복을 잃어버린 학생에 대해서 여분이 있으면 자기가 먼저 입는다\n 2. 
만약 앞, 뒤에 여분이 있으면 빌리면 되는데, 이 때 앞 뒤의 친구가 체육복을 잃어버리지 않았어야 한다\n 틀렸던 부분: 앞 뒤의 친구가 체육복을 잃어버리지 않았어야 한다는 조건을 빼먹었다\n'''\ndef solution(n, lost, reserve):\n answer = 0\n students = [1] * (n+1)\n\n for i in lost: # 잃어버린 학생에 대해 체육복 0으로 설정\n students[i] = 0\n \n for i in range(1, n+1):\n if students[i] == 0: \n if i in reserve: # 체육복을 잃어버렸는데, 내가 여분이 있는 경우\n students[i] = 1\n reserve.remove(i)\n else:\n if i-1 in reserve and students[i-1] == 1: # 앞, 뒤 친구가 체육복을 잃어버리지 않았어야 함\n students[i] = 1\n reserve.remove(i-1)\n elif i+1 in reserve and students[i+1] == 1:\n students[i] = 1\n reserve.remove(i+1)\n\n for i in range(1, n+1):\n if students[i] == 1:\n answer += 1\n\n return answer\n","repo_name":"yujinHan97/Algorithm_Study","sub_path":"체육복.py","file_name":"체육복.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"795249501","text":"import os\nimport sys\nimport copy\nimport argparse\nimport pickle\nimport random\nimport yaml\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nfrom collections import defaultdict\nfrom tqdm.auto import tqdm\nfrom easydict import EasyDict\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem.rdchem import HybridizationType, BondType\nimport torch\nfrom torch_geometric.data import Data, Dataset\nfrom torch_geometric.data import Batch\nfrom torch_geometric.transforms import Compose\nfrom models.epsnet import get_model\nfrom utils.transforms import AddHigherOrderEdges\n\nBOND_TYPES = {t: i for i, t in enumerate(BondType.names.values())}\n\ndef smiles_to_data(smiles):\n if '.' in smiles:\n return None\n else:\n try:\n mol = Chem.AddHs(Chem.MolFromSmiles(smiles))\n except:\n return None\n\n N = mol.GetNumAtoms()\n pos = torch.rand((N, 3), dtype=torch.float32)\n\n atomic_number = []\n aromatic = []\n sp = []\n sp2 = []\n sp3 = []\n\n for atom in mol.GetAtoms():\n atomic_number.append(atom.GetAtomicNum())\n aromatic.append(1 if atom.GetIsAromatic() else 0)\n hybridization = atom.GetHybridization()\n sp.append(1 if hybridization == HybridizationType.SP else 0)\n sp2.append(1 if hybridization == HybridizationType.SP2 else 0)\n sp3.append(1 if hybridization == HybridizationType.SP3 else 0)\n\n z = torch.tensor(atomic_number, dtype=torch.long)\n\n row, col, edge_type = [], [], []\n for bond in mol.GetBonds():\n start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()\n row += [start, end]\n col += [end, start]\n edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]\n\n edge_index = torch.tensor([row, col], dtype=torch.long)\n edge_type = torch.tensor(edge_type)\n\n perm = (edge_index[0] * N + edge_index[1]).argsort()\n edge_index = edge_index[:, perm]\n edge_type = edge_type[perm]\n row, col = edge_index\n\n data = Data(atom_type=z, pos=pos, edge_index=edge_index, edge_type=edge_type, rdmol=copy.deepcopy(mol), smiles=smiles)\n\n return data\n\nclass ConformationDataset(Dataset):\n def __init__(self, data, transform=None):\n super().__init__()\n self.data = data\n self.transform = transform\n self.atom_types = self._atom_types()\n self.edge_types = self._edge_types()\n\n def __getitem__(self, idx):\n\n data = self.data[idx].clone()\n if self.transform is not None:\n data = self.transform(data)\n return data\n\n def __len__(self):\n return len(self.data)\n\n def _atom_types(self):\n \"\"\"All atom types.\"\"\"\n atom_types = set()\n for graph in self.data:\n atom_types.update(graph.atom_type.tolist())\n return sorted(atom_types)\n\n def _edge_types(self):\n 
\"\"\"All edge types.\"\"\"\n edge_types = set()\n for graph in self.data:\n edge_types.update(graph.edge_type.tolist())\n return sorted(edge_types)\n\nclass PackedConformationDataset(ConformationDataset):\n def __init__(self, path, transform=None):\n super().__init__(path, transform)\n self._pack_data_by_mol()\n\n def _pack_data_by_mol(self):\n \"\"\"\n pack confs with same mol into a single data object\n \"\"\"\n self._packed_data = defaultdict(list)\n if hasattr(self.data, 'idx'):\n for i in range(len(self.data)):\n self._packed_data[self.data[i].idx.item()].append(self.data[i])\n else:\n for i in range(len(self.data)):\n self._packed_data[self.data[i].smiles].append(self.data[i])\n print('[Packed] %d Molecules, %d Conformations.' % (len(self._packed_data), len(self.data)))\n\n new_data = []\n # logic\n # save graph structure for each mol once, but store all confs \n cnt = 0\n for k, v in self._packed_data.items():\n data = copy.deepcopy(v[0])\n all_pos = []\n for i in range(len(v)):\n all_pos.append(v[i].pos)\n data.pos_ref = torch.cat(all_pos, 0) # (num_conf*num_node, 3)\n data.num_pos_ref = torch.tensor([len(all_pos)], dtype=torch.long)\n #del data.pos\n\n if hasattr(data, 'totalenergy'):\n del data.totalenergy\n if hasattr(data, 'boltzmannweight'):\n del data.boltzmannweight\n new_data.append(data)\n self.new_data = new_data\n\n def __getitem__(self, idx):\n\n data = self.new_data[idx].clone()\n if self.transform is not None:\n data = self.transform(data)\n return data\n\n def __len__(self):\n return len(self.new_data)\n\nclass CountNodesPerGraph(object):\n def __init__(self) -> None:\n super().__init__()\n\n def __call__(self, data):\n data.num_nodes_per_graph = torch.LongTensor([data.num_nodes])\n return data\n\ndef seed_all(seed):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\ndef repeat_data(data, num_repeat):\n datas = [data.clone() for i in range(num_repeat)]\n return Batch.from_data_list(datas)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('ckpt', type=str, help='path for loading the checkpoint')\n parser.add_argument('--save_traj', action='store_true', default=False, help='whether store the whole trajectory for sampling')\n parser.add_argument('--input_file', type=str, default=None)\n parser.add_argument('--out', type=str, default=None)\n parser.add_argument('--device', type=str, default='cuda')\n parser.add_argument('--clip', type=float, default=1000.0)\n parser.add_argument('--n_steps', type=int, default=5000, help='sampling num steps; for DSM framework, this means num steps for each noise scale')\n parser.add_argument('--global_start_sigma', type=float, default=0.5, help='enable global gradients only when noise is low')\n parser.add_argument('--w_global', type=float, default=1.0, help='weight for global gradients')\n # Parameters for DDPM\n parser.add_argument('--sampling_type', type=str, default='ld', help='generalized, ddpm_noisy, ld: sampling method for DDIM, DDPM or Langevin Dynamics')\n parser.add_argument('--eta', type=float, default=1.0, help='weight for DDIM and DDPM: 0->DDIM, 1->DDPM')\n args = parser.parse_args()\n\n # Load checkpoint\n ckpt = torch.load(args.ckpt)\n config_path = glob(os.path.join(os.path.dirname(os.path.dirname(args.ckpt)), '*.yml'))[0]\n with open(config_path, 'r') as f:\n config = EasyDict(yaml.safe_load(f))\n seed_all(config.train.seed)\n\n # Datasets and loaders\n print('Loading datasets...')\n transforms = Compose([\n CountNodesPerGraph(),\n 
AddHigherOrderEdges(order=config.model.edge_order), # Offline edge augmentation\n ])\n\n inputs = pd.read_csv(args.input_file)\n all_data = []\n #for smiles, title, num_confs in tqdm(inputs.values):\n mol_index=1\n for smiles, title, num_confs in tqdm(inputs.values):\n data = smiles_to_data(smiles)\n #data['title'] = title\n data['title'] = f'mol_{mol_index}'\n data['num_confs'] = num_confs\n all_data.append(data)\n mol_index += 1\n\n dataset = PackedConformationDataset(all_data, transform=transforms)\n\n # Model\n print('Loading model...')\n model = get_model(ckpt['config'].model).to(args.device)\n model.load_state_dict(ckpt['model'])\n results = []\n\n for i, data in enumerate(tqdm(dataset)):\n if data.num_confs < 1000:\n num_samples = data.num_confs * 2\n \n if os.path.exists(f'geodiff_{data.title}_conf_{data.num_confs}.pkl'):\n continue\n\n data_input = data.clone()\n data_input['pos_ref'] = None\n batch = repeat_data(data_input, num_samples).to(args.device)\n \n clip_local = None\n for _ in range(2): # Maximum number of retry\n try:\n pos_init = torch.randn(batch.num_nodes, 3).to(args.device)\n pos_gen, pos_gen_traj = model.langevin_dynamics_sample(\n atom_type=batch.atom_type,\n pos_init=pos_init,\n bond_index=batch.edge_index,\n bond_type=batch.edge_type,\n batch=batch.batch,\n num_graphs=batch.num_graphs,\n extend_order=False, # Done in transforms.\n n_steps=args.n_steps,\n step_lr=1e-6,\n w_global=args.w_global,\n global_start_sigma=args.global_start_sigma,\n clip=args.clip,\n clip_local=clip_local,\n sampling_type=args.sampling_type,\n eta=args.eta\n )\n pos_gen = pos_gen.cpu()\n if args.save_traj:\n data.pos_gen = torch.stack(pos_gen_traj)\n else:\n data.pos_gen = pos_gen\n results.append(data)\n\n save_path = f'geodiff_{data.title}_conf_{data.num_confs}.pkl'\n with open(save_path, 'wb') as f:\n pickle.dump([data], f)\n\n break # No errors occured, break the retry loop\n except FloatingPointError:\n clip_local = 20\n save_path= f'{args.out}'\n with open(save_path, 'wb') as f:\n pickle.dump(results, f)\n","repo_name":"wangzhehyd/fastsmcg","sub_path":"dataset-2/script/geodiff/geodiff_gen.py","file_name":"geodiff_gen.py","file_ext":"py","file_size_in_byte":9445,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"74080251451","text":"'''\nUses REST API for FFPRED: Function Prediction\nResults on mail\n'''\n\nURL = 'http://bioinf.cs.ucl.ac.uk/psipred/api/submission.json'\ndef start_job(input_file, sub_name, email, selection = 'human'): \n payload = {'input_data': input_file}\n data = {'job': 'ffpred',\n 'submission_name': sub_name,\n 'email': email,\n 'ffpred_selection': selection}\n r = requests.post(URL, data=data, files=payload)\n print(r.text)\n\n#NOTE: Once posted you will need to use the GET submission endpoint\n#to retrieve your results. 
Polling the server about once every 2 or 5 mins\n#should be sufficient.\n#\n# Full details at http://bioinf.cs.ucl.ac.uk/web_servers/web_services/\n\ndef process(inp):\n '''\n Helper function for getting sequence from fasta format\n '''\n inp = inp.strip()\n job_name = inp.split('|')[0][1:]\n inp = \"\".join(inp.split('\\n')[1:])\n return inp, job_name\n\n","repo_name":"cn-mm/HubGeneAnalysis","sub_path":"scripts/ffpred.py","file_name":"ffpred.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32012973885","text":"import os\nimport sys \n\n# Fix imports when testing this file directly\nif __name__ == '__main__':\n sys.path.append( os.path.join(os.path.dirname(__file__), \"../..\"))\n\nfrom terminatorlib.config import Config\nimport terminatorlib.plugin as plugin\nfrom terminatorlib.util import get_config_dir, err, dbg, gerr\nfrom terminatorlib.terminator import Terminator\nfrom terminatorlib import util\n\n\n# AVAILABLE must contain a list of all the classes that you want exposed\nAVAILABLE = ['SaveLastSessionLayout']\n\nclass SaveLastSessionLayout(plugin.Plugin):\n capabilities = ['session']\n\n config = None\n conf_file = os.path.join(get_config_dir(),\"save_last_session_cwd\")\n conf_sessions = []\n emit_close_count = 0\n\n def __init__(self):\n dbg(\"SaveLastSessionLayout Init\")\n self.connect_signals()\n\n #not used, but capability can be used to load automatically\n def load_session_layout(self, debugtab=False, widget=None, cwd=None, metadata=None, profile=None):\n dbg(\"SaveLastSessionLayout load layout\")\n terminator = Terminator()\n util.spawn_new_terminator(terminator.origcwd, ['-u', '-l', 'SaveLastSessionLayout'])\n\n def save_session_layout(self, debugtab=False, widget=None, cwd=None, metadata=None, profile=None):\n\n config = Config()\n terminator = Terminator()\n current_layout = terminator.describe_layout(save_cwd = True)\n dbg(\"SaveLastSessionLayout: save layout(%s)\" % current_layout)\n res = config.replace_layout(\"SaveLastSessionLayout\", current_layout)\n if (not res):\n r = config.add_layout(\"SaveLastSessionLayout\", current_layout)\n config.save()\n return True\n \n def connect_signals(self):\n dbg(\"SaveLastSessionLayout connect_signals\")\n n = 0\n for term in Terminator().terminals:\n dbg(\"SaveLastSessionLayout connect_signals to term num:(%d)\" % n)\n n = n + 1\n # event close-term works, and does not require an additional\n # event but has a race condition when\n # there is only one terminal we are unable to get the\n # describe_layout section\n\n #term.connect('close-term', self.close, None)\n term.connect('pre-close-term', self.close, None)\n\n #Can connect signal from terminal\n #term.connect('load-layout', self.load_session_layout, None)\n\n def close(self, term, event, arg1 = None):\n if (self.emit_close_count == 0):\n self.emit_close_count = self.emit_close_count + 1\n self.save_session_layout(\"\", \"\")\n\n","repo_name":"gnome-terminator/terminator","sub_path":"terminatorlib/plugins/save_last_session_layout.py","file_name":"save_last_session_layout.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":1800,"dataset":"github-code","pt":"78"} +{"seq_id":"72922847291","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 10 14:30:08 2015\n@author: BD\n\nThe main script to run the Online Tweets App.\n\"\"\"\n\nfrom flask import Flask, render_template, request, redirect\napp = 
Flask(__name__)\n\nimport scrape # My own\nimport markov # My own\n\napp.vars = {}\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef redirecting():\n return redirect(\"/main\")\n \n@app.route(\"/main\", methods=[\"GET\", \"POST\"])\ndef main():\n if request.method == \"GET\":\n return render_template(\"form.html\")\n else:\n # request was a POST\n app.vars[\"a1_first_name\"] = request.form['a1_first_name']\n app.vars[\"a1_last_name\"] = request.form['a1_last_name']\n app.vars[\"a2_first_name\"] = request.form['a2_first_name']\n app.vars[\"a2_last_name\"] = request.form['a2_last_name'] # In unicode\n return redirect(\"/graph\") \n\n@app.route(\"/graph\", methods=[\"GET\", \"POST\"])\ndef graph(): \n \"\"\"GET TITLES\n \"\"\"\n A1F = app.vars[\"a1_first_name\"]\n A1L = app.vars[\"a1_last_name\"]\n A2F = app.vars[\"a2_first_name\"]\n A2L = app.vars[\"a2_last_name\"]\n \n titles_A1 = scrape.get_titles_by_author(A1L + \"+\" + A2F) # List of strings\n titles_A2 = scrape.get_titles_by_author(A2L + \"+\" + A2F) # List of strings\n titles_combined = titles_A1 + titles_A2 \n titles_generated = markov.markov_function(titles_combined) # List of strings \n \n \"\"\"RENDER TEMPLATE\n \"\"\"\n def show_x(my_list):\n length = 3\n if len(my_list) <= length:\n return my_list\n else:\n return my_list[0:length] \n \n titles_A1 = show_x(titles_A1)\n titles_A2 = show_x(titles_A2)\n \n return render_template(\"graph.html\",\n titles1=titles_A1,\n titles2=titles_A2,\n titles3=titles_generated,\n a1first=A1F, a1last=A1L,\n a2first=A2F, a2last=A2L)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"BarboraDoslikova/MC-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30031487602","text":"import fnmatch, os, shutil\n\nDEVENV_PATH = f'c:\\\\Programs\\\\VS2019\\\\Common7\\\\IDE\\\\devenv.exe'\n\ndef clean_project(dir):\n [shutil.rmtree(f'{dir}/{x}/') for x in ['obj', 'bin'] if os.path.isdir(f'{dir}/{x}')]\n\ndef copy_and_overwrite(from_path, to_path):\n if os.path.exists(to_path):\n shutil.rmtree(to_path)\n shutil.copytree(from_path, to_path)\n\ndef fix_harmony_namespace(project):\n for root, dirnames, filenames in os.walk(project):\n for filename in fnmatch.filter(filenames, '*.cs'):\n cs = os.path.join(root, filename)\n with open(cs, 'r+', encoding='utf-8') as sln:\n text: str = sln.read()\n if 'using Harmony;' in text:\n sln.seek(0)\n text = text.replace('using Harmony;', 'using HarmonyLib;')\n sln.write(text)\ndef fix_csproj(fn,replacements = None):\n with open(fn, 'r+', encoding='utf-8') as sln:\n text: str = sln.read()\n sln.seek(0)\n text = text.replace('..\\\\packages\\\\RW10\\\\', '..\\\\packages\\\\RW11\\\\')\\\n .replace('UnityEngine', 'UnityEngine.CoreModule')\\\n .replace('v3.5', 'v4.7.2')\n if replacements is not None:\n for r in replacements:\n text = text.replace(r['from'], r['to'])\n sln.write(text)\n\nif __name__ == '__main__':\n inp = input('\\ntake action:\\n1 - create RW 1.1 solution\\n2 - create RW 1.1 solution and compile\\n')\n\n [clean_project(dir) for dir in ['DebugLibrary', 'RimHelperProxyMod', 'RimHelper', 'IPCInterface']]\n copy_and_overwrite('RimHelperProxyMod', 'RimHelperProxyMod-1.1')\n copy_and_overwrite('DebugLibrary', 'DebugLibrary-1.1')\n shutil.copyfile('RimHelper.sln', 'RimHelper-1.1.sln')\n # fix references\n fix_harmony_namespace('RimHelperProxyMod-1.1')\n fix_harmony_namespace('DebugLibrary-1.1')\n 
fix_csproj('RimHelperProxyMod-1.1/RimHelperProxyMod.csproj', [\n {'from': '\\\\RimHelperProxyMod\\\\Assemblies\\\\', 'to': '\\\\RimHelperProxyMod\\\\1.1\\\\Assemblies\\\\'}\n ])\n fix_csproj('DebugLibrary-1.1/DebugLibrary.csproj')\n with open('RimHelperProxyMod-1.1/Harmony/HM.cs', 'r+', encoding='utf-8') as sln:\n text: str = sln.read()\n sln.seek(0)\n text = text.replace('HarmonyInstance.Create', 'new HarmonyLib.Harmony')\\\n .replace('HarmonyInstance', 'HarmonyLib.Harmony')\n sln.write(text)\n # fix solution\n with open('RimHelper-1.1.sln', 'r+', encoding='utf-8') as sln:\n text: str = sln.read()\n sln.seek(0)\n text = text.replace('\"RimHelperProxyMod\\\\RimHelperProxyMod.csproj\"', '\"RimHelperProxyMod-1.1\\\\RimHelperProxyMod.csproj\"')\\\n .replace('\"DebugLibrary\\\\DebugLibrary.csproj\"', '\"DebugLibrary-1.1\\\\DebugLibrary.csproj\"')\n sln.write(text)\n\n if inp == '2':\n if os.path.isfile(DEVENV_PATH):\n os.system(f'\"{DEVENV_PATH}\" /build Release RimHelper.sln')\n os.system(f'\"{DEVENV_PATH}\" /build Release RimHelper-1.1.sln')\n [clean_project(dir) for dir in ['DebugLibrary', 'RimHelperProxyMod', 'RimHelper', 'IPCInterface', 'DebugLibrary-1.1', 'RimHelperProxyMod-1.1']]\n os.rename('_Release_/RimHelperProxyMod/Assemblies/SharedMemory.dll', '_Release_/RimHelperProxyMod/Assemblies/$haredMemory.dll')\n os.rename('_Release_/RimHelperProxyMod/1.1/Assemblies/SharedMemory.dll', '_Release_/RimHelperProxyMod/1.1/Assemblies/$haredMemory.dll')\n else:\n print(f'BAD DEVENV PATH: {DEVENV_PATH}')\n\n input('Press any key')\n\n","repo_name":"bananasss00/RimHelper","sub_path":"compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"10171247124","text":"# Importaciones\nfrom api.db.db import mysql, DBError\nfrom flask import jsonify\n\n# Clase Producto\nclass Producto():\n # Esquema para validar los datos \n schema = {\n \"id_producto\": int,\n \"unidad_medida\": str,\n \"descripcion\": str,\n \"cantidad\": int,\n \"precio\": float,\n \"tipo\": str,\n \"activo\": int\n }\n \n # Metodo para validar el esquema de los datos\n def check_data_schema(data):\n if data == None or type(data) != dict:\n return False\n # check if data contains all keys of schema\n for key in Producto.schema:\n if key not in data:\n return False\n # check if data[key] has the same type as schema[key]\n if type(data[key]) != Producto.schema[key]:\n return False\n return True\n\n # Constructor de la clase\n def __init__(self, row):\n self._id_producto = row[0]\n self._unidad_medida = row[1]\n self._descripcion = row[2]\n self._cantidad = row[3]\n self._precio = row[4]\n self._tipo = row[5]\n self._activo = row[6]\n\n # Metodo para convertir los datos en formato JSON\n def to_json(self):\n return {\n \"id_producto\": self._id_producto,\n \"unidad_medida\": self._unidad_medida,\n \"descripcion\": self._descripcion,\n \"cantidad\": self._cantidad,\n \"precio\": self._precio,\n \"tipo\": self._tipo,\n \"activo\": self._activo\n }\n \n # Metodo para preguntar si existe un producto en la BD\n def product_exists(descrip):\n cur = mysql.connection.cursor()\n \n cur.execute('SELECT * FROM producto WHERE descripcion = %s', (descrip))\n cur.fetchall()\n \n return cur.rowcount > 0\n\n # Metodo para la creación de un Producto en la BD \n def create_product(data):\n if Producto.check_data_schema(data):\n # check if product already exists\n if Producto.product_exists(data[\"descripcion\"]):\n raise 
DBError(\"Error creating product - El producto ya existe\")\n cur = mysql.connection.cursor()\n cur.execute('INSERT INTO producto (unidad_medida, descripcion, cantidad, precio,tipo, activo) VALUES (%s, %s, %s, %s, %s, %s )', (data[\"unidad_medida\"], data[\"descripcion\"], data[\"cantidad\"], data[\"precio\"], data[\"tipo\"], data[\"activo\"]))\n mysql.connection.commit()\n if cur.rowcount > 0:\n # get the id of the last inserted row\n cur.execute('SELECT LAST_INSERT_ID()')\n res = cur.fetchall()\n id = res[0][0]\n return Producto((id, data[\"unidad_medida\"], data[\"descripcion\"], data[\"cantidad\"], data[\"precio\"], data[\"tipo\"], data[\"activo\"])).to_json()\n raise DBError(\"Error creating product - no row inserted\")\n raise TypeError(\"Error creating product - wrong data schema\")\n \n # Metodo para la actualización de un Producto en la BD\n def update_product(id, data):\n if Producto.check_data_schema(data):\n cur = mysql.connection.cursor()\n cur.execute('UPDATE producto SET unidad_medida = %s, descripcion = %s, cantidad = %s, precio = %s, tipo= %s, activo = %s WHERE id_producto = %s', (data[\"unidad_medida\"], data[\"descripcion\"], data[\"cantidad\"], data[\"precio\"], data[\"tipo\"], data[\"activo\"], id))\n mysql.connection.commit()\n if cur.rowcount > 0:\n return Producto.get_product_by_id(id)\n raise DBError(\"Error updating product - no row updated\")\n raise TypeError(\"Error updating product - wrong data schema\")\n \n # Metodo para obtener un producto por su id \n def get_product_by_id(id):\n cur = mysql.connection.cursor()\n cur.execute('SELECT * FROM producto WHERE id_producto = {0}'.format(id))\n data = cur.fetchall()\n if cur.rowcount > 0:\n #print(\"data[0]: \", data[0])\n return Producto(data[0]).to_json()\n raise DBError(\"Error getting product by id - no row found\")\n \n # Metodo para eliminar a un producto de la BD\n def delete_product(id):\n data = Producto.get_product_by_id(id)\n cur = mysql.connection.cursor()\n cur.execute('UPDATE producto SET activo=0 WHERE id_producto = {0}'.format(id))\n mysql.connection.commit()\n if cur.rowcount > 0:\n return Producto.get_product_by_id(id)\n raise DBError(\"Error deleting product - no row updated\")\n ","repo_name":"IlOctavio/ProyectoG14","sub_path":"backend/api/models/producto.py","file_name":"producto.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20941095155","text":"#код))\n#впервые работаю с состояниями. 
посмотрим, что выйдет\nfrom aiogram import types, executor, Bot, Dispatcher\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import StatesGroup, State\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton\nfrom config import TOKEN, ADMIN_ID\nfrom sqlite import db_start, create_profile, edit_profile\n\nstorage = MemoryStorage\nbot = Bot(TOKEN)\ndp = Dispatcher(bot, storage=MemoryStorage())\n\n\nclass ApplicationStatesGroup(StatesGroup):\n\n name = State()\n tel = State()\n address = State()\n description = State()\n photo = State()\n\n\ndef get_kb(my_list) -> ReplyKeyboardMarkup:\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n for i in my_list:\n kb.add(KeyboardButton(i))\n return kb\n\n\nasync def on_startup(_):\n await db_start()\n\n\n@dp.message_handler(commands=['cancel'], state='*')\nasync def ap_cancel(message: types.Message, state: FSMContext):\n if state is None:\n return\n\n await state.finish()\n await message.answer('Вы прервали создание заявки',\n reply_markup=get_kb(['/create_application']))\n\n\n@dp.message_handler(commands=['start'])\nasync def ha_start(message: types.Message) -> None:\n await message.answer('Добро пожаловать! Чтобы отправить заявку, нажми /create_application ниже',\n reply_markup=get_kb(['/create_application']))\n await create_profile(user_id=message.from_user.id)\n\n\n@dp.message_handler(commands=['create_application'])\nasync def ha_create_application(message: types.Message) -> None:\n await message.answer('Для начала, напиши мне, как я могу к тебе обращаться',\n reply_markup=get_kb(['/cancel']))\n await ApplicationStatesGroup.name.set()\n\n\n@dp.message_handler(state=ApplicationStatesGroup.name)\nasync def load_name(message: types.Message, state: FSMContext) -> None:\n async with state.proxy() as data:\n data['name'] = message.text\n await message.answer('Напиши номер, по которому следует звонить в фомате: +7**********')\n await ApplicationStatesGroup.next()\n\n\n@dp.message_handler(lambda message: (len(message.text) != 12) or (message.text[0:2] != '+7') or\n (not message.text[1:].isdigit()),\n state=ApplicationStatesGroup.tel)\nasync def check_tel(message: types.Message):\n await message.reply('Это не телефон!')\n\n\n@dp.message_handler(state=ApplicationStatesGroup.tel)\nasync def load_tel(message: types.Message, state: FSMContext) -> None:\n async with state.proxy() as data:\n data['tel'] = message.text\n await message.answer('Напиши адрес, где случились проблемы в формате:\\n' +\n 'Город, улица, дом, квартира')\n await ApplicationStatesGroup.next()\n\n\n@dp.message_handler(state=ApplicationStatesGroup.address)\nasync def load_address(message: types.Message, state: FSMContext) -> None:\n async with state.proxy() as data:\n data['address'] = message.text\n await message.answer('Напиши кратко, в одно сообщение, что случилось')\n await ApplicationStatesGroup.next()\n\n\n@dp.message_handler(state=ApplicationStatesGroup.description)\nasync def load_description(message: types.Message, state: FSMContext) -> None:\n async with state.proxy() as data:\n data['description'] = message.text\n await message.answer('Добавь фотографию этикетки с серийным номером техники.' 
+\n                         'Если таковой нет, сфотографируй целиком сломавшийся прибор')\n    await ApplicationStatesGroup.next()\n\n\n@dp.message_handler(lambda message: not message.photo, state=ApplicationStatesGroup.photo)\nasync def check_photo(message: types.Message):\n    await message.reply('Это не фото!')\n\n\n@dp.message_handler(content_types=['photo'], state=ApplicationStatesGroup.photo)\nasync def load_photo(message: types.Message, state: FSMContext) -> None:\n    async with state.proxy() as data:\n        data['photo'] = message.photo[0].file_id\n        await bot.send_photo(chat_id=ADMIN_ID,\n                             photo=data['photo'],\n                             caption=f\"Новая заявка!\\nИмя: {data['name']}\\nТелефон: {data['tel']}\\n\" +\n                                     f\"Адрес: {data['address']}\\nОписание: {data['description']}\")\n        await edit_profile(state, user_id=message.from_user.id)\n        await message.answer('Ваша заявка принята, ожидайте ответ!')\n        await state.finish()\n\n\nif __name__ == \"__main__\":\n    executor.start_polling(dp, skip_updates=True, on_startup=on_startup)\n\n\n","repo_name":"soltiste/bot-for-your-home-appliances","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17163807907","text":"import sys\nn = int(input())\nst=[int(sys.stdin.readline()) for _ in range(n)]\nst.reverse()\ncnt = 1\ni = 0\nwhile i < n-2:\n    if st[i+1] > st[i+2]:\n        cnt +=1\n        st[i+1] += st[i]\n    elif st[i+1] <= st[i+2]:\n        st[i+2]+=st[i]\n        i+=1\n        cnt = 1\n    i+=1\nif cnt==2:\n    print(st[n-2])\nelse:\n    print(st[n-2]+st[n-1])\n","repo_name":"twodf78/coding_test","sub_path":"DynamicProgramming/2579.py","file_name":"2579.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"45656481722","text":"#!/usr/bin/env python3\n\"\"\"\n7-bi_output.py\nModule that defines a class called BidirectionalCell\n\"\"\"\n\nimport numpy as np\n\n\nclass BidirectionalCell:\n    \"\"\"\n    Class BidirectionalCell\n    \"\"\"\n    def __init__(self, i, h, o):\n        \"\"\"\n        Class Constructor\n\n        Args:\n            i: Dimensionality of the data\n            h: Dimensionality of the hidden state\n            o: Dimensionality of the outputs\n        \"\"\"\n        self.Whf = np.random.normal(size=(i + h, h))\n        self.Whb = np.random.normal(size=(i + h, h))\n        self.Wy = np.random.normal(size=(2 * h, o))\n        self.bhf = np.zeros((1, h))\n        self.bhb = np.zeros((1, h))\n        self.by = np.zeros((1, o))\n\n    def forward(self, h_prev, x_t):\n        \"\"\"\n        Function that performs forward propagation for one time step\n\n        Args:\n            x_t: numpy.ndarray of shape (m, i) that contains the data\n            input for the cell where m is the batch size for the data\n            h_prev: a numpy.ndarray of shape (m, h) containing the\n            previous hidden state\n\n        Returns:\n            h_next the next hidden state\n        \"\"\"\n        x = np.concatenate((h_prev, x_t), axis=1)\n        h_next = np.tanh(np.matmul(x, self.Whf) + self.bhf)\n        return h_next\n\n    def backward(self, h_next, x_t):\n        \"\"\"\n        Function that performs backward propagation for one time step\n\n        Args:\n            x_t: numpy.ndarray of shape (m, i) that contains the data\n            input for the cell where m is the batch size for the data\n            h_next: a numpy.ndarray of shape (m, h) containing the\n            next hidden state\n\n        Returns:\n            h_pev the previous hidden state\n        \"\"\"\n        x = np.concatenate((h_next, x_t), axis=1)\n        h_pev = np.tanh(np.matmul(x, self.Whb) + self.bhb)\n        return h_pev\n\n    def output(self, H):\n        \"\"\"\n        Function that calculates all outputs for the RNN\n\n        Args:\n            H: numpy.ndarray of shape (t, m, 2 * h) that 
contains the\n concatenated hidden states from both directions, excluding their\n initialized states\n t is the number of time steps\n m is the batch size for the data\n h is the dimensionality of the hidden states\n\n Returns:\n Y the outputs\n \"\"\"\n t, m, h = H.shape\n o = self.by.shape[-1]\n Y = np.zeros((t, m, o))\n for i in range(t):\n Y[i] = np.matmul(H[i], self.Wy) + self.by\n Y[i] = np.exp(Y[i]) / (np.sum(np.exp(Y[i]), axis=1, keepdims=True))\n return Y\n","repo_name":"HOL-BilalELJAMAL/holbertonschool-machine_learning","sub_path":"supervised_learning/0x0D-RNNs/7-bi_output.py","file_name":"7-bi_output.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24439558512","text":"\"\"\"\nClass for streaming MPEG1 video with cameras connected to \nLima Tango Device Servers\n\nExample configuration:\n\n\n Prosilica 1350C\n id23/limaccd/minidiff\n id23/limabeamviewer/minidiff\n 0.05\n RGB24\n\n\"\"\"\nimport logging\nimport os\nimport subprocess\nimport uuid\nimport psutil\n\nfrom HardwareRepository.HardwareObjects.TangoLimaVideo import TangoLimaVideo\nfrom HardwareRepository.utils.video_utils import streaming_processes\n\n\nclass TangoLimaMpegVideo(TangoLimaVideo):\n def __init__(self, name):\n super(TangoLimaMpegVideo, self).__init__(name)\n\n self._video_stream_process = None\n self._current_stream_size = \"-1, -1\"\n self._stream_script_path = \"\"\n self.stream_hash = str(uuid.uuid1())\n self.video_device = None\n self._p = None\n self._quality_str = \"High\"\n self._QUALITY_STR_TO_INT = {\n \"High\": 4,\n \"Medium\": 10,\n \"Low\": 20,\n \"Adaptive\": -1\n }\n\n def init(self):\n super().init()\n self._debug = self.get_property(\"debug\", False)\n self._loopback_device = self.get_property(\"loopback_device\", \"\")\n self._quality = self.get_property(\"compression\", 10)\n self._mpeg_scale = self.get_property(\"mpeg_scale\", 1)\n def _encoder_friendly_size(self, w, h):\n # Some video decoders have difficulties to decode videos with odd image dimensions\n # (JSMPEG beeing one of them) so we make sure that the size is even\n w = w if w % 2 == 0 else w + 1\n h = h if h % 2 == 0 else h + 1\n\n return w, h\n\n def get_quality(self):\n return self._quality_str\n\n def set_quality(self, q):\n self._quality_str = q\n self._quality = self._QUALITY_STR_TO_INT[q]\n self.restart_streaming()\n\n def set_stream_size(self, w, h):\n w, h = self._encoder_friendly_size(w, h)\n self._current_stream_size = \"%s,%s\" % (int(w), int(h))\n\n def get_stream_size(self):\n current_size = self._current_stream_size.split(\",\")\n scale = float(current_size[0]) / self.get_width()\n return current_size + list((scale,))\n\n def get_quality_options(self):\n return list(self._QUALITY_STR_TO_INT.keys())\n\n def get_available_stream_sizes(self):\n try:\n w, h = self._encoder_friendly_size(self.get_width(), self.get_height())\n # Calculate half the size and quarter of the size if MPEG streaming is used\n # otherwise just return the orignal size.\n if self._video_stream_process:\n video_sizes = [(w, h), (w / 2, h / 2), (w / 4, h / 4)]\n else:\n video_sizes = [(w, h)]\n\n except (ValueError, AttributeError):\n video_sizes = []\n\n return video_sizes\n\n def start_video_stream_process(self):\n if (\n not self._video_stream_process\n or self._video_stream_process.poll() is not None\n ):\n python_executable = os.sep.join(\n os.path.dirname(os.__file__).split(os.sep)[:-2] + [\"bin\", \"python\"]\n )\n\n 
self._video_stream_process = subprocess.Popen(\n [\n python_executable,\n streaming_processes.__file__,\n self.get_property(\"tangoname\"),\n \"%s, %s\" % (self.get_width(), self.get_height()),\n self._current_stream_size,\n self.stream_hash,\n self.video_mode,\n self._loopback_device,\n str(self._debug),\n str(self._sleep_time),\n str(self._quality)\n ],\n close_fds=True,\n )\n\n with open(\"/tmp/mxcube.pid\", \"a\") as f:\n f.write(\"%s \" % self._video_stream_process.pid)\n\n def stop_streaming(self):\n if self._video_stream_process:\n ps = [self._video_stream_process] + psutil.Process(\n self._video_stream_process.pid\n ).children()\n for p in ps:\n p.kill()\n self._video_stream_process = None\n\n def start_streaming(self, size=()):\n if not size:\n w, h = self.get_width(), self.get_height()\n else:\n w, h = size\n\n self.set_stream_size(w * self._mpeg_scale, h * self._mpeg_scale)\n self.start_video_stream_process()\n\n return self.video_device\n\n def restart_streaming(self, size=()):\n self.stop_streaming()\n self.start_streaming(size)\n","repo_name":"mxcube/HardwareRepository","sub_path":"HardwareObjects/TangoLimaMpegVideo.py","file_name":"TangoLimaMpegVideo.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4403997480","text":"\"\"\"Tests to check funtionallity of section handling.\"\"\"\nimport pytest\nimport vcr\nimport yaml\n\nfrom tests.conftest import filter_request_uri, filter_response, cassette_name, FILTER_REQUEST_HEADERS\nfrom phpypam import PHPyPAMEntityNotFoundException\n\n\nwith open('tests/vars/server.yml') as c:\n server = yaml.safe_load(c)\n\nmy_section = dict(\n name='foobar',\n description='new section',\n permissions='{\"3\":\"1\",\"2\":\"2\"}'\n)\n\n\n@vcr.use_cassette(cassette_name('test_create_section'),\n filter_headers=FILTER_REQUEST_HEADERS,\n before_record_request=filter_request_uri,\n before_recorde_response=filter_response\n )\ndef test_create_section(pi):\n \"\"\"Test to create a new section.\n\n Create a section if it doesn't exists\n \"\"\"\n try:\n entity = pi.get_entity(controller='sections', controller_path=my_section['name'])\n except PHPyPAMEntityNotFoundException:\n print('create entity')\n entity = pi.create_entity(controller='sections', data=my_section)\n entity = pi.get_entity(controller='sections', controller_path=my_section['name'])\n\n assert entity is not None\n\n\n@vcr.use_cassette(cassette_name('test_update_section'),\n filter_headers=FILTER_REQUEST_HEADERS,\n before_record_request=filter_request_uri,\n before_recorde_response=filter_response\n )\ndef test_update_section(pi):\n \"\"\"Test to update an existing section.\n\n Update one field of an existing section.\n \"\"\"\n my_section['description'] = 'new description'\n\n entity = pi.get_entity(controller='sections', controller_path=my_section['name'])\n pi.update_entity(controller='sections', controller_path=entity['id'], data=my_section)\n entity = pi.get_entity(controller='sections', controller_path=my_section['name'])\n\n assert entity['description'] == my_section['description']\n\n\n@vcr.use_cassette(cassette_name('test_delete_section'),\n filter_headers=FILTER_REQUEST_HEADERS,\n before_record_request=filter_request_uri,\n before_recorde_response=filter_response\n )\ndef test_delete_section(pi):\n \"\"\"Test to delete an existing section.\n\n Delete one field of an existing section.\n \"\"\"\n entity = pi.get_entity(controller='sections', controller_path=my_section['name'])\n 
pi.delete_entity(controller='sections', controller_path=entity['id'])\n entity_kwargs = dict(controller='sections', controller_path=my_section['name'])\n pytest.raises(PHPyPAMEntityNotFoundException, pi.get_entity, **entity_kwargs)\n","repo_name":"codeaffen/phpypam","sub_path":"tests/test_cases/ensure_section.py","file_name":"ensure_section.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"1649208772","text":"import os\nimport json\nimport torch\nimport argparse\nimport numpy as np\nfrom lib.data import get_meanpose\nfrom lib.network import get_autoencoder\nfrom lib.util.motion import preprocess_mixamo, preprocess_test, postprocess\nfrom lib.util.general import get_config\nfrom lib.operation import rotate_and_maybe_project_world\nfrom itertools import combinations\n\n\ndef load_and_preprocess(path, config, mean_pose, std_pose):\n\n motion3d = np.load(path)\n\n # length must be multiples of 8 due to the size of convolution\n _, _, T = motion3d.shape\n T = (T // 8) * 8\n motion3d = motion3d[:, :, :T]\n\n # project to 2d\n motion_proj = motion3d[:, [0, 2], :]\n\n # reformat for mixamo data\n motion_proj = preprocess_mixamo(motion_proj, unit=1.0)\n\n # preprocess for network input\n motion_proj, start = preprocess_test(motion_proj, mean_pose, std_pose, config.data.unit)\n motion_proj = motion_proj.reshape((-1, motion_proj.shape[-1]))\n motion_proj = torch.from_numpy(motion_proj).float()\n\n return motion_proj, start\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', type=str, required=True,\n help='which config to use.')\n parser.add_argument('--description', type=str, default=\"data/mixamo/36_800_24/mse_description.json\",\n help=\"path to the description file which specifies how to run test\")\n parser.add_argument('--checkpoint', type=str, required=True,\n help=\"path to trained model weights\")\n parser.add_argument('--data_dir', type=str, default=\"data/mixamo/36_800_24/test_random_rotate\",\n help=\"path to the directory storing test data\")\n parser.add_argument('--out_dir', type=str, required=True,\n help=\"path to output directory\")\n args = parser.parse_args()\n\n config = get_config(args.config)\n ae = get_autoencoder(config)\n ae.load_state_dict(torch.load(args.checkpoint))\n ae.cuda()\n ae.eval()\n mean_pose, std_pose = get_meanpose(\"test\", config.data)\n print(\"loaded model\")\n\n description = json.load(open(args.description))\n chars = list(description.keys())\n\n cnt = 0\n os.makedirs(args.out_dir, exist_ok=True)\n\n for char1, char2 in combinations(chars, 2):\n\n motions1 = description[char1]\n motions2 = description[char2]\n\n for i, mot1 in enumerate(motions1):\n for j, mot2 in enumerate(motions2):\n\n path1 = os.path.join(args.data_dir, char1, mot1, \"{}.npy\".format(mot1))\n path2 = os.path.join(args.data_dir, char2, mot2, \"{}.npy\".format(mot2))\n\n ############\n # CROSS 2D #\n ############\n\n out_path1 = os.path.join(args.out_dir, \"motion_{}_{}_body_{}_{}.npy\".format(char1, i, char2, j))\n out_path2 = os.path.join(args.out_dir, \"motion_{}_{}_body_{}_{}.npy\".format(char2, j, char1, i))\n\n x_a, x_a_start = load_and_preprocess(path1, config, mean_pose, std_pose)\n x_b, x_b_start = load_and_preprocess(path2, config, mean_pose, std_pose)\n\n x_a_batch = x_a.unsqueeze(0).cuda()\n x_b_batch = x_b.unsqueeze(0).cuda()\n\n x_ab = ae.cross2d(x_a_batch, x_b_batch, x_a_batch)\n x_ba = ae.cross2d(x_b_batch, x_a_batch, 
x_b_batch)\n\n x_ab = postprocess(x_ab, mean_pose, std_pose, config.data.unit, start=x_a_start)\n x_ba = postprocess(x_ba, mean_pose, std_pose, config.data.unit, start=x_b_start)\n\n np.save(out_path1, x_ab)\n np.save(out_path2, x_ba)\n\n cnt += 1\n print(\"computed {} pairs\".format(cnt), end=\"\\r\")\n\n print(\"finished\" + \" \" * 20)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"yzhq97/transmomo.pytorch","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":367,"dataset":"github-code","pt":"78"} +{"seq_id":"2420342934","text":"from django.db.models import manager\n\nfrom django.db.models import F, ExpressionWrapper, DecimalField\n\n\nclass PerformanceMetricManager(manager.Manager):\n\n def with_cpi(self):\n '''\n Using annotate to add calculated fields: CPI.\n :return: QuerySet\n '''\n return self.model.objects.annotate(\n cpi=ExpressionWrapper(\n F('spend') / F('installs'), output_field=DecimalField()\n )\n )\n","repo_name":"Sorokin-MS95/sample-task","sub_path":"app/api/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22203216383","text":"from mutagen.mp3 import MP3\nimport RPi.GPIO as GPIO\nimport time\nimport os\nimport sys\nimport pygame as pg\nimport yaml;\nfrom resources.utils import (\n LCD_MAP,\n LCD_COMMAND,\n TIMING,\n BUTTON,\n)\n\nGPIO.setwarnings(False);\nGPIO.setmode(GPIO.BCM);\n\n#RS PIN STATES FOR CHARACTER AND COMMAND MODE\nLCD_CHR = GPIO.HIGH;\nLCD_CMD = GPIO.LOW;\n\n\ndef info():\n '''Prints a basic library description'''\n print(\"Software library for the stub project.\")\n\n\ndef is_led_on(led_pin):\n \"\"\"\n returns True if led at pin number led_pin is on\n else returns false\n \"\"\"\n GPIO.setup(led_pin, GPIO.OUT) # remove once led pins have been assigned\n return bool(GPIO.input(led_pin))\n\ndef is_button_pressed(pin):\n \"\"\"\n returns True if button at pin number pin is on\n else returns false\n \"\"\"\n return bool(GPIO.input(pin))\n\n\ndef char_to_arr(c):\n return [int(b) for b in format(ord(c), '08b')];\n\ndef write_arr_4bits(bits, mode, debug=True):\n pins = [LCD_MAP[\"LCD_D7\"], LCD_MAP[\"LCD_D6\"], LCD_MAP[\"LCD_D5\"], LCD_MAP[\"LCD_D4\"]];\n GPIO.output(LCD_MAP[\"LCD_RS\"], mode);\n\n for p, b in zip(pins, bits[:4]):\n GPIO.output(p,b);\n\n time.sleep(TIMING[\"E_DELAY\"]);\n GPIO.output(LCD_MAP[\"LCD_E\"], GPIO.HIGH);\n time.sleep(TIMING[\"E_PULSE\"]);\n GPIO.output(LCD_MAP[\"LCD_E\"], GPIO.LOW);\n time.sleep(TIMING[\"E_DELAY\"]);\n\n for p, b in zip(pins, bits[4:]):\n GPIO.output(p, b);\n\n time.sleep(TIMING[\"E_DELAY\"]);\n GPIO.output(LCD_MAP[\"LCD_E\"], GPIO.HIGH);\n time.sleep(TIMING[\"E_PULSE\"]);\n GPIO.output(LCD_MAP[\"LCD_E\"], GPIO.LOW);\n time.sleep(TIMING[\"E_DELAY\"]);\n\n for p in pins:\n GPIO.output(p, GPIO.LOW);\n\n\ndef toggle_display(state):\n if (state == 0):\n write_arr_4bits(LCD_COMMAND[\"LCD_D_OFF\"], LCD_CMD);\n state_str = \"OFF\"\n else:\n write_arr_4bits(LCD_COMMAND[\"LCD_ON_NC\"], LCD_CMD);\n state_str = \"ON\";\n print(\"LCD has been toggled \" + state_str);\n\n\ndef setup_LCD():\n\n for p in LCD_MAP.values():\n GPIO.setup(p, GPIO.OUT);\n\n write_arr_4bits(LCD_COMMAND[\"LCD_4BIT1\"], LCD_CMD);\n write_arr_4bits(LCD_COMMAND[\"LCD_4BIT2\"], LCD_CMD);\n write_arr_4bits(LCD_COMMAND[\"LCD_ON_NC\"], LCD_CMD);\n write_arr_4bits(LCD_COMMAND[\"LCD_ENTRY\"], LCD_CMD);\n 
write_arr_4bits(LCD_COMMAND[\"LCD_CLEAR\"], LCD_CMD);\n\n\ndef send_data_to_screen(text):\n \"\"\"\n Sends text to the LCD Screen\n \"\"\"\n for char in text:\n write_arr_4bits(char_to_arr(char), LCD_CHR);\n print(\"The following has been sent to the screen:\", text);\n\ndef lcd_go_to_XY(x, y):\n \n if not 0 <= x < 4 and 0 <= y < 20:\n raise Exception(\"x and y must be integers with 0 <= x <= 4 and 0 <= y <= 20\")\n \n addr = 0\n \n if x == 0:\n addr = 0x00\n \n if x == 1:\n addr = 0x40\n \n if x == 2:\n addr = 0x14\n \n if x == 3:\n addr = 0x54\n \n addr += y\n \n addr = 0x80 | addr # extend to 8 bits\n \n cmd = [int(x) for x in list(bin(addr))[2:]]\n \n write_arr_4bits(cmd, 0) # move cursor to position x, y on screen\n\ndef setup_pygame_player(freq=44100, bitsize=-16, channels=2, buffer=2048):\n pg.mixer.init(freq, bitsize, channels, buffer);\n\ndef play_radio(station_num = 1):\n \"\"\"\n From a created playlist of radio stations, play the first station\n \"\"\"\n os.system(\"mpc play \" + str(station_num));\ndef stop_radio():\n \"\"\"\n Stop radio play, and reset playlist\n \"\"\"\n os.system(\"mpc stop\");\ndef setup_station(station_list):\n \"\"\"\n Given a list, read from it different radio stations and fill the\n MPC playlist with the different stations\n \"\"\"\n for url in station_list:\n if (url[1].strip()):\n os.system(\"mpc add \" + url[1]);\ndef radio_reset():\n os.system(\"mpc clear\");\n\ndef play_sound(sound_file):\n \"\"\"\n Obtained from adafruit I2S decoder setup\n \"\"\"\n sound_file = \"./uploads/\" + sound_file;\n clock = pg.time.Clock();\n try:\n pg.mixer.music.load(sound_file)\n print(\"Music file {} loaded!\".format(sound_file))\n except:\n print(\"File {} not found! {}\".format(music_file, pg.get_error()));\n return\n pg.mixer.music.play()\n print(\"Now playing:\", sound_file);\n\n audio = MP3(sound_file)\n return audio.info.length\n #while pg.mixer.music.get_busy():\n # clock.tick(30);\ndef stop_player():\n pg.mixer.music.stop();\n\ndef is_music_playing():\n return pg.mixer.music.get_busy()\n\ndef pause_music():\n pg.mixer.music.pause()\n\ndef unpause_music():\n pg.mixer.music.unpause()\n\ndef setup_buttons():\n\n for b in BUTTON.values():\n GPIO.setup(b, GPIO.IN, pull_up_down=GPIO.PUD_UP)\ndef setup_pins():\n \"\"\"\n Setup all pins required for radio operation. 
    This function\n    must be called before any operation on any GPIO pins\n    \"\"\"\n    setup_buttons()\n    setup_LCD()\n\n
def create_playlist(PLAYLIST_DICT, PLAYLIST_NUM):\n    if PLAYLIST_NUM > int(PLAYLIST_DICT['num_playlist']):\n        raise IndexError;\n    else:\n        return PLAYLIST_DICT['playlists'][PLAYLIST_NUM]['songs']\n\n
def create_stations(RADIO_DICT):\n    stations = RADIO_DICT['stations'];\n    num_stations =int(RADIO_DICT['num_stations']);\n    res_stations = [];\n    for i in range(num_stations):\n        if stations[i]['state']:\n            res_stations.append((stations[i]['name'], stations[i]['url']));\n    return res_stations, len(res_stations);\n    \n
","repo_name":"kd1889/Internet-Radio-for-Kids","sub_path":"radio.py","file_name":"radio.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"2103603503","text":"import socketserver\nfrom typing import Tuple, List, Dict\n\n\n
class Request:\n    def parse_start_line(self, text: bytes) -> List[str]:\n        return [elem.strip().decode() for elem in text.split()]\n\n    def parse_headers(self, lines: List[bytes]) -> Dict[str, str]:\n        headers = dict()\n\n        for line in lines:\n            k, v = line.split(b\":\", 1)\n\n            headers[k.strip().decode()] = v.strip().decode()\n\n        return headers\n\n    def __init__(self, message: bytes):\n        lines = message.split(b\"\\r\\n\")\n\n        method, path, protocol = self.parse_start_line(lines[0])\n\n        self.method = method\n        self.path = path\n        self.protocol = protocol\n\n        body_exists = not bool(lines[-2])\n\n        if body_exists:\n            header_lines = lines[1:-2]\n        else:\n            header_lines = lines[1:]\n\n        self.headers = self.parse_headers(header_lines)\n\n\n
class MyTCPHandler(socketserver.BaseRequestHandler):\n    \"\"\"\n    The RequestHandler class for our server.\n\n    It is instantiated once per connection to the server, and must\n    override the handle() method to implement communication to the\n    client.\n    \"\"\"\n\n    def handle(self):\n        # self.request is the TCP socket connected to the client\n        self.data = self.request.recv(1024).strip()\n        req = Request(self.data)\n\n        print(req.method)\n        print(req.path)\n        print(req.protocol)\n        print(req.headers)\n\n        print(\"{} wrote:\".format(self.client_address[0]))\n        print(self.data)\n        # send back the start of a bare HTTP response (request parsing is the point here)\n        self.request.sendall(\n            b\"\"\"\n            HTTP/1.1\"\"\"\n        )\n\n\n
if __name__ == \"__main__\":\n    HOST, PORT = \"localhost\", 5000\n\n    # Create the server, binding to localhost on the port chosen above\n    server = socketserver.TCPServer((HOST, PORT), MyTCPHandler)\n\n    # Activate the server; this will keep running until you\n    # interrupt the program with Ctrl-C\n    server.serve_forever()\n
","repo_name":"JoMingyu/tcp-server-responses-http-message","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"72068262651","text":"l = eval(input())\nn = []\n\nfor i in range(len(l)-1):\n    n.append(abs(l[i+1]-l[i]))\n\nc = []\nfor i,j in zip(n,n[1:]):\n    if j > i:\n        c.append(True)\n    else:\n        c.append(False)\n\nprint(all(c))\n","repo_name":"GLA-Python/surprise-test-Priya-Varshney08","sub_path":"function_expanding.py","file_name":"function_expanding.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"38366726979","text":"# from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\ndef index(request):\n    context = {}\n    context['title'] = 'index page'\n    return render(request, 'index.html', context)\n\n\n# def hello(request):\n#     return HttpResponse('Hello world!')\n\n\n","repo_name":"FreesiaGao/Mine","sub_path":"Mine/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"29740418779","text":"from collections import deque\n\ncups = deque([int(x) for x in input().split()])\nbottles = [int(x) for x in input().split()]\n\nwasted_liters = 0\n\nwhile cups and bottles:\n    current_bottle = bottles.pop()\n    current_cup = cups.popleft() - current_bottle\n\n    if current_cup > 0:\n        cups.appendleft(current_cup)\n    else:\n        wasted_liters += abs(current_cup)\n\nif bottles:\n    print(f\"Bottles: {' '.join(str(bottle) for bottle in reversed(bottles))}\")\nelse:\n    print(f\"Cups: {' '.join(str(cup) for cup in cups)}\")\n\nprint(f'Wasted liters of water: {wasted_liters}')","repo_name":"neverlinked/Software-University","sub_path":"python_advanced/Exercise/Exercise_ Lists as Stacks and Queues/cups_and_bottles.py","file_name":"cups_and_bottles.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"16914235220","text":"# cook your dish here\n\ndef fun(v_to_go, adj, n):\n    arr = [-1]*n\n    \n    temp = 0\n    for i in range(1, n):\n        if(v_to_go[i]==1):\n            v_to_go[i]=0\n            queue = adj[i]\n            arr[i] = temp\n            while len(queue)!=0:\n                temp += 1\n                temp = temp%2\n                l = len(queue)\n                \n                for _ in range(l):\n                    x = queue.pop(0)\n                    if v_to_go[x]==1:\n                        v_to_go[x]=0\n                        queue = queue + adj[x]\n                    if arr[x]==-1:\n                        arr[x] = temp\n                    if(arr[x]!=-1 and arr[x]!=temp):\n                        print('0')\n                        return\n    \n    print('1')\n    return\n    \n\n\nn, m = map(int,input().split())\nn = n+1\nedges = []\nfor _ in range(m):\n    temp = list(map(int, input().split()))\n    edges.append(temp)\n\nadj = [[] for _ in range(n)]\n\nfor (a, b) in edges:\n    adj[a].append(b)\n    adj[b].append(a)\n\nv_to_go = [1]*n\nfun(v_to_go, adj, n)","repo_name":"dhairyakataria/Data-Structure-and-Algorithms","sub_path":"Graph/week3_paths_in_graphs1/2_bipartite/bipartite.py","file_name":"bipartite.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"15496457206","text":"import math\r\n\r\nnumbers = list(map(int, input().split(', ')))\r\n\r\ngroups_count = math.ceil(max(numbers) / 10)\r\n\r\nstart_value = 0\r\nend_value = 10\r\n\r\nfor i in range(1, groups_count + 1):\r\n    nums = []\r\n    for num in numbers:\r\n        if start_value < num <= end_value:\r\n            nums.append(num)\r\n\r\n    print(f\"Group of {i}0's: {nums}\")\r\n    start_value += 10\r\n    end_value += 10\r\n\r\n","repo_name":"ayk-dev/python-fundamentals","sub_path":"lists-advanced/gropu_of_10s.py","file_name":"gropu_of_10s.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"11680466252","text":"#!/usr/bin/python3\n\"\"\"\n    method that determines if a given data set represents\n    a valid UTF-8 encoding\n\"\"\"\n\n\n
def validUTF8(data):\n    \"\"\" data: list\"\"\"\n\n    no_bytes = 0  # continuation bytes still expected for the current character\n\n    # For each integer in the data array\n    for num in data:\n        # convert the value num into a binary representation\n        # and extract the last 8 characters from the binary string\n        binary_rep = format(num, '08b')[-8:]\n\n        # Continuation Byte Check: bytes inside a character start with '10'\n        if no_bytes > 0:\n            if not (binary_rep[0] == '1' and binary_rep[1] == '0'):\n                return False\n            # Reduce the number of bytes left to process by 1.\n            no_bytes -= 1\n            continue\n\n        if binary_rep[0] == '0':\n            continue  # a single-byte (ASCII) character\n\n        # get the number of 1s in the beginning of the string\n        for bit in binary_rep:\n            if bit == '0':\n                break\n            no_bytes += 1\n\n        # Setting the character to be 2 to 4 bytes long\n        if no_bytes == 1 or no_bytes > 4:\n            return False\n\n        # the leading byte itself is consumed here\n        no_bytes -= 1\n\n    # Check if data is complete for the last UTF-8 character\n    return no_bytes == 0\n
","repo_name":"Nnenna-udefi/alx-interview","sub_path":"0x04-utf8_validation/prevcode.py","file_name":"prevcode.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"17409558245","text":"from __future__ import division\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nfrom pylds.models import DefaultLDS\n\n\n##########\n#  util  #\n##########\n\n
def cumsum(v,strict=False):\n    if not strict:\n        return np.cumsum(v,axis=0)\n    else:\n        out = np.zeros_like(v)\n        out[1:] = np.cumsum(v[:-1],axis=0)\n        return out\n\n\n
def bmat(blocks):\n    rowsizes = [row[0].shape[0] for row in blocks]\n    colsizes = [col[0].shape[1] for col in zip(*blocks)]\n    rowstarts = cumsum(rowsizes,strict=True)\n    colstarts = cumsum(colsizes,strict=True)\n\n    nrows, ncols = sum(rowsizes), sum(colsizes)\n    out = np.zeros((nrows,ncols))\n\n    for i, (rstart, rsz) in enumerate(zip(rowstarts, rowsizes)):\n        for j, (cstart, csz) in enumerate(zip(colstarts, colsizes)):\n            out[rstart:rstart+rsz,cstart:cstart+csz] = blocks[i][j]\n\n    return out\n\n\n
def random_rotation(n,theta):\n    if n == 1:\n        return np.random.rand() * np.eye(1)\n\n    rot = np.array([[np.cos(theta), -np.sin(theta)],\n                    [np.sin(theta), np.cos(theta)]])\n    out = np.zeros((n,n))\n    out[:2,:2] = rot\n    q = np.linalg.qr(np.random.randn(n,n))[0]\n    return q.dot(out).dot(q.T)\n\n\n
def lds_to_dense_infoparams(model,data,inputs):\n    T, n = data.shape[0], model.D_latent\n\n    mu_init, sigma_init = model.mu_init, model.sigma_init\n    A, B, sigma_states = model.A, model.B, model.sigma_states\n    C, D, sigma_obs = model.C, model.D, model.sigma_obs\n    ss_inv = np.linalg.inv(sigma_states)\n\n    h = np.zeros((T,n))\n    h[0] += np.linalg.solve(sigma_init, mu_init)\n    # Dynamics\n    h[1:] += inputs[:-1].dot(B.T).dot(ss_inv)\n    h[:-1] += -inputs[:-1].dot(B.T).dot(np.linalg.solve(sigma_states, A))\n    # Emissions\n    h += C.T.dot(np.linalg.solve(sigma_obs, data.T)).T\n    h += -inputs.dot(D.T).dot(np.linalg.solve(sigma_obs, C))\n\n    J = np.kron(np.eye(T),C.T.dot(np.linalg.solve(sigma_obs,C)))\n    J[:n,:n] += np.linalg.inv(sigma_init)\n    pairblock = bmat([[A.T.dot(ss_inv).dot(A), -A.T.dot(ss_inv)],\n                      [-ss_inv.dot(A), ss_inv]])\n    for t in range(0,n*(T-1),n):\n        J[t:t+2*n,t:t+2*n] += pairblock\n\n    return J.reshape(T*n,T*n), h.reshape(T*n)\n\n\n###########\n#  tests  #\n###########\n\n
def same_means(model, Jh):\n    J,h = Jh\n    n, T = model.D_latent, model.states_list[0].T\n\n    dense_mu = np.linalg.solve(J,h).reshape((T,n))\n\n    model.E_step()\n    model_mu = model.states_list[0].smoothed_mus\n\n    assert np.allclose(dense_mu,model_mu)\n\n\n
def same_marginal_covs(model, Jh):\n    J, h = Jh\n    n, T = model.D_latent, model.states_list[0].T\n\n    all_dense_sigmas = np.linalg.inv(J)\n    dense_sigmas = np.array([all_dense_sigmas[k*n:(k+1)*n,k*n:(k+1)*n]\n                             for k in range(T)])\n\n    model.E_step()\n    model_sigmas = model.states_list[0].smoothed_sigmas\n\n    assert np.allclose(dense_sigmas,model_sigmas)\n\n\n
def same_pairwise_secondmoments(model, Jh):\n    J, h = Jh\n    n, T = model.D_latent, model.states_list[0].T\n\n    all_dense_sigmas = np.linalg.inv(J)\n    dense_mu = np.linalg.solve(J,h)\n    blockslices = [slice(k*n,(k+1)*n) for k in range(T)]\n    dense_Extp1_xtT = \\\n        sum(all_dense_sigmas[tp1,t] + np.outer(dense_mu[tp1],dense_mu[t])\n            for tp1,t in zip(blockslices[1:],blockslices[:-1]))\n\n    model.E_step()\n    model_Extp1_xtT = model.states_list[0].E_dynamics_stats[1][:n, :n]\n\n    assert np.allclose(dense_Extp1_xtT,model_Extp1_xtT)\n\n\n
def same_loglike(model,_):\n    # NOTE: ignore the posterior (J,h) passed in so we can use the more\n    # convenient prior info parameters\n    states = model.states_list[0]\n    data, inputs = states.data, states.inputs\n    T = data.shape[0]\n\n    C, model.C = model.C, np.zeros_like(model.C)\n    D, model.D = model.D, np.zeros_like(model.D)\n    J,h = lds_to_dense_infoparams(model,data,inputs)\n    model.C, model.D = C, D\n\n    bigC = np.kron(np.eye(T),C)\n    bigD = np.kron(np.eye(T),D)\n    mu_x = np.linalg.solve(J,h)\n    sigma_x = np.linalg.inv(J)\n    mu_y = bigC.dot(mu_x) + bigD.dot(inputs.ravel())\n    sigma_y = bigC.dot(sigma_x).dot(bigC.T) + np.kron(np.eye(T),model.sigma_obs)\n    dense_loglike = multivariate_normal.logpdf(data.ravel(),mu_y,sigma_y)\n\n    model_loglike = model.log_likelihood()\n    if not np.isclose(dense_loglike, model_loglike):\n        print(\"model - dense: \", model_loglike - dense_loglike)\n    assert np.isclose(dense_loglike, model_loglike)\n\n\n
def random_model(n,p,d,T):\n    data = np.random.randn(T,p)\n    inputs = np.random.randn(T,d)\n    model = DefaultLDS(p,n,d)\n    model.A = 0.99*random_rotation(n,0.01)\n    model.B = 0.1*np.random.randn(n,d)\n    model.C = np.random.randn(p,n)\n    model.D = 0.1*np.random.randn(p,d)\n\n    J,h = lds_to_dense_infoparams(model,data,inputs)\n    model.add_data(data, inputs=inputs)\n\n    return model, (J,h)\n\n\n
def check_random_model(check):\n    n, p, d = np.random.randint(2,5), np.random.randint(2,5), np.random.randint(0,3)\n    T = np.random.randint(10,20)\n    check(*random_model(n,p,d,T))\n\n\n
def test_means():\n    for _ in range(5):\n        yield check_random_model, same_means\n\n\n
def test_marginals_covs():\n    for _ in range(5):\n        yield check_random_model, same_marginal_covs\n\n\n
def test_pairwise_secondmoments():\n    for _ in range(5):\n        yield check_random_model, same_pairwise_secondmoments\n\n\n
def test_loglike():\n    for _ in range(5):\n        yield check_random_model, same_loglike\n
","repo_name":"mattjj/pylds","sub_path":"tests/test_dense.py","file_name":"test_dense.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"78"} +
{"seq_id":"26184860230","text":"# Solution\r\n# (1) Read L, R, U, D and set up coordinate offsets matching each move\r\n# (2) Implement the movement with a for loop\r\n# (3) Edge case: moves that leave the grid\r\n\r\n# Read the input\r\nn = int(input())\r\nmove = input().split()\r\nx, y = 1, 1\r\n\r\n# Movement offsets\r\ndx = [0, 0, -1, 1]\r\ndy = [-1, 1, 0, 0]\r\nmove_types = ['L', 'R', 'U', 'D']\r\n\r\n# Check each move in the plan\r\nfor plan in move : \r\n    # Compute the coordinates after the move\r\n    for i in range(len(move_types)) :\r\n        if plan == move_types[i] :\r\n            nx = x + dx[i]\r\n            ny = y + dy[i]\r\n    \r\n    # Ignore moves that would leave the grid\r\n    if (nx < 1 or ny < 1 or nx > n or ny > n) :\r\n        continue\r\n    \r\n    x, y = nx, ny\r\n\r\nprint(x, y)","repo_name":"moveformyfuture/Algorithm_realization","sub_path":"1. 상하좌우.py","file_name":"1. 상하좌우.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +
{"seq_id":"8739174585","text":"\n\"\"\"\nA module for calculating the exposure. The main use case is for a calculation\nat a single position on the sky, e.g., to perform a periodicity search when\nthe exposure varies appreciably over a cycle.\n\n$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pulsar/py_exposure.py,v 1.1 2010/11/01 19:54:18 kerrm Exp $\n\nauthor: M. Kerr \n\n\"\"\"\n\n
import numpy as N\nfrom math import sin,cos\nimport astropy.io.fits as pf\nimport uw.utilities.fitstools as fitstools\nfrom skymaps import Gti,Band,Healpix,SkyDir\nfrom os.path import join\n\nDEG2RAD = N.pi/180.\n\n# TODO -- convert to the sqrt(1-cos(theta)) binning adopted in the ST\n\n
class Livetime(object):\n    \"\"\"Calculate the livetime as a function of incidence angle using the GTI\n    specified in a collection of FT1 files and the livetime entries from\n    an FT2 file.\n\n    The default implementation is fully-unbinned, i.e., when the user\n    requests the livetime, the exact values for the S/C z-axis and\n    zenith positions are used to calculate the incidence/zenith angles.\n\n    This executes with comparable speed (factor of ~2 slower) to the\n    Science Tools application gtltcube.\"\"\"\n\n    def init(self):\n        self.gti_mask = None # an additional mask (e.g., to remove a GRB)\n        self.zenithcut = cos(DEG2RAD*105.) # 105 deg\n        self.fovcut = 0.4 # 66.4 deg\n        self.nbins = 20 # bins in cos(theta)\n        self.tstart = 0 # lower time limit in MET\n        self.tstop = 1e100 # upper time limit in MET\n        self.verbose = 1 # default verbosity\n        self.mask_zero = True # mask out bins with 0 livetime if True\n        self.fast_ft2 = True # use fast algorithm to process FT2/GTI\n        self.cosbins = N.linspace(self.fovcut,1,self.nbins+1)\n\n    def finish(self):\n        for field in self.fields:\n            if ('DEC_' in field):\n                self.__dict__['COS_'+field] = N.cos(self.__dict__[field])\n                self.__dict__['SIN_'+field] = N.sin(self.__dict__[field])\n\n    def __init__(self,ft2files,ft1files,**kwargs):\n        self.init()\n        self.__dict__.update(kwargs)\n        self.prev_vals = self.prev_ra = self.prev_dec = None # initialize caching\n        self.fields = ['START','STOP','LIVETIME','RA_SCZ','DEC_SCZ','RA_ZENITH','DEC_ZENITH']\n\n        if not hasattr(ft2files,'__len__'): ft2files = [ft2files]\n        if not hasattr(ft1files,'__len__'): ft1files = [ft1files]\n\n        self.__setup_gti(ft1files)\n        self.__setup_ft2(ft2files)\n        self.__process_ft2()\n\n        for field in self.fields:\n            if ('RA_' in field) or ('DEC_' in field):\n                self.__dict__[field] *= DEG2RAD\n\n        self.finish()\n\n    def __setup_gti(self,ft1files):\n        # the procedure is to take the union of all GTIs provided by FT1 files\n        # then, take an intersection with the (optional) gti_mask and the time limits\n        if self.verbose >= 1:\n            print ('Processing GTI...')\n        gti = self.gti = Gti(ft1files[0])\n        if len(ft1files) > 1:\n            for ft1 in ft1files[1:]: gti.combine(Gti(ft1))\n        tmin = max(gti.minValue(),self.tstart)\n        tmax = min(gti.maxValue(),self.tstop)\n        gti = self.gti = gti.applyTimeRangeCut(tmin,tmax)\n        if self.gti_mask is not None:\n            before = round(gti.computeOntime())\n            gti.intersection(self.gti_mask)\n            if self.verbose >= 1:\n                print ('Applied GTI mask; ontime reduced from %ds to %ds'%(before,round(gti.computeOntime())))\n\n        ### NB -- this iteration takes way longer than it should -- put in an accessor method in SWIG\n        self.gti_starts,self.gti_stops = \\\n            N.asarray([(x.minValue(),x.maxValue()) for x in gti]).transpose()\n\n        self.gti_starts.sort();self.gti_stops.sort()\n        if self.verbose >= 1:\n            print ('Finished computing GTI from FT1 files; total ontime = %ds'%(round(gti.computeOntime())))\n\n    def __setup_ft2(self,ft2files):\n        \"\"\"Load in the FT2 data. Optionally, mask out values that will not\n        contribute to the exposure.\"\"\"\n        if self.verbose >= 1:\n            print ('Loading FT2 files...')\n        handles = [pf.open(ft2,memmap=True) for ft2 in ft2files]\n        ft2lens = [handle['SC_DATA'].data.shape[0] for handle in handles]\n        fields = self.fields\n        arrays = [N.empty(sum(ft2lens)) for i in xrange(len(fields))]\n        \n        counter = 0\n        for ihandle,handle in enumerate(handles):\n            if self.verbose > 1:\n                print ('...Loading FT2 file # %d'%(ihandle))\n            n = ft2lens[ihandle]\n            for ifield,field in enumerate(fields):\n                arrays[ifield][counter:counter+n] = handle['SC_DATA'].data.field(field)\n            handle.close()\n        for ifield,field in enumerate(fields):\n            self.__dict__[field] = arrays[ifield]\n        if self.mask_zero:\n            if self.verbose >= 1: print ('Pruning values that yield 0 exposure...')\n            mask = (self.LIVETIME > 0) & (self.STOP > self.gti_starts[0]) & (self.START < self.gti_stops[-1])\n            self.mask_entries(mask)\n\n        self.mask_entries(N.argsort(self.START)) # sort the FT2 file in case it isn't\n        if self.verbose > 1:\n            print ('Finished loading FT2 files!')\n        \n    def __process_ft2(self):\n        if self.verbose >= 1: print ('Processing the FT2 file (calculating overlap with GTI)...')\n        if self.fast_ft2: overlaps = self.__process_ft2_fast(self.gti_starts,self.gti_stops)\n        else: overlaps = self.__process_ft2_slow(self.gti_starts,self.gti_stops)\n        mask = overlaps > 0\n        if self.mask_zero: self.mask_entries(mask)\n        self.LTFRAC = self.LIVETIME/(self.STOP-self.START)\n        self.fields += ['LTFRAC']\n        self.LIVETIME *= overlaps[mask]\n        if self.verbose > 1: print ('Finished processing the FT2 file!')\n\n    def __process_ft2_slow(self,gti_starts,gti_stops):\n        \"\"\"Calculate the fraction of each FT2 interval lying within the GTI.\n        Uses a slow, easily-checked algorithm.\n        The complexity is O(t^2) and is prohibitive\n        for mission-length files.\"\"\"\n        t1,t2,lt = self.START,self.STOP,self.LIVETIME\n        overlaps = N.zeros_like(lt)\n        for i,(gti_t1,gti_t2) in enumerate(zip(gti_starts,gti_stops)):\n            maxi = N.maximum(gti_t1,t1)\n            mini = N.minimum(gti_t2,t2)\n            overlaps += N.maximum(0,mini - maxi)\n        return overlaps/(t2 - t1)\n        \n    def __process_ft2_fast(self,gti_starts,gti_stops):\n        \"\"\"Calculate the fraction of each FT2 interval lying within the GTI.\n        Use binary search to quickly process the FT2 file.\n        The complexity is O(t*log(t)).\"\"\"\n        t1,t2,lt = self.START,self.STOP,self.LIVETIME\n        gti_t1,gti_t2 = gti_starts,gti_stops\n        overlaps = N.zeros_like(lt)\n        i1 = N.searchsorted(t2,gti_t1) # NB -- t2 in both not a typo\n        i2 = N.searchsorted(t2,gti_t2)\n        seps = i2 - i1\n        for i,(ii1,ii2) in enumerate(zip(i1,i2)):\n            overlaps[ii1:ii2+1] = 1. # fully-contained FT2 intervals\n            if seps[i] > 0: # correct the endpoint FT2 intervals\n                overlaps[ii1] = (t2[ii1] - gti_t1[i])/(t2[ii1] - t1[ii1])\n                overlaps[ii2] = (gti_t2[i] - t1[ii2])/(t2[ii2] - t1[ii2])\n            else: # edge case with exceptionally short GTI\n                a = max(t1[ii1],gti_t1[i])\n                b = min(t2[ii1],gti_t2[i])\n                overlaps[ii1] = max(b-a,0)/(t2[ii1] - t1[ii1])\n        return overlaps\n\n    def mask_entries(self,mask=None):\n        \"\"\"If mask is None, assume a LIVETIME > 0 cut.\"\"\"\n        if mask is None: mask = self.LIVETIME > 0\n        for field in self.fields:\n            self.__dict__[field] = self.__dict__[field][mask]\n\n    def get_cosines(self,skydir):\n        \"\"\"Return the cosine of the arclength between the specified direction\n        and the S/C z-axis and the zenith for each FT2 interval. Exact.\"\"\"\n        ra,dec = N.radians([skydir.ra(),skydir.dec()])\n        ra_s,ra_z = self.RA_SCZ,self.RA_ZENITH\n        cdec,sdec = cos(dec),sin(dec)\n        scosines = self.COS_DEC_SCZ*cdec*N.cos(ra-ra_s) + self.SIN_DEC_SCZ*sdec\n        if self.zenithcut > -1:\n            zcosines = self.COS_DEC_ZENITH*cdec*N.cos(ra-ra_z) + self.SIN_DEC_ZENITH*sdec\n            mask = (scosines>=self.fovcut) & (zcosines>=self.zenithcut)\n        else:\n            mask = (scosines>=self.fovcut)\n        return scosines,mask\n\n    def __call__(self,skydir,intervals = None):\n        \"\"\"Return the exposure at location indicated by skydir. Intervals is an optional list of time intervals\n        that can be used to obtain a time-binned exposure for, e.g., light curves.\"\"\"\n        \n        ra,dec = N.radians([skydir.ra(),skydir.dec()])\n        if (ra == self.prev_ra) and (dec == self.prev_dec) and (intervals is None):\n            return self.prev_val\n\n        # Calculate incidence and zenith angles wrt skydir, and cuts.\n        scosines,mask = self.get_cosines(skydir)\n\n        if intervals is None:\n            # Bin in cosine using livetimes as the weights\n            self.prev_val = N.histogram(scosines[mask],bins=self.cosbins,weights=self.LIVETIME[mask],new=True)\n            self.prev_ra,self.prev_dec = ra,dec\n            return self.prev_val\n\n        # return a time series for the livetime at the given position\n        overlaps = [self.__process_ft2_fast([intervals[i][0]],[intervals[i][1]])[mask] for i in xrange(len(intervals))]\n        livetimes= [N.histogram(scosines[mask],bins=self.cosbins,weights=self.LIVETIME[mask]*ol,new=True) for ol in overlaps]\n        return livetimes\n\n#===============================================================================================#\n
class BinnedLivetime(Livetime):\n    \"\"\"See remarks for Livetime class for general information.\n    \n    This class provides an implementation of the livetime calculation\n    in which the FT2 entries for the S/C z-axis and zenith positions\n    are binned onto a Healpix grid, allowing for a faster calculation\n    with long FT2 files.\n    \"\"\"\n\n    def finish(self):\n        hp = Healpix(self.nside,Healpix.RING,SkyDir.EQUATORIAL)\n        ras,decs = N.asarray( [hp.py_pix2ang(i) for i in xrange(12*self.nside**2)]).transpose()\n        self.COS_HP_DEC = N.cos(decs)\n        self.SIN_HP_DEC = N.sin(decs)\n        self.HP_RA = ras\n        ra_s,dec_s = self.RA_SCZ,self.DEC_SCZ\n        ra_z,dec_z = self.RA_ZENITH,self.DEC_ZENITH\n        self.S_PIX = N.fromiter((hp.py_ang2pix(ra_s[i],dec_s[i]) for i in xrange(len(ra_s))),dtype=int)\n        self.Z_PIX = N.fromiter((hp.py_ang2pix(ra_z[i],dec_z[i]) for i in xrange(len(ra_z))),dtype=int)\n\n    def __init__(self,nside=59,*args,**kwargs):\n        self.nside = nside\n        super(BinnedLivetime,self).__init__(*args,**kwargs)\n\n    def get_cosines(self,skydir):\n        ra,dec = N.radians([skydir.ra(),skydir.dec()])\n\n        # calculate the arclengths to the various Healpix pixels\n        cosines = self.COS_HP_DEC*cos(dec)*N.cos(ra-self.HP_RA)
+ self.SIN_HP_DEC*sin(dec)\n scosines = cosines[self.S_PIX]\n if self.zenithcut > -1:\n zcosines = cosines[self.Z_PIX]\n mask = (scosines>=self.fovcut) & (zcosines>=self.zenithcut)\n else:\n mask = (scosines>=self.fovcut)\n return scosines,mask\n\n#===============================================================================================#\nclass EfficiencyCorrection(object):\n v1 = [-1.381, 5.632, -0.830, 2.737, -0.127, 4.640] # p0, front\n v2 = [ 1.268, -4.141, 0.752, 2.740, 0.124, 4.625] # p1, front\n v3 = [-1.527, 6.112, -0.844, 2.877, -0.133, 4.593] # p0, back\n v4 = [ 1.413, -4.628, 0.773, 2.864, 0.126, 4.592] # p1, back\n \n def p(self,logE,v):\n a0,b0,a1,logEb1,a2,logEb2 = v\n b1 = (a0 - a1)*logEb1 + b0\n b2 = (a1 - a2)*logEb2 + b1\n if logE < logEb1:\n return a0*logE + b0\n if logE < logEb2:\n return a1*logE + b1\n return a2*logE + b2\n\n def __init__(self,e=1000):\n self.set_p(e)\n\n def set_p(self,e):\n loge = N.log10(e)\n for key,vec in zip(['p0f','p1f','p0b','p1b'],[self.v1,self.v2,self.v3,self.v4]):\n self.__dict__[key] = self.p(loge,vec)\n\n def get_efficiency(self,livetime_fraction,conversion_type=0):\n p0,p1 = (self.p0f,self.p1f) if conversion_type==0 else (self.p0b,self.p1b)\n return p0*livetime_fraction + p1\n\n#===============================================================================================#\nclass EffectiveArea(object):\n\n def init(self):\n self.irf = 'P6_v3_diff'\n\n def __init__(self,CALDB,**kwargs):\n \"\"\"CALDB -- path to CALDB directory\"\"\"\n \n self.init()\n self.__dict__.update(kwargs)\n self.CALDB = join(CALDB,'bcf')\n self.__read_data()\n\n def __read_data(self):\n \n ct0_file = join(self.CALDB,'ea','aeff_%s_front.fits'%(self.irf))\n ct1_file = join(self.CALDB,'ea','aeff_%s_back.fits'%(self.irf))\n ea = pf.open(ct0_file)\n cbins = N.append(ea['EFFECTIVE AREA'].data.field('CTHETA_LO')[0],ea['EFFECTIVE AREA'].data.field('CTHETA_HI')[0][-1])\n ebins = N.append(ea['EFFECTIVE AREA'].data.field('ENERG_LO')[0],ea['EFFECTIVE AREA'].data.field('ENERG_HI')[0][-1])\n feffarea = N.array(ea['EFFECTIVE AREA'].data.field('EFFAREA')[0])\n ea.close()\n ea = pf.open(ct1_file)\n beffarea = N.array(ea['EFFECTIVE AREA'].data.field('EFFAREA')[0])\n ea.close()\n self.cbins,self.ebins = cbins,ebins\n nc,ne = len(self.cbins),len(self.ebins)\n self.feffarea,self.beffarea = feffarea.reshape(nc-1,ne-1),beffarea.reshape(nc-1,ne-1)\n self.i_ebins,self.i_cbins = N.log((ebins[:-1]*ebins[1:])**0.5),(cbins[1:]+cbins[:-1])/2.\n\n def image(self,event_class=-1,logea = False):\n\n if event_class < 0: effarea = self.feffarea + self.beffarea\n elif event_class == 0: effarea = self.feffarea\n else: effarea = self.beffarea\n ebins,cbins = self.ebins,self.cbins\n\n import pylab as P\n\n #Generate a pseudo-color plot of the full effective area\n P.figure(2)\n P.gca().set_xscale('log')\n if logea: P.gca().set_yscale('log')\n P.pcolor((ebins[:-1]*ebins[1:])**0.5,(cbins[:-1]+cbins[1:])/2.,effarea.reshape(len(cbins)-1,len(ebins)-1))\n P.title('Effective Area')\n P.xlabel('$\\mathrm{Energy\\ (MeV)}$')\n P.ylabel('$\\mathrm{cos( \\theta)}$')\n cb = P.colorbar()\n cb.set_label('$\\mathrm{Effective\\ Area\\ (m^2)}$')\n\n #Generate a plot of the on-axis effective area with and without interpolation\n energies = N.logspace(N.log10(ebins[0]),N.log10(ebins[-1]),240)\n f_vals,b_vals = N.array([self(e,.99,interpolate=True) for e in energies]).transpose()\n P.figure(4)\n P.gca().set_xscale('log')\n if logea: P.gca().set_yscale('log')\n P.plot(energies,f_vals,label='front bilinear interp.')\n 
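    # back-detector curve, plotted the same way for comparison\n    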
P.plot(energies,b_vals,label='back bilinear interp.')\n f_vals,b_vals = N.array([self(e,.99,interpolate=False) for e in energies]).transpose()\n P.plot(energies,f_vals,label='front nearest-neighbour interp.')\n P.plot(energies,b_vals,label='back nearest-neighbour interp.')\n P.title('On-axis Effective Area')\n P.xlabel('$\\mathrm{Energy\\ (MeV)}$')\n P.ylabel('$\\mathrm{Effective\\ Area\\ (cm^2)}$')\n P.legend(loc = 'lower right')\n P.grid()\n\n\n def __call__(self,e,c,event_class=-1,interpolate = True):\n \"\"\"Return bilinear (or nearest-neighbour) interpolation.\"\"\"\n eb,cb = self.i_ebins,self.i_cbins\n e = N.log(e)\n ne,nc = len(eb),len(cb)\n\n if e < eb[0]: e = eb[0]\n if e > eb[-1]: e = eb[-1]\n if c < cb[0]: c = cb[0]\n if c > cb[-1]: c = cb[-1]\n\n #Cute way to find nearest neighbour\n i = N.argmin(N.abs(eb-e))\n j = N.argmin(N.abs(cb-c))\n\n # effarea[:,-1] increasing effective area\n # effarea[-1,:] increasing effective area\n # effarea[:,-1].shape 32\n # effarea[-1,:].shape 64\n\n if not interpolate:\n i,j = j,i \n if event_class < 0: return (1e4*self.feffarea[i,j],1e4*self.beffarea[i,j])\n elif event_class == 0: return 1e4*self.feffarea[i,j]\n else: return 1e4*self.beffarea[i,j]\n\n i = i if eb[i]<=e and i < ne-1 else i-1 #adjust nearest neighbor to lower bin\n j = j if cb[j]<=c and j < nc-1 else j-1\n\n def bilinear(effarea):\n \n c2,c1 = cb[j+1],cb[j]\n e2,e1 = eb[i+1],eb[i]\n f00 = effarea[j,i]\n f11 = effarea[j+1,i+1]\n f01 = effarea[j+1,i]\n f10 = effarea[j,i+1]\n\n return 1e4/(e2-e1)/(c2-c1)*( (e2-e)*(f00*(c2-c) + f01*(c-c1)) + (e-e1)*(f10*(c2-c) + f11*(c-c1)) )\n\n if event_class < 0: return (bilinear(self.feffarea),bilinear(self.beffarea))\n elif event_class == 0: return bilinear(self.feffarea)\n else: return bilinear(self.beffarea)\n\n def dual_avg(self,e_range,c_range,event_class=-1,steps=10):\n return N.array([self(x,y,event_class=event_class) for x in N.linspace(e_range[0],e_range[1],steps) for y in N.linspace(c_range[0],c_range[1],steps)]).sum()/steps**2\n\n def theta_avg(self,e,c_range,event_class=-1,steps=10):\n return N.array([self(e,ctheta,event_class=event_class) for ctheta in N.linspace(c_range[0],c_range[1],steps)]).sum()/steps\n\n#===============================================================================================#\nclass Exposure(object):\n\n def __init__(self,ft2file,ft1files=None,fovcut=66.,zenithcut=105.,frontfile=None):\n self.ft2file = ft2file\n self.ea = EffectiveArea(frontfile = frontfile)\n self.lt = Livetime(ft2file,ft1files=ft1files,bins=self.ea.cbins,fovcut=fovcut,zenithcut=zenithcut)\n self.energies = self.event_class = None\n\n def __call__(self,skydir,energies,event_class=-1):\n\n lt = self.lt(skydir)\n if lt is None: return N.zeros(len(energies))\n\n #Do some caching -- effective area is the same for same energy bins!\n if N.all(energies == self.energies) and event_class == self.event_class:\n #print ('using cached values')\n vals = self.vals\n \n else:\n\n e_centers = N.asarray(energies)\n c_centers = (lt[1][1:]+lt[1][:-1])/2.\n \n vals = N.array([[self.ea(e_center,c_center,event_class=event_class) for c_center in c_centers] for e_center in e_centers])\n self.vals = vals \n self.energies = energies\n self.event_class = event_class\n\n \n if event_class == -1:\n return N.append(N.sum(vals[:,:,0]*lt[0],axis=1),N.sum(vals[:,:,1]*lt[0],axis=1))\n else: return N.sum(vals*lt[0],axis=1)\n\n def change_IRF(self,frontfile = None):\n self.ea = EffectiveArea(frontfile = frontfile)\n self.energies = self.event_class = 
None\n\n#===============================================================================================#\nclass SimpleExposureSeries(object):\n \"\"\"A helper class to generate a simple exposure (evaluated at a\n single energy) time series for use in looking for periodic signals\n from sources with periods long enough to warrant exposure correction.\n\n The binned method is *much* faster and can be executed quickly using\n extremely fine binning, comparable to the unbinned method to much\n better than one percent, which is almost certainly smaller than the\n accuracy hit we get by ignoring interpolation of the S/C pointing.\n \"\"\"\n\n def __init__(self,CALDB,ft1files,ft2files,\n energy=1000,cosbins=N.linspace(0.2,1,1001),apply_correction=True,\n **kwargs):\n self.lt = Livetime(ft2files,ft1files,**kwargs)\n self.ea = EffectiveArea(CALDB,**kwargs)\n self.en = energy\n self.ac = apply_correction\n self.apply_correction()\n self.rebin(cosbins)\n\n def apply_correction(self):\n if self.ac:\n # do efficiency correction\n ec = EfficiencyCorrection(self.en)\n LT0 = self.lt.LIVETIME*ec.get_efficiency(self.lt.LTFRAC,0)\n LT1 = self.lt.LIVETIME*ec.get_efficiency(self.lt.LTFRAC,1)\n self.LIVETIME = N.asarray([LT0,LT1]).transpose()\n else:\n self.LIVETIME = N.asarray([self.lt.LIVETIME,self.lt.LIVETIME]).transpose()\n\n def rebin(self,cosbins):\n self.cosbins = cosbins; ea = self.ea; en = self.en\n self.aeff_vals = N.asarray([ea(en,c) for c in (cosbins[1:] + cosbins[:-1])/2])\n\n def set_energy(self,energy):\n self.en = energy\n self.apply_correction()\n self.rebin(self.cosbins)\n\n def get_series(self,skydir,binned=True,energy=None):\n\n if energy is not None: self.set_energy(energy)\n\n cosines,mask = self.lt.get_cosines(skydir)\n cosines = cosines[mask]\n\n if binned: # use binned effective area -- faster\n indices = N.searchsorted(self.cosbins,cosines) - 1\n aeff = self.aeff_vals[indices]\n else: # use unbinned effective area -- a little slow\n en = self.en; ea = self.ea\n aeff = N.asarray([ea(en,c) for c in cosines])\n #print (aeff.shape)\n exposure = (aeff*self.LIVETIME[mask]).sum(axis=1)\n return self.lt.START[mask],self.lt.STOP[mask],exposure\n\n#===============================================================================================#\nclass SpectralExposureSeries(object):\n \"\"\"Average SimpleExposureSeries over energy using the given spectral\n index and the given energy points.\"\"\"\n\n def __init__(self,ses,index=2,emin=100,emax=1e5,nsimps=12):\n self.ses = ses\n self.e_points = sp = N.logspace(N.log10(emin),N.log10(emax),nsimps+1)\n sweights = (N.log(sp[-1]/sp[0])/(3.*nsimps)) * \\\n N.asarray([1.] 
+ ([4.,2.]*(nsimps/2))[:-1] + [1.])\n sweights *= (self.e_points/emin)**(-index) # energy weighting\n self.s_weights = sweights / (sweights.sum())# normalization\n \n def get_series(self,skydir):\n ep,ew,ses = self.e_points,self.s_weights,self.ses\n t1,t2,results = ses.get_series(skydir,energy=ep[0])\n results *= ew[0]\n for i in xrange(1,len(ep)):\n results += ses.get_series(skydir,energy=ep[i])[-1]*ew[i]\n return t1,t2,results\n \n ","repo_name":"fermi-lat/pointlike","sub_path":"python/uw/pulsar/py_exposure.py","file_name":"py_exposure.py","file_ext":"py","file_size_in_byte":22528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"46178290477","text":"__author__ = 'ava-katushka'\nimport os, django\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"settings\")\ndjango.setup()\n\nfrom blog.models import Post, PostPreview, Tag, TextBlock, ImageBlock\nfrom django.core.files import File\nfrom shutil import copy2\n_SCRIPT_ROOT = \"/Users/ava-katushka/Documents/WebProjects/avakatushka/blog/uploading\"\n_PICTURES_DIR = os.path.join(_SCRIPT_ROOT, \"pictures\")\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media/\")\n\nclass ImageFileBlog:\n def __init__(self, image_name, image_num, image_alt, slug):\n self.image_name = \"/\" + \"media\" + \"/\" + slug + \"/\" + image_name\n self.image_num = image_num\n self.image_alt = image_alt\n blog_directory = os.path.join(MEDIA_ROOT, slug)\n if not os.path.exists(blog_directory):\n os.makedirs(blog_directory)\n copy2(os.path.join(_PICTURES_DIR, image_name), blog_directory)\n\n\ndef get_attribute(line):\n return line.split(\":\")[1].strip()\n\n\ndef get_imagefile(images, index):\n return images[index].image_name\n\n\ndef get_main_attributes(filename):\n with open(filename) as file:\n lines = file.readlines()\n title = get_attribute(lines[0])\n slug = get_attribute(lines[1])\n category = get_attribute(lines[2])\n return title, slug, category\n\n\ndef get_catching_text_and_text_blocks(filename):\n with open(filename) as file:\n text = file.read()\n blocks = text.split(\"---------------\")\n blocks = blocks[1:]\n blocks = [block.strip() for block in blocks]\n catching_text = blocks[0]\n blocks = blocks[1:]\n return catching_text, blocks\n\n\n# get_text\n\nfilename = os.path.join(_SCRIPT_ROOT, \"texts/text.txt\")\ntitle, slug, category = get_main_attributes(filename)\ncatching_text, blocks = get_catching_text_and_text_blocks(filename)\n# get images\nimage_path = os.path.join(_SCRIPT_ROOT, \"pictures\")\nvalid_images = [\".jpg\", \".gif\", \".png\"]\nimages = []\nfor file in os.listdir(image_path):\n ext = os.path.splitext(file)[1]\n if ext.lower() not in valid_images:\n continue\n imagename_splitted = file.split(\"_\", 1)\n image_num = int(imagename_splitted[0])\n image_alt = imagename_splitted[1].split(\".\")[0]\n ifb = ImageFileBlog(file, image_num, image_alt, slug)\n images.append(ifb)\n\nimages = sorted(images, key=lambda f: f.image_num)\n###creating models\npost = Post(title=title, slug=slug)\n\nimage_preview = get_imagefile(images, 0)\ntag = Tag.objects.get(pk=category)\npost.save()\npost_preview = PostPreview(slug=slug, image=image_preview, title=title, catching_text=catching_text, post=post)\npost_preview.save()\npost_preview.tags.add(tag)\npost_preview.save()\nfor text in blocks:\n text_block = TextBlock(text=text, post=post)\n text_block.save()\nfor image in images:\n image_block = ImageBlock(alt=image.image_alt, post=post, 
image=get_imagefile(images, image.image_num))\n    image_block.save()\n","repo_name":"ava-katushka/avakatushka","sub_path":"avakatushka/upload_post.py","file_name":"upload_post.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"13236173878","text":"import matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nfrom PIL import Image\nimport numpy as np\nfrom numpy.random import random_sample\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport random\nimport csv\n\n\n
class dataset():\n    def __init__(self):\n        self.data_row = []\n        self.data_reg = []\n        self.data_len = 0\n        self.batch_index = 0\n    \n    def next_batch(self, size):\n        if size > self.data_len:\n            print('batch size cannot be greater than data length')\n            return\n        \n        batch = self.data_reg[self.batch_index: self.batch_index+size]\n        self.batch_index += size\n        if self.batch_index >= self.data_len:\n            self.batch_index = self.batch_index % self.data_len\n            batch = batch + self.data_reg[0: self.batch_index]\n        return batch\n    \n    def load_data(self, data_dir, verbose = 2000):\n        i = 0\n        while(True):\n            img_name = str(i) + '.jpg'\n            img_dir = data_dir + img_name\n            try:\n                img = Image.open(img_dir)\n                i = i + 1\n            except:\n                print('done! total loaded data: ' + str(i))\n                break\n            img_arr = np.asarray(img)\n            self.data_row.append(img_arr)\n            self.data_reg.append(np.float32(img_arr) / np.float32(255))\n            \n            if (verbose != 0) and (i % verbose == 0):\n                print(str(i) + ' data have been loaded...')\n        self.data_len = len(self.data_row)\n        return\n\n    def random_sample(self, size, dtype = 'reg'):\n        data = None\n        if dtype == 'reg':\n            data = np.array(self.data_reg)\n        elif dtype == 'row':\n            data = np.array(self.data_row)\n        else:\n            print('wrong data type!')\n            return\n        index = (random_sample(size) * self.data_len).astype(np.int)\n        return data[index]\n    \n    def shuffle(self):\n        seed = random.random()\n        random.seed(seed)\n        random.shuffle(self.data_row)\n        random.seed(seed)\n        random.shuffle(self.data_reg)\n    \n    \n
def sample_z(m, n):\n    return np.random.uniform(-1., 1., size=[m, n])\n\n\n
def plot_samples(samples, h = 4, w = 4, color_dim = 3, save = False, filename = None, folder_path = 'out/'):\n    fig = plt.figure(figsize=(h, w))\n    gs = gridspec.GridSpec(h, w)\n    gs.update(wspace=0.05, hspace=0.05)\n    for i, sample in enumerate(samples):\n        sample = np.clip(sample, 0, 1)\n        ax = plt.subplot(gs[i])\n        plt.axis('off')\n        ax.set_xticklabels([])\n        ax.set_yticklabels([])\n        ax.set_aspect('equal')\n        plt.imshow(sample)\n    if save and filename:\n        plt.savefig(folder_path + filename, bbox_inches='tight')\n        plt.close(fig)\n    elif not save:\n        plt.show()\n    return","repo_name":"r06922085/MLDS2018SPRING","sub_path":"hw3/hw3_1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +
{"seq_id":"72901915131","text":"#!/usr/bin/env python3\n\n# Author: Jeffrey Grover\n# Created: 11/2016\n# Purpose: Filter methylation summary .csv files for a minimum coverage level\n\nimport csv\nimport os.path\nfrom argparse import ArgumentParser\n\n\n# Function to determine if you've passed a valid filename\n\n\n
def file_validity(parser, arg):\n    if not os.path.exists(arg):\n        parser.error('%s is not a valid file path.' % arg)\n    else:\n        pass\n\n# Parse command line file path and coverage, store in variables\n\nparser = ArgumentParser(description='This script is exactly what you think it is. 
It filters csv files formatted for '\n 'CoGe\\'s LoadExperiment function based on read coverage')\n\nparser.add_argument('input_file',\n help='Input Bismark context-specific methylation calls',\n metavar='File')\n\nparser.add_argument('-c', '--coverage',\n type=int,\n required=True,\n help='Minimum coverage to report in output csv')\n\ncoge_path = parser.parse_args().input_file\ncoverage = parser.parse_args().coverage\n\n# Check validity of input filename\n\nfile_validity(parser, coge_path)\n\n# Output file is input_file_path.coge.csv\n\nfiltered_coge_path = '%s.filtered.coge.csv' % coge_path\n\nprint('Filtering out cytosines with read coverage <', coverage, 'from:\\n',\n coge_path,\n '\\nSaving filtered file to:\\n',\n filtered_coge_path)\n\n# Iterate through rows in csv and send that row to output file only if read coverage is above the specified value\n\nwith open(coge_path, 'r') as unfiltered_csv, open(filtered_coge_path, 'w') as filtered_csv:\n coge_methyl_summary = csv.reader(unfiltered_csv)\n filtered_methyl_summary = csv.writer(filtered_csv)\n\n for row in coge_methyl_summary:\n total_reads = int(row[5])\n if total_reads >= coverage:\n filtered_methyl_summary.writerow(row)\n\nprint('\\nDone!')\n","repo_name":"groverj3/methylation_coge-tools","sub_path":"coge_coverage-filter.py","file_name":"coge_coverage-filter.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12877392573","text":"from airflow.models.variable import Variable\nfrom airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator\nfrom airflow.providers.google.cloud.transfers.local_to_gcs import LocalFilesystemToGCSOperator\nfrom airflow.operators.dummy import DummyOperator\nfrom airflow.decorators import dag, task\nfrom airflow.utils.dates import days_ago\nimport pandas as pd\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))\n\n\nDATASET_ID = Variable.get(\"DATASET_ID\")\nBASE_PATH = Variable.get(\"BASE_PATH\")\nBUCKET_NAME = Variable.get(\"BUCKET_NAME\")\nGOOGLE_CLOUD_CONN_ID = Variable.get(\"GOOGLE_CLOUD_CONN_ID\")\nBIGQUERY_TABLE_NAME = \"bs_global_superstore\"\nGCS_OBJECT_NAME = \"extract_global_superstore_data.csv\"\nDATA_PATH = f\"{BASE_PATH}/data\"\nOUT_PATH = f\"{DATA_PATH}/{GCS_OBJECT_NAME}\"\n\n\n\n@dag(\n default_args={\n 'owner': 'widia',\n 'email': 'wretasafitri33@gmail.com',\n 'email_on_failure': True\n },\n schedule_interval='0 4 * * * ', # every 4AM\n start_date=days_ago(1),\n tags=['csv', 'tweet', 'disaster', 'blank-space']\n)\ndef etl_global_superstore_dag():\n @task()\n def extract_transform():\n df = pd.read_csv(f\"{DATA_PATH}/global_superstore.csv\")\n\n # Replace null in postal code column with 'None'\n df['Postal Code'].fillna('None', inplace=True)\n # Change Order Date and Ship Date as Datetime\n df['Order Date'] = pd.to_datetime(df['Order Date'])\n df['Ship Date'] = pd.to_datetime(df['Ship Date'])\n \n # Get order year, month, and day\n df['Order Year'] = df['Order Date'].dt.year\n df['Order Month'] = df['Order Date'].dt.strftime('%B')\n df['Order Day'] = df['Order Date'].dt.strftime('%A')\n\n df.to_csv(OUT_PATH, index=False, header=False)\n\n start = DummyOperator(task_id='start')\n end = DummyOperator(task_id='end')\n extract_transform_task = extract_transform()\n\n stored_data_gcs = LocalFilesystemToGCSOperator(\n task_id=\"store_to_gcs\",\n gcp_conn_id=GOOGLE_CLOUD_CONN_ID,\n src=OUT_PATH,\n 
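# destination object key at the bucket root\n        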
dst=GCS_OBJECT_NAME,\n bucket=BUCKET_NAME\n )\n\n loaded_data_bigquery = GCSToBigQueryOperator(\n task_id='load_to_bigquery',\n bigquery_conn_id=GOOGLE_CLOUD_CONN_ID,\n bucket=BUCKET_NAME,\n source_objects=[GCS_OBJECT_NAME],\n destination_project_dataset_table=f\"{DATASET_ID}.{BIGQUERY_TABLE_NAME}\",\n schema_fields=[ # based on https://cloud.google.com/bigquery/docs/schemas\n {'name': 'Row_ID', 'type': 'INT64', 'mode': 'REQUIRED'},\n {'name': 'Order_ID', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Order_Date', 'type': 'DATE', 'mode': 'NULLABLE'},\n {'name': 'Ship_Date', 'type': 'DATE', 'mode': 'NULLABLE'},\n {'name': 'Ship_Mode', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Customer_ID', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Customer_Name', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Segment', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'City', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'State', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Country', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Postal_Code', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Market', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Region', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Product_ID', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Category', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Sub_Category', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Product_Name', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Sales', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'Quantity', 'type': 'INT64', 'mode': 'NULLABLE'},\n {'name': 'Discount', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'Profit', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'Shipping_Cost', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'Order_Priority', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Order_Year', 'type': 'INT64', 'mode': 'NULLABLE'},\n {'name': 'Order_Month', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'Order_Day', 'type': 'STRING', 'mode': 'NULLABLE'},\n ],\n autodetect=False,\n # If the table already exists - overwrites the table data\n write_disposition='WRITE_TRUNCATE',\n )\n\n start >> extract_transform_task\n extract_transform_task >> stored_data_gcs\n stored_data_gcs >> loaded_data_bigquery\n loaded_data_bigquery >> end\n\n\nglobal_superstore_etl= etl_global_superstore_dag()\n","repo_name":"widiarsaf/simple-orchestrate-etl-airflow-globalSuperstore","sub_path":"dags/etl_global_superstore_dag.py","file_name":"etl_global_superstore_dag.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22960741837","text":"from z3 import *\nfrom .. 
import dsl as D\n\n\nclass Optimizer:\n\n # additional variables to track if a production occurs or not in a program\n var_occurs = []\n\n # relaxation variables\n relax_vars = []\n\n # keeps track of the current assumptions\n assumptions = []\n\n # keeps track of the cost of each relaxation variable\n cost_relax_vars = {}\n\n def __init__(self, solver, spec, variables, func_vars, nodes):\n self.bound = 0\n self.ub = 0\n self.solver = solver\n self.spec = spec\n self.variables = variables\n self.func_vars = func_vars\n self.id = 0\n self.objective = []\n self.nodes = nodes\n self.weights = []\n\n def createVariablesOccurrence(self):\n for x in range(0, self.spec.num_productions()):\n name = 'occ' + str(x)\n v = Int(name)\n self.var_occurs.append(v)\n self.solver.add(And(v >= 0, v <= 1))\n\n for x in range(0, len(self.var_occurs)):\n ctr = self.var_occurs[x] == 1\n rhs = self.variables[0] == x\n for y in range(1, len(self.variables)):\n rhs = Or(rhs, self.variables[y] == x)\n self.solver.add(\n Implies(self.variables[y] == x, self.var_occurs[x] == 1))\n self.solver.add(Implies(ctr, rhs))\n\n for x in range(0, len(self.var_occurs)):\n for y in range(0, len(self.variables)):\n self.solver.add(\n Implies(self.var_occurs[x] == 0, self.variables[y] != x))\n\n def mk_is_not_parent(self, parent, child, weight=None):\n # return\n child_pos = []\n # find positions that type-check between parent and child\n for x in range(0, len(parent.rhs)):\n if child.lhs == parent.rhs[x]:\n child_pos.append(x)\n\n for n in self.nodes:\n # not a leaf node\n if n.children != None:\n if weight != 100:\n # FIXME: reduce duplication of code\n name = 'relax' + str(self.id)\n v = Int(name)\n self.cost_relax_vars[v] = weight\n self.relax_vars.append(v)\n self.objective.append(Product(weight, v))\n self.weights.append(weight)\n self.ub += weight\n # domain of the relaxation variable\n self.solver.add(Or(v == 0, v == 1))\n # constraint for the is_parent constraint\n ctr_children = []\n for p in range(0, len(child_pos)):\n ctr_children.append(\n self.variables[n.children[p].id - 1] == child.id)\n\n self.solver.add(\n Or(Implies(Or(ctr_children), self.variables[n.id - 1] != parent.id), v == 1))\n # relation between relaxation variables and constraint\n self.solver.add(Implies(v == 1, Or(\n self.variables[n.id - 1] == parent.id, Not(Or(ctr_children)))))\n self.solver.add(\n Implies(And(self.variables[n.id - 1] != parent.id, Or(ctr_children)), v == 0))\n self.id = self.id + 1\n else:\n ctr_children = []\n for p in range(0, len(child_pos)):\n ctr_children.append(\n self.variables[n.children[p].id - 1] == child.id)\n\n self.solver.add(\n Implies(Or(ctr_children), self.variables[n.id - 1] != parent.id))\n\n # FIXME: dissociate the creation of variables with the creation of constraints?\n def mk_is_parent(self, parent, child, weight=None):\n '''children production will have the parent production with probability weight'''\n\n child_pos = []\n # find positions that type-check between parent and child\n for x in range(0, len(parent.rhs)):\n if child.lhs == parent.rhs[x]:\n child_pos.append(x)\n\n for n in self.nodes:\n # not a leaf node\n if n.children != None:\n if weight != None:\n # FIXME: reduce duplication of code\n name = 'relax' + str(self.id)\n v = Int(name)\n self.cost_relax_vars[v] = weight\n self.relax_vars.append(v)\n self.objective.append(Product(weight, v))\n self.weights.append(weight)\n self.ub += weight\n # domain of the relaxation variable\n self.solver.add(Or(v == 0, v == 1))\n # constraint for the is_parent 
constraint\n ctr_children = []\n for p in range(0, len(child_pos)):\n ctr_children.append(\n self.variables[n.children[p].id - 1] == child.id)\n\n self.solver.add(\n Or(Implies(self.variables[n.id - 1] == parent.id, Or(ctr_children)), v == 1))\n # relation between relaxation variables and constraint\n self.solver.add(Implies(v == 1, Or(\n self.variables[n.id - 1] != parent.id, Not(Or(ctr_children)))))\n self.solver.add(\n Implies(And(self.variables[n.id - 1] == parent.id, Or(ctr_children)), v == 0))\n self.id = self.id + 1\n else:\n ctr_children = []\n for p in range(0, len(child_pos)):\n ctr_children.append(\n self.variables[n.children[p].id - 1] == child.id)\n\n self.solver.add(\n Implies(self.variables[n.id - 1] == parent.id, Or(ctr_children)))\n\n def mk_at_most_k(self, prod, k):\n return\n '''the production prod will appear at most k times'''\n k = int(k)\n eqls = []\n for n in self.nodes:\n eqls.append(If(self.variables[n.id-1]==prod.id, 1, 0))\n # print(Sum(eqls) <= k)\n self.solver.add(Sum(eqls) <= k)\n\n def mk_distinct_inputs(self, prod, max_children):\n # return\n '''prod will be a production whose inputs are all different'''\n for n in self.nodes:\n if n.children != None:\n for p in range(0, max_children):\n for p1 in range(p+1, max_children):\n self.solver.add(Implies(self.variables[n.id - 1] == prod.id, Or(self.variables[n.children[p].id - 1] != self.variables[n.children[p1].id - 1], And(self.variables[n.children[p].id - 1]==0, self.variables[n.children[p1].id - 1]==0), And(self.func_vars[n.children[p].id - 1]==1, self.func_vars[n.children[p1].id - 1]==1))))\n\n\n def mk_constant_occurs(self, prod):\n # return\n '''at least one node will be assigned to the constant'''\n eqls = []\n for n in self.nodes:\n for p in prod:\n eqls.append(self.variables[n.id-1]==p)\n # print(Or(eqls))\n self.solver.add(Or(eqls))\n\n def mk_happens_before(self, pos, pre):\n # return\n '''prod will be a production whose inputs are all different'''\n for n in range(len(self.nodes)):\n pre_cond = []\n for p in range(n+1,len(self.nodes)):\n pre_cond.append(self.variables[self.nodes[p].id - 1]==pre)\n # print(Implies(self.variables[self.nodes[n].id - 1] == pos, Or(pre_cond)))\n self.solver.add(Implies(self.variables[self.nodes[n].id - 1] == pos, Or(pre_cond)))\n\n\n def mk_not_occurs(self, production, weight=None):\n '''a production will not occur with a given probability'''\n if len(self.var_occurs) == 0:\n self.createVariablesOccurrence()\n\n if weight != None:\n name = 'relax' + str(self.id)\n v = Int(name)\n self.cost_relax_vars[v] = weight\n self.relax_vars.append(v)\n self.objective.append(Product(weight, v))\n self.weights.append(weight)\n self.ub += weight\n # domain of the relaxation variable\n self.solver.add(Or(v == 0, v == 1))\n # constraint for at least once\n self.solver.add(Or(self.var_occurs[production.id] == 0, v == 1))\n # relation between relaxation variables and constraint\n self.solver.add(\n Implies(v == 1, self.var_occurs[production.id] != 0))\n self.solver.add(\n Implies(self.var_occurs[production.id] == 0, v == 0))\n self.id = self.id + 1\n else:\n self.solver.add(self.var_occurs[production.id] == 0)\n\n # FIXME: dissociate the creation of variables with the creation of constraints?\n def mk_occurs(self, production, weight=None):\n '''a production will occur with a given probability'''\n if len(self.var_occurs) == 0:\n self.createVariablesOccurrence()\n\n if weight != 100:\n name = 'relax' + str(self.id)\n v = Int(name)\n self.cost_relax_vars[v] = weight\n 
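# bookkeeping: computeCost() sums these weights over violated soft constraints\n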
            self.relax_vars.append(v)\n            self.objective.append(Product(weight, v))\n            self.weights.append(weight)\n            self.ub += weight\n            # domain of the relaxation variable\n            self.solver.add(Or(v == 0, v == 1))\n            # constraint for at least once\n            self.solver.add(Or(self.var_occurs[production.id] == 1, v == 1))\n            # relation between relaxation variables and constraint\n            self.solver.add(\n                Implies(v == 1, self.var_occurs[production.id] != 1))\n            self.solver.add(\n                Implies(self.var_occurs[production.id] == 1, v == 0))\n            self.id = self.id + 1\n        else:\n            self.solver.add(self.var_occurs[production.id] == 1)\n\n    def isSubsetSum(self, set, n, sum):\n        subset = ([[False for i in range(sum + 1)]\n                   for i in range(n + 1)])\n\n        # If sum is 0, then answer is true\n        for i in range(n + 1):\n            subset[i][0] = True\n\n        # If sum is not 0 and set is empty,\n        # then answer is false\n        for i in range(1, sum + 1):\n            subset[0][i] = False\n\n        # Fill the subset table in bottom up manner\n        for i in range(1, n + 1):\n            for j in range(1, sum + 1):\n                if j < set[i - 1]:\n                    subset[i][j] = subset[i - 1][j]\n                if j >= set[i - 1]:\n                    subset[i][j] = (subset[i - 1][j]\n                                    or subset[i - 1][j - set[i - 1]])\n\n        return subset[n][sum]\n\n    def optimize(self, solver):\n        model = None\n        cost = 0\n        res = sat\n        nb_sat = 0\n        nb_unsat = 0\n        # no optimization is defined\n        if len(self.objective) == 0:\n            res = solver.check()\n            if res == sat:\n                model = solver.model()\n        # optimization using the LSU algorithm\n        else:\n            solver.set(unsat_core=True)\n            solver.push()\n            ctr = Sum(self.objective) <= self.bound\n            solver.assert_and_track(ctr, 'obj')\n\n            while model == None and res == sat:\n                res = solver.check()\n                if res == sat:\n                    nb_sat += 1\n                    model = solver.model()\n                    cost = self.computeCost(model)\n                    assert (cost == self.bound)\n                    solver.pop()\n                else:\n                    nb_unsat += 1\n                    solver.pop()\n                    core = solver.unsat_core()\n                    if len(core) != 0:\n                        self.bound += 1\n                        while(not self.isSubsetSum(self.weights, len(self.weights), self.bound) and self.bound <= self.ub):\n                            self.bound += 1\n                        solver.push()\n                        ctr = Sum(self.objective) <= self.bound\n                        solver.assert_and_track(ctr, 'obj')\n                        res = sat\n\n        assert(solver.num_scopes() == 0)\n        self.bound = cost\n        return model\n\n    def computeCost(self, model):\n        cost = 0\n        for v in self.relax_vars:\n            if model[v] == 1:\n                cost = cost + self.cost_relax_vars[v]\n\n        return cost\n","repo_name":"squares-sql/SQUARES","sub_path":"tyrell/enumerator/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":12539,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"78"} +
{"seq_id":"71875515772","text":"from fpdf import FPDF\nimport resource\n\nMARGIN = 15\n\n\n
class PDF(FPDF):\n    def footer(self):\n        # Position at 1.5 cm from bottom\n        self.set_y(-15)\n        # Arial italic 8\n        self.set_font(\"Arial\", \"I\", 8)\n        # Page number\n        self.cell(0, 10, \"Page \" + str(self.page_no()) + \"/{nb}\", 0, 0, \"C\")\n\n\n
def fill_fields(text, fields):\n    for field, value in fields.items():\n        field = \"[{}]\".format(field)\n        if text.find(field) != -1:\n            # print(\"Found {}\".format(field))\n            text = text.replace(field, value)\n    return text\n\n\n# Instantiation of inherited class\n\nappointment = fill_fields(resource.appointment, resource.testfields)\nform = fill_fields(resource.form, resource.testfields)\nsupplement = fill_fields(resource.supplement, resource.testfields)\nmultiple_agent = fill_fields(resource.multiple_agent, resource.testfields)\nincapacity = 
fill_fields(resource.incapacity, resource.testfields)\ndurable_strike = fill_fields(resource.durable_strike, resource.testfields)\nsignature_block = fill_fields(resource.signature_block, resource.testfields)\nnotary_ack = fill_fields(resource.notary_ack, resource.testfields)\n\n\npdf = PDF(orientation=\"P\", format=\"letter\")\npdf.alias_nb_pages()\npdf.set_margins(MARGIN, MARGIN, MARGIN)\npdf.add_page()\npdf.set_font(\"Times\", \"\", 14)\npdf.multi_cell(\n 0,\n 6,\n \"California Uniform Statutory Form Power of Attorney\\n(California Probate Code §4401)\",\n border=0,\n align=\"C\",\n)\npdf.set_font(\"Times\", \"\", resource.SMALL)\npdf.write(4, resource.preamble)\npdf.set_font(\"Times\", \"\", resource.NORMAL)\npdf.write(5, appointment)\npdf.set_font(\"Times\", \"\", resource.SMALL)\npdf.write(4, resource.preamble2)\npdf.set_font(\"Times\", \"\", resource.NORMAL)\npdf.write(5, form)\npdf.write(4, supplement)\npdf.add_page()\npdf.write(5, incapacity)\npdf.write(4, durable_strike)\npdf.write(5, multiple_agent)\npdf.write(5, signature_block)\npdf.set_font(\"Times\", \"\", resource.SMALL)\npdf.multi_cell(0, 4, resource.notary_box, border=1, align=\"L\")\npdf.write(4, notary_ack)\npdf.output(\"poa_test.pdf\", \"F\")\n","repo_name":"gbroiles/ca_poa","sub_path":"poa.py","file_name":"poa.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42176062814","text":"class Container:\r\n def __init__(self):\r\n self._size = 0\r\n\r\n def __len__(self):\r\n return self._size\r\n\r\n # abstract method\r\n def append(self, val):\r\n raise NotImplementedError\r\n\r\n # abstract method\r\n def pop(self):\r\n raise NotImplementedError\r\n\r\n\r\nclass Node:\r\n def __init__(self, val=None, pre=None, nxt=None):\r\n self.val = val\r\n self.pre = pre\r\n self.next = nxt\r\n\r\n def __str__(self):\r\n return str(self.val)\r\n\r\n\r\nclass Queue(Container):\r\n \"\"\"Just a Double Linked List with the dummy head and tail\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n head<-->tail\r\n \"\"\"\r\n\r\n super().__init__()\r\n self.__head = Node() # dummy head\r\n self.__tail = Node(pre=self.__head) # dummy tail\r\n self.__head.next = self.__tail\r\n\r\n def __iter__(self):\r\n node = self.__head.next\r\n while node != self.__tail:\r\n yield node\r\n node = node.next\r\n\r\n def __str__(self):\r\n res = ', '.join(map(str, self))\r\n return f\"[{res}]\"\r\n\r\n def append(self, item):\r\n \"\"\"\r\n insert the new element to the tail\r\n \"\"\"\r\n # elegant way to insert a node\r\n node = Node(item, pre=self.__tail.pre, nxt=self.__tail)\r\n node.next.pre = node\r\n node.pre.next = node\r\n self._size += 1\r\n\r\n def pop(self):\r\n \"\"\"\r\n delete and return the first element of the queue\r\n \"\"\"\r\n assert self, 'queue is empty'\r\n node = self.__head.next\r\n res = node.val\r\n\r\n # elegant way to delete a node\r\n node.pre.next = node.next\r\n node.next.pre = node.pre\r\n del node\r\n self._size -= 1\r\n\r\n return res\r\n\r\n\r\nclass Heap(Container):\r\n \"\"\"\r\n Min Heap, a complete binary tree where the key at root\r\n is always the minimum among all keys present in Binary Heap,\r\n it supports pop_minimum operations in O(log n) time complexity\r\n \"\"\"\r\n\r\n def __init__(self, cmp=None):\r\n \"\"\"\r\n :param cmp: a comparator(function) that the heap ordered by\r\n\r\n \"\"\"\r\n super().__init__()\r\n cmp = cmp or (lambda x: x)\r\n self._comparator = cmp\r\n self._list = list() # store the items 
of the heap\r\n\r\n def __str__(self):\r\n return str(self._list)\r\n\r\n def append(self, item):\r\n self._list.append(item)\r\n self._size += 1\r\n self._heapifyUp()\r\n\r\n def pop(self):\r\n assert self, 'heap is empty'\r\n self._list[0], self._list[-1] = self._list[-1], self._list[0]\r\n res = self._list.pop()\r\n self._size -= 1\r\n self._heapifyDown()\r\n return res\r\n\r\n def _isvalid(self, index):\r\n return 0 <= index < self._size\r\n\r\n def _parent(self, index):\r\n parent = (index - 1) // 2\r\n return parent\r\n\r\n def _child(self, index):\r\n left_child = 2 * index + 1\r\n if not self._isvalid(left_child):\r\n return -1\r\n\r\n right_child = left_child + 1\r\n\r\n # return the smallest child\r\n if self._isvalid(right_child) and \\\r\n self._comparator(self._list[right_child]) < \\\r\n self._comparator(self._list[left_child]):\r\n return right_child\r\n\r\n return left_child\r\n\r\n def _heapifyUp(self):\r\n \"\"\"\r\n Adjust the heap from bottom to up\r\n \"\"\"\r\n child = self._size - 1\r\n parent = self._parent(child)\r\n # Keep adjusting until the parent is smaller than the child\r\n while self._isvalid(parent):\r\n # the stop condition is kept in a separate if for readability\r\n if self._comparator(self._list[parent]) < self._comparator(self._list[child]):\r\n break\r\n self._list[child], self._list[parent] = self._list[parent], self._list[child]\r\n child = parent\r\n parent = self._parent(child)\r\n\r\n def _heapifyDown(self):\r\n \"\"\"\r\n Adjust the heap from up to bottom\r\n \"\"\"\r\n if len(self) == 0:\r\n return\r\n\r\n parent = 0\r\n child = self._child(parent)\r\n # Keep adjusting until the parent is smaller than the child\r\n while self._isvalid(child):\r\n # the stop condition is kept in a separate if for readability\r\n if self._comparator(self._list[parent]) < self._comparator(self._list[child]):\r\n break\r\n self._list[child], self._list[parent] = self._list[parent], self._list[child]\r\n parent = child\r\n child = self._child(parent)\r\n\r\n\r\nclass DistinctHeap(Heap):\r\n \"\"\"\r\n Distinct Min Heap, where every item in the heap is unique,\r\n implemented by a list combined with a dict,\r\n use the list to store the items and maintain the priority-queue ordering,\r\n use the dict to map each item to its index in the list,\r\n the dict makes sure the items are unique and can be found in constant time\r\n \"\"\"\r\n\r\n def __init__(self, key=None, cmp=None):\r\n \"\"\"\r\n :param key: a function that returns one element as the key of dict,\r\n if two different items have the same key, the smallest one is preserved,\r\n if not given, use the whole item as the key(that item must be hashable)\r\n\r\n :param cmp: a comparator(function) that the heap is ordered by, which will return a\r\n numerical value as the measure of that item\r\n \"\"\"\r\n super().__init__(cmp)\r\n key = key or (lambda x: x)\r\n self.__key = key\r\n self.__dict = dict() # store the ({key of the item}, {index of the item}) pairs\r\n\r\n def _heapifyUp(self, index=None):\r\n \"\"\"\r\n\r\n :param index: heapifyUp the particular item that the index point to,\r\n default is None(begin from the last item)\r\n :return: None\r\n \"\"\"\r\n child = index if index is not None else self._size - 1 # `index or ...` would misfire when index == 0\r\n parent = self._parent(child)\r\n\r\n while self._isvalid(parent):\r\n if self._comparator(self._list[parent]) < self._comparator(self._list[child]):\r\n break\r\n # adjust the list and dict\r\n self._list[child], self._list[parent] = self._list[parent], self._list[child]\r\n 
self.__dict[self.__key(self._list[child])] = child\r\n self.__dict[self.__key(self._list[parent])] = parent\r\n\r\n child = parent\r\n parent = self._parent(child)\r\n\r\n def _heapifyDown(self):\r\n if len(self) == 0:\r\n return\r\n\r\n parent = 0\r\n child = self._child(parent)\r\n\r\n while self._isvalid(child):\r\n if self._comparator(self._list[parent]) < self._comparator(self._list[child]):\r\n break\r\n # adjust the list and dict\r\n self._list[child], self._list[parent] = self._list[parent], self._list[child]\r\n self.__dict[self.__key(self._list[child])] = child\r\n self.__dict[self.__key(self._list[parent])] = parent\r\n\r\n parent = child\r\n child = self._child(parent)\r\n\r\n def append(self, item):\r\n key = self.__key(item)\r\n if key in self.__dict:\r\n index = self.__dict[key]\r\n new_val = self._comparator(item)\r\n old_val = self._comparator(self._list[index])\r\n if new_val > old_val: # if new item is greater than the old, just ignore it\r\n return\r\n\r\n # update the list and dict, then heapifyUp\r\n self._list[index] = item\r\n self.__dict[key] = index\r\n self._heapifyUp(index)\r\n else:\r\n self._list.append(item)\r\n self.__dict[key] = self._size\r\n self._size += 1\r\n self._heapifyUp()\r\n\r\n def pop(self):\r\n assert self, 'heap is empty'\r\n self._list[0], self._list[-1] = self._list[-1], self._list[0]\r\n self.__dict[self.__key(self._list[0])] = 0\r\n\r\n res = self._list.pop()\r\n self.__dict.pop(self.__key(res))\r\n\r\n self._size -= 1\r\n self._heapifyDown()\r\n return res\r\n\r\n\r\nif __name__ == '__main__':\r\n import random\r\n\r\n def test_queue():\r\n lst = [random.randint(0, 100) for _ in range(100)]\r\n queue = Queue()\r\n for val in lst:\r\n queue.append(val)\r\n assert [queue.pop() for _ in range(len(queue))] == lst\r\n print(\"queue pass test\")\r\n\r\n\r\n def test_heap():\r\n heap = Heap()\r\n distinct_heap = DistinctHeap()\r\n # do 1000 times heap sort\r\n for i in range(1000):\r\n li = [random.randint(0, 100) for _ in range(1000)]\r\n for item in li:\r\n heap.append(item)\r\n distinct_heap.append(item)\r\n sorted_1 = [heap.pop() for _ in range(len(heap))]\r\n sorted_2 = [distinct_heap.pop() for _ in range(len(distinct_heap))]\r\n assert sorted_1 == sorted(li)\r\n assert sorted_2 == sorted(set(li))\r\n print(\"heap pass test\")\r\n\r\n\r\n test_queue()\r\n test_heap()\r\n","repo_name":"Robin-ZMH/Search-Algorithms","sub_path":"utils/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":9013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8591181541","text":"import numpy as np\n\ndef any_normal(*vectors):\n # Check that at least one vector was passed\n if len(vectors) == 0:\n raise ValueError(\"No vectors were passed\")\n\n # Convert the input vectors to NumPy arrays\n vectors = [np.array(vector) for vector in vectors]\n\n # Check that all vectors have the same length\n if not all(len(vector) == len(vectors[0]) for vector in vectors):\n raise ValueError(\"All vectors must have the same length\")\n\n # Check every pair of vectors for perpendicularity\n for i in range(len(vectors)):\n for j in range(i+1, len(vectors)):\n dot_product = np.dot(vectors[i], vectors[j])\n if dot_product == 0:\n return True\n\n return False\n\n\"\"\"\nWrite a function any_normal that takes an unlimited number of vectors as comma-separated arguments.\nAll vectors that are passed in are guaranteed to have the same length.\n\nThe function returns True if there is at least one 
pair of perpendicular vectors. Otherwise it returns False.\n\nExample\n\nvec1 = np.array([2, 1])\nvec2 = np.array([-1, 2])\nvec3 = np.array([3,4])\nprint(any_normal(vec1, vec2, vec3))\n# True\"\"\"","repo_name":"retro-nihilist/first_for_sf","sub_path":"10_14_10_9.py","file_name":"10_14_10_9.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43251000204","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.fc1 = nn.Linear(20*20*20, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = x.view(-1, 20*20*20)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef train(model, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n optimizer.zero_grad()\n data, target = data.cuda(), target.cuda()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward(torch.ones_like(loss), create_graph=True)\n grad_all = Variable(torch.zeros(1), requires_grad=True).cuda()\n for p in model.parameters():\n if p.grad is not None:\n grad_all = grad_all + (p.grad ** 2).sum()\n grad_all.backward()\n optimizer.step()\n \n if batch_idx % 1 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.item()))\n\n\ndef main():\n epochs = 10\n batch_size = 1000\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True)\n model = CNN().cuda()\n optimizer = optim.Adam(model.parameters())\n for epoch in range(1, epochs):\n train(model, train_loader, optimizer, epoch)\n grad_norm = get_gradient(model)\n print('grad_norm', grad_norm)\n\n\ndef get_gradient(model):\n grad_all = 0\n for p in model.parameters():\n if p.grad is not None:\n grad = 0.0\n grad = (p.grad ** 2).sum()\n grad_all += grad\n return grad_all\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"rlalpha/MLDS-107","sub_path":"hw1-2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"32673737346","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\"\"\"\nAuthor:\n\nBy David Gauthier\n\nDivision des systemes d'information et du pilotage\nDirection des inventaires forestiers\nMinistere des Forets, de la Faune et des Parcs\nPhone: (418)-627-8669 ext. 4322\nToll free: 1-877-936-7397 ext. 4322\nFax: (418)-646-1955\nEmail: david.gauthier@mffp.gouv.qc.ca\n\n\"\"\"\n# History:\n# 2020-03-20 - Script created\n\n\nfrom qgis.core import *\nimport processing\nfrom processing.core.Processing import Processing\nfrom qgis.analysis import QgsNativeAlgorithms\nfrom PyQt5.QtCore import QVariant\nimport sys\nimport os\nimport subprocess\nimport geopandas as gpd\nimport shapely\nfrom shapely.geometry import Polygon\n\n# Path where QGIS is installed\nQgsApplication.setPrefixPath(r\"C:\\MrnMicro\\OSGeo4W64\\bin\", True)\n\n# Create a reference to QgsApplication,\n# set the second argument to False to disable the QGIS GUI\nqgs = QgsApplication([], False)\n\n# Initialize QGIS\nqgs.initQgis()\n\n# Initialize the QGIS processing tools\nProcessing.initialize()\n\n# sys.path.append(r'C:\\MrnMicro\\Applic\\OSGeo4W64\\apps\\qgis-ltr\\python\\plugins\\processing\\algs\\gdal')\n\n# Allows using the \"native\" algorithms written in C++\n# https://gis.stackexchange.com/questions/279874/using-qgis3-processing-algorithms-from-standalone-pyqgis-scripts-outside-of-gui\nQgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())\n\ndef copyCeToGpkg(ce, gpkg, nomCouche):\n\n # Build a layer from the ce string\n couche = QgsVectorLayer(ce, \"lyr\", 'ogr')\n\n # Save options\n options = QgsVectorFileWriter.SaveVectorOptions()\n\n # Strip the extension\n gpkg = gpkg.replace(\".gpkg\", \"\")\n\n options.driverName = 'GPKG'\n options.layerName = nomCouche\n\n # Transfer the feature class from the gdb to the GeoPackage\n QgsVectorFileWriter.writeAsVectorFormat(couche, gpkg, options)\n\n sortie = gpkg + \".gpkg\" + '|' + 'layername=' + nomCouche\n\n return sortie\n\n\ndef dissolvedGeopandasGPKG(gpkg, namefc, nameoutfc, fields = None):\n\n \"\"\"\n Dissolve features with GeoPandas inside a GeoPackage.\n GeoPandas does not yet handle Null values during the dissolve. 
That is why the\n Null values are filled with the string \"null\"\n\n Args:\n gpkg : geopackage\n namefc : name of the feature class\n nameoutfc : name of the output feature class\n fields : field\n\n Example call:\n\n gpkg = r\"C:\\MrnMicro\\temp\\test_dissolve_fiona\\HISTO_MAJF_2018.gpkg\"\n ce ='HISTO_MAJF_2018_Repair'\n out = 'HISTO_MAJF_2018_diss'\n fields = 'CO_TER'\n\n dissolvedGeopandasGPKG(gpkg, ce, out, fields)\n\n \"\"\"\n\n if fields is None:\n layer = gpd.read_file(gpkg, layer=namefc)\n gdfu = layer.unary_union\n new = gpd.GeoDataFrame(crs=layer.crs, geometry=[gdfu])\n new.to_file(gpkg, layer=nameoutfc, driver=\"GPKG\")\n\n else:\n layer = gpd.read_file(gpkg, layer=namefc)\n filled = layer.fillna('null')\n dissolved = filled.dissolve(by=fields, aggfunc = 'last', as_index=False)\n dissolved.to_file(gpkg, layer=nameoutfc, driver=\"GPKG\")\n\n\n\nif __name__ == '__main__':\n\n\n\n gpkg = os.path.join(R\"C:\\MrnMicro\\temp\",\"SONAR2_Intrants.gpkg\")\n us = gpd.read_file(gpkg, layer='us')\n print('fill')\n filled_shape = Polygon(us.exterior)\n print('tetet')\n new = gpd.GeoDataFrame(crs=us.crs, geometry=[filled_shape])\n print('tetghhhhet')\n new.to_file(gpkg, layer='us_filled', driver=\"GPKG\")\n\n\n\n\n # # copy the us layer into the geopackage\n # # Copy the stands into the GPKG\n # us_gpkg = 'us'\n # path_us_gpkg = copyCeToGpkg(path_us, gpkg, us_gpkg)\n #\n # print(\"dissolve\")\n # # dissolve the US layer\n # us_gpkg_dissolve = 'us_gpkg_dissolve'\n # dissolvedGeopandasGPKG(gpkg, us_gpkg, us_gpkg_dissolve, fields = None)\n\n\n\n\n# Write out the algorithms we want to call.\n\n# ce = 'C:/MrnMicro/temp/Racc_dif.shp'\n# output = 'C:/MrnMicro/temp/test.shp'\n# param = {'INPUT':input,'ROUND_TO':0,'OUTPUT':output}\n\n# # build a layer from ce\n# if isinstance(ce, str):\n# layer = QgsVectorLayer(ce, 'lyr', 'ogr')\n# else:\n# layer = ce\n\n# add a SUP field\n# https://qgis.org/pyqgis/3.2/core/Field/QgsField.html\n# https://gis.stackexchange.com/questions/174971/how-to-define-the-number-of-decimals-when-adding-a-new-field-as-double-to-an-att\n\n# champ = QgsField('test', QVariant.Double,'double',100,2 )\n# layer.dataProvider().addAttributes([champ])\n# layer.updateFields()\n#\n# feat = layer.getFeatures()\n# layer_provider = layer.dataProvider()\n#\n# # compute the area in square metres\n# for features in feat:\n# id = features.id()\n# # find the field index\n# fields = layer.fields()\n# indexChamp = fields.indexFromName('test') # field index\n#\n# sup =str(round(features.geometry().area(),2))\n# attr_value = {indexChamp: sup} # compute area\n# layer_provider.changeAttributeValues({id: attr_value})\n#\n# layer.commitChanges()\n\n\n# outputs the extent of a layer\n# processing.run(\"qgis:polygonfromlayerextent\", param)\n\n# ce='C:/MrnMicro/temp/Racc_dif.shp'\n# nomJeuClasseEntite=\"TOPO\"\n# nomClasseEntite=\"test\"\n# outGDB='C:/MrnMicro/temp/sdfss.gdb'\n#\n# processing.run(\"gdal:convertformat\", {'INPUT':ce,\n# 'OPTIONS':'-lco FEATURE_DATASET={0} -lco XYTOLERANCE=0.02 -nln {1}'.format(nomJeuClasseEntite, nomClasseEntite),\n# 'OUTPUT':outGDB})\n\n# processing.run(\"gdal:convertformat\", {'INPUT':ce,'OPTIONS':'','OUTPUT':outGDB})\n\n# processing.run(\"gdal:convertformat\", {'INPUT':'Q:/Dtxp_Carto/Trm_pre/2020/07/Perm5pre.shp','OPTIONS':'','OUTPUT':'C:/MrnMicro/temp/test.gdb'})\n\n\n# CREATE_NO_WINDOW = 0x08000000\n# cmd = r\"\"\"ogr2ogr -f \"FileGDB\" {3} {0} -t_srs EPSG:32198 -lco FEATURE_DATASET={1} -lco XYTOLERANCE=0.02 -nln 
{2}\"\"\".format(ce,nomJeuClasseEntite,nomClasseEntite,outGDB)\n# subprocess.call(cmd, creationflags=CREATE_NO_WINDOW)\n#\n\n\n# input = 'E:/Temp/geotraitement_QGIS/sub.shp'\n# output = 'E:/Temp/geotraitement_QGIS/sommet.shp'\n# param = {'INPUT':input,'OUTPUT':output}\n#\n# # extracts the vertices of a layer\n# processing.run(\"native:extractvertices\", param)\n\n\n# cmd = r\"\"\"ogr2ogr -f \"FileGDB\" C:\\MrnMicro\\temp\\coversion_GDAL\\EcoForS5_Ori_Prov.gdb C:\\MrnMicro\\temp\\coversion_GDAL\\EcoForS5_Ori_Prov.gpkg EcoForS5_ORI_PROV -lco FEATURE_DATASET=TOPO -lco XYTOLERANCE=0.02 -nln allo\"\"\"\n# subprocess.call(cmd, shell=False)\n#\n# CREATE_NO_WINDOW = 0x08000000\n# cmd = r\"\"\"ogr2ogr -f \"FileGDB\" C:\\MrnMicro\\temp\\coversion_GDAL\\EcoForS5_Ori_Prov.gdb C:\\MrnMicro\\temp\\coversion_GDAL\\EcoForS5_Ori_Prov.gpkg EcoForS5_ORI_PROV -lco FEATURE_DATASET=TOPO -lco XYTOLERANCE=0.02 -nln EcoForS5_ORI_PROV\"\"\"\n# subprocess.call(cmd, creationflags=CREATE_NO_WINDOW)\n\n# os.system(cmd)\n\n# Close QGIS and free the memory....\nqgs.exitQgis()","repo_name":"josee666/QGIS_DEV","sub_path":"Developpement_DavidG/Procedure_ScriptStandAlone/QGIS.py","file_name":"QGIS.py","file_ext":"py","file_size_in_byte":6943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72883142972","text":"\"\"\"\nFileName: 113.py\nAuthor: Fatpandac\nCreate: 2021/11/20\nDescription: Replace the underscores with code.\n\nfrom openpyxl import load_workbook\n\ndef main():\n data = []\n wb = load_workbook('data113.xlsx', _______)\n ws = wb.worksheets[0]\n for index, row in enumerate(ws.rows, start=1):\n if index == 1:\n _______\n data.append(_______)\n return data\n\nprint(main())\n\"\"\"\n\nfrom openpyxl import load_workbook\n\ndef main():\n data = []\n wb = load_workbook('data113.xlsx', data_only=True)\n ws = wb.worksheets[0]\n for index, row in enumerate(ws.rows, start=1):\n if index == 1:\n continue\n data.append(row[3].value)\n return data\n\nprint(main())\n","repo_name":"Fatpandac/Homework","sub_path":"Python/Python_bit_house/113.py","file_name":"113.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"20105352571","text":"from __future__ import annotations\n\nimport logging\nimport types\nfrom dataclasses import Field as DataClassField\nfrom dataclasses import fields\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n List,\n Mapping,\n NamedTuple,\n Optional,\n Tuple,\n Type,\n Union,\n get_args,\n get_origin,\n get_type_hints,\n)\n\nif TYPE_CHECKING:\n from pyoak.legacy.node import AwareASTNode as ASTNode\n\nlogger = logging.getLogger(__name__)\n\n# to make mypy happy\nField = DataClassField[Any]\n\n\ndef is_union(type_: Any) -> bool:\n orig = get_origin(type_)\n if orig is Union:\n return True\n try:\n return isinstance(type_, types.UnionType)\n except TypeError:\n return False\n\n\ndef is_list(type_: Any) -> bool:\n orig = get_origin(type_)\n if orig is list or orig is List:\n return True\n try:\n return issubclass(type_, (List, list))\n except TypeError:\n return False\n\n\ndef is_tuple(type_: Any) -> bool:\n orig = get_origin(type_)\n if orig is tuple or orig is Tuple:\n return True\n try:\n return issubclass(type_, (Tuple, tuple)) # type: ignore[arg-type]\n except TypeError:\n return False\n\n\ndef is_optional(type_: Any) -> bool:\n orig = get_origin(type_)\n if orig is Optional:\n return True\n\n if not is_union(type_):\n 
return False\n\n args = get_args(type_)\n return any(a is type(None) for a in args)\n\n\ndef is_sequence(type_: Any) -> bool:\n \"\"\"Return True if the type is a supported sequence (list or tuple).\"\"\"\n return is_list(type_) or is_tuple(type_)\n\n\ndef has_node_in_type(type_: Any) -> bool:\n \"\"\"Return True if the type include any subclasses of ASTNode.\"\"\"\n\n # Dynamically import to avoid circular imports\n from pyoak.legacy.node import AwareASTNode as ASTNode\n\n try:\n if issubclass(type_, ASTNode):\n return True\n except TypeError:\n pass\n\n return any(has_node_in_type(t) for t in get_args(type_))\n\n\ndef is_child_node(type_: Any, allow_sequence: bool = True, strict: bool = False) -> bool:\n \"\"\"Return True if the type is a child node.\"\"\"\n\n # Dynamically import to avoid circular imports\n from pyoak.legacy.node import AwareASTNode as ASTNode\n\n arg_check_func = any if not strict else all\n\n if is_optional(type_):\n orig = get_origin(type_)\n args = get_args(type_)\n\n if orig is Optional:\n return arg_check_func(is_child_node(t) for t in args)\n else:\n return arg_check_func(is_child_node(t) for t in args if t is not type(None))\n\n if is_union(type_):\n return arg_check_func(is_child_node(t) for t in get_args(type_))\n\n if allow_sequence and is_list(type_):\n args = get_args(type_)\n if len(args) == 0:\n return False\n\n return arg_check_func(is_child_node(t, allow_sequence=False) for t in args)\n\n if allow_sequence and is_tuple(type_):\n args = get_args(type_)\n if len(args) == 0:\n return False\n\n if len(args) == 2 and args[1] is Ellipsis:\n return is_child_node(args[0], allow_sequence=False)\n else:\n return arg_check_func(is_child_node(t, allow_sequence=False) for t in args)\n\n try:\n return issubclass(type_, ASTNode)\n except TypeError:\n return False\n\n\nclass ChildFieldTypeInfo(NamedTuple):\n is_optional: bool\n sequence_type: Type[list[Any]] | Type[tuple[Any, ...]] | None\n types: tuple[Type[ASTNode], ...]\n\n\ndef get_node_type_info(type_: Any, allow_sequence: bool = True) -> ChildFieldTypeInfo:\n \"\"\"Get a tuple of ASTNode subclasses from a type annotation.\n\n For lists and tuples, the types of the elements are returned.\n \"\"\"\n\n # Dynamically import to avoid circular imports\n from pyoak.legacy.node import AwareASTNode as ASTNode\n\n args = get_args(type_)\n if is_optional(type_):\n return ChildFieldTypeInfo(True, None, tuple(t for t in args if issubclass(t, ASTNode)))\n\n if is_union(type_):\n return ChildFieldTypeInfo(False, None, tuple(t for t in args if issubclass(t, ASTNode)))\n\n if is_list(type_) and allow_sequence:\n if len(args) == 0:\n return ChildFieldTypeInfo(False, None, ())\n\n return ChildFieldTypeInfo(\n False,\n list,\n tuple(\n chain.from_iterable(get_node_type_info(t, allow_sequence=False).types for t in args)\n ),\n )\n\n if is_tuple(type_) and allow_sequence:\n if len(args) == 0:\n return ChildFieldTypeInfo(False, None, ())\n\n if len(args) == 2 and args[1] is Ellipsis:\n return ChildFieldTypeInfo(\n False, tuple, get_node_type_info(args[0], allow_sequence=False).types\n )\n else:\n return ChildFieldTypeInfo(\n False,\n tuple,\n tuple(\n chain.from_iterable(\n get_node_type_info(t, allow_sequence=False).types for t in args\n )\n ),\n )\n\n try:\n if issubclass(type_, ASTNode):\n return ChildFieldTypeInfo(False, None, (type_,))\n else:\n return ChildFieldTypeInfo(False, None, ())\n except TypeError:\n return ChildFieldTypeInfo(False, None, ())\n\n\ndef get_field_types(type_: Type[ASTNode]) -> dict[str, Any]:\n \"\"\"Return 
the resolved type of every dataclass field, keyed by field name.\"\"\"\n ret: dict[str, Any] = {}\n\n for field in fields(type_):\n f_type = field.type\n if isinstance(f_type, str):\n try:\n f_type = get_type_hints(type_).get(field.name)\n except NameError:\n logger.warning(\n f\"Could not determine type of field {field.name} for type {type_}. \"\n \"This is probably due to a forward reference. \"\n \"Please, use type annotations instead of strings.\"\n )\n f_type = None\n\n if f_type is None:\n raise RuntimeError(f\"Could not determine type of field {field.name} for type {type_}\")\n\n ret[field.name] = f_type\n\n return ret\n\n\ndef get_ast_node_child_fields(\n type_: Type[ASTNode],\n) -> Mapping[Field, ChildFieldTypeInfo]:\n \"\"\"Return the child fields of an AST node.\"\"\"\n all_fields = {f.name: f for f in fields(type_)}\n return {\n all_fields[fname]: get_node_type_info(type_)\n for fname, type_ in get_field_types(type_).items()\n if is_child_node(type_)\n }\n\n\ndef get_ast_node_properties(type_: Type[ASTNode]) -> Mapping[Field, Type[Any]]:\n \"\"\"Return the property fields of an AST node.\"\"\"\n all_fields = {f.name: f for f in fields(type_)}\n return {\n all_fields[fname]: type_\n for fname, type_ in get_field_types(type_).items()\n if not is_child_node(type_)\n }\n","repo_name":"mishamsk/pyoak","sub_path":"src/pyoak/legacy/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"78"} +{"seq_id":"72016078653","text":"from random import random\n\n\nclass BitArray:\n '''\n Class to represent a bit sequence\n '''\n\n def __init__(self, bit_array: list):\n '''\n Initialize the bit sequence\n :param bit_array: list of boolean (bits)\n '''\n self.__bit_array__ = []\n for b in bit_array:\n self.__bit_array__.append(bool(b))\n self.__length__ = len(bit_array)\n self.__value__ = self.__value__()\n\n def __value__(self) -> int:\n '''\n Compute decimal value of the sequence\n :return: decimal value\n '''\n v = 0\n for i in range(0, len(self.__bit_array__)):\n v += int(self.__bit_array__[i]) * (2 ** (self.__length__ - 1 - i))\n return v\n\n def bits(self) -> list:\n '''\n Get the binary sequence\n :return: list of bits\n '''\n return self.__bit_array__\n\n def value(self) -> int:\n '''\n Get the decimal value of the sequence\n :return: integer value\n '''\n return self.__value__\n\n def size(self) -> int:\n '''\n Get the number of bits in the sequence\n :return: the number of bits in the sequence\n '''\n return self.__length__\n\n def neighbors(self) -> list:\n '''\n Compute the neighbors of the sequence (all the sequences with Hamming distance of 1)\n :return: list of neighbors (BitArray)\n '''\n neighbors = []\n for i in range(0, self.__length__):\n neighbor = self.__bit_array__[:]\n neighbor[i] = not neighbor[i]\n neighbors.append(BitArray(neighbor))\n return neighbors\n\n @staticmethod\n def from_string(bits: str):\n '''\n Generate a BitArray from a string\n :param bits: string of bit (010001)\n :return: the BitArray\n '''\n arr = []\n for i in range(0, len(bits)):\n arr.append(bits[i] == '1')\n return BitArray(arr)\n\n @staticmethod\n def from_number(num: int):\n '''\n Generate a BitArray from a number\n :param num: integer number\n :return: BitArray\n '''\n return BitArray.from_string(bin(num)[2:])\n\n def __repr__(self):\n s = '0b'\n mod = 4 - self.__length__ % 4\n if mod == 4:\n mod = 0\n for i in range(0, mod):\n s += 'x'\n for i in range(0, self.__length__):\n if (i + mod) % 4 == 0 and mod + i > 
0:\n s += ' '\n s += str(int(self.__bit_array__[i]))\n return s\n\n def __int__(self):\n return self.__value__\n\n def __float__(self):\n return float(self.__value__)\n\n def __getitem__(self, index):\n if -1 < index < self.__length__:\n return self.__bit_array__[index]\n return -1\n\n def __lt__(self, other):\n return self.__value__ < other\n\n def __le__(self, other):\n return self.__value__ <= other\n\n def __eq__(self, other):\n return self.__value__ == other\n\n def __ne__(self, other):\n return self.__value__ != other\n\n def __gt__(self, other):\n return self.__value__ > other\n\n def __ge__(self, other):\n return self.__value__ >= other\n\n def __sub__(self, other):\n '''\n Compute the Hamming distance between 2 BitArrays\n :param other: BitArray\n :return: Hamming distance (integer)\n '''\n if type(other) is BitArray:\n l1 = self.__length__\n l2 = other.size()\n i1 = 0 if l1 < l2 else l1 - l2\n i2 = 0 if l2 < l1 else l2 - l1\n dif = abs(l1 - l2)\n for i in range(0, l1 if l1 < l2 else l2):\n dif += 1 * (int(self[i + i1] ^ other[i + i2]))\n return dif\n return -1\n\n\ndef get_rand_bit_array(N: int) -> BitArray:\n '''\n Generate a random sequence of N bits\n :param N: number of bits\n :return: a random BitArray\n '''\n bit_array = []\n for i in range(0, N):\n bit_array.append(1 if random() > 0.5 else 0)\n return BitArray(bit_array)\n\n\nclass NKFitness:\n '''\n Class to manage and compute an NK Landscape fitness\n '''\n\n def __init__(self, K: int, local_fitness: dict):\n '''\n Initialize the object, with the K of the landscape and the local fitness\n :param K: K of the landscape\n :param local_fitness: A map with the values of the local fitness\n '''\n self.__K__ = K\n self.__local_fitness__ = local_fitness\n\n def fitness(self, x: BitArray) -> int:\n '''\n Compute the fitness of a BitArray\n :param x: sequence\n :return: fitness value (integer)\n '''\n if x.size() == self.__K__ + 1:\n if x.value() in self.__local_fitness__:\n return self.__local_fitness__[x.value()]\n return 0\n\n fitness = 0\n N = x.size()\n K = self.__K__ + 1\n for i in range(0, N - self.__K__):\n sub_bits = BitArray(x.bits()[i:i + K])\n val = self.fitness(sub_bits)\n fitness += val\n return fitness\n\n def k(self) -> int:\n '''\n Get the K of the landscape\n :return: K of the landscape (integer)\n '''\n return self.__K__\n\n\ndef det_high_climbing(x_i: BitArray, current_fitness: int, fitness_function: NKFitness, counter: int = 0) -> list:\n '''\n Deterministic High-Climbing Meta-Heuristic, choose the neighbor with the highest fitness if it is higher than\n the fitness of the current sequence.\n\n :param x_i: Current sequence\n :param current_fitness: Current fitness value\n :param fitness_function: Fitness function\n :param counter: Counter of the steps taken to find the solution\n :return: found sequence, fitness value, steps done\n '''\n neighbors = x_i.neighbors()\n max_v = current_fitness\n max_ind = -1\n for i in range(0, len(neighbors)):\n fitness = fitness_function.fitness(neighbors[i])\n if fitness > max_v:\n max_ind = i\n max_v = fitness\n if max_ind < 0:\n return x_i, current_fitness, counter\n else:\n return det_high_climbing(neighbors[max_ind], max_v, fitness_function, counter + 1)\n\n\ndef prob_high_climbing(x_i: BitArray, fitness_i: int, max_fitness: int, max_x: BitArray,\n fitness_function: NKFitness, counter: int = 50) -> list:\n '''\n Probabilistic High-Climbing with aspiration Meta-Heuristic. 
Choose the neighbor with a fitness higher than the maximum ever found,\n otherwise choose a random neighbor (with a loaded dice) while the counter is greater than 0\n\n :param x_i: current sequence\n :param fitness_i: current fitness\n :param max_fitness: highest fitness ever found\n :param max_x: sequence associated with the highest fitness\n :param fitness_function: Fitness function\n :param counter: countdown counter that stops the meta-heuristic\n :return: found sequence, fitness value\n '''\n\n if counter < 0:\n return max_x, max_fitness\n neighbors = x_i.neighbors()\n tot_v = 0\n probabilities = []\n max_v = max_fitness\n max_n = None\n for n in neighbors:\n l_f = fitness_function.fitness(n)\n tot_v += l_f\n probabilities.append(l_f)\n if l_f > max_v:\n max_n = n\n max_v = l_f\n\n if max_n is not None:\n return prob_high_climbing(max_n, max_v, max_v, max_n, fitness_function, counter - 1)\n\n r = random()\n p_cum = 0\n\n for i in range(0, len(probabilities)):\n if p_cum < r < p_cum + probabilities[i] / tot_v:\n return prob_high_climbing(neighbors[i], probabilities[i], max_fitness, max_x, fitness_function,\n counter - 1)\n p_cum += (probabilities[i] / tot_v)\n","repo_name":"Mandarancio/Metaheuristic","sub_path":"TP1 - NK-Landscape/NKLandscape.py","file_name":"NKLandscape.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39628869602","text":"import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom django.core.management.base import BaseCommand\n\nfrom searcher.models import Actor, Movie\n\n# We need to set user agent so CSFD site lets us scrape (else we get 429)\nHEADERS = {\n \"User-agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.52 Safari/536.5\"\n}\n\n# Number of top entries changed once, this provides a quick way to set it to a different number, ditto for range step\nMAX_RANGE_LIMIT = 1000\nRANGE_STEP = 100\n\n\nclass Command(BaseCommand):\n help = \"Scrapes info about top 1000 movies from CSFD and populates the database.\"\n\n def handle(self, *args, **kwargs):\n start_url = \"https://www.csfd.cz/zebricky/filmy/nejlepsi/?showMore=1\"\n movie_detail_url_base = \"https://www.csfd.cz\"\n next_url_base = \"https://www.csfd.cz/zebricky/filmy/nejlepsi/?from={}\"\n next_url_params = [step for step in range(100, MAX_RANGE_LIMIT, RANGE_STEP)]\n next_urls = [next_url_base.format(param) for param in next_url_params]\n\n urls = [start_url, *next_urls]\n\n for url in urls:\n # Print this just to get a sense of progress\n print(f\"Scraping {url}\")\n resp = requests.get(url, headers=HEADERS)\n\n # Don't bother with anything if we don't get the all clear signal.\n if resp.status_code != 200:\n raise Exception(\"Response is not valid for scraping!\")\n\n soup = BeautifulSoup(resp.content, \"html.parser\")\n\n # We want links without any class. Also some keyword blacklisting.\n movies = soup.find_all(\n \"a\",\n {\"class\": None},\n href=lambda href: href\n and \"film\" in href\n and \"zebricky\" not in href\n and \"pridat\" not in href,\n )\n\n for movie in movies:\n # To see progress again\n print(f\"Scraping info about movie {movie['title']}\")\n \n movie_csfd_id = int(re.search(r\"([0-9]+)\", movie[\"href\"]).group())\n\n # I wanted to use plain .create() as I assume TOP 1000 list should have 1000 unique items.\n # But, in the words of Quellcrist Falconer: 'Assume nothing. 
Only then can you truly see what you’re dealing with.'\n movie_obj, created = Movie.objects.get_or_create(name=movie[\"title\"], csfd_id=movie_csfd_id)\n\n if not created:\n print(\n f\"Movie {movie['title']} already exists in the database. Skipping.\"\n )\n continue\n\n movie_resp = requests.get(\n f'{movie_detail_url_base}{movie[\"href\"]}', headers=HEADERS\n )\n movie_soup = BeautifulSoup(movie_resp.text, \"html.parser\")\n\n # Find the Hrají string and get the parent div, so we can get the array of actors.\n try:\n actors_tag = movie_soup.find(\"h4\", text=\"Hrají: \")\n actors_parent_div = actors_tag.find_parent()\n actors = actors_parent_div.find_all(\"a\", {\"class\": None})\n\n for actor in actors:\n # We need to get the unique CSFD ID as there are more actors sharing the same name.\n actor_csfd_id = int(re.search(r\"([0-9]+)\", actor[\"href\"]).group())\n\n # get_or_create(), because actor might already exist from previous movies\n # an actor might have a new name (marriage, etc)\n actor_obj, created = Actor.objects.update_or_create(\n csfd_id=actor_csfd_id,\n defaults={'name': actor.contents[0]}\n )\n\n if not created:\n print(\n f\"Actor {actor.contents[0]} already exists in the database! \"\n f\"Adding movie {movie['title']} to his filmography.\"\n )\n\n movie_obj.actors.add(actor_obj)\n except AttributeError:\n # Movie detail has no actors listed\n print(\n f\"Movie {movie['title']} does not seem to have actors listed on the detail page!\"\n )\n continue\n","repo_name":"jakubstastka/django-csfd-searcher","sub_path":"searcher/management/commands/get_csfd_top_movies.py","file_name":"get_csfd_top_movies.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20385712439","text":"# dice rolling library, nigel griffin, february 18, 2021, 2:45pm\n\nimport random\nimport time\n\n# d4 simulator\ndef roll_d4(num_roll): # num_roll is an argument\n rolls = 0\n the_sum = 0\n\n while rolls < num_roll:\n result = random.randint(1, 4)\n print(f\"homie u rolled a {result}.\\n\")\n rolls += 1\n the_sum += result\n print(f\"the total of the {num_roll} rolls was {the_sum} bro.\\n\")\n\n\nroll_d4(5)\n\n\n","repo_name":"NigelGriffin-glitch/game_library_tools","sub_path":"dice_roll.py","file_name":"dice_roll.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38834633656","text":"import dateutil.parser\n\nfrom charts.orm.base import Base\n\nclass ContainerStat(Base):\n def save(self, data, tstamp):\n self.period = self.period(data)\n self.save_container(tstamp, data)\n\n def save_container(self, tstamp, container):\n ram_usage, ram_limit = self.ram_usage(container)\n self.save_db('containers',\n {\n 'tstamp': tstamp,\n 'id': container['id'],\n 'ram_usage': ram_usage,\n 'ram_limit': ram_limit,\n 'cpu': self.cpu_usage(container),\n 'system_cpu': self.system_cpu_usage(container),\n 'network_bytes': self.network_bytes(container),\n 'disk_ops': self.disk_ops(container)\n }\n )\n\n def ram_usage(self, container):\n return (\n container['memory_stats'].get('max_usage', 0),\n container['memory_stats'].get('limit', 0)\n )\n\n def cpu_usage(self, container):\n return container['cpu_stats']['cpu_usage']['total_usage']\n\n def system_cpu_usage(self, container):\n if 'system_cpu_usage' not in container['cpu_stats']:\n return None\n\n return container['cpu_stats']['system_cpu_usage']\n\n def disk_ops(self, 
container):\n ops = [\n x['value']\n for x in container.get('blkio_stats', {})\n .get('io_service_bytes_recursive', []) or []\n if x['op'] == 'Total'\n ]\n if not ops:\n return None\n\n return ops[0]\n\n def network_bytes(self, container):\n return sum(\n network['rx_bytes'] + network['tx_bytes']\n for network in container.get('networks', {}).values()\n )\n\n def period(self, data):\n start = dateutil.parser.parse(data['preread'])\n end = dateutil.parser.parse(data['read'])\n return (end - start).total_seconds()\n","repo_name":"bobf/skep","sub_path":"charts/charts/orm/container_stat.py","file_name":"container_stat.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"16344945294","text":"# With headless mode we avoid opening a visible browser window every time (it uses no screen memory)\n# It runs in the background instead.\nfrom selenium import webdriver\n\n# headless chrome \noptions = webdriver.ChromeOptions()\noptions.headless = True\noptions.add_argument(\"window-size=1920,1080\") # run the browser internally as if the window were 1920x1080\n# 4:20:15\noptions.add_argument(\"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36\")\n\nbrowser = webdriver.Chrome(options=options)\nbrowser.maximize_window()\n\nurl =\"https://www.whatismybrowser.com/detect/what-is-my-user-agent\"\nbrowser.get(url)\n\ndetected_value = browser.find_element_by_id(\"detected_value\") #
 pass in the tag id value\n#\n#print(detected_value.text)\n\n\n# Go to the movies page\nurl = \"https://play.google.com/store/movies/top\"\nbrowser.get(url)\n\nimport time\ninterval = 2 # scroll down once every 2 seconds\n\n# Get and store the current document height\nprev_height = browser.execute_script(\"return document.body.scrollHeight\")\n\n# Repeat\nwhile True:\n # Scroll all the way to the bottom\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight)\")\n\n # Wait for the page to load\n time.sleep(interval)\n\n # Get and store the current document height\n curr_height = browser.execute_script(\"return document.body.scrollHeight\")\n if curr_height == prev_height:\n break\n\n prev_height = curr_height\n\nprint(\"Scrolling finished\")\nbrowser.get_screenshot_as_file(\"google_movie.png\")\n \n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nsoup = BeautifulSoup(browser.page_source, \"lxml\")\n\n#movies = soup.find_all(\"div\", attrs={\"class\":[\"ImZGtf mpg5gc\", \"Vpfmgd\"]})\nmovies = soup.find_all(\"div\", attrs={\"class\":\"Vpfmgd\"})\nprint(len(movies))\n\n\nfor movie in movies:\n title = movie.find(\"div\", attrs={\"class\":\"WsMG1c nnK0zc\"}).get_text()\n print(title)\n # price before the discount\n original_price = movie.find(\"span\", attrs={\"class\":\"SUZt4c djCuy\"})\n if original_price:\n original_price = original_price.get_text()\n else:\n # print(title, \"skipping movies that are not discounted\")\n continue\n\n # discounted price, e.g. ₩7,000\n price = movie.find(\"span\", attrs={\"class\": \"VfPpfd ZdBevf i5DZme\"}).get_text()\n\n # link\n link = movie.find(\"a\", attrs={\"class\":\"JC71ub\"})[\"href\"] # every movie poster link has the class \"JC71ub\"\n # \"https://play.google.com\" + link\n\n print(f\"Title : {title}\")\n print(f\"Price before discount : {original_price}\")\n print(f\"Price after discount : {price}\")\n print(\"Link : \" + \"https://play.google.com\" + link)\n print(\"-\" * 100)\n\nbrowser.quit()\n\n\n''' \n# sample output\n\n'''","repo_name":"SEONGJAE-YOO/python-project.","sub_path":"headless_google_movie3.py","file_name":"headless_google_movie3.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73018229065","text":"import argparse\nimport sys\nimport cPickle\nfrom trapeza import *\nfrom trapeza.match import *\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Manipulate and combine tabular data files. Use this utility to \"\n \"process a master data set for use with trapeza-match.\")\n parser.add_argument(\"-o\", \n \"--output\", \n type=argparse.FileType('wb'), \n default=sys.stdout, \n help=\"Specify an output file (default standard output)\")\n parser.add_argument(\"-i\", \n \"--input-format\", \n choices=formats.available_input_formats(),\n default=\"csv\",\n help=\"Treat input read from stdin and from files whose type cannot be inferred as being in \"\n \"the specified format. Default is CSV.\")\n parser.add_argument(\"--input-encoding\",\n default=\"utf-8\",\n help=\"Treat input data as the specified encoding (for input formats that support Unicode). 
\"\n \"Column names specified on the command line will be treated as the same encoding.\")\n parser.add_argument(\"-p\", \n \"--profile\", \n type=argparse.FileType('rb'),\n help=\"Specify the profile spreadsheet\")\n parser.add_argument(\"-m\", \n \"--master\", \n type=argparse.FileType('rb'),\n help=\"Specify the master spreadsheet\")\n parser.add_argument(\"--primary-key\", \n help=\"Set the column name in the master sheet where unique identifiers are stored.\")\n\n args = parser.parse_args()\n \n if args.profile is None or args.master is None or args.primary_key is None:\n sys.stderr.write(\"{}: you must specify a master and profile sheet and a primary key column.\\n\"\n .format(sys.argv[0]))\n exit(1)\n try:\n profile = Profile(source=load_source(args.profile, get_format(args.profile.name, args.input_format),\n args.input_encoding))\n master = load_source(args.master, get_format(args.master.name, args.input_format), args.input_encoding)\n except Exception:\n sys.stderr.write(\"{}: an error occured while loading input files.\\n\".format(sys.argv[0]))\n return 1\n \n master.set_primary_key(args.primary_key.decode(args.input_encoding))\n\n pm = ProcessedSource(master, True, profile)\n pm.process()\n\n try:\n cPickle.dump(pm, args.output, protocol=cPickle.HIGHEST_PROTOCOL)\n except Exception as e:\n sys.stderr.write(\"{}: an error occured while writing output: {}\\n\".format(sys.argv[0], e))\n return 1\n \n return 0\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"davidmreed/trapeza","sub_path":"trapeza-process.py","file_name":"trapeza-process.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"44501654027","text":"from Course import Course\nfrom DataHandler import DataHandler as dataHandl\nfrom ErrorHandler import ErrorHandler as errorHandl\nfrom FileOperations import FileOperations as fileOps\nfrom UserInterface import UserInterface as UI\n\nLINE_STARTER = \">\"\n\n\ndef main():\n main_proc_enable = True\n data_handler = None\n input_filename = input(\"Input file: \")\n\n file_handler = fileOps(input_filename)\n\n if (file_handler.status):\n data_handler = dataHandl(file_handler.data)\n else:\n main_proc_enable = False\n\n user_if = UI(LINE_STARTER)\n\n while(main_proc_enable):\n has_valid_command = user_if.get_command()\n\n if (has_valid_command and not user_if.is_quit()):\n command = user_if.pass_command()\n main_proc_enable = data_handler.process_command(command)\n else:\n main_proc_enable = False\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Ihsara/Education-center","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24423413330","text":"import csv\nimport os\n\ninsert_query = \"INSERT INTO md5 (video_name, md5_val) VALUES ('%s', '%s');\"\n\n# Reads from the csv file.\nqueries = []\nwith open('md5.csv') as file:\n csv_file = csv.reader(file, delimiter=',')\n\n # Reads line by line.\n for row in csv_file:\n video_name = os.path.splitext(row[0])[0]\n md5_val = row[1]\n query = insert_query % (video_name, md5_val)\n queries.append(query)\n\n# Writes the result to the given file.\nwith open('md5.sql', 'a') as file:\n for query in queries:\n file.write(query + 
\"\\n\")\n","repo_name":"yunpengn/AudioDup","sub_path":"collect_md5.py","file_name":"collect_md5.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30247658674","text":"import requests\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom library import user\n\npd.set_option('display.unicode.east_asian_width', True) # align East Asian text\npd.set_option('display.max_columns', 5) # set the maximum number of columns to display\n\n\ndef cloud_warehouse(js_code, category, star=4):\n \"\"\"\n Query cloud-warehouse information with a JSON statement\n :param js_code: query statement\n :param category: category of the operation to execute\n :parameter star: column the output DataFrame starts from, column 4 by default\n :return: DataFrame\n \"\"\"\n cloud = user.CloudWarehouse\n url = cloud.url # API endpoint URL\n json_data = {\n \"funcType\": \"select\", # method type\n \"callCategory\": category, # operation category\n \"id\": cloud.id,\n \"pwd\": cloud.password,\n # field payload\n \"jsondata\": js_code}\n\n res = requests.post(url=url, json=json_data) # call the API via POST\n js_data = res.json() # parse the response; it is JSON, and parsing yields a dict\n\n if js_data['Flag'] is False:\n print(\"Warning:\", js_data['ErrInfo'])\n else:\n if js_data[\"MsgInfo\"]:\n js_msg_info = js_data[\"MsgInfo\"] # extract the values we need from the dict\n # key = list(js_MsgInfo[0].keys()) # collect the dict keys into a list\n basket = list() # a basket to collect the pieces\n for dict_i in js_msg_info:\n # build a DataFrame from the dict; an index is required, here the dict keys\n df_i = pd.DataFrame.from_dict(dict_i, orient='index')\n basket.append(df_i.T) # transpose, because the index holds the keys\n df = pd.concat(basket, ignore_index=True) # combine by column name and rebuild the index\n df = df.iloc[:, star:] # drop the leading unused columns\n return df\n else:\n return js_data\n\n\ndef to_sql(data_frame, table_name, database='my_data', exists='fail'):\n \"\"\"\n Insert a DataFrame into the database\n :param data_frame: data in DataFrame format\n :param table_name: table name\n :parameter database: database name, my_data by default\n :parameter exists: fail if the table already exists; can be changed to replace or append\n :return:\n \"\"\"\n sql = user.SqlServer\n engine = create_engine(\n f\"mssql+pymssql://{sql.yaoud_user}:{sql.yaoud_password}@{sql.yaoud_IP}/{database}?charset=utf8\", echo=False)\n data_frame.to_sql(table_name, engine, index=False, if_exists=exists) # also accepts replace or append\n print(f'Table {table_name} has been added to {database}')\n\n\nif __name__ == '__main__':\n all_id = user.CloudWarehouse\n data = {\n \"Operator_Id\": all_id.Operator_Id,\n \"Con_Id\": all_id.Con_Id,\n \"Ldc_Id\": all_id.Ldc_Id,\n \"Businessbill_No\": \"\",\n \"Goods_No\": \"\",\n \"Start_Date\": \"2022-07-07\",\n \"End_Date\": \"2022-07-07\"\n }\n\n operation = \"kcztbh\"\n\n js = cloud_warehouse(data, operation)\n print(js)\n","repo_name":"mbalucard/Company_project","sub_path":"library/json_data.py","file_name":"json_data.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73662300745","text":"from Crypto.PublicKey import RSA\nimport Crypto.Random\nfrom Crypto.Signature import PKCS1_v1_5\nfrom Crypto.Hash import SHA256\nimport binascii\n\n\nclass Wallet:\n def __init__(self, node_id):\n self.private_key = None\n self.public_key = None\n self.node_id = str(node_id)\n\n @property\n def public_key(self):\n return self.__public_key\n\n @public_key.setter\n def public_key(self, value):\n self.__public_key = value\n\n def save_keys_in_file(self):\n if self.private_key is not None and self.public_key is not None:\n try:\n with open(f'wallet-{self.node_id}', mode='w') as output_file:\n output_file.write(self.private_key)\n output_file.write('\\n')\n output_file.write(self.public_key)\n except (IOError, 
IndexError):\n raise Exception('Saving wallet failed.')\n\n def create_keys(self):\n private_key, public_key = self.generate_keys()\n self.private_key = private_key\n self.public_key = public_key\n\n def load_keys(self):\n try:\n with open(f'wallet-{self.node_id}', mode='r') as input_file:\n file_content = input_file.readlines()\n self.private_key = file_content[0].strip('\\n')\n self.public_key = file_content[1]\n except IOError:\n raise Exception('Loading wallet failed.')\n\n def generate_keys(self):\n private_key = RSA.generate(2048, Crypto.Random.new().read)\n public_key = private_key.publickey()\n return (binascii.hexlify(private_key\n .exportKey(format='DER')).decode('ascii'),\n binascii.hexlify(public_key\n .exportKey(format='DER')).decode('ascii'))\n\n def sign_transaction(self, sender, recipient, amount):\n if self.public_key is None:\n raise Exception('Wallet is not set up.')\n signer = PKCS1_v1_5.new(RSA.importKey(binascii\n .unhexlify(self.private_key)))\n payload_hash = SHA256.new((str(sender) + str(recipient) + str(amount))\n .encode('utf8'))\n signature = signer.sign(payload_hash)\n return binascii.hexlify(signature).decode('ascii')\n\n @staticmethod\n def verify_transaction(transaction):\n public_key = RSA.importKey(binascii.unhexlify(transaction.sender))\n verifier = PKCS1_v1_5.new(public_key)\n payload_hash = SHA256.new((str(transaction.sender) + str(\n transaction.recipient) + str(\n transaction.amount))\n .encode('utf8'))\n return verifier.verify(payload_hash,\n binascii.unhexlify(transaction.signature))\n","repo_name":"jaqb8/blockchain-app","sub_path":"node/wallet.py","file_name":"wallet.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17851275187","text":"import sys\nimport time\n\nfrom chatting_client import ChattingClient\nfrom chatting_server import ChattingServer\nfrom msg_parser import MsgParser\n\nPORT_NUM = 60860\n\n\nclass P2pChatting(object):\n\n def __init__(self, name, receivedMsgHandler):\n self.my_name = name\n self.is_chairman = False\n self.members = {}\n self.msgHandler = receivedMsgHandler\n self.client = None\n self.server = None\n self.addr = None\n self.listen_port = PORT_NUM\n\n def setUserName(self, name):\n self.my_name = name\n\n def setListenPort(self, port):\n self.listen_port = port\n\n def setAddr(self, addr):\n self.addr = addr\n if addr == 'localhost' or addr == '127.0.0.1':\n self.is_chairman = True\n else:\n self.is_chairman = False\n\n def start(self):\n self.stop()\n self.server = ChattingServer(self, self.addr, self.listen_port)\n self.server.start()\n\n if not self.is_chairman:\n self.client = ChattingClient(self.my_name, self, self.addr, PORT_NUM)\n self.client.start()\n\n def stop(self):\n if self.server:\n self.server.stop()\n if self.client:\n self.client.stop()\n\n def broadcastMsg(self, msg):\n self.msgHandler(self.my_name + ' : ' + msg)\n msg = MsgParser.buildChatMsg(self.my_name, msg)\n if self.client:\n self.client.sendMsg(msg)\n if self.server:\n self.server.sendMsg(msg)\n '''\n def onNewConnection(self, addr, name):\n if self.is_chairman:\n redirect, newAddr = self._allocParentNode(addr)\n if redirect:\n self.server.redirect(addr, name, newAddr)\n '''\n\n def removeMember(self, addr):\n ip, port = addr\n for name, info in self.members.items():\n if info['ip'] == ip and info['port'] == port:\n del self.members[name]\n print('Member {} removed'.format(name))\n return\n\n def sendToUpperlayer(self, msg):\n 
self.msgHandler(msg['name'] + ' : ' + msg['chat'])\n if self.is_chairman:\n return\n self.client.sendMsg(msg)\n\n def sendToUnderlayer(self, msg):\n self.msgHandler(msg['name'] + ' : ' + msg['chat'])\n self.server.sendMsg(msg)\n\n def onCtrlMsg(self, msg, addr):\n if msg['type'] == 'join':\n self.onJoinMsg(msg, addr)\n elif msg['type'] == 'redirect':\n self.onRedirectMsg(msg, addr)\n elif msg['type'] == 'leave':\n self.onLeaveMsg(msg, addr)\n else:\n print(msg, addr)\n\n def onLeaveMsg(self, msg, addr):\n name = msg['name']\n self.msgHandler(name + ' is leaving, bye!')\n if name in self.members:\n del self.members[name]\n\n def onRedirectMsg(self, msg, addr):\n self.addr = msg['parent_node']\n print('stopping')\n self.client.setServer(self.addr, PORT_NUM)\n self.client.connectServer()\n print('Client reconnect to {}'.format(self.addr))\n\n def onJoinMsg(self, msg, addr):\n if not self.is_chairman:\n return\n ip, port = addr\n welcome = msg['name'] + ' has joined the chatting, welcome!'\n self.broadcastMsg(welcome)\n parentName, parentIP = self._allocParentNode(ip)\n self.members[msg['name']] = {'ip': ip, 'port': port}\n if parentIP:\n print('Redirect', msg['name'], 'to', parentName)\n time.sleep(2)\n self.server.redirect(addr, msg['name'], parentIP)\n\n def _allocParentNode(self, ip):\n ip = ip.split('.')\n for name, info in self.members.items():\n addr = info['ip'].split('.')\n if ip[0] == addr[0] and ip[1] == addr[1] and ip[2] == addr[2]:\n return name, info['ip']\n return None, None\n\n def leave(self):\n if self.is_chairman:\n return\n\n if self.client:\n leaveMsg = MsgParser.buildLeaveMsg(self.my_name)\n self.client.sendMsg(leaveMsg)\n time.sleep(1)\n\n if self.server:\n redirectMsg = MsgParser.buildRedirectMsg(self.addr)\n self.server.sendMsg(redirectMsg)\n time.sleep(3)\n\nclass TestMsgHandler:\n def OnRecvedMsg(self, msg):\n print('Received Msg:', msg)\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Correct usage: script, IP address, port number, name\")\n exit()\n IP_address = str(sys.argv[1])\n name = str(sys.argv[2])\n testHandler = TestMsgHandler()\n chatting = P2pChatting(name, testHandler.OnRecvedMsg)\n chatting.setAddr(IP_address)\n if len(sys.argv) == 4:\n portNum = int(sys.argv[3])\n chatting.setListenPort(portNum)\n chatting.start()\n print('Input your msg:')\n while True:\n data = sys.stdin.readline().strip()\n if data == 'exit' or data == 'quit':\n break\n elif data == 'leave' or data == 'Leave':\n chatting.leave()\n break\n else:\n chatting.broadcastMsg(data)\n print('Ending!')\n chatting.stop()\n","repo_name":"FrankDuan/MultipartyChatting","sub_path":"p2p_chatting.py","file_name":"p2p_chatting.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72580349385","text":"from aiogram import types\nfrom order import order\n#from order_full import order_full\n#from states import states\nfrom aiogram.types import ReplyKeyboardMarkup, ReplyKeyboardRemove, InlineKeyboardButton, InlineKeyboardMarkup, KeyboardButton\nfrom lo import bot, dp\n\nb1 = KeyboardButton('kz🇰🇿')\nb2 = KeyboardButton('ru🇷🇺')\nb3 = KeyboardButton('eng🇺🇸')\n\nmarkup_language = ReplyKeyboardMarkup().row(\n b1, b2, b3\n)\n\ninline_set = InlineKeyboardMarkup(row_width=3)\nset_diplom = InlineKeyboardButton('Дипломат🍣', callback_data='diplomat')\nset_posi = InlineKeyboardButton('Позитивчик🍣', callback_data='posi')\nset_bar = InlineKeyboardButton('Бармаглот🍣', 
callback_data='barmaglog')\nset_he_she = InlineKeyboardButton('Он и Она🍣', callback_data='He_She')\ninline_set.insert(set_diplom)\ninline_set.insert(set_posi)\ninline_set.insert(set_bar)\ninline_set.insert(set_he_she)\n\nText = \"Тілді таңдаңыз, пажалуиста выберите язык, please choose language\"\ntext_kz = \"сушиға🍣 тапсырыс беру үшін /sushi_kz басыңыз\"\ntext_ru = \"чтобы заказат суши🍣 нажмите кнопку /sushi_ru\"\ntext_eng = \"For order sushi🍣 press /suhi_eng\"\ntext_else = \"нажмите только кнопки\"\ntext_else_kz = \"тек қана кнопкаларды басыңыз\"\n\n@dp.message_handler(commands=['start'])\nasync def start(message: types.Message):\n await bot.send_message(message.from_user.id, text=Text, reply_markup=markup_language)\n\n@dp.message_handler(commands=['sushi_kz'])\nasync def send_message(message: types.Message):\n await bot.send_message(message.from_user.id, text='Сетті таңдаңыз', reply_markup=inline_set)\n\n@dp.message_handler(content_types=['text'])\nasync def answer(message: types.Message):\n m = message.text\n if m == \"kz🇰🇿\":\n await bot.send_message(message.from_user.id, text=text_kz)\n #await bot.send_message(chat_id='@yerekerun', text=\"админ сука\")\n elif m == \"ru🇷🇺\":\n await bot.send_message(message.from_user.id, text=text_ru)\n elif m == \"eng🇺🇸\":\n await bot.send_message(message.from_user.id, text=text_eng)\n else:\n await bot.send_message(message.from_user.id, text=text_else)\n\n@dp.callback_query_handler(text='diplomat')\n@dp.callback_query_handler(text='posi')\n@dp.callback_query_handler(text='barmaglog')\n@dp.callback_query_handler(text='He_She')\nasync def procces(call: types.CallbackQuery):\n answer_data = call.data\n if answer_data == 'diplomat':\n text = \"✳Сет для студентов❗ \\n Сет называется «𝐃𝐈𝐏𝐋𝐎𝐌𝐀𝐍𝐓 \\nПримечание: \\n-Покажите студенческий билет и получите сет со скидкой 4900тг \\nВнимание: ТОЛЬКО НА ДОСТАВКУ \\n——————————————————————\\n60шт суши и одна пицца🔺\\n🍣Филадельфия 10шт\\n🍣Эби кинг с креветкой 10шт\\n🍣Цезарь 10шт\\n🍣Сяке темпура 10шт\\n🍣Бонита 10шт\\n🍣Ёжик в тумане 10шт\\n🍕Пепперони пицца\\n🍱Цена со скидкой = 4900тг🔔\\n——————————————————————\\n Тапсырыс беру үшін /order 📲 басыңыз\"\n elif answer_data == 'posi':\n text = \"\"\n elif answer_data == 'barmaglog':\n text = \"\"\n elif answer_data == 'He_She':\n text = \"\"\n else:\n text = \"Please choose in Inline buttons\"\n await bot.send_message(call.from_user.id, text=text)\n\n\n\n","repo_name":"Yerek-Athlete/Chat_bot","sub_path":"venv/sushi.py","file_name":"sushi.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35749400000","text":"import pandas as pd\nimport streamlit as st\n#import code\n\n\n@st.cache\ndef load():\n data = pd.read_csv(\"https://raw.githubusercontent.com/datasets/population/master/data/population.csv\")\n data.drop(columns = 'Country Code', inplace=True)\n data.replace('United States','US', inplace=True)\n max_year_indexes = data.groupby(['Country Name']).apply(lambda x: pd.Series({'value': x['Year'].idxmax()}))\n return data.iloc[max_year_indexes['value']].reset_index(drop=True).drop(columns=['Year']).rename(columns={'Country Name': 'country', 'Value': 'country_population'})\n \n#code.interact(local=locals())\n","repo_name":"jesperfj/streamlit-covid","sub_path":"worldpopulation.py","file_name":"worldpopulation.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"5042929042","text":"import json as j\nimport tkinter as tk\nfrom tkinter import scrolledtext\n\n\nclass EntryWithPlaceholder(tk.Entry):\n def __init__(self, master=None, placeholder=\"PLACEHOLDER\", color='grey'):\n super().__init__(master, width=20, command=None)\n\n self.placeholder = placeholder\n self.placeholder_color = color\n self.default_fg_color = self['fg']\n\n self.bind(\"\", self.foc_in)\n self.bind(\"\", self.foc_out)\n\n self.put_placeholder()\n\n def put_placeholder(self):\n self.insert(0, self.placeholder)\n self['fg'] = self.placeholder_color\n\n def foc_in(self, *args):\n if self['fg'] == self.placeholder_color:\n self.delete('0', 'end')\n self['fg'] = self.default_fg_color\n\n def foc_out(self, *args):\n if not self.get():\n self.put_placeholder()\n\n\nk = str()\n\n\ndef uid():\n msg = tk.Tk()\n msg.title(\"Enter Unique ID\")\n msg.geometry('300x100')\n\n lbl = tk.Label(msg, text=\"UID:\", font=(\"Arial Bold\", 14))\n lbl.grid(column=0, row=0, padx=20, pady=20)\n id_ = EntryWithPlaceholder(msg, placeholder=\"Enter Unique ID\")\n id_.grid(column=1, row=0, padx=20, pady=20)\n id_.focus()\n\n def destroy():\n global k\n k = id_.get()\n msg.quit()\n msg.destroy()\n\n btn = tk.Button(msg, text=\"OK\", command=destroy)\n btn.grid(column=2, row=0, padx=10, pady=20)\n\n msg.mainloop()\n return int(k)\n\n\ndef recDisp(ls):\n ls = j.dumps(ls, indent=4)\n n_w = tk.Tk()\n n_w.title(\"Record\")\n n_w.geometry(\"500x250\")\n sc_txt = scrolledtext.ScrolledText(n_w, width=50, height=20)\n sc_txt.grid(column=0, row=3)\n sc_txt.insert(tk.INSERT, ls)\n n_w.mainloop()\n\n\nif __name__ == '__main__':\n print(uid())\n","repo_name":"rajprakharpatel/HospitalManagement","sub_path":"GUI_elements.py","file_name":"GUI_elements.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36025340378","text":"import time\nimport grpc\nfrom concurrent import futures\nimport crud_pb2_grpc\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\nclass RemoteServer():\n server = None\n\n def __init__(self, cmd_map, grpc_port):\n RemoteServer.setup(cmd_map, grpc_port)\n\n @classmethod\n def setup(cls, cmd_map, grpc_port):\n cls.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n crud_pb2_grpc.add_MapServicer_to_server(cmd_map, cls.server)\n port = f'[::]:{grpc_port}'\n cls.server.add_insecure_port(port)\n\n def serve(self):\n RemoteServer.server.start()\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n RemoteServer.server.stop(0)\n","repo_name":"jchami/gbc074","sub_path":"Projeto2/grpcserver.py","file_name":"grpcserver.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13792853357","text":"\"\"\"\nInference module for window-based strategy.\n\"\"\"\n\nimport math\nimport os\nimport os.path as osp\nfrom itertools import product\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torchvision.transforms.functional as TF\n\nimport fire\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom skimage.io import imread\n\nfrom models import initialize_trainer\n\n\ndef _get_top_left_coordinates(height, width, patch_size):\n \"\"\"Calculate coordinates of top-left corners for patches.\"\"\"\n\n n_h = math.ceil(height / patch_size)\n n_w = math.ceil(width / patch_size)\n tops = np.linspace(0, height - patch_size, n_h, dtype=int)\n lefts = np.linspace(0, width 
- patch_size, n_w, dtype=int)\n\n return product(tops, lefts)\n\n\ndef divide_image_to_patches(img, patch_size):\n \"\"\"\n Divide a large image (mask) to patches with (possibly overlapping) tile strategy.\n\n Args:\n img: input image of shape (H, W, 3)\n patch_size: target size of patches\n\n Returns:\n patches: patches of shape (N, patch_size, patch_size, 3)\n \"\"\"\n\n assert len(img.shape) == 3 and img.shape[-1] == 3\n\n height, width, _ = img.shape\n coordinates = _get_top_left_coordinates(height, width, patch_size)\n\n patches = []\n\n for top, left in coordinates:\n patches.append(img[top:top + patch_size, left:left + patch_size])\n\n return np.array(patches).astype('uint8')\n\n\ndef combine_patches_to_image(patches, target_height, target_width):\n \"\"\"Combine patches back to a single image (mask).\n\n Args:\n patches: predicted patches of shape (N, H, W, C) or (N, H, W)\n target_height: target height of combined image\n target_width: target width of combined image\n\n Returns:\n combined: combined output of shape (H, W, C) or (H, W)\n \"\"\"\n\n counter = 0\n patch_size = patches.shape[1]\n coordinates = _get_top_left_coordinates(\n target_height, target_width, patch_size)\n\n if len(patches.shape) == 3: # channel dimension is missing\n patches = np.expand_dims(patches, -1)\n\n # The last channel is the number of overlapping patches for a given pixel,\n # used for averaging predictions from multiple windows.\n combined = np.zeros((target_height, target_width, patches.shape[-1] + 1))\n\n for top, left in coordinates:\n patch = combined[top:top + patch_size, left:left + patch_size, :-1]\n overlaps = combined[top:top + patch_size, left:left + patch_size, -1:]\n patch = (patch * overlaps + patches[counter]) / (overlaps + 1)\n combined[top:top + patch_size, left:left + patch_size, :-1] = patch\n overlaps += 1.\n counter += 1\n\n return np.squeeze(combined[..., :-1])\n\n\ndef predict(trainer, img_path, patch_size, device='cpu'):\n \"\"\"Predict on a single input image.\n\n Arguments:\n trainer: trainer for inference\n img_path: instance of `torch.utils.data.Dataset`\n patch_size: patch size when feeding into network\n device: target device\n\n Returns:\n predictions: list of model predictions of size (H, W)\n \"\"\"\n\n img = imread(img_path)\n patches = divide_image_to_patches(img, patch_size)\n predictions = []\n\n for patch in patches:\n input_ = TF.to_tensor(Image.fromarray(patch)).to(device).unsqueeze(0)\n input_, _ = trainer.preprocess(input_)\n prediction = trainer.postprocess(trainer.model(input_))\n prediction = prediction.detach().cpu().numpy()\n predictions.append(prediction[..., np.newaxis])\n\n predictions = np.concatenate(predictions)\n\n return combine_patches_to_image(predictions, img.shape[0], img.shape[1])\n\n\ndef save_predictions(predictions, img_paths, output_dir='predictions'):\n \"\"\"Save predictions to disk.\n\n Args:\n predictions: model predictions of size (N, H, W)\n img_paths: list of paths to input images\n output_dir: path to output directory\n \"\"\"\n\n print(f'\\nSaving prediction to {output_dir} ...')\n\n if not osp.exists(output_dir):\n os.mkdir(output_dir)\n\n for pred, img_path in tqdm(zip(predictions, img_paths), total=len(predictions)):\n img_name = osp.basename(img_path)\n pred = pred.astype('uint8')\n Image.fromarray(pred * 255).save(osp.join(output_dir, img_name))\n\n\ndef infer(trainer, data_dir, patch_size, output_dir=None, device='cpu'):\n \"\"\"Making inference on a directory of images with given model checkpoint.\"\"\"\n\n if output_dir 
is not None and not osp.exists(output_dir):\n os.mkdir(output_dir)\n\n data_dir = Path(data_dir).expanduser()\n img_paths = list((data_dir / 'images').iterdir())\n\n print(f'Predicting {len(img_paths)} images from {data_dir} ...')\n predictions = [\n predict(trainer, img_path, patch_size, device=device)\n for img_path in tqdm(img_paths)\n ]\n\n if output_dir is not None:\n save_predictions(predictions, img_paths, output_dir)\n\n return predictions\n\n\ndef main(data_dir, model_type='mild', patch_size=464, checkpoint=None,\n output_dir=None, device=None):\n\n if output_dir is None and checkpoint is not None:\n checkpoint = Path(checkpoint).expanduser()\n output_dir = checkpoint.parent.parent / 'results'\n if not output_dir.exists():\n output_dir.mkdir()\n\n device = device or ('cuda' if torch.cuda.is_available() else 'cpu')\n trainer = initialize_trainer(model_type, device=device)\n if checkpoint is not None:\n trainer.load_checkpoint(checkpoint)\n\n infer(trainer, data_dir, patch_size, output_dir, device=device)\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n","repo_name":"mrcfps/WESUP","sub_path":"infer_tile.py","file_name":"infer_tile.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"1375132958","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Se cargan módulos\nimport cv2\nimport argparse\n\n# Se configuran los argumentos de la línea de comandos\np = argparse.ArgumentParser(\"Extrae una región de una imagen\")\np.add_argument(\"x1\",default=None,type=int,\n action=\"store\", help=\"Coordenada x de primera esquina\")\np.add_argument(\"y1\",default=None,type=int,\n action=\"store\", help=\"Coordenada y de primera esquina\")\np.add_argument(\"x2\",default=None,type=int,\n action=\"store\", help=\"Coordenada x de segunda esquina\")\np.add_argument(\"y2\",default=None,type=int,\n action=\"store\", help=\"Coordenada y de segunda esquina\")\np.add_argument(\"archivo\",default=None,\n action=\"store\", help=\"Nombre de archivo\")\nopts = p.parse_args()\n\n\n# Se checa que las coordenadas sean válidas\nif opts.x1>=opts.x2:\n p.error(\"Error coordenadas x1 ({0}) y x2 ({1}) incorrectas\".format(opts.x1,opts.x2))\n\nif opts.y1>=opts.y2:\n p.error(\"Error coordenadas y1 ({0}) y y2 ({1}) incorrectas\".format(opts.y1,opts.y2))\n\n# Se lee la imagen\nimg = cv2.imread(opts.archivo)\n\n# Se extraen las propiedades de la imagen\nheight, width, depth = img.shape\n\n# Se checa que las coordenadas sean válidas para la imagen\nif opts.x1>width or opts.x2>width:\n p.error(\"Coordenadas más grandes que el ancho de la imagen ({0})\".format(width))\n\nif opts.y1>height or opts.y2>height:\n p.error(\"Coordenadas más grandes que el alto de la imagen ({0})\".format(height))\n\n# Se corta la región de interés\ncrop_img = img[opts.y1:opts.y2, opts.x1:opts.x2]\n\n# Se guarda la imagen\ncv2.imwrite(\"imagen_recortada.png\", crop_img)\n\n","repo_name":"ivanvladimir/cursoML","sub_path":"sesion4/cortandoimagen.py","file_name":"cortandoimagen.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37936727775","text":"from pytube import YouTube, Playlist\n\n\ndef get_title(id, type):\n if type == 'video':\n url = f'https://www.youtube.com/watch?v={id}'\n return YouTube(url).title\n elif type == 'playlist':\n url = f'https://www.youtube.com/playlist?list={id}'\n return 
YouTube(Playlist(url).video_urls[0]).title","repo_name":"disasstor/Youtube-Grasper-Bot","sub_path":"bot/utils/extractor_title.py","file_name":"extractor_title.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30995742292","text":"class Layer(object):\n \"\"\"\n This class represents a dense layer in a neural network.\n\n This layer utilises tensors (https://en.wikipedia.org/wiki/Tensor) - the construct of choice in neural networks.\n\n Attributes:\n in_size: size of the input tensor.\n out_size: size of the output tensor.\n t_in: the input tensor.\n t_out: the output tensor.\n weights: the weights tensor.\n bias: the bias tensor.\n activator: the activator function which is ran after activation.\n \"\"\"\n\n # Optional way for Python to optimize variable lookups\n __slots__ = \"t_in\", \"t_out\", \"weights\", \"bias\", \"activator\"\n\n def __init__(self, in_size, out_size, activator=lambda x: x):\n \"\"\"\n Method to initialize the neural network layer with the given in tensor size and out tensor size.\n If activator is empty, this layer will be linear.\n\n Args:\n in_size: the size of the input tensor.\n out_size: the size of the output tensor.\n activator: optional; the activator function. If not supplied, this will be a linear activator.\n \"\"\"\n\n # Set output tensor (input will be supplied during inference)\n self.t_in = None\n self.t_out = [0] * out_size\n\n # Set weight tensor\n self.weights = [0] * (in_size * out_size)\n # Set bias tensor\n self.bias = [0] * out_size\n # Set the activation method\n self.activator = activator\n\n def activate(self):\n \"\"\"\n Method to activate this layer, runs activator and sets output tensor.\n \"\"\"\n\n # Run for each neuron (output)\n out_size = len(self.t_out)\n for n in range(out_size):\n # Sum input of all input neurons (inputs), multiplied by corresponding weight\n input_sum = 0\n for i in range(len(self.t_in)):\n input_sum += self.t_in[i] * self.weights[i * out_size + n]\n input_sum = input_sum + self.bias[n]\n # Run activator on input\n self.t_out[n] = self.activator(input_sum)\n\n def read_file(self, fh):\n \"\"\"\n Method to import weights and biases from an input file.\n\n Args:\n fh: input stream from which this layer's weights and biases are read.\n \"\"\"\n\n # Import weight data\n for i in range(len(self.weights)):\n self.weights[i] = eval(fh.readline())\n # Import bias data\n for i in range(len(self.bias)):\n self.bias[i] = eval(fh.readline())\n","repo_name":"TUDSSL/BFree","sub_path":"software/applications-and-benchmarks/app-micro-nn/neural_net_src/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":268,"dataset":"github-code","pt":"81"} +{"seq_id":"12069061004","text":"src = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]\n\n\ndef count_val(lst):\n tmp_dict = {}\n for el in lst:\n if el in tmp_dict:\n tmp_dict[el] += 1\n else:\n tmp_dict[el] = 0\n return tmp_dict\n\n\n# 1 способ\ndict_src = count_val(src)\nresult = [num for num, count in dict_src.items() if count == 0]\nprint(result)\n\n# 2 способ\nsrc = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]\nprint([x for x in src if src.count(x) == 
1])\n\n","repo_name":"arseniy-smirnovvv/lessons_geekbrains","sub_path":"arseniy_smirnov_dz_5/task_5_5.py","file_name":"task_5_5.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33487927844","text":"'''\nRemove all elements from a linked list of integers that have value val.\n\nExample:\n\nInput: 1->2->6->3->4->5->6, val = 6\nOutput: 1->2->3->4->5\n'''\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n first = ListNode(None)\n first.next = head\n pre = first\n while head:\n if head.val == val:\n pre.next = head.next\n head.next = None\n head = pre.next\n else:\n pre = head\n head = head.next\n return first.next\nclass Solution(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n dummpy = ListNode(None)\n dummpy.next = head\n next = dummpy\n while next != None and next.next != None:\n if next.next.val == val:\n next.next = next.next.next\n else:\n next = next.next\n return dummpy.next\n","repo_name":"XiongQiuQiu/leetcode-slove","sub_path":"Algorithms/203-Remove-Linked-List-Elements.py","file_name":"203-Remove-Linked-List-Elements.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19398119419","text":"\"\"\"\nClasse mère des classe comportant des modèles\n\nElle gere la sauvegarde automatique des modèle\nAuthor : bsanchez@starclay.fr\ndate : 06/08/2020\n\"\"\"\n\nfrom abc import ABC\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom abc import ABC\nimport tempfile\nimport pickle\nimport logging\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\nimport os, sys\nfrom .similarity_model import SimilarityModel\n\n\nclass TrainingModel(ABC):\n \n def __init__(self, name, path_run,\n nomenclature_distance, \n load_path=None, **kwargs):\n \"\"\"\n Superclasse pour les modèles à entrainer\n\n :param name: nom du modèle\n :param path_run: chemin de sauvegarde\n :param nomenclature_distance: NomenclatureDistance à utiliser\n :param load_path: chemin à charger\n :param kwargs: dict d'arguments pour la construction du modèle (passé à build_model, puis define_layers)\n \"\"\"\n self.model_name = name\n self.path_run = path_run\n self.logger = logging.getLogger(name)\n \n self.nomenclature_distance = nomenclature_distance\n if load_path is not None:\n self.model = self.load_model(load_path)\n else:\n self.model = self.build_model(**kwargs)\n self.logger.info(self.model.summary())\n \n @abstractmethod\n def define_layers(self, **kwargs):\n \"\"\"\n Methode qui définit les couches\n \n :params kwargs: dict d'arguments pour la construction des couches\n \n :returns: input (tf.Tensor ou list de tf.tensor), \n output (tf.Tensor ou list de tf.tensor)\n \"\"\"\n raise NotImplementedError()\n \n def build_model(self, **kwargs):\n \"\"\"\n Method qui crée les couches via define_layers, puis encapsule dans un SimilarityModel\n \n :params kwargs: dict d'arguments pour la construction des couches\n :returns: keras.Model\n \"\"\"\n inputs, output = self.define_layers(**kwargs)\n\n model = 
SimilarityModel(inputs=inputs, \n outputs=output, \n nomenclature_distance=self.nomenclature_distance)\n return model\n \n def train_model(self, training_pairs_batcher, validation_pairs_batcher, nb_epochs):\n \"\"\"\n Entraine le modèle:\n - compile le modèle\n - ajoute des callbacks (ModelCheckpoint, EarlyStopping)\n - lance le training\n\n :param training_pairs_batcher: AnchorPositivePairsBatch contenant les données de training\n :param validation_pairs_batcher: AnchorPositivePairsBatch contenant les données de validation\n :param nb_epochs: nb d'époques de training\n :returns: training History\n \"\"\"\n self.model.compile(\n optimizer=keras.optimizers.Adam(learning_rate=1e-3),\n loss=keras.losses.MeanSquaredError(), \n# run_eagerly=tf.executing_eagerly()\n )\n self.logger.info(f'Running eagerly : {tf.executing_eagerly()}')\n \n train_save_path = os.path.join(self.path_run, 'train_weights')\n mc = ModelCheckpoint(os.path.join(train_save_path, 'best_model_{epoch:02d}-{val_loss:.5f}'), \n save_best_only=True, save_weights_only=False, \n mode='auto', period=1, verbose=1)\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)\n if not os.path.exists(train_save_path):\n os.makedirs(train_save_path)\n\n self.logger.info('training')\n history = self.model.fit(training_pairs_batcher, \n epochs=nb_epochs,\n validation_data=validation_pairs_batcher,\n callbacks = [es, mc],\n verbose = True)\n return history\n\n def run_model_siamese(self, testing_pairs_batcher):\n return\n \n def run_model_single_side(self, formatted_data):\n \"\"\"\n prediction pour formatted_data (formatté via AnchorPositivePairsBatch.format_input)\n\n :param formatted_data: tf.tensor\n :returns: tf.tensor\n \"\"\"\n return self.model(formatted_data, training=False)\n \n def load_model(self,path):\n \"\"\"\n Charge un modele\n IMPORTANT : le modèle chargé est de type keras.Model, pas SimilarityModel\n \n :params path: chemin du model à être charger\n :returns: void\n \"\"\"\n return keras.models.load_model(path)\n","repo_name":"etalab-ia/ami-ia-insee-aiee2","sub_path":"nomenclatures/training_classes/training_model.py","file_name":"training_model.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41661162069","text":"from transformers import DetrFeatureExtractor, DetrForSegmentation\nimport numpy as np\nimport torch\nimport cv2\nfrom matplotlib import pyplot as plt\nimport itertools\nimport seaborn as sns\nfrom copy import deepcopy\nfrom clustering import filter_clusters\nimport motionnet\nfrom diffusers import StableDiffusionInpaintPipeline\nfrom PIL import Image\nimport os\n#from clip_interrogator import Config, Interrogator\n\n\nparts = ['body', 'half', 'face']\n\nfeature_extractor = DetrFeatureExtractor.from_pretrained('facebook/detr-resnet-50-panoptic')\nmodel = DetrForSegmentation.from_pretrained('facebook/detr-resnet-50-panoptic')\n\n\n# point in rectangle\ndef point_in_rect(x, y, rect):\n return rect[0] <= x <= rect[2] and rect[1] <= y <= rect[3]\n\ndef get_cluster(x, y, l):\n if x < 0 or y < 0 or x >= l.shape[0] or y >= l.shape[1]:\n return []\n\n r = []\n if l[x,y] == 1:\n r = [(x, y)]\n l[x, y] = 0\n r.extend(get_cluster(x-1, y, l))\n r.extend(get_cluster(x+1, y, l))\n r.extend(get_cluster(x, y-1, l))\n r.extend(get_cluster(x, y+1, l))\n\n return r\n\n\ndef get_clusters(l):\n clusters = []\n while True:\n x, y = np.where(l == 1)\n if len(x) == 0:\n break\n clusters.append(get_cluster(x[0], 
y[0], l))\n return clusters\n\n\ndef predict_animal_mask(im,\n gr_slider_confidence):\n image = Image.fromarray(im) # im: numpy array 3d: 480, 640, 3: to PIL Image\n image = image.resize((200,200)) # PIL image # could I upsample output instead? better?\n #result_image = np.array(image.convert('RGB'))\n result_image = im\n\n # encoding is a dict with pixel_values and pixel_mask\n encoding = feature_extractor(images=image, return_tensors=\"pt\") #pt=Pytorch, tf=TensorFlow\n outputs = model(**encoding) # odict with keys: ['logits', 'pred_boxes', 'pred_masks', 'last_hidden_state', 'encoder_last_hidden_state']\n logits = outputs.logits # torch.Size([1, 100, 251]); class logits? but why 251?\n bboxes = outputs.pred_boxes\n masks = outputs.pred_masks # torch.Size([1, 100, 200, 200]); mask logits? for every pixel, score in each of the 100 classes? there is a mask per class\n\n # keep only the masks with high confidence?--------------------------------\n # compute the prob per mask (i.e., class), excluding the \"no-object\" class (the last one)\n prob_per_query = outputs.logits.softmax(-1)[..., :-1].max(-1)[0] # why logits last dim 251?\n # threshold the confidence\n keep = prob_per_query > gr_slider_confidence/100.0\n\n # postprocess the mask (numpy arrays)\n label_per_pixel = torch.argmax(masks[keep].squeeze(),dim=0).detach().numpy() # from the masks per class, select the highest per pixel\n\n processed_sizes = torch.as_tensor(encoding['pixel_values'].shape[-2:]).unsqueeze(0)\n result = feature_extractor.post_process_panoptic(outputs, processed_sizes)[0]\n\n # We extract the segments info and the panoptic result from DETR's prediction\n segments_info = deepcopy(result[\"segments_info\"])\n\n result = []\n category = 1 # 1 is the category id for the \"person\" class\n #color_mask = np.zeros(image.size+(3,))\n color_mask = np.zeros(result_image.shape)\n print('colormask', color_mask.shape) # colormask (200, 200, 3)\n palette = itertools.cycle(sns.color_palette())\n keypoints = motionnet.get_keypoints(result_image)\n\n\n w, h, c = result_image.shape\n noses = np.array([(k[0][1]*w, k[0][0]*h, k[0][2], i) for i, k in enumerate(keypoints)])\n noses = noses[noses[:, 2] > 0.1]\n\n for lbl, cat in zip(np.unique(label_per_pixel), segments_info): #enumerate(palette()):\n if cat['category_id'] == category:\n mask = filter_clusters(label_per_pixel == lbl)\n mask = cv2.resize(mask, dsize=(result_image.shape[1], result_image.shape[0]), interpolation=cv2.INTER_LINEAR)\n y, x = np.where(mask != 0)\n nose = None\n rect = (x.min(), y.min(), x.max(), y.max())\n for key in keypoints:\n if point_in_rect(key[0][1] * h, key[0][0] * w, rect):\n nose = key\n break\n\n if nose is not None:\n cv2.rectangle(color_mask, (int(np.min(x)), int(np.min(y))), (int(np.max(x)), int(np.max(y))), (255,0,0), 1)\n color_mask[mask == 1, :] = np.asarray(next(palette))*255 #color\n result.append((mask, nose, (x.min(), y.min(), x.max(), y.max())))\n\n\n # Show image + mask\n pred_img = np.array(result_image)*0.25 + color_mask*0.75\n\n\n motionnet.loop_through_people(pred_img, keypoints)\n\n pred_img = pred_img.astype(np.uint8)\n\n return pred_img, result\n\n\n\ndef possible_parts(image, detection):\n result = []\n mask, keypoints, rect = detection\n img_w, img_h, _ = image.shape\n x_min, y_min, x_max, y_max = rect\n\n if keypoints[motionnet.LABELS['leftShoulder']][2] > motionnet.THRESHOLD and keypoints[motionnet.LABELS['rightShoulder']][2]:\n leftShoulder_y, leftShoulder_x, _ = keypoints[motionnet.LABELS['leftShoulder']]\n rightShoulder_y, 
rightShoulder_x, _ = keypoints[motionnet.LABELS['rightShoulder']]\n\n base_line_shoulder = keypoints[motionnet.LABELS['leftShoulder']][0]\n result.append(['face', (rightShoulder_x, base_line_shoulder, leftShoulder_x, y_min)])\n else:\n nose = keypoints[motionnet.LABELS['nose']]\n leftEar = keypoints[motionnet.LABELS['leftEar']]\n rightEar = keypoints[motionnet.LABELS['rightEar']]\n leftEye = keypoints[motionnet.LABELS['leftEye']]\n rightEye = keypoints[motionnet.LABELS['rightEye']]\n\n width = abs(leftEye[1] - rightEye[1])\n\n result.append(['face', (rightEar[1] - width, nose[0] + 3 * width, leftEar[1] + width, rightEye[0] - width)])\n\n if keypoints[motionnet.LABELS['leftHip']][2] > motionnet.THRESHOLD and keypoints[motionnet.LABELS['rightHip']][2]:\n leftHip_y, leftHip_x, _ = keypoints[motionnet.LABELS['leftHip']]\n rightHip_y, rightHip_x, _ = keypoints[motionnet.LABELS['rightHip']]\n base_line_hip = keypoints[motionnet.LABELS['leftHip']][1]\n\n hip_size = leftHip_x - rightHip_x\n\n result.append(['half', (rightHip_x - hip_size, base_line_hip, leftHip_x + hip_size, y_min)])\n\n if keypoints[motionnet.LABELS['leftAnkle']][2] > motionnet.THRESHOLD and keypoints[motionnet.LABELS['rightAnkle']][2]:\n leftAnkle_y, leftAnkle_x, _ = keypoints[motionnet.LABELS['leftAnkle']]\n rightAnkle_y, rightAnkle_x, _ = keypoints[motionnet.LABELS['rightAnkle']]\n\n leftHip_y, leftHip_x, _ = keypoints[motionnet.LABELS['leftHip']]\n rightHip_y, rightHip_x, _ = keypoints[motionnet.LABELS['rightHip']]\n hip_size = leftHip_x - rightHip_x\n\n leftEye = keypoints[motionnet.LABELS['leftEye']]\n rightEye = keypoints[motionnet.LABELS['rightEye']]\n\n width = abs(leftEye[1] - rightEye[1])\n\n result.append(['body', (rightAnkle_x - hip_size, leftAnkle_y + width, leftAnkle_x + hip_size, y_min)])\n\n result_filter = []\n for part in result:\n rect = part[1]\n x_min, y_min, x_max, y_max = rect\n if x_min < 0:\n x_min = 0\n if y_min < 0:\n y_min = 0\n if x_max > image.shape[1]:\n x_max = image.shape[1]\n if y_max > image.shape[0]:\n y_max = image.shape[0]\n\n result_filter.append([part[0], (int(x_min * img_w), int(y_min * img_h), int(x_max * img_w), int(y_max))])\n\n return result_filter\n\n\ndef process_image(filename, output_filename=None):\n image = cv2.imread(filename)[:,:,::-1]\n org_image = image.copy()\n\n _, detection = predict_animal_mask(image, 85)\n\n detection_max = None\n area_max = 0\n for part in detection:\n mask, keypoints, rect = part\n x_min, y_min, x_max, y_max = rect\n area = (x_max - x_min) * (y_max - y_min)\n if area > area_max:\n area_max = area\n detection_max = part\n\n mask, nose, rect = detection_max\n plt.imshow(mask)\n plt.show()\n\n x1, y1, x2, y2 = rect\n w = abs(x2 - x1)\n h = abs(y2 - y1)\n sub = org_image[y1:y2, x1:x2]\n scale_percent = 512 / max([w, h])\n\n width = int(sub.shape[1] * scale_percent)\n height = int(sub.shape[0] * scale_percent)\n dim = (width, height)\n print(dim)\n resized = cv2.resize(sub, dim, interpolation=cv2.INTER_AREA)\n print('resized.shape', resized.shape)\n # put resized image in center of final_image\n yoff = round((512-height)/2)\n xoff = round((512-width)/2)\n\n final_image = np.zeros((512, 512, 3), dtype=np.uint8)\n final_image[yoff:yoff+height, xoff:xoff+width, :] = resized\n mask_image = np.zeros((512, 512, 3), dtype=np.uint8)\n mask_image.fill(255)\n mask_image[yoff:yoff+height, xoff:xoff+width, :] = 0\n\n if output_filename is not None:\n rr = Image.fromarray(final_image)\n rr.save(output_filename)\n\n '''\n pil_image = 
Image.fromarray(np.uint8(org_image)).convert('RGB')\n ci = Interrogator(Config(clip_model_name=\"ViT-L-14/openai\"))\n ci.config.blip_num_beams = 64\n ci.config.chunk_size = 2048\n ci.config.flavor_intermediate_count = 2048\n\n prompt = ci.interrogate(pil_image)\n\n # python load auth_token from environment variable\n # os.environ\n \n auth_token = os.environ['HF_AUTH_TOKEN']\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n pipe = StableDiffusionInpaintPipeline.from_pretrained(\n \"runwayml/stable-diffusion-inpainting\",\n revision=\"fp16\",\n torch_dtype=torch.float16,\n use_auth_token=auth_token\n ).to(device)\n\n output = pipe(prompt=prompt, image=Image.fromarray(final_image), mask_image=Image.fromarray(mask_image)).images[0]\n result = np.array(output)\n result[yoff:yoff+height, xoff:xoff+width, :] = resized\n\n if output_filename is not None:\n result = Image.fromarray(result)\n result.save(output_filename)\n '''\n\n return rr\n\n\ndef test_image(image_path, gr_slider_confidence):\n gr_image_input = cv2.imread(image_path)\n print(gr_image_input.shape)\n\n pred_img, result = predict_animal_mask(gr_image_input, gr_slider_confidence)\n\n\n detection_max = None\n area_max = 0\n for part in result:\n mask, keypoints, rect = part\n x_min, y_min, x_max, y_max = rect\n area = (x_max - x_min) * (y_max - y_min)\n if area > area_max:\n area_max = area\n detection_max = part\n\n mask, nose, rect = detection_max\n plt.imshow(mask)\n plt.show()\n\n\n\n\ntest_image('example_image_3.jpg', 40)\n#test_image('cuerpo1.jpg', 85)\n\n#image_format.process_image('example_image_1.jpg', 'o_2.png')\n#image_format.process_image('example_image_2.jpg', 'o_3.png')\n#image_format.process_image('example_image_3.jpg', 'o_4.png')\n#image_format.process_image('example_image_2.jpeg', 'o_5.png')\n#image_format.process_image('face1.jpg', 'o_6.png')\n#image_format.process_image('face2.jpg', 'o_7.png')\n#image_format.process_image('example_image_3.jpg', 'o_4.png')","repo_name":"carlgira/automatic-image-processing","sub_path":"image_format.py","file_name":"image_format.py","file_ext":"py","file_size_in_byte":10992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23268351541","text":"import sending\nimport queries_to_bd\nimport keyboards\nimport telebot\nimport common_methods\n\n#основной метод проверки входящих сообщений\ndef main(bot, message):\n\n text_data = None\n audio_data = None\n document_data = None\n current_result_text = None\n current_reply_markup = telebot.types.InlineKeyboardMarkup()\n current_parsemod = None\n photo_data = None\n poll_data = None\n invoice_data = None\n sticker_data = None\n\n #проверяем, задал ли бот вопрос пользователю, на который он обязательно должен ответить текстом в чате\n flg_need_response = queries_to_bd.check_need_response_flg(message.chat.id)\n\n #если бот ждет ответа от пользователя в виде текстового сообщения, то\n if flg_need_response == 1:\n\n #проверяем о чем вообще шла речь, для этого заберем ID последней нажатой кнопки и заодно msg_id, в рамках которого будем редачить сообщение\n what_is_current_context = queries_to_bd.get_last_pressed_button(message.chat.id)\n\n #проверка, что пользователь прислал текст\n if message.content_type == 'text':\n\n #если последняя нажатая кнопка относится к меню с ID = 4, то это напоминалки. 
В напоминалках только в 1 месте требуется, чтобы пользователь дал ответ текстом - это при вводе информации, что же нужно напомнить\n if what_is_current_context.startswith(\"4\"):\n\n #создает в таблице запись о напоминалке\n queries_to_bd.create_new_notification(message.chat.id, message.text)\n\n #забирает ID созданной записи (напоминалки)\n notification_id = queries_to_bd.get_last_notification_id(message.chat.id)\n\n #забираем клавиатуру для редактирования напоминалки\n (current_result_text, current_reply_markup) = keyboards.notification_edit(notification_id)\n\n #если последняя нажатая кнопка относится к меню с ID = 5, то это шифрование/дешифрование\n elif what_is_current_context.startswith(\"5\"):\n\n #вытаскиваем тип операции\n operation_type = (what_is_current_context.split('/'))[-1]\n\n #вытаскиваем язык\n lang_code = (what_is_current_context.split('/'))[-2]\n\n #вытаскиваем ключ\n key = (what_is_current_context.split('/'))[-3]\n\n #отправляем на шифровку/дешифровку\n (current_result_text, current_reply_markup) = keyboards.crypting_result(operation_type, lang_code, key, message.text)\n\n current_parsemod = 'MarkdownV2'\n\n #текстовые данные ожидаются от пользователя по ветке 8 только в случае преобразования текста в войс\n elif what_is_current_context in [\"8/1/1\", \"8/2/1\"]:\n\n #вытаскиваем язык. 1 - русский, 2 - английский\n lang_code = (what_is_current_context.split('/'))[-2]\n\n #получаем путь, по которому лежит аудиофайл\n audio_data = common_methods.convert_text_to_speech(str(message.chat.id), message.text, lang_code)\n\n #получает последнее меню по ветке распознавания войса в текст\n (current_result_text, current_reply_markup) = keyboards.text_speech_result_voice(message.text, lang_code)\n\n #текстовые данные ожидаются от пользователя по ветке 9/2 только в случае получения названия города для погоды\n elif what_is_current_context == \"9/2\":\n\n #получает последнее меню по ветке распознавания войса в текст\n (current_result_text, current_reply_markup) = keyboards.weather_last_menu(message.text)\n\n #текстовые данные ожидаются от пользователя по ветке 9/3 только в случае получения текста, который будем помещен в QR-код\n elif what_is_current_context == \"9/3\":\n\n #получает последнее меню по ветке распознавания войса в текст\n (current_result_text, current_reply_markup) = keyboards.qr_code_result(message.text)\n\n document_data = common_methods.create_qr_code(message.text, str(message.chat.id))\n\n #текстовые данные ожидаются от пользователя по ветке 9/4 только в случае получения от пользователя тегов, по которым будем искать пикчу на реакторе\n elif what_is_current_context == \"9/4\":\n\n #получает последнее меню по ветке получения пикчи по тегу\n (current_result_text, current_reply_markup) = keyboards.get_pic_by_teg_result()\n\n #получает адрес изображения, спойлер и подпись к нему\n photo_data = common_methods.get_pic_by_teg(message.text)\n\n #если тип контента не подходит ни к одному из вариантов, которые ждет бот - сообщаем юзеру об этом\n else:\n current_result_text = 'Ты прислал не тот тип контента который нужен. Поздравляю тебя, начинай заново\\n/menu'\n\n #проверка, что пользователь прислал войс\n elif message.content_type == 'voice':\n\n #если последняя нажатая кнопка относится к меню с ID = 8, то это это преобразование войса в текст. Либо на русском языке, либо на английском другого не дано\n if what_is_current_context in [\"8/1/2\", \"8/2/2\"]:\n\n #вытаскиваем язык. 
1 - русский, 2 - английский\n lang_code = (what_is_current_context.split('/'))[-2]\n\n #получаем инфо о войсе\n file_info = bot.get_file(message.voice.file_id)\n\n #скачиваем войс\n downloaded_file = bot.download_file(file_info.file_path)\n\n file_path = 'assets/temp/convert_speech_to_text/' + str(message.chat.id) +'.ogg'\n\n #записываем войс на диск\n with open(file_path, 'wb') as new_file:\n new_file.write(downloaded_file)\n\n #отправляем на преобразование в текст и получаем результат\n text = common_methods.convert_speech_to_text(lang_code, file_path)\n\n #получает последнее меню по ветке распознавания войса в текст\n (current_result_text, current_reply_markup) = keyboards.text_speech_result_text(text, lang_code)\n\n #если тип контента не подходит ни к одному из вариантов, которые ждет бот - сообщаем юзеру об этом\n else:\n current_result_text = 'Ты прислал не тот тип контента который нужен. Поздравляю тебя умник, начинай заново\\n/menu'\n\n #в противном случае, бот не ждет ответа от пользователя в виде сообщения и поэтому отдает по умолчанию основную клавиатуру\n else:\n\n #получает данные основной клавиатуры\n (current_result_text, current_reply_markup) = keyboards.main_menu(message.chat.id)\n\n\n #собираем текстовые данные\n text_data = (current_result_text, current_reply_markup, current_parsemod, 0, 1)\n\n #отправляем все в единый метод отправки\n sending.main(bot, message.chat.id, text_data, photo_data, poll_data, audio_data, invoice_data, sticker_data, document_data)","repo_name":"Nevillested/Ararararagi_bot","sub_path":"simple_message_cases.py","file_name":"simple_message_cases.py","file_ext":"py","file_size_in_byte":9187,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4192555394","text":"from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import sleep\n\ns = Service(ChromeDriverManager().install())\ndriver = webdriver.Chrome(service=s)\n\ndriver.maximize_window()\n\ndriver.get('https://emag.ro')\nsleep(2)\ndriver.find_element(By.XPATH, '//button[text()=\"Accept\"]').click()\nsleep(2)\ndriver.find_element(By.XPATH, '(//i[@class=\"em em-close\"]/parent::button)[3]').click()\n# driver.find_element(By.XPATH, f'//button[@class=\"js-dismiss-login-notice-btn dismiss-btn btn btn-link pad-sep-none pad-hrz-none\"]').click()\nsleep(1)\n\n# scriu in campul de cautare \"biscuiti\"\ndriver.find_element(By.ID, \"searchboxTrigger\").send_keys('biscuiti')\nsleep(2)\n# fac click pe randul doi\ndriver.find_element(By.XPATH, f'(//a[@class=\"searchbox-suggestion-result searchbox-active-item\"])[2]').click()\nsleep(2)\n# driver.find_element(By.XPATH, f'(//i[@class=\"em em-close\"]/parent::button)[3]').click()\n\n\n# fac click pe inima de la produs\n\nbiscuit1 = driver.find_element(By.XPATH, f'//a[contains(text(), \"Biscuiti cu ciocolata si fulgi de cocos Bounty Cookies, 180g\")]/parent::div/parent::div/parent::div/parent::div//i[@class=\"em em-fav em-fav-bold\"]')\nbiscuit1.click()\n\nsleep(2)\nbiscuit2 = driver.find_element(By.XPATH, f'//a[contains(text(), \"2 x Biscuiti cu unt Leibniz, 200 gr.\")]/parent::div/parent::div/parent::div//i[@class=\"em em-fav 
em-fav-bold\"]')\nbiscuit2.click()\nsleep(2)\ndriver.find_element(By.ID, \"my_wishlist\").click()\nsleep(2)\ndriver.find_element(By.XPATH, '//span[contains(text(), \"Biscuiti cu ciocolata si fulgi de cocos Bounty Cookies, 180g\" )]/parent::a/parent::h2/parent::div/parent::div//span[contains(text(), \"Sterge\")]').click()\nsleep(2)\n# driver.find_element(By.XPATH, f\"//a[contains(text(), \"Sampon L'Oreal Paris Elseve Dream Long reparator pentru par lung, deteriorat, 400 ml\")]/parent::div/parent::div/parent::div/parent::div//i[@class=\"em em-fav em-fav-bold\"]\")\n# (//span[contains(text(), \"Sampon de par\")])[1]","repo_name":"TeodoraBirle03/Emag_Project","sub_path":"test_scripts/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7505632199","text":"from .base_attack import BaseAttack\nfrom .genetic import Genetic\nimport torch\nimport dgl\nimport pandas as pd\nimport numpy as np\nfrom bayesopt.bayesopt.predictors import GPWL, BayesianLinearRegression, NullSurrogate\nfrom .utils import correct_predictions, random_sample_flip, random_sample_rewire_swap, population_graphs, extrapolate_breakeven\nfrom copy import deepcopy\n\n\nclass BayesOptAttack(BaseAttack):\n def __init__(self, classifier: torch.nn.Module, loss_fn: torch.nn.Module,\n batch_size: int = 1, n_init: int = 10,\n edit_per_stage=None,\n surrogate: str = 'bayeslinregress',\n mode: str = 'flip',\n target_class: int = None,\n surrogate_settings: dict = None,\n acq_settings: dict = None,\n verbose: bool = True,\n terminate_after_n_fail: int = None,\n n_hop_constraint: int = None,\n preserve_disconnected_components: bool = False,):\n \"\"\"\n Attacking classifier via Bayesian optimisation with GP/Bayesian Linear regression surrogate with Weisfeiler-\n Lehman kernels.\n :param classifier: see BaseAttack\n :param loss_fn: see BaseAttack\n :param batch_size: the number of possible adversarial samples to propose at each BO iteration. Larger batch\n size will lead to faster performance, but correspondingly the performance might decrease\n :param edit_per_stage: int or float. the number of edits amortised to each stage. A smaller edit_per_stage leads\n to more stages which is more greedy, a larger edit_per_stage is less greedy but leads to a larger search\n space.\n :param surrogate: the choice of surrogate.\n :param n_init: the number of initial perturbations to be sampled randomly from the search space\n :param mode: str: 'flip', 'add', 'remove' or 'rewire': allowed edit operations on the edges.\n :param surrogate_settings: dict: any parameters to be passed to the surrogates. See bayesopt/gp_predictor.py\n :param acq_settings: dict: any parameters to be passed to the acquisition function.\n :param verbose: whether to enable diagnostic information.\n :param terminate_after_n_fail: the tolerance when the BO agent fails to push the attack loss. If this is not None (\n a positive int), after this number of successive failures in increasing attack loss the attack will be aborted.\n :param n_hop_constraint: int. 
If not None (a positive int), and edge perturbation (either rewire or flip) must\n be constrained within the n_hop distance of the first node.\n\n \"\"\"\n super().__init__(classifier, loss_fn)\n self.target_class = target_class\n if acq_settings is None:\n acq_settings = {}\n if 'acq_type' not in acq_settings.keys(): acq_settings['acq_type'] = 'ei'\n if 'acq_optimiser' not in acq_settings.keys(): acq_settings['acq_optimiser'] = 'mutation'\n if 'acq_max_step' not in acq_settings.keys(): acq_settings['acq_max_step'] = 400\n if 'random_frac' not in acq_settings.keys(): acq_settings['random_frac'] = 0.5\n self.acq_settings = acq_settings\n self.batch_size = batch_size\n self.n_init = n_init\n self.edit_per_stage = edit_per_stage\n if surrogate_settings is None:\n surrogate_settings = {}\n\n if surrogate == 'gpwl': self.surrogate = GPWL(**surrogate_settings)\n elif surrogate == 'bayeslinregress': self.surrogate = BayesianLinearRegression(**surrogate_settings)\n elif surrogate == 'null': self.surrogate = NullSurrogate()\n else: raise ValueError(f'Unrecognised surrogate choice {surrogate}')\n\n self.verbose = verbose\n assert mode in ['flip', 'add', 'remove', 'rewire'], f'mode {mode} is not recognised!'\n self.mode = mode\n # save a record of previous query history\n self.query_history = []\n self.loss_history = []\n self.terminate_after_n_fail = terminate_after_n_fail if terminate_after_n_fail is not None and terminate_after_n_fail > 0 else None\n self.n_hop_constraint = n_hop_constraint if n_hop_constraint is not None and n_hop_constraint > 0 else None\n self.preserve_disconnected_components = preserve_disconnected_components\n\n def attack(self, graph: dgl.DGLGraph, label: torch.tensor, budget, max_queries: int):\n \"\"\"\n The main attack loop.\n - For BO, at each iteration, we only modify one edge. If we have budget > 1, we use a greedy approach to\n partition the total max_queries into int(max_queries/budget) stages. At each stage, we attack on the\n *base graph*: in the first stage, it is the original graph (i.e. 
graph passed as an argument here);\n in the subsequent stages, it is the best perturbed graph of the previous stage that led to the largest\n classifier loss.\n - The optimisation terminates once it detects a successful attack.\n\n For the rest, see documentation for Genetic and BaseAttack\n \"\"\"\n if isinstance(budget, float):\n assert 0 < budget < 1., f'if a float is supplied, this number must be within 0 and 1 but got {budget}'\n budget = np.round(budget * graph.num_edges()).astype(np.int)\n if isinstance(self.edit_per_stage, float):\n self.edit_per_stage = np.round(self.edit_per_stage * graph.num_edges()).astype(np.int)\n stages, edits_per_stage = self.get_stage_statistics(max_queries, budget)\n if self.verbose:\n print(f'Total number of {max_queries} of queries is divided into {stages}')\n print(f'Edits per stage is {edits_per_stage}')\n self.query_history = []\n self.loss_history = []\n dfs = []\n self.committed_edits = []\n base_graph = graph\n i = 0\n adv_example = None\n is_edge_weighted = 'weight' in graph.edata.keys()\n\n best_loss = -np.inf\n n_fail = 0\n while i < max_queries:\n curr_stage = np.digitize(i, stages) - 1\n prev_stage = np.digitize(max(0, i - self.batch_size), stages) - 1\n edit_allowed_this_stage = edits_per_stage[curr_stage]\n if curr_stage != prev_stage or i == 0:\n if i > 0:\n best_idx = torch.argmax(self.surrogate.y)\n base_graph = deepcopy(self.surrogate.X[best_idx])\n # update the list of prohibited edges\n if len(self.query_history) > 0:\n self.committed_edits += self.query_history[-self.surrogate.y.shape[0] + int(best_idx)]\n\n if self.verbose:\n print(f'Entering Stage {curr_stage}. ')\n print(f'Committed edge edits={self.committed_edits}')\n # sample randomly at the start of each stage\n n_init = min(self.n_init, stages[curr_stage + 1] - stages[curr_stage])\n if self.mode == 'rewire':\n samples = [random_sample_rewire_swap(base_graph, edit_allowed_this_stage, rewire_only=not is_edge_weighted, n_hop=self.n_hop_constraint,\n preserve_disconnected_components=self.preserve_disconnected_components\n ) for _\n in range(n_init)]\n else:\n samples = [\n random_sample_flip(base_graph, edit_allowed_this_stage, remove_edge_only=self.mode == 'remove',\n add_edge_only=self.mode == 'add', n_hop=self.n_hop_constraint,\n committed_edges=self.committed_edits,\n preserve_disconnected_components=self.preserve_disconnected_components,)\n for _ in range(n_init)]\n if not len(samples):\n print('Patience reached. Terminating the current run')\n break\n\n perturbed_graphs = population_graphs(base_graph, samples, self.mode)\n self.query_history += samples\n i += n_init\n else:\n perturbed_graphs = self.suggest(base_graph, edit_allowed_this_stage, )\n i += self.batch_size\n\n with torch.no_grad():\n try:\n preds = self.classifier(dgl.batch(perturbed_graphs))\n except:\n preds = torch.cat([self.classifier(g) for g in perturbed_graphs])\n if preds.ndimension() == 1:\n preds.reshape(-1, 1)\n\n # dgl.batch and dgl.unbatch create lots of problems. 
use this as a fallback option\n # see reference in github issue:\n # https://github.com/dmlc/dgl/issues/2409\n if preds.shape[0] != len(perturbed_graphs):\n preds = self.classifier(perturbed_graphs)\n\n if len(perturbed_graphs) == 1 and preds.shape[1] == 1:\n labels = label[0].reshape(1)\n else:\n labels = torch.repeat_interleave(label, len(perturbed_graphs))\n losses = self.loss_fn(preds, labels, reduction='none')\n if losses.ndimension() == 0:\n losses = losses.reshape(1)\n self.loss_history += losses.detach().numpy().tolist()\n\n if self.verbose:\n print(f'Iteration {i}. Loss: {losses.detach().numpy()}.')\n\n dfs.append(self.construct_dataframe(losses, preds, label.squeeze(), i + 1))\n\n if len(self.loss_history) > 200 and extrapolate_breakeven(self.loss_history) > 1e5:\n print(f'Predicted breakeven point {extrapolate_breakeven(self.loss_history)} and run terminated')\n break\n\n if (self.target_class is None and np.sum(correct_predictions(preds.numpy(), labels.numpy())) < len(perturbed_graphs)) \\\n or (self.target_class is not None and (np.argmax(preds.numpy(), axis=1) == self.target_class).any()):\n print('Attack succeeded!')\n if self.target_class is None:\n comps = correct_predictions(preds.numpy(), labels.numpy())\n for i, comp in enumerate(comps):\n if not comp:\n adv_example = perturbed_graphs[i]\n break\n else:\n for i, pred in enumerate(preds):\n if np.argmax(pred.numpy()) == self.target_class:\n adv_example = perturbed_graphs[i]\n break\n break\n reset_surrogate = False\n self.observe(perturbed_graphs, losses, reset_surrogate=reset_surrogate)\n\n if np.max(losses.numpy()) > best_loss:\n n_fail = 0\n best_loss = torch.max(losses).detach().numpy()\n else:\n n_fail += len(perturbed_graphs)\n if self.terminate_after_n_fail is not None and n_fail > self.terminate_after_n_fail:\n print('Patience reached. 
Terminating the current run')\n break\n\n return pd.concat(dfs), adv_example\n\n def suggest(self, base_graph: dgl.DGLGraph, n_edit: int, prohibited_edges: list = None):\n \"\"\"\n The BO function to suggest perturbations to be queried from self.classifier\n :param base_graph: the graph on which we perform perturbations\n :param n_edit: number of edge edit allowed per perturbation\n :param prohibited_edges: list of edge edits that are not allowed.\n :return: a list of dgl graphs of shape self.batch_size\n \"\"\"\n is_edge_weighted = 'weight' in base_graph.edata.keys()\n candidate_graphs = None\n\n n_samples = self.acq_settings['acq_max_step']\n if self.acq_settings['acq_optimiser'] == 'random':\n if self.mode == 'rewire':\n candidate_samples = [random_sample_rewire_swap(base_graph,\n n_edit,\n rewire_only=not is_edge_weighted,\n n_hop=self.n_hop_constraint,\n preserve_disconnected_components=self.preserve_disconnected_components,\n ) for _\n in range(n_samples)]\n else:\n candidate_samples = [random_sample_flip(base_graph, n_edit, remove_edge_only=self.mode == 'remove',\n add_edge_only=self.mode == 'add', n_hop=self.n_hop_constraint,\n committed_edges=self.committed_edits,\n preserve_disconnected_components=self.preserve_disconnected_components,\n\n )\n for _ in range(n_samples)]\n\n elif self.acq_settings['acq_optimiser'] in ['genetic', 'mutation']:\n n_round = 10\n top_k = 3\n pop_size = max(n_samples // n_round, 100)\n # optionally set the fraction of randomly generated samples\n n_rand = np.round(pop_size * self.acq_settings['random_frac']).astype(np.int)\n n_mutate = pop_size - n_rand\n\n genetic_optimiser = Genetic(classifier=lambda x_: 0, loss_fn=lambda x_: 0,\n population_size=pop_size,\n mutation_rate=1., mode=self.mode)\n if self.mode == 'rewire':\n candidate_samples = [\n random_sample_rewire_swap(base_graph, n_edit, rewire_only=not is_edge_weighted,\n n_hop=self.n_hop_constraint,\n preserve_disconnected_components=self.preserve_disconnected_components,\n\n ) for _ in\n range(n_rand)]\n else:\n candidate_samples = [\n random_sample_flip(base_graph, n_edit, remove_edge_only=self.mode == 'remove',\n add_edge_only=self.mode == 'add',\n n_hop=self.n_hop_constraint,\n committed_edges=self.committed_edits,\n preserve_disconnected_components=self.preserve_disconnected_components,\n\n ) for _ in range(n_rand)]\n\n self.query_history += candidate_samples\n topk_indices = torch.topk(self.surrogate.y, min(self.surrogate.y.shape[0], top_k))[1]\n while len(candidate_samples) < pop_size:\n selected_index = topk_indices[np.random.randint(len(topk_indices))]\n candidate_samples.append(\n genetic_optimiser.mutate_sample(base_graph,\n self.query_history[-len(self.surrogate.y) + selected_index],\n )\n )\n candidate_graphs = population_graphs(base_graph, candidate_samples, self.mode)\n acq_values = self.surrogate.acquisition(candidate_graphs, acq_func=self.acq_settings['acq_type'], bias=None)\n\n # for each mutation round, alternate between optimising the topology (A) with features (X)\n for r in range(n_round):\n topk_indices = torch.topk(acq_values, min(len(candidate_graphs), top_k))[1]\n while len(candidate_samples) < pop_size:\n selected_sample = candidate_samples[np.random.randint(len(topk_indices))]\n candidate_samples.append(\n genetic_optimiser.mutate_sample(base_graph, selected_sample,))\n candidate_samples = candidate_samples[n_mutate:]\n\n candidate_graphs = population_graphs(base_graph, candidate_samples, self.mode)\n acq_values = self.surrogate.acquisition(candidate_graphs, 
acq_func=self.acq_settings['acq_type'], bias=None)\n\n else:\n raise NotImplementedError(f'Unable to parse the acq_optimiser {self.acq_settings[\"acq_optimiser\"]}')\n\n if candidate_graphs is None:\n candidate_graphs = population_graphs(base_graph, candidate_samples, self.mode)\n\n acq_values = self.surrogate.acquisition(candidate_graphs, acq_func=self.acq_settings['acq_type'])\n\n acq_values_np = acq_values.detach().numpy().flatten()\n acq_values_np_, unique_idx = np.unique(acq_values_np, return_index=True)\n i = np.argpartition(acq_values_np_, -min(acq_values_np_.shape[0], self.batch_size))[\n -min(acq_values_np_.shape[0], self.batch_size):]\n indices = np.array([unique_idx[j] for j in i])\n suggested = [candidate_graphs[j] for j in indices]\n self.query_history += [candidate_samples[j] for j in indices]\n return suggested\n\n def observe(self, X, y, reset_surrogate=False):\n \"\"\"\n Update the BO with new sample-target pair(s) we obtained from quering the classifer\n :param X: a list of dgl graphs. The list of dgl graphs we queried from the classifier\n :param y: a Tensor of shape[0] = len(X). The tensor of the classifier loss\n :param reset_surrogate: whether to reset the surrogate (clearing all previous fitted (X, y)).\n \"\"\"\n nan_idx = (y != y).nonzero().view(-1)\n if nan_idx.shape[0] > 0:\n for i in nan_idx:\n X.pop(i)\n y = y[y == y]\n if self.surrogate.X is None or reset_surrogate:\n self.surrogate.fit(X, y)\n else:\n self.surrogate.update(X, y)\n\n @staticmethod\n def construct_dataframe(losses: np.array, predictions: torch.tensor, label: torch.tensor, queries: int) \\\n -> pd.DataFrame:\n \"\"\"Construct a pandas dataframe consistent with the base class. This dataframe is for all samples evaluated\n after exactly `queries` queries.\"\"\"\n labels = np.tile(label, len(predictions))\n df = pd.DataFrame({'losses': losses,\n 'correct_prediction': correct_predictions(predictions.numpy(), labels),\n 'queries': queries})\n return df\n\n def get_stage_statistics(self, max_queries: int, budget: int):\n if self.edit_per_stage is None:\n self.edit_per_stage = budget\n if budget % self.edit_per_stage:\n num_stages = budget // self.edit_per_stage + 1\n else:\n num_stages = budget // self.edit_per_stage\n query_per_edit = max_queries // budget\n stage_length = self.edit_per_stage * query_per_edit\n stages = []\n edits_per_stages = []\n for i in range(num_stages):\n stages.append(min(max_queries, i * stage_length))\n if sum(edits_per_stages) + self.edit_per_stage < budget:\n edits_per_stages.append(self.edit_per_stage)\n else:\n edits_per_stages.append(budget - sum(edits_per_stages))\n stages.append(max_queries)\n return np.array(stages), np.array(edits_per_stages)\n","repo_name":"xingchenwan/grabnel","sub_path":"src/attack/bayesopt_attack.py","file_name":"bayesopt_attack.py","file_ext":"py","file_size_in_byte":19657,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"13113257132","text":"def calcular_edad(an):\n e = 2023 - an\n return e\n\ndef mayor_de_edad(e):\n m = False\n if e >= 18:\n m = \"Mayor de edad\"\n else:\n m = \"menor de edad\"\n return m\ndef generar_correo(n,a,p):\n for x in a:\n p += remplazar_caracteres(x)\n a = p\n\n\n n = n.strip()\n a = a.strip()\n correo = remplazar_caracteres(n[0].lower())\n x = a.split()\n #primer apellido\n correo += x[0].lower()\n #primera letra del segundo apellido\n correo += remplazar_caracteres(x[1][0].lower())\n correo += \"@unemi.edu.ec\"\n return correo\n\n\ndef 
remplazar_caracteres(l):\n    r = l\n    if l == \"ñ\":\n        r = \"n\"\n    elif l ==\"à\":\n        r = \"a\"\n    elif l == \"è\":\n        r = \"e\"\n    elif l == \"ì\":\n        r = \"i\"\n    elif l == \"ò\":\n        r = \"o\"\n    elif l == \"ù\":\n        r = \"u\"\n    return r\n\n#def generar_contrasena():\n    #pass\ndef run():\n    nombres = input(\"ingrese sus nombres: \")\n    apellidos = input(\"ingrese sus apellidos : \")\n    anio_nacimiento = int(input(\"Ingrese su año de nacimiento: \"))\n    edad = calcular_edad(anio_nacimiento)\n    tipo_edad = mayor_de_edad(edad)\n    correo = generar_correo(nombres,apellidos)\n    #contrasena = generar_contrasena()\n\n\n    print(f\"La edad de {nombres} es {edad}, es una persona {tipo_edad} su cuenta de correo generado es: {correo}\")\n\n\n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"theoriginalex/S2-TALLER_2.py","sub_path":"trabajo en clases.py","file_name":"trabajo en clases.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35187286841","text":"#\n# @lc app=leetcode.cn id=547 lang=python3\n#\n# [547] 省份数量\n#\n\n# @lc code=start\nfrom typing import List\nclass bxjj:\n    def __init__(self, l: int) -> None:\n        self.data, self.l = [-1]*l, l\n    def father(self, x: int) -> int:\n        if self.data[x]<0: return x\n        self.data[x] = self.father(self.data[x])\n        return self.data[x]\n    def merge(self, a: int, b: int) -> None:\n        a, b = self.father(a), self.father(b)\n        if a == b: return\n        if self.data[a] < self.data[b]:\n            self.data[a] += self.data[b]\n            self.data[b] = a\n        else:\n            self.data[b] += self.data[a]\n            self.data[a] = b\n    def query(self, a: int, b: int) -> bool:\n        return self.father(a) == self.father(b)\nclass Solution:\n    def findCircleNum(self, isConnected: List[List[int]]) -> int:\n        n = len(isConnected)\n        a = bxjj(n)\n        for i in range(n):\n            for j in range(n):\n                if isConnected[i][j]:\n                    a.merge(i, j)\n        ret = 0\n        for i in a.data:\n            if i < 0: ret += 1\n        return ret\n# @lc code=end\n\n","repo_name":"HellOwhatAs/Leetcode","sub_path":"547.省份数量.py","file_name":"547.省份数量.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29369715306","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul  4 00:00:17 2021\r\n\r\n@author: LONG QIANG\r\n\r\n值迭代算求解GridWorld问题\r\n基于状态值函数\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n'''\r\n创建一个随机确定性策略\r\n'''\r\ndef create_random_greedy_policy(env):\r\n    random_greedy_policy = {} # 用字典表示策略\r\n    for state in env.get_state_space(): # 遍历每一个状态\r\n        random_greedy_policy[state] = np.zeros(env.action_space_size)\r\n        # 随机选择一个动作,设置其概率为1\r\n        random_greedy_policy[state][np.random.choice(range(env.action_space_size))] = 1.0\r\n    \r\n    return random_greedy_policy # 返回策略\r\n\r\n'''\r\n迭代更新状态值函数\r\n'''\r\ndef statevalue_update(env,V):\r\n    V_new = np.zeros_like(V) # 初始化新的状态值函数\r\n    Psa = env.Psa() # 获取状态转移概率矩阵\r\n    delta = 0 # 值函数更新前后最大绝对差值\r\n    epsilon = 0.001 # 更新容忍系数\r\n    no_value_change = True # 是否更新指示器\r\n    \r\n    # 对每一个状态进行循环\r\n    for s_i,s in enumerate(env.get_state_space()):\r\n        action_values = np.zeros(env.action_space_size)\r\n        for a_i,a in enumerate(env.get_action_space()):\r\n            for ns_i,ns in enumerate(env.get_state_space()):\r\n                reward = env.Rsa(s,a,ns) # (s,a)转移到ns的即时奖励\r\n                prob = Psa[s_i,a_i,ns_i] # (s,a)转移到ns的概率\r\n                action_values[a_i] += prob*(reward+env.gamma*V[ns_i])\r\n        V_new[s_i] = np.max(action_values)\r\n        \r\n        # 维持最大的增量\r\n        delta = max(delta,np.abs(V_new[s_i]-V[s_i]))\r\n        \r\n        # 检查是否满足终止条件\r\n        if delta >= 
epsilon:\r\n no_value_change = False\r\n \r\n return V_new, no_value_change\r\n\r\n'''\r\n策略改进函数,用贪婪法求解最优策略\r\n'''\r\ndef policy_update(env,V):\r\n Psa = env.Psa() # 获取状态转移概率矩阵\r\n policy = create_random_greedy_policy(env) # 初始化策略\r\n\r\n # 求解最优策略 \r\n for s_i,s in enumerate(env.get_state_space()): # 对每一个状态进行循环 \r\n action_values = np.zeros(env.action_space_size)\r\n for a_i,a in enumerate(env.get_action_space()):\r\n for ns_i,ns in enumerate(env.get_state_space()):\r\n reward = env.Rsa(s,a,ns) # (s,a)转移到ns的即时奖励\r\n prob = Psa[s_i,a_i,ns_i] # (s,a)转移到ns的概率\r\n action_values[a_i] += prob*(reward+env.gamma*V[ns_i])\r\n \r\n # 求解贪婪策略\r\n best_action = np.argmax(action_values)\r\n policy[s] = np.eye(env.action_space_size)[best_action]\r\n \r\n return policy\r\n\r\n'''\r\n将policy表示成矩阵形式\r\n'''\r\ndef policy_express(env,policy):\r\n policy_mat = np.zeros((env.grid_height,env.grid_width))\r\n for s in env.get_state_space():\r\n policy_mat[s[0]][s[1]] = np.argmax(policy[s])\r\n \r\n return policy_mat\r\n \r\n'''\r\n值迭代主程序,该函数是算法2-4的具体实现\r\n'''\r\ndef value_iteration(env,episode_limit=100):\r\n V = np.zeros(env.state_space_size)\r\n \r\n # 迭代法求解最优状态值\r\n for i in range(episode_limit):\r\n print('第{}次迭代'.format(i)) \r\n V,no_value_change = statevalue_update(env,V)\r\n print('V=',V)\r\n if no_value_change:\r\n print('Iteration terminate with stable state value.')\r\n break\r\n \r\n # 计算最优策略\r\n policy = policy_update(env,V)\r\n\r\n # 将决策表示成矩阵形式\r\n policy_mat = policy_express(env,policy)\r\n \r\n # 返回最优策略和对应状态值\r\n return policy,policy_mat, V\r\n\r\n'''\r\n主程序\r\n'''\r\nif __name__ == '__main__':\r\n import GridWorld\r\n env = GridWorld.GridWorldEnv()\r\n policy_opt,policy_mat,V_opt = value_iteration(env,episode_limit=100)\r\n print(policy_mat)\r\n print(V_opt)","repo_name":"QiangLong2017/Deep-Reiforcement-Learning","sub_path":"code/code2-2 值迭代法求解例1-1.py","file_name":"code2-2 值迭代法求解例1-1.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"17378037111","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom tweets.models import Tweet\n\n\ndef tweet_view(request):\n tweet_content = Tweet.objects.all()\n context = {\n \"tweet_content\": tweet_content\n }\n return render(request, \"index.html\", context)\n","repo_name":"Oviep/twitter_clone","sub_path":"tweets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37619611824","text":"import pytest\nimport pandas as pd\nimport datatest as dt\n\nd = {\n'cars' : ['BMW','Toyato','Audi'],\n'year' : [2000,2001,2002],\n}\n\nmyvar = pd.DataFrame(d)\n\ndef test_columns():\n dt.validate(\n myvar.columns,\n {'cars', 'year'}\n )\n","repo_name":"MuraliMR259/Python-code","sub_path":"pytest_python.py","file_name":"pytest_python.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37876692355","text":"#!/usr/bin/env python3\n\nimport sys\n\nfor line in sys.stdin:\n a = line.strip().split()\n if a[1] != \"T\":\n continue\n if not a[2].startswith(\"furi\"):\n continue\n print(\"%s = 0x%s;\" % (a[2], 
a[0]))\n","repo_name":"Disasm/flipper-sandbox","sub_path":"utils/nm2ld.py","file_name":"nm2ld.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"15784112575","text":"# encode(): str -> bytes\n# decode(): bytes -> str\n\n# import socket module\nfrom socket import *\nserverSocket = socket(AF_INET, SOCK_STREAM)\n# Prepare a sever socket\n# 将套接字绑定到本地地址,地址是一个元组\n# 对于IP套接字,地址是一对(主机、端口)\n# 主机必须引用本地主机\nserverSocket.bind((\"localhost\", 6789))\n# 使服务器能够接受连接\nserverSocket.listen(1)\n\nwhile True:\n # Establish the connection\n print('Ready to serve...')\n # 返回一个表示连接的新套接字和客户端地址\n connectionSocket, addr = serverSocket.accept()\n try:\n # 从套接字接收最多缓冲大小的字节,返回的是 bytes 类型\n message = connectionSocket.recv(1024)\n # 分割 bytes,到各个 list,其中 index = 1 是请求文件地址\n filename = message.split()[1]\n # filename[1:] 去除了斜杠: b'/HelloWorld.html' -> b'HelloWorld.html'\n f = open(filename[1:])\n outputdata = f.read()\n # Send one HTTP header line into socket\n # 注意: 最后一定要两个\\n\n # Connection 一定要设置为 close\n header = 'HTTP/1.1 200 OK\\nConnection:close\\nContent-type:text/html\\nContent-length:%d\\n\\n' % len(outputdata)\n # 向套接字发送一个数据字符串\n connectionSocket.send(header.encode())\n # Send the content of the requested file to the client\n for i in range(0, len(outputdata)):\n connectionSocket.send(outputdata[i].encode())\n f.close()\n connectionSocket.close()\n except IOError:\n # Send response message for file not found\n header = 'HTTP/1.1 404 Not Found'\n # encode(): str -> bytes\n connectionSocket.send(header.encode())\n # Close client socket\n # 不能使用 serverSocket.close() 因为会直接关闭 socket\n connectionSocket.close()\n\nserverSocket.close()\n","repo_name":"chenyuxiang0425/Computer-Networking-A-Top-Down-Approach","sub_path":"lab01/WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"12461330428","text":"import libtcodpy as libtcod\n\n\ndark_wall = libtcod.Color(0, 0, 100)\nlight_wall = libtcod.Color(130, 110, 50)\ndark_ground = libtcod.Color(50, 50, 150)\nlight_ground = libtcod.Color(200, 180, 50)\n\ndarker_red = libtcod.darker_red\n# render_all missing health in health bar\ndarker_green = libtcod.darker_green\n# place_objects troll\ndarker_orange = libtcod.darker_orange\n# place_objects shield\ndark_red = libtcod.dark_red\n# new_game~player_death dead player, place_objects~monster_death corpse\nwhite = libtcod.white\n# settings~player, settings~stairs, message default message color,\n# menu default foreground, Object.clear, render_all floor, render_all wall,\n# render_all default foreground, render_all~render_bar default foreground\nyellow = libtcod.yellow\n# Equipment.dequip message, Item.drop dropped item message,\n# player_game.check_level_up level up message\nred = libtcod.red\n# handle_keys~next_level descending message,\n# Item.pick_up full inventory message, new_game welcome message,\n# spells~cast_heal full health message, spells~cast_lightning no target message\ngreen = libtcod.green\n# Item.pick_up item pick up message\nsky = libtcod.sky\n# new_game dagger, place_objects sword\nviolet = libtcod.violet\n# place_objects healing potion\norange = libtcod.orange\n# place_objects~monster_death death message,\n# spells~cast_fireball area message, spells~cast_fireball hit message\nblack = libtcod.black\n# render_all default background\nlight_green = libtcod.light_green\n# Equipment.equip 
message, spells~cast_confuse success message\nlight_violet = libtcod.light_violet\n# handle_keys~next_level heal message, spells~cast_heal heal message\nlight_yellow = libtcod.light_yellow\n# place_objects scroll of lightning bolt, place_objects scroll of fireball,\n# place_objects scroll of confusion, tutmut default foreground\nlight_red = libtcod.light_red\n# render_all health total in health bar\nlight_gray = libtcod.light_gray\n# render_all default foreground\nlight_blue = libtcod.light_blue\n# spells~cast_lightning hit message\nlight_cyan = libtcod.light_cyan\n# spells~cast_fireball targeting message,\n# spells~cast_confuse targeting message\ndesaturated_green = libtcod.desaturated_green\n# place_objects orc\n","repo_name":"Akhier/Py-TutMut","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"3428808081","text":"from itertools import count\nfrom peewee import *\n\ndb = SqliteDatabase('database.sqlite')\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\n\nclass User(BaseModel):\n id = IntegerField(primary_key=True)\n status = CharField()\n name = CharField()\n username = CharField()\n\nclass Room(BaseModel):\n id = AutoField()\n name = CharField()\n price = IntegerField()\n max_people_count = IntegerField()\n\nclass Order(BaseModel):\n id = AutoField()\n status = CharField(default = \"В обработке\")\n user = ForeignKeyField(User, backref='order', on_delete='CASCADE')\n room = CharField()\n count_people = IntegerField()\n phone = CharField()\n\n\n\ndb.create_tables([User, Room, Order])","repo_name":"CoOsmoze/AntiCafeBot","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15654745011","text":"from tkinter import Tk, Label, Entry, Button, IntVar, Checkbutton, END\nimport youtube_dl\n\n\nclass MainWindow:\n def __init__(self, window):\n self.window = window\n window.title(\"youtube-dl GUI\")\n Label(window, text=\"URL# 1\").grid(row=1, column=1)\n Label(window, text=\"URL# 2\").grid(row=2, column=1)\n Label(window, text=\"URL# 3\").grid(row=3, column=1)\n Label(window, text=\"URL# 4\").grid(row=4, column=1)\n Label(window, text=\"URL# 5\").grid(row=5, column=1)\n self.url1 = Entry(window, width=50)\n self.url2 = Entry(window, width=50)\n self.url3 = Entry(window, width=50)\n self.url4 = Entry(window, width=50)\n self.url5 = Entry(window, width=50)\n self.url1.grid(row=1, column=2, padx=5, pady=10)\n self.url2.grid(row=2, column=2, padx=5, pady=10)\n self.url3.grid(row=3, column=2, padx=5, pady=10)\n self.url4.grid(row=4, column=2, padx=5, pady=10)\n self.url5.grid(row=5, column=2, padx=5, pady=10)\n Button(window, text=\"Done\", command=window.quit).grid(row=7, column=1, pady=10)\n self.checked1 = IntVar()\n self.checked2 = IntVar()\n self.checked3 = IntVar()\n self.checked4 = IntVar()\n self.checked5 = IntVar()\n Checkbutton(window, text=\"Audio Only\", variable=self.checked1).grid(row=1, column=3)\n Checkbutton(window, text=\"Audio Only\", variable=self.checked2).grid(row=2, column=3)\n Checkbutton(window, text=\"Audio Only\", variable=self.checked3).grid(row=3, column=3)\n Checkbutton(window, text=\"Audio Only\", variable=self.checked4).grid(row=4, column=3)\n Checkbutton(window, text=\"Audio Only\", variable=self.checked5).grid(row=5, column=3)\n Button(window, text=\"Download\", 
command=self.download).grid(row=7, column=3, pady=10)\n Button(window, text=\"Clear\", command=self.clear_inputs).grid(row=7, column=2)\n\n def download(self): # test link http://www.youtube.com/watch?v=BaW_jenozKc\n def downloader(link, opts):\n with youtube_dl.YoutubeDL(opts) as ydl:\n ydl.download([link])\n\n def downloader_opts(audio_check):\n if audio_check != 0: # 6\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '256',\n }],\n }\n else:\n ydl_opts = {}\n return ydl_opts\n link1 = self.url1.get()\n link2 = self.url2.get()\n link3 = self.url3.get()\n link4 = self.url4.get()\n link5 = self.url5.get()\n if link1 != \"\":\n audio1 = self.checked1.get()\n downloader(link1, downloader_opts(audio1))\n print(\"Done\")\n else:\n print(\"URL# 1 is empty\")\n if link2 != \"\":\n audio2 = self.checked2.get()\n downloader(link2, downloader_opts(audio2))\n print(\"Done\")\n else:\n print(\"URL# 2 is empty\")\n if link3 != \"\":\n audio3 = self.checked3.get()\n downloader(link3, downloader_opts(audio3))\n print(\"Done\")\n else:\n print(\"URL# 3 is empty\")\n if link4 != \"\":\n audio4 = self.checked4.get()\n downloader(link4, downloader_opts(audio4))\n print(\"Done\")\n else:\n print(\"URL# 4 is empty\")\n if link5 != \"\":\n audio5 = self.checked5.get()\n downloader(link5, downloader_opts(audio5))\n print(\"Done\")\n else:\n print(\"URL# 5 is empty\")\n print(\"Done\")\n\n def clear_inputs(self): # clearing the entry boxes\n self.url1.delete(0, END) # deleting from first to the end\n self.url2.delete(0, END)\n self.url3.delete(0, END)\n self.url4.delete(0, END)\n self.url5.delete(0, END)\n self.checked1.set(0)\n self.checked2.set(0)\n self.checked3.set(0)\n self.checked4.set(0)\n self.checked5.set(0)\n\nif __name__ == '__main__':\n root = Tk()\n my_gui = MainWindow(root)\n root.mainloop()\n","repo_name":"catdog13/youtube_downloader","sub_path":"youtube_downloader_GUI.py","file_name":"youtube_downloader_GUI.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29930308783","text":"from ..units import Entity\nfrom ...board import Board\nfrom ...utils.geom import get_hypotenuse_length, get_polygon_radius\n\n\ndef resize_unit(unit: Entity, board: Board) -> None:\n \"\"\"\n Resize a unit to fit a tile of the given board\n\n Args:\n unit: The unit to resize\n board: The board on which the tile of the unit is located\n \"\"\"\n if board.graphics is not None and unit.sprite is not None and unit.sprite.rect is not None:\n multiply_ratio = board.graphics.sideLength / max(unit.sprite.rect.height, unit.sprite.rect.width)\n hypotenuse = get_hypotenuse_length(unit.sprite.rect.height * multiply_ratio,\n unit.sprite.rect.width * multiply_ratio)\n tile_diameter = get_polygon_radius(board.graphics.nbrOfSides, board.graphics.sideLength) * 2\n while hypotenuse > tile_diameter:\n multiply_ratio *= 0.99\n hypotenuse = get_hypotenuse_length(unit.sprite.rect.height * multiply_ratio,\n unit.sprite.rect.width * multiply_ratio)\n unit.sprite.size(int(round(unit.sprite.rect.width * multiply_ratio)),\n int(round(unit.sprite.rect.height * multiply_ratio)))\n","repo_name":"Angeall/pyTGF","sub_path":"pytgf/characters/utils/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8688914691","text":"# 
coding=utf-8\n\nfrom utils.gensim_util import tokenization, similarity\nfrom utils.inverted_index import get_inverted_index\nfrom utils.question_util import load_questions\n\n\ndef search(question):\n    # 分词\n    words = tokenization(content=question)\n    # 获取倒排索引\n    inverted_index = get_inverted_index('inverted_index.pkl')\n    # 从倒排索引中过滤出words中数据\n    real_questions = []\n    for word in words:\n        if word in inverted_index:\n            for q in inverted_index[word].keys():\n                real_questions.append(q)\n    # 去重\n    real_questions = list(set(real_questions))\n    # 计算相似度\n    question_similarities = similarity(question, real_questions)\n    question_similarities = list(map(lambda m: {\n        'sentence': m['sentence'],\n        'similarity': str(m['similarity'])\n    }, question_similarities))\n    # # 找到真正的问答详情\n    # question_details = load_questions()\n    return question_similarities\n\n\ndef question_search(question):\n    questions = load_questions()\n    if question in questions:\n        return {\n            'question': question,\n            'detail': questions[question]\n        }\n    return {\n        'question': question,\n        'detail': '没有找到'\n    }\n\n\n# if __name__ == '__main__':\n    # print(search('结婚一年提出离婚,男方能否要求返还彩礼?'))\n    # print(question_search('结婚一年提出离婚,男方能否要求返还彩礼?'))\n","repo_name":"supermareo/chinacourt","sub_path":"service/question_service.py","file_name":"question_service.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5913138744","text":"def f(x):\n    return x*x\nd = {}\nn = int(input())\nfor i in range (1, n+1):\n    x = int(input())\n    if x not in d:\n        d[x] = f(x)\n        print(f(x))\n    else:\n        print(d.get(x))\n\n\n\n\n\n\n\n","repo_name":"deada11/PyLessons","sub_path":"FirstCourseOfPython/dicts-3.py","file_name":"dicts-3.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34082939799","text":"class Solution:\n    def isValid(self, s: str) -> bool:\n        dic = {'(':')','{':'}','[':']'}\n        left = {'(','{','['}\n        length = len(s)\n        if length%2 == 1:\n            return False\n        tempStack = []\n        for i in range(length):\n            if s[i] in left:\n                tempStack.append(s[i])\n            else:\n                if tempStack == []:\n                    return False\n                top = tempStack.pop()\n                if dic[top] == s[i]:\n                    continue\n                else: \n                    return False\n        if len(tempStack) != 0:\n            return False\n        return True\n","repo_name":"zjsdcae/LeetCode","sub_path":"Stack/020.py","file_name":"020.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27775607856","text":"import pandas as pd\r\n\r\nask_state = True\r\nmain_list = []\r\n\r\nwhile ask_state == True:\r\n    ask_subject = input(\"subject (box kain, props, game items, stationery, indofest, cooking supplies): \")\r\n    ask_item_name = input(\"Enter the name of the item: \")\r\n    ask_invent_qty = input('Enter the storage quantity of item: ')\r\n\r\n    sub_list = [ask_subject, ask_item_name, ask_invent_qty]\r\n    main_list.append(sub_list)\r\n\r\n    ask_state = input('continue y/n: ')\r\n    if ask_state == \"n\":\r\n        ask_state = False\r\n    else:\r\n        ask_state = True\r\n\r\ncolumns_list = ['Subject', 'Item', 'Quantity']\r\n\r\ndf = pd.DataFrame(main_list, columns=columns_list)\r\n\r\ndf.to_excel(\"excel.xlsx\")\r\n\r\n\r\n\r\n    \r\n    
","repo_name":"chritzadz/coding_problems","sub_path":"input_invent_v1.py","file_name":"input_invent_v1.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39508465183","text":"n=int(input())\narr=[]\nfor i in range(n):\n x,y=map(int,input().split())\n arr.append([x,y])\ndist=10**18\nax,ay=arr[0]\nfor i in range(n):\n tDist=-(10**18)\n for j in range(n):\n if i==j: continue\n tDist=max(tDist,(arr[i][0]-arr[j][0])**2+(arr[i][1]-arr[j][1])**2)\n if tDist int:\n EXPS = set((\"+\", \"-\", \"*\", \"/\"))\n stack = []\n ans = int(tokens[0])\n\n for token in tokens:\n if token not in EXPS:\n stack.append(token)\n continue\n \n num2 = int(stack.pop())\n num1 = int(stack.pop())\n\n ans = self.calc(\n num1,\n num2,\n token\n )\n stack.append(ans)\n\n return ans\n \n \n def calc(self, a, b, exp):\n if exp == \"+\": return a + b\n if exp == '-': return a - b\n if exp == \"*\": return a * b\n if exp == '/': return int(a / b)\n\n\n\"\"\"\nYou are given an array of strings tokens that represents an arithmetic expression in a Reverse Polish Notation.\n\nEvaluate the expression. Return an integer that represents the value of the expression.\n\nNote that:\n\nThe valid operators are '+', '-', '*', and '/'.\nEach operand may be an integer or another expression.\nThe division between two integers always truncates toward zero.\nThere will not be any division by zero.\nThe input represents a valid arithmetic expression in a reverse polish notation.\nThe answer and all the intermediate calculations can be represented in a 32-bit integer.\n \n\nExample 1:\n\nInput: tokens = [\"2\",\"1\",\"+\",\"3\",\"*\"]\nOutput: 9\nExplanation: ((2 + 1) * 3) = 9\nExample 2:\n\nInput: tokens = [\"4\",\"13\",\"5\",\"/\",\"+\"]\nOutput: 6\nExplanation: (4 + (13 / 5)) = 6\n\nExample 3:\n\nInput: tokens = [\"10\",\"6\",\"9\",\"3\",\"+\",\"-11\",\"*\",\"/\",\"*\",\"17\",\"+\",\"5\",\"+\"]\nOutput: 22\nExplanation: ((10 * (6 / ((9 + 3) * -11))) + 17) + 5\n= ((10 * (6 / (12 * -11))) + 17) + 5\n= ((10 * (6 / -132)) + 17) + 5\n= ((10 * 0) + 17) + 5\n= (0 + 17) + 5\n= 17 + 5\n= 22\n \n\nConstraints:\n\n1 <= tokens.length <= 104\ntokens[i] is either an operator: \"+\", \"-\", \"*\", or \"/\", or an integer in the range [-200, 200].\n\"\"\"","repo_name":"lauvsong/leetcode","sub_path":"medium/150.py","file_name":"150.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31174907225","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n***************************************************************************\r\n* Copyright (c) 2022 *\r\n* Shai Seger *\r\n* *\r\n* This file is a supplement to the FreeCAD CAx development system. *\r\n* *\r\n* This program is free software; you can redistribute it and/or modify *\r\n* it under the terms of the GNU Lesser General Public License (LGPL) *\r\n* as published by the Free Software Foundation; either version 2 of *\r\n* the License, or (at your option) any later version. *\r\n* for detail see the LICENCE text file. *\r\n* *\r\n* This software is distributed in the hope that it will be useful, *\r\n* but WITHOUT ANY WARRANTY; without even the implied warranty of *\r\n* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *\r\n* GNU Library General Public License for more details. 
*\r\n* *\r\n* You should have received a copy of the GNU Library General Public *\r\n* License along with this macro; if not, write to the Free Software *\r\n* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *\r\n* USA *\r\n* *\r\n***************************************************************************\r\n\"\"\"\r\nfrom screw_maker import *\r\nimport FastenerBase\r\n\r\n# PCB spacers / Wurth standard WA-SSTII\r\n\r\ncos30 = math.cos(math.radians(30))\r\n\r\n\r\ndef pspMakeFace(m, sw, l, id, thl):\r\n id2 = id / 2.0\r\n sw2 = sw / 2.0\r\n m2 = m / 2.0\r\n d2 = 0.95 * sw2 / cos30\r\n l1 = l - (d2 - sw2) / 2.0\r\n dd = m2 - id2\r\n thl1 = thl - id2\r\n\r\n fm = FastenerBase.FSFaceMaker()\r\n fm.AddPoints(\r\n (id2, l - dd),\r\n (id2 + dd, l),\r\n (sw2, l),\r\n (d2, l1),\r\n (d2, dd),\r\n (sw2, 0),\r\n (id2 + dd, 0),\r\n (id2, dd),\r\n )\r\n if thl > 0:\r\n # separate holes\r\n fm.AddPoints((id2, thl1), (0, thl), (0, l - thl), (id2, l - thl1))\r\n return fm.GetFace()\r\n\r\n\r\ndef makePCBSpacer(self, fa):\r\n diam = fa.calc_diam\r\n width = fa.width\r\n flen = fa.calc_len\r\n\r\n FreeCAD.Console.PrintLog(\r\n \"Making PCB spacer\" + diam + \"x\" + str(flen) + \"x\" + str(width) + \"\\n\"\r\n )\r\n\r\n th, _ = fa.dimTable\r\n dia = self.getDia(fa.calc_diam, True)\r\n P = FsData[\"MetricPitchTable\"][fa.diameter][0]\r\n id = self.GetInnerThreadMinDiameter(dia, P)\r\n w = float(width)\r\n l = float(flen)\r\n if l > th:\r\n # separate thread holes on both sides\r\n thl = 10\r\n if thl + 0.5 > l / 2.0:\r\n thl = l / 2.0 - 0.5\r\n else:\r\n thl = 0\r\n\r\n f = pspMakeFace(dia * 1.05, w, l, id, thl)\r\n p = self.RevolveZ(f)\r\n htool = self.makeHexPrism(w, l)\r\n htool.translate(Base.Vector(0.0, 0.0, - 0.1))\r\n fSolid = p.common(htool)\r\n if fa.thread:\r\n if thl > 0: # blind & threaded from both sides\r\n threadCutter = self.CreateInnerThreadCutter(dia, P, thl - dia / 2)\r\n fSolid = fSolid.cut(threadCutter)\r\n threadCutter.rotate(\r\n Base.Vector(0.0, 0.0, l / 2),\r\n Base.Vector(1.0, 0.0, 0.0),\r\n 180\r\n )\r\n else: # has through hole, fully threaded\r\n threadCutter = self.CreateInnerThreadCutter(dia, P, l + P)\r\n fSolid = fSolid.cut(threadCutter)\r\n return fSolid\r\n","repo_name":"shaise/FreeCAD_FastenersWB","sub_path":"FsFunctions/FSmakePCBSpacer.py","file_name":"FSmakePCBSpacer.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":231,"dataset":"github-code","pt":"81"} +{"seq_id":"37741904608","text":"from flask import Blueprint, jsonify, request\nfrom flask_login import login_required\nfrom app.models import db, User\n\nfollows_routes = Blueprint('follows', __name__)\n\n#GET /api/follows/\n## PEOPLE YOU FOLLOW\n@follows_routes.route('/')\n@login_required\ndef get_follows(id):\n user = User.query.get(id)\n follows = user.following.all()\n return {'follows': [follow.to_dict() for follow in follows]}\n\n#GET /api/follows/\n## PEOPLE FOLLOWING **YOU**\n@follows_routes.route('//followers')\n@login_required\ndef get_your_followers(id):\n user = User.query.get(id)\n followers = user.followers.all()\n return {'followers': [follower.to_dict() for follower in followers]}\n\n#POST\n@follows_routes.route('//new-follow', methods=[\"POST\"])\n@login_required\ndef post_follow(id):\n \n user = User.query.get(id)\n followerId = request.json['followerId']\n followedId = request.json['followedId']\n\n followers = user.followers.all()\n newFollower = User.query.get(followerId)\n\n if newFollower in user.followers:\n 
user.followers.remove(newFollower)\n db.session.commit()\n return newFollower.to_dict()\n else:\n user.followers.append(newFollower)\n db.session.commit()\n return newFollower.to_dict()","repo_name":"alex-pober/Instagram-Clone","sub_path":"app/api/follows_routes.py","file_name":"follows_routes.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"24031555646","text":"# https://www.codewars.com/kata/5567e7d0adb11174c50000a7/train/python\n\n# This kata is about singly linked list. A linked list is an ordered set of data elements, each containing a link to its successor (and sometimes its predecessor, known as a double linked list). You are you to implement an algorithm to find the kth to last element.\n\n# For example given a linked list of:\n\n# a -> b -> c -> d\n\n# if k is the number one then d should be returned\n# if k is the number two then c should be returned\n# if k is the number three then b should be returned\n# if k is the number four then a should be returned\n# if k exceeds the size of the list then None returned\n# Special Note --> Node classes contain two fields; data and next. And to access the head of the list, use head. e.g. linked_list.head\n\n\ndef get_length(l):\n count, node = 0, l.head\n while node != None:\n node = node.next\n count += 1\n return count\n\ndef search_k_from_end(linked_list, k):\n length = get_length(linked_list)\n count, node = 0, linked_list.head\n while count < length - k:\n node = node.next\n count += 1\n return node.data if k <= length else None","repo_name":"phil-huynh/Problem-Sets","sub_path":"python/CodeWars/6kyu/node_mania.py","file_name":"node_mania.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35563927190","text":"import os,logging,subprocess, time\nimport tensorflow as tf\nimport PIL\nimport numpy as np\nfrom scipy import misc\nfrom collections import defaultdict\nfrom .facenet import facenet\nfrom .facenet.align import detect_face\nimport random\nfrom time import sleep\n\n\ndef _parse_function(filename):\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_image(image_string,channels=3)\n return tf.expand_dims(image_decoded, 0), filename\n\n\n\ndef pil_to_array(pilImage):\n \"\"\"\n Load a PIL image and return it as a numpy array. For grayscale\n images, the return array is MxN. For RGB images, the return value\n is MxNx3. 
For RGBA images the return value is MxNx4\n \"\"\"\n def toarray(im, dtype=np.uint8):\n \"\"\"Return a 1D array of dtype.\"\"\"\n # Pillow wants us to use \"tobytes\"\n if hasattr(im, 'tobytes'):\n x_str = im.tobytes('raw', im.mode)\n else:\n x_str = im.tostring('raw', im.mode)\n x = np.fromstring(x_str, dtype)\n return x\n\n if pilImage.mode in ('RGBA', 'RGBX'):\n im = pilImage # no need to convert images\n elif pilImage.mode == 'L':\n im = pilImage # no need to luminance images\n # return MxN luminance array\n x = toarray(im)\n x.shape = im.size[1], im.size[0]\n return x\n elif pilImage.mode == 'RGB':\n # return MxNx3 RGB array\n im = pilImage # no need to RGB images\n x = toarray(im)\n x.shape = im.size[1], im.size[0], 3\n return x\n elif pilImage.mode.startswith('I;16'):\n # return MxN luminance array of uint16\n im = pilImage\n if im.mode.endswith('B'):\n x = toarray(im, '>u2')\n else:\n x = toarray(im, ' min_score:\n top,left = (int(boxes[0][i][0] * shape[0]), int(boxes[0][i][1] * shape[1]))\n bot,right = (int(boxes[0][i][2] * shape[0]), int(boxes[0][i][3] * shape[1]))\n detections.append({\n 'x': left,\n 'y':top,\n 'w':right-left,\n 'h':bot-top,\n 'score': scores[0][i],\n 'object_name': self.class_index_to_string[int(classes[0][i])]\n })\n return detections\n\n def load(self):\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n self.filenames_placeholder = tf.placeholder(\"string\")\n dataset = tf.contrib.data.Dataset.from_tensor_slices(self.filenames_placeholder)\n dataset = dataset.map(_parse_function)\n self.iterator = dataset.make_initializable_iterator()\n self.od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(self.model_path, 'rb') as fid:\n serialized_graph = fid.read()\n self.od_graph_def.ParseFromString(serialized_graph)\n self.image, self.fname = self.iterator.get_next()\n tf.import_graph_def(self.od_graph_def, name='',input_map={'image_tensor': self.image})\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.15\n self.session = tf.Session(graph=self.detection_graph,config=config)\n self.boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n self.scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n self.classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n\n\nclass FaceDetector():\n\n def __init__(self,session=None):\n self.image_size = 182\n self.margin = 44\n self.gpu_memory_fraction = 0.1\n self.session = session\n self.minsize = 20\n self.threshold = [0.6, 0.7, 0.7]\n self.factor = 0.709\n\n def load(self):\n logging.info('Creating networks and loading parameters')\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.gpu_memory_fraction)\n self.session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with self.session.as_default():\n self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(self.session, None)\n\n def detect(self,image_path):\n aligned = []\n try:\n img = misc.imread(image_path)\n except (IOError, ValueError, IndexError) as e:\n errorMessage = '{}: {}'.format(image_path, e)\n logging.info(errorMessage)\n else:\n if img.ndim < 2:\n logging.info('Unable to align \"%s\"' % image_path)\n return []\n if img.ndim == 2:\n img = facenet.to_rgb(img)\n img = img[:, :, 0:3]\n bounding_boxes, _ = detect_face.detect_face(img, self.minsize, self.pnet, self.rnet, self.onet, 
self.threshold, self.factor)\n nrof_faces = bounding_boxes.shape[0]\n if nrof_faces > 0:\n det_all = bounding_boxes[:, 0:4]\n img_size = np.asarray(img.shape)[0:2]\n for boxindex in range(nrof_faces):\n det = np.squeeze(det_all[boxindex, :])\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0] - self.margin / 2, 0)\n bb[1] = np.maximum(det[1] - self.margin / 2, 0)\n bb[2] = np.minimum(det[2] + self.margin / 2, img_size[1])\n bb[3] = np.minimum(det[3] + self.margin / 2, img_size[0])\n cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]\n scaled = misc.imresize(cropped, (self.image_size, self.image_size), interp='bilinear')\n left, top, right, bottom = bb[0], bb[1], bb[2], bb[3]\n aligned.append({\n 'scaled':scaled,\n 'x': left,\n 'y':top,\n 'w':right-left,\n 'h':bottom-top,\n })\n return aligned","repo_name":"simmoncn/DeepVideoAnalytics","sub_path":"dvalib/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":7586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"70389278346","text":"## This version works, without the extra blank line\n##\n## Write a program that asks the user for a positive number 'n' as input.\n## Assume that the user enters a number greater than or equal to 3 and\n## print a triangle as described below. For example if the user enters 6 then the output should be:\n## *\n## **\n## ***\n## ****\n## *****\n## ******\n## *****\n## ****\n## ***\n## **\n## *\n\nn = int(input ('Enter a positive number great than or equal to 3:'))\ni = 1\nk = 1\nwhile i <= n and i > 0 and k < 2*n:\n print ('*'*i)\n if k < n:\n i = i+1\n else:\n i = i-1\n k = k+1\n \n \n","repo_name":"lipingzhu/PythonEdX","sub_path":"quiz3_part3_1.py","file_name":"quiz3_part3_1.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11143962020","text":"import cv2\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') \nsmile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml') \ndef detect(gray, frame): \n faces = face_cascade.detectMultiScale(gray, 1.3, 5) \n for (x, y, w, h) in faces: \n cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (255, 0, 0), 2) \n roi_gray = gray[y:y + h, x:x + w] \n roi_color = frame[y:y + h, x:x + w] \n smiles = smile_cascade.detectMultiScale(roi_gray, 1.8, 20) \n \n for (sx, sy, sw, sh) in smiles: \n cv2.rectangle(roi_color, (sx, sy), ((sx + sw), (sy + sh)), (0, 0, 255), 2) \n return frame \nvideo = cv2.VideoCapture(0,cv2.CAP_DSHOW) \nwhile True: \n a, frame = video.read() # used a as a throwaway variable\n \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) \n \n feed = detect(gray, frame) \n \n cv2.imshow('Video', feed) \n \n if cv2.waitKey(1) & 0xff == ord('e'): \n break\nvideo.release() \ncv2.destroyAllWindows() \n","repo_name":"vulbsti/Smile_detection","sub_path":"smile_windows.py","file_name":"smile_windows.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4820554723","text":"import yaml\nimport subprocess\nimport base64\nimport json\n\n\ndef create_contract_query_cmd(address, q):\n prefix = [\"junod\", \"q\", \"wasm\", \"contract-state\", \"smart\"]\n\n return prefix + [address, q]\n\n\ndef contract_query(query):\n q = json.dumps(query[\"content\"]).replace(\"null\", f'{{}}')\n\n subprocess.run(create_contract_query_cmd(query[\"address\"], q), check=True)\n\n\ndef 
create_tx_cmd(cmd):\n prefix = [\"junod\", \"tx\", \"wasm\"]\n suffix = [\n \"--from\", \"user1\", \"--chain-id\", \"testing\", \"--yes\", \"-b\", \"block\",\n \"--gas\", \"auto\", \"--gas-adjustment\", \"1.5\"\n ]\n\n return prefix + cmd + suffix\n\n\ndef create_store_cmd(contract_file):\n return create_tx_cmd([\"store\", contract_file])\n\n\ndef create_instantiate_cmd(name, code_id, init_msg):\n return create_tx_cmd(\n [\"instantiate\", code_id, init_msg, \"--label\", name, \"--no-admin\"])\n\n\ndef get_code_id_from_response(res):\n # parse response and get code id\n for event in res[\"events\"]:\n if event[\"type\"] == \"store_code\":\n for attr in event[\"attributes\"]:\n if base64.b64decode(attr[\"key\"]).decode('utf-8') == \"code_id\":\n return base64.b64decode(attr[\"value\"]).decode('utf-8')\n\n raise Exception(\"No code id found in response\")\n\n\ndef store(contract_file):\n p = subprocess.run(create_store_cmd(contract_file),\n check=True,\n stdout=subprocess.PIPE)\n res = yaml.safe_load(p.stdout)\n\n return get_code_id_from_response(res)\n\n\ndef get_contract_addresses_from_response(res, contract_name_list):\n addresses = {}\n\n events = res[\"logs\"][0][\"events\"]\n\n # parse response and get contract addresses from the list of names\n for event in events:\n if event[\"type\"] == \"wasm\":\n for attr in event[\"attributes\"]:\n if attr[\"key\"] in contract_name_list:\n addresses[attr[\"key\"]] = attr[\"value\"]\n\n # check in instantiate event type if nothing found\n if len(addresses) == 0:\n for event in events:\n if event[\"type\"] == \"instantiate\":\n for attr in event[\"attributes\"]:\n if attr[\"key\"] in contract_name_list:\n addresses[attr[\"key\"]] = attr[\"value\"]\n\n return addresses\n\n\ndef format_msg(msg):\n return msg.replace(\"null\", f'{{}}')\n\n\ndef instantiate_contract(instance):\n init_msg = json.dumps(instance[\"msg\"])\n\n init_msg = format_msg(init_msg)\n\n p = subprocess.run(create_instantiate_cmd(instance[\"name\"],\n str(instance[\"code\"]), init_msg),\n check=True,\n stdout=subprocess.PIPE)\n res = yaml.safe_load(p.stdout)\n\n # print(json.dumps(res, indent=4))\n # exit(1)\n\n return get_contract_addresses_from_response(res, instance[\"address_list\"])\n\n\ndef compile_wasm():\n subprocess.run([\"cargo\", \"wasm\"],\n check=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n\n\ndef contracts():\n # read contracts.yaml file\n with open(\"contracts.yaml\") as file:\n contracts = yaml.safe_load(file)\n return contracts\n\n\ndef queries():\n # read queries.yaml file\n with open(\"queries.yaml\") as file:\n contracts = yaml.safe_load(file)\n return contracts\n\n\ndef dump_code_ids(codes):\n with open(\"codes.yaml\", \"w\") as file:\n yaml.dump(codes, file)\n\n\ndef dump_contract_addresses(addresses_from_serie, serie):\n with open(\"addresses.yaml\") as file:\n addresses = yaml.safe_load(file)\n addresses[serie] = addresses_from_serie\n with open(\"addresses.yaml\", \"w\") as addr_file:\n yaml.dump(addresses, addr_file)","repo_name":"lumtis/wasmlab","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36876757656","text":"from codecs import open\nfrom os.path import abspath, dirname, join, expanduser\n\nfrom setuptools import Command, find_packages, setup\n\nthis_dir = abspath(dirname(__file__))\n\nwith open(join(this_dir, 'README.md'), encoding='utf-8') as file:\n long_description = 
file.read()\n\nsetup(\n name='bhimupipy',\n version=\"0.0.2\",\n description=\"A package to find upi details details\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/shrikantbache/bhimupipy.git\",\n author=\"BACHE SHRIKANT GANGADHAR\",\n author_email='bacheshrikant@gmail.com',\n maintainer=\"BACHE SHRIKANT GANGADHAR\",\n maintainer_email=\"bacheshrikant@gmail.com\",\n license='MIT',\n license_file=\"LICENSE\",\n platforms=\"any\",\n\n classifiers=[\n 'Topic :: Utilities',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n\n ],\n keywords=['search', 'bhimupijs', 'brute', 'address', 'vpa', 'upi', 'spy'],\n include_package_data=True,\n install_requires=['requests'],\n entry_points={\n 'console_scripts': [\n 'bhimupipy=bhimupipy.__init__:ExecuteBhimupiPy',\n ],\n },\n\n)\n","repo_name":"shrikantbache/bhimupipy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37971119596","text":"from .Message import Message\nfrom typing import List, Dict, Any\n\n\nclass Mailer:\n\n def __init__(self):\n\n self.mailbox: Dict[int, List[Message]] = dict()\n self.delivered: List[Message] = []\n\n def get_messages(self, recipient: int) -> List[Message]:\n \"\"\"\n Method to get all the messages of a given recipient\n :param recipient: recipient id\n :return: list with all the messages\n \"\"\"\n\n return self.mailbox.get(recipient, [])\n\n def deliver_message(self, sender: int, recipient: int, content: Any, title: str):\n\n \"\"\"\n The method create a message and append it to delivered messages list.\n :param title: title of the message\n :param recipient: id of the recipient\n :param sender: id of the sender\n :param content: an object with the content of the message\n :return: none\n \"\"\"\n\n msg = Message(sender, recipient, content, title)\n\n self.delivered.append(msg)\n\n def assign_messages(self):\n\n \"\"\"\n Get all messages from the delivered list and assign them according to the message recipient.\n :return:\n \"\"\"\n\n # Empty the current mailbox\n self.mailbox = dict()\n\n for msg in self.delivered:\n\n recipient = msg.recipient\n\n # If this recipient already have messages\n try:\n self.mailbox[recipient].append(msg)\n\n # Except if we don't have any messages for this recipient yet\n except KeyError:\n\n self.mailbox[recipient] = [msg]\n\n # Empty delivered messages\n self.delivered = []\n\n\n\n\n\n\n\n","repo_name":"evyatarluv/Artificial-Intelligence","sub_path":"DCOP/Mailer.py","file_name":"Mailer.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29462281063","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\n\n# Register your models here.\nfrom .models import Note, NoteContent\n\n\nclass NoteContentInline(admin.TabularInline):\n model = NoteContent\n extra = 1\n\n\nclass NoteAdmin(admin.ModelAdmin):\n list_display = ('title', 'description', 'date_created', 'date_modified')\n inlines = [NoteContentInline]\n\n\nadmin.site.register(Note, NoteAdmin)\n","repo_name":"ladeoshodi/gnotes","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"11553573784","text":"\"\"\"\nID: warreng1\nLANG: PYTHON3\nTASK: dualpal\n\"\"\"\n\nwith open('dualpal.in', 'r') as fin:\n firstnumbers, start = fin.readline().split()\n firstnumbers = int(firstnumbers)\n start = int(start)\n\ndef converttobase(num, base):\n newstr = ''\n copynum = num\n while copynum:\n mod = copynum % base\n copynum = copynum // base\n newstr = chr(48+mod+7*(mod>=10)) + newstr\n return newstr\n\nanswers = []\n\nwhile firstnumbers:\n #Look for matching numbers:\n amountofpalin = 0\n for i in range(2, 11):\n convertnum = converttobase(start+1, i)\n if convertnum == convertnum[::-1]:\n amountofpalin += 1\n if amountofpalin == 2:\n answers.append(start+1)\n firstnumbers -= 1\n break\n start += 1\n\nwith open('dualpal.out', 'w') as fout:\n for i in answers:\n #print(i)\n print(i, file=fout)","repo_name":"waguo/USACO","sub_path":"training/Chapter_1/dualpal/dualpal.py","file_name":"dualpal.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38662516848","text":"from pprint import pprint\nfrom browsermobproxy import Server\nfrom selenium import webdriver\nfrom time import sleep\n\nclass ProxyManager:\n\n __BMP = \"/home/gitcha/BMP/bin/browsermob-proxy\"\n bmp_port={'port':8090}\n\n def __init__(self): \n self.__server = Server(ProxyManager.__BMP, options=ProxyManager.bmp_port)\n self.__client = None\n \n\n def start_server(self):\n self.__server.start()\n return self.__server\n\n \n def start_client(self):\n self.__client = self.__server.create_proxy(params={'trustAllServers':'true'})\n return self.__client\n \n\n @property\n def client(self):\n return self.__client\n\n \n @property\n def server(self):\n return self.__server\n\n\n \nif __name__ == \"__main__\":\n\n proxy = ProxyManager()\n server = proxy.start_server()\n client = proxy.start_client()\n \n client.new_har(\"ya.ru\")\n print(client.proxy)\n\n options = webdriver.ChromeOptions()\n options.add_argument(\"--proxy-server={}\".format(client.proxy))\n options.add_argument('ignore-certificate-errors')\n \n driver = webdriver.Chrome(options=options)\n driver.get(\"https://www.ya.ru\")\n sleep(3)\n\n pprint(client.har)\n \n driver.close()\n server.stop()\n driver.quit\n\n \n\n \n\n \n\n","repo_name":"vobla13/Appium-Pytest-Android","sub_path":"proxy/bmp_proxy.py","file_name":"bmp_proxy.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2541998376","text":"L1 = [2, 7, 5, 6, 7, 1, 6, 2, 1, 7, 6]\n\nx = 0\ncpt = 0\nsave = 0\nsbr = 0\nfor i in range(len(L1)):\n x = L1[i]\n for y in range(len(L1)):\n if x == L1[y]:\n cpt += 1\n\n if cpt > save:\n save = cpt\n sbr = L1[i]\n cpt = 0\n\n\nprint(\"Le nombre le plus frequent dans la liste est le :\",sbr,\"(\",save,\"x)\")","repo_name":"Ethan68000/TP-r1.07","sub_path":"tp4/Exo4 TP4 (part 1).py","file_name":"Exo4 TP4 (part 1).py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36369076471","text":"\"\"\"crmproj URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. 
Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom crm import views\n\nurlpatterns = [\n\n url(r'^depart/list/',views.depart_list,name=\"departlist\" ),\n url(r'^depart/add/',views.depart_add,name=\"departadd\" ),\n url(r'^depart/edit/(\\d+)/',views.depart_edit,name=\"departedit\" ),\n url(r'^depart/del/(\\d+)/',views.depart_del,name=\"departdel\" ),\n url(r'^user/list/',views.user_list,name=\"userlist\" ),\n url(r'^user/add/',views.user_add,name=\"useradd\" ),\n url(r'^user/edit/(\\d+)/',views.user_edit,name=\"useredit\" ),\n url(r'^user/del/(\\d+)/',views.user_del,name=\"userdel\" ),\n url(r'^class/list/',views.class_list,name=\"classlist\" ),\n url(r'^class/add/',views.class_change,name=\"classadd\" ),\n url(r'^class/edit/(\\d+)/',views.class_change,name=\"classedit\" ),\n\n]\n","repo_name":"zhouyu37/crmproj","sub_path":"crm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1646438118","text":"import argparse\nimport os\nimport joblib\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.experimental import enable_halving_search_cv\nfrom sklearn.model_selection import HalvingGridSearchCV\nfrom sklearn.feature_extraction import text\nimport nltk\nfrom nltk.stem.snowball import SnowballStemmer\nimport re\nimport numpy as np\nfrom nltk.tokenize import word_tokenize\nfrom nltk.probability import FreqDist\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.stem import WordNetLemmatizer\nfrom pprint import pprint\nimport pandas as pd\n\n\n\nclass LemmaTokenizer:\n def __init__(self):\n self.wnl = WordNetLemmatizer()\n def __call__(self, doc):\n return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]\n\ndef stemmed_words(doc):\n return (stemmer.stem(w) for w in analyzer(doc))\n\n\"\"\"\nmodel_fn\n model_dir: (sting) specifies location of saved model\n\nThis function is used by AWS Sagemaker to load the model for deployment.\nIt does this by simply loading the model that was saved at the end of the\n__main__ training block above and returning it to be used by the predict_fn\nfunction below.\n\"\"\"\ndef model_fn(model_dir):\n model = joblib.load(os.path.join(model_dir, \"model.joblib\"))\n return model\n\n\"\"\"\ninput_fn\n request_body: the body of the request sent to the model. The type can vary.\n request_content_type: (string) specifies the format/variable type of the request\n\nThis function is used by AWS Sagemaker to format a request body that is sent to \nthe deployed model.\nIn order to do this, we must transform the request body into a numpy array and\nreturn that array to be used by the predict_fn function below.\n\nNote: Oftentimes, you will have multiple cases in order to\nhandle various request_content_types. 
Howver, in this simple case, we are \nonly going to accept text/csv and raise an error for all other formats.\n\"\"\"\ndef input_fn(request_body, request_content_type):\n if request_content_type == 'text/csv':\n samples = []\n for r in request_body.split('|'):\n samples.append(list(map(float,r.split(','))))\n return np.array(samples)\n else:\n raise ValueError(\"Thie model only supports text/csv input\")\n\n\"\"\"\npredict_fn\n input_data: (numpy array) returned array from input_fn above \n model (sklearn model) returned model loaded from model_fn above\n\nThis function is used by AWS Sagemaker to make the prediction on the data\nformatted by the input_fn above using the trained model.\n\"\"\"\ndef predict_fn(input_data, model):\n return model.predict(input_data)\n\n\"\"\"\noutput_fn\n prediction: the returned value from predict_fn above\n content_type: (string) the content type the endpoint expects to be returned\n\nThis function reformats the predictions returned from predict_fn to the final\nformat that will be returned as the API call response.\n\nNote: While we don't use content_type in this example, oftentimes you will use\nthat argument to handle different expected return types.\n\"\"\"\ndef output_fn(prediction, content_type):\n return '|'.join([t for t in prediction])\n\nif __name__ =='__main__':\n # Create a parser object to collect the environment variables that are in the\n # default AWS Scikit-learn Docker container.\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--output-data-dir', type=str, default=os.environ.get('SM_OUTPUT_DATA_DIR'))\n parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))\n parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))\n parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))\n\n args = parser.parse_args()\n\n # Load data from the location specified by args.train (In this case, an S3 bucket).\n nltk.download('wordnet')\n nltk.download('punkt')\n\n stemmer = SnowballStemmer('english')\n analyzer = HashingVectorizer().build_analyzer()\n\n #df = pd.read_csv('./archive/modified.csv', encoding=\"ISO-8859-1\")\n\n df = pd.read_csv(os.path.join(args.train, 'modified.csv'), index_col=0, engine=\"python\")\n\n df.dropna(subset = ['abstract','headline'], inplace=True)\n\n X = df['abstract']\n X_headline = df['headline']\n X_section = df['section']\n Y = df['clickbait_category_4']\n\n documents = []\n\n # text preprocessing\n\n Y_modified = pd.DataFrame()\n\n\n for sen in range(0, len(X)):\n # Remove all the special characters\n\n headline = X_headline.get(sen)\n abstract = X.get(sen)\n section = X_section.get(sen)\n\n if not(headline is None) and not(abstract is None) and not(section is None):\n\n doc = section + \" : \" + headline + \" : \" + abstract\n document = re.sub(r'\\W', ' ', str(doc))\n\n # remove all single characters\n document = re.sub(r'\\s+[a-zA-Z]\\s+', ' ', document)\n\n # Remove single characters from the start\n document = re.sub(r'\\^[a-zA-Z]\\s+', ' ', document)\n\n # Substituting multiple spaces with single space\n document = re.sub(r'\\s+', ' ', document, flags=re.I)\n\n # Removing prefixed 'b'\n document = re.sub(r'^b\\s+', '', document)\n\n # Converting to Lowercase\n document = document.lower()\n\n # Lemmatization\n document = document.split()\n\n # document = [stemmer.lemmatize(word) for word in document]\n document = ' '.join(document)\n\n documents.append(document)\n\n #print(\"document: \" + document)\n\n Y_modified = 
pd.concat([Y_modified, pd.DataFrame([{'clickbait_category_4': Y[sen]}])], ignore_index=True)\n\n\n    # frequency filtering\n\n\n    tokens = word_tokenize(\"\\n\".join(X.values))\n    freq = FreqDist(tokens)\n    frequent_words = []\n\n    for key, value in freq.items():\n        if value >= 200:\n            frequent_words.append(key.lower())\n\n    stop_words = text.ENGLISH_STOP_WORDS\n\n    vectorizer = TfidfVectorizer(tokenizer=LemmaTokenizer(), max_features=2500, analyzer=stemmed_words, stop_words=stop_words)\n\n    # create the parameter grid:\n\n    # Number of trees in random forest\n    n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n    # Number of features to consider at every split\n    max_features = ['auto', 'sqrt']\n    # Maximum number of levels in tree\n    max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\n    max_depth.append(None)\n    # Minimum number of samples required to split a node\n    min_samples_split = [2, 5, 10]\n    # Minimum number of samples required at each leaf node\n    min_samples_leaf = [1, 2, 4]\n    # Method of selecting samples for training each tree\n    bootstrap = [True, False]\n\n    # Create the random grid\n    # 'n_estimators': n_estimators,\n\n    random_grid = {\n        'max_features': max_features,\n        'max_depth': max_depth,\n        'min_samples_split': min_samples_split,\n        'min_samples_leaf': min_samples_leaf,\n        'bootstrap': bootstrap}\n\n    pprint(random_grid)\n\n\n    processed_features = vectorizer.fit_transform(documents)\n\n    processed_features.shape\n\n    X_dense = processed_features.todense()\n\n    X_dense.shape\n\n    x_train, x_test, y_train, y_test = train_test_split(X_dense, Y_modified['clickbait_category_4'], test_size = 0.2)\n\n    x_train.shape, x_test.shape\n\n    y_train.shape, y_test.shape\n\n    clf = RandomForestClassifier(n_estimators = 100)\n\n    model = HalvingGridSearchCV(estimator=clf, param_grid = random_grid, cv=3, factor=2,\n                                resource='n_estimators', max_resources=30).fit(x_train, y_train)\n\n    #Save the model to the location specified by args.model_dir\n    joblib.dump(model, os.path.join(args.model_dir, \"model.joblib\"))","repo_name":"abhinavGirish/newsy","sub_path":"aws_training.py","file_name":"aws_training.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25967820336","text":"from database import DATABASE\nimport myparser\nimport freqlist\nfrom dict import DICT\nfrom math import log, fabs, exp\n\nMAX = 12000\nSTEP = 1000\nEPS = 0.01\n\n\ndef get_pr_func(db=DATABASE, max_=MAX, step=STEP):\n    '''\n    Returns a function that maps an English word to the estimated probability that the user knows it.\n\n    :param db: database containing information about the user's vocabulary\n    :param max_: maximum rank on the frequency list of words taken into account\n    :param step: length of the interval over which the probability is estimated\n    :return: a function that maps an English word to the estimated probability that the user knows it\n    '''\n    rank_prob = [(-step // 2, 1.0)]\n    rank = 0\n    while rank + step <= max_:\n        pr = db.known_words(rank, rank + step)\n        mean_rank = rank + step // 2\n        if pr is None:\n            (r, p) = rank_prob[-1]\n            pr = abs(r) / mean_rank * p\n        rank_prob.append((mean_rank, pr))\n        rank += step\n\n    def pr_func(word):\n        x = freqlist.FREQLIST.rank(word)\n        if x is None:\n            return EPS\n        i = 0\n        while i+1 < len(rank_prob):\n            if rank_prob[i][0] <= x < rank_prob[i+1][0]:\n                break\n            i += 1\n        if i+1 == len(rank_prob):\n            (r, p) = rank_prob[-1]\n            pr = abs(r) / x * p\n            return pr\n        (x1, y1) = rank_prob[i]\n        (x2, y2) = rank_prob[i+1]\n        return y1 + (y2 - y1) / (x2 - x1) * (x - x1)\n\n    return pr_func\n\n\ndef get_utility_func(db=DATABASE, fl=freqlist.FREQLIST):\n    '''\n    Returns a utility function that assigns a value to each word.\n\n    :param db: database containing information about the user's vocabulary\n    :param fl: frequency list\n    :return: utility function that assigns a value to each word\n    '''\n    pr = get_pr_func(db=db)\n    C = 11.2\n\n    def utility_func(word):\n        f = fl.freq(word)\n        p = 1.0 - pr(word)\n        # print((word, f, p))\n        return (log(C+f) - log(C)) * exp(C*p)\n\n    return utility_func\n\n\ndef select(stream, db=DATABASE):\n    '''\n    Returns a list of words sorted by utility value for the given character stream.\n\n    :param stream: character stream\n    :param db: database containing information about the user's vocabulary\n    :return: list of words sorted by utility value\n    '''\n    words = myparser.parse(stream)\n    temp_fl = freqlist.FreqList()\n    temp_fl.load(stream=stream)\n    fl = freqlist.DynMixedFreqList([(freqlist.FREQLIST, 0.5), (temp_fl, 0.5)])\n    utility_fun = get_utility_func(db=db, fl=fl)\n    pr = get_pr_func(db=db)\n    mean_word = []\n    for word in words:\n        if DICT.correct(word) is not None and DATABASE.known_now(word) == False:\n            mean_word.append((utility_fun(word), word, pr(word), ))\n    mean_word.sort(reverse=True)\n    # print(mean_word)\n    return [word for (_, word, _) in mean_word]\n\nif __name__ == '__main__':\n    pass\n","repo_name":"kocichy/Words","sub_path":"selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41740810192","text":"import cv2\nimport json\nimport requests\nimport time\nfrom requests.structures import CaseInsensitiveDict\n\nurl = \"https://demo.thingsboard.io/api/v1/Yn5VHwYZhjyl8zpcHF4T/telemetry\"\nheaders = CaseInsensitiveDict()\nheaders[\"Content-Type\"] = \"application/json\"\n\n# Initialize the cv2 QRCode detector\ndetector = cv2.QRCodeDetector()\n\n# Open the webcam\ncap = cv2.VideoCapture(0)\n\n# Set the scanner window dimensions\nscan_width = 300\nscan_height = 300\n\n# Set the position of the scanner window\nscan_x = int((cap.get(cv2.CAP_PROP_FRAME_WIDTH) - scan_width) / 2)\nscan_y = int((cap.get(cv2.CAP_PROP_FRAME_HEIGHT) - scan_height) / 2)\n\n# Set the delay time in seconds\ndelay_time = 5\n\n# Set the start time to the current time\nstart_time = time.time()\n\nwhile True:\n    # Read the image from the webcam\n    ret, img = cap.read()\n\n    # Create a gray version of the image\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    # Detect and decode the QR code in the image\n    data, bbox, _ = detector.detectAndDecode(img)\n\n    # If a QR code is detected\n    if data and (time.time() - start_time) >= delay_time:\n        # Print the decoded data to the console\n        
print(\"Data:\", data)\n\n        # Loading the json data (json.loads is safe here; eval on untrusted QR data is not)\n        json_data = json.loads(data)\n\n        # Extracting the keys and values from the json data\n        keys = []\n        values = []\n        for key, value in json_data.items():\n            keys.append(key)\n            values.append(value)\n\n        # Constructing the data dictionary to be sent\n        data = {}\n        for i in range(len(keys)):\n            data[keys[i]] = values[i]\n\n        # Sending the data to Thingsboard\n        response = requests.post(url, headers=headers, json=data)\n        print(response.status_code)\n\n        # Set the start time to the current time\n        start_time = time.time()\n\n    # Draw the scanner window\n    cv2.rectangle(img, (scan_x, scan_y), (scan_x+scan_width, scan_y+scan_height), (255, 0, 0), 2)\n\n    # Show the image in a window\n    cv2.imshow(\"QR Code Scanner\", img)\n\n    # Exit the loop if the \"s\" key is pressed\n    if cv2.waitKey(1) & 0xFF == ord(\"s\"):\n        break\n# Release the webcam and close the window\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"Pentagon-Deerhack/HEMOve","sub_path":"QRcode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8121633951","text":"#!/usr/bin/env python\n\n# This code detects a red ball and makes the bveeta mini follow it\n# Bizbot Technology\n# Programmer: Ts.Khairul\n\nimport cv2\nimport serial\nimport time\n\n# Connect to the serial port to send commands to the robot\n#ser = serial.Serial('/dev/ttyUSB1', 57600) # Replace with the correct port and baud rate for your robot\n#time.sleep(2)\n\n# Load the image or video\ncap = cv2.VideoCapture(0) # Use 0 for webcam or specify file path for video\n\n# Define the lower and upper bounds of the red color in HSV color space\nred_lower = (0, 120, 70)\nred_upper = (10, 255, 255)\n\nser = serial.Serial('/dev/ttyUSB1', 57600, timeout=0.050)\ntime.sleep(2) # must wait for 2 seconds for the connection to be established\n\nwhile True:\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n    if not ret:\n        break\n\n    # Convert the frame to HSV color space\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n    # Threshold the HSV image to get only red colors\n    mask = cv2.inRange(hsv, red_lower, red_upper)\n\n    # Find contours of the red color in the mask image\n    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    # Draw a rectangle around the ball if it is detected\n    if len(contours) > 0:\n        # Find the largest contour (assuming it is the ball)\n        largest_contour = max(contours, key=cv2.contourArea)\n        (x, y, w, h) = cv2.boundingRect(largest_contour)\n        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n        # Compute the center of the ball\n        cx = int(x + w/2)\n        cy = int(y + h/2)\n\n        # Send commands to the robot to follow the ball\n        if cx < 320:\n            #ser.write(b'l') # Turn left\n            ser.write(b\"m 0 -30 \\\r\\\n\")\n        elif cx > 320:\n            #ser.write(b'r') # Turn right\n            ser.write(b\"m -30 0 \\\r\\\n\")\n        else:\n            #ser.write(b'f') # Move forward\n            ser.write(b\"m -80 -80 \\\r\\\n\")\n    else:\n        # Stop the robot if the ball is not detected\n        #ser.write(b's') # Stop moving\n        ser.write(b\"m 0 0 \\\r\\\n\")\n\n    # Display the resulting image\n    cv2.imshow('frame', frame)\n\n    # Exit the loop if 'q' is pressed\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# Release the capture and destroy all 
windows\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"skj84/bveeta-R007","sub_path":"Tutorial_6_red_ball_follow.py","file_name":"Tutorial_6_red_ball_follow.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"10280544652","text":"import unittest\n\nfrom title_case import title_case\n\nclass Test(unittest.TestCase):\n def test_title_case(self):\n string='the quick brown fox jumped over the lazy dog'\n result=title_case(string)\n self.assertEqual(result, 'The Quick Brown Fox Jumped Over The Lazy Dog')\n\nif __name__=='__main__':\n unittest.main()","repo_name":"kendayao/algorithms-practice","sub_path":"12-title-case/test_title_case.py","file_name":"test_title_case.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71817466492","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\" \n@Author:ling \n@Date: 2017/11/13 \n\"\"\"\n\n\ndef triangles(maxline):\n a, li = 0, [1]\n while a < maxline:\n yield li\n li = [1] + [li[x] + li[x+1] for x in range(a)] + [1]\n a += 1\n\nn = 0\nfor t in triangles(10):\n print(t)\n n += 1\n if n == 10:\n break\n","repo_name":"leif-sh/PythonDemo","sub_path":"ling/triangles.py","file_name":"triangles.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37389433712","text":"from pygdbmi.constants import DEFAULT_GDB_TIMEOUT_SEC\nfrom pygdbmi.gdbcontroller import GdbController\n\nfrom playground.gdb_wrapper.gdb_wrapper import gdb_wrapper\n\n\nclass gdb_wrapper_x86_64(gdb_wrapper):\n def __init__(self, port: int = None, file: str = None):\n self.gdb_ctrl = GdbController([\"gdb-multiarch\", \"-q\", \"--interpreter=mi\"])\n self.gdb_ctrl.write(\"set architecture i386:x86_64\")\n self._registers = {'r{}'.format(i) for i in range(8, 16)}\n self._registers.update({'rax', 'rdi', 'rsi', 'rdx', 'rcx', 'rbx', 'rsp', 'rbp', 'rip', 'eflags'})\n # self._registers.update({'eax', 'edi', 'esi', 'edx', 'ecx', 'ebx', 'esp', 'ebp', 'eip'})\n self._flags_name = 'eflags'\n self._flag_to_pos = {'CF': [0], 'PF': [2], 'AF': [4], 'ZF': [6], 'SF': [7], 'TF': [8], 'IF': [9], 'DF': [10],\n 'OF': [11], 'IOPL': [11, 12], 'NT': [14], 'RF': [16], 'VM': [17], 'AC': [18], 'VIF': [19],\n 'VIP': [20], 'ID': [21]}\n\n super().__init__(port, file)\n\n @gdb_wrapper.no_response()\n def get_flags(self, timeout_sec: int = DEFAULT_GDB_TIMEOUT_SEC):\n log = self.gdb_ctrl.write(\"print ${}\".format(self._flags_name), timeout_sec)\n _, _, values = self._parse_log(log, 'console')['payload'].partition(' = ')\n result = {}\n all_flags = values.rstrip('\\\\n').strip('][').split()\n for flag_value in all_flags:\n flag_name, _, value = flag_value.partition('=')\n if value == '':\n value = 1\n result[flag_name] = value\n for flag_name in self._flag_to_pos:\n if flag_name not in result:\n result[flag_name] = 0\n return result\n","repo_name":"OSLL/asm_web_debug","sub_path":"playground/gdb_wrapper/gdb_wrapper_x86.py","file_name":"gdb_wrapper_x86.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"42517843389","text":"from __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n \"metadata_version\": \"1.1\",\n \"status\": [\"preview\"],\n 
\"supported_by\": \"community\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: oci_opsi_operations_insights_warehouse_resource_usage_summary_facts\nshort_description: Fetches details about a OperationsInsightsWarehouseResourceUsageSummary resource in Oracle Cloud Infrastructure\ndescription:\n - Fetches details about a OperationsInsightsWarehouseResourceUsageSummary resource in Oracle Cloud Infrastructure\n - Gets the details of resources used by an Operations Insights Warehouse.\n There is only expected to be 1 warehouse per tenant. The warehouse is expected to be in the root compartment.\nversion_added: \"2.9.0\"\nauthor: Oracle (@oracle)\noptions:\n operations_insights_warehouse_id:\n description:\n - Unique Operations Insights Warehouse identifier\n type: str\n aliases: [\"id\"]\n required: true\nextends_documentation_fragment: [ oracle.oci.oracle ]\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: Get a specific operations_insights_warehouse_resource_usage_summary\n oci_opsi_operations_insights_warehouse_resource_usage_summary_facts:\n # required\n operations_insights_warehouse_id: \"ocid1.operationsinsightswarehouse.oc1..xxxxxxEXAMPLExxxxxx\"\n\n\"\"\"\n\nRETURN = \"\"\"\noperations_insights_warehouse_resource_usage_summary:\n description:\n - OperationsInsightsWarehouseResourceUsageSummary resource\n returned: on success\n type: complex\n contains:\n id:\n description:\n - OPSI Warehouse OCID\n returned: on success\n type: str\n sample: \"ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\"\n cpu_used:\n description:\n - Number of OCPUs used by OPSI Warehouse ADW. Can be fractional.\n returned: on success\n type: float\n sample: 1.2\n storage_used_in_gbs:\n description:\n - Storage by OPSI Warehouse ADW in GB.\n returned: on success\n type: float\n sample: 1.2\n lifecycle_state:\n description:\n - Possible lifecycle states\n returned: on success\n type: str\n sample: CREATING\n sample: {\n \"id\": \"ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\",\n \"cpu_used\": 1.2,\n \"storage_used_in_gbs\": 1.2,\n \"lifecycle_state\": \"CREATING\"\n }\n\"\"\"\n\nfrom ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils\nfrom ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (\n OCIResourceFactsHelperBase,\n get_custom_class,\n OCIAnsibleModule,\n)\n\ntry:\n from oci.opsi import OperationsInsightsClient\n\n HAS_OCI_PY_SDK = True\nexcept ImportError:\n HAS_OCI_PY_SDK = False\n\n\nclass OperationsInsightsWarehouseResourceUsageSummaryFactsHelperGen(\n OCIResourceFactsHelperBase\n):\n \"\"\"Supported operations: get\"\"\"\n\n def get_required_params_for_get(self):\n return [\n \"operations_insights_warehouse_id\",\n ]\n\n def get_resource(self):\n return oci_common_utils.call_with_backoff(\n self.client.summarize_operations_insights_warehouse_resource_usage,\n operations_insights_warehouse_id=self.module.params.get(\n \"operations_insights_warehouse_id\"\n ),\n )\n\n\nOperationsInsightsWarehouseResourceUsageSummaryFactsHelperCustom = get_custom_class(\n \"OperationsInsightsWarehouseResourceUsageSummaryFactsHelperCustom\"\n)\n\n\nclass ResourceFactsHelper(\n OperationsInsightsWarehouseResourceUsageSummaryFactsHelperCustom,\n OperationsInsightsWarehouseResourceUsageSummaryFactsHelperGen,\n):\n pass\n\n\ndef main():\n module_args = oci_common_utils.get_common_arg_spec()\n module_args.update(\n dict(\n operations_insights_warehouse_id=dict(\n aliases=[\"id\"], type=\"str\", required=True\n ),\n )\n )\n\n module = OCIAnsibleModule(argument_spec=module_args)\n\n if not HAS_OCI_PY_SDK:\n 
module.fail_json(msg=\"oci python sdk required for this module.\")\n\n resource_facts_helper = ResourceFactsHelper(\n module=module,\n resource_type=\"operations_insights_warehouse_resource_usage_summary\",\n service_client_class=OperationsInsightsClient,\n namespace=\"opsi\",\n )\n\n result = []\n\n if resource_facts_helper.is_get():\n result = resource_facts_helper.get()\n else:\n resource_facts_helper.fail()\n\n module.exit_json(operations_insights_warehouse_resource_usage_summary=result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"oracle/oci-ansible-collection","sub_path":"plugins/modules/oci_opsi_operations_insights_warehouse_resource_usage_summary_facts.py","file_name":"oci_opsi_operations_insights_warehouse_resource_usage_summary_facts.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"78"} +{"seq_id":"12922759289","text":"import pytest\nimport threading\nimport time\n\nfrom psycopg2_mq import MQSource, MQWorker\n\n\ndef test_integration(model, dbsession, worker_proxy):\n worker_proxy.start(\n queues={\n 'dummy': DummyQueue(),\n }\n )\n\n source = MQSource(dbsession=dbsession, model=model)\n with dbsession.begin():\n job_id = source.call('dummy', 'echo', {'message': 'hello world'})\n\n while True:\n with dbsession.begin():\n job = source.find_job(job_id)\n if job.state in {\n model.JobStates.COMPLETED,\n model.JobStates.FAILED,\n model.JobStates.LOST,\n }:\n break\n time.sleep(0.1)\n\n job = source.find_job(job_id)\n assert job.state == model.JobStates.COMPLETED\n assert job.result == {\n 'queue': 'dummy',\n 'method': 'echo',\n 'args': {'message': 'hello world'},\n }\n assert job.start_time is not None\n assert job.end_time is not None\n\n\nclass DummyQueue:\n def execute_job(self, job):\n return {\n 'queue': job.queue,\n 'method': job.method,\n 'args': job.args,\n }\n\n\nclass WorkerProxy:\n worker = None\n thread = None\n\n def __init__(self, model, dbengine):\n self.model = model\n self.dbengine = dbengine\n\n def start(self, **kw):\n self.worker = MQWorker(\n engine=self.dbengine,\n model=self.model,\n capture_signals=False,\n **kw,\n )\n self.thread = threading.Thread(target=self.worker.run)\n self.thread.daemon = True\n self.thread.start()\n return self.worker\n\n def stop(self):\n self.worker.shutdown_gracefully()\n self.thread.join()\n\n\n@pytest.fixture\ndef worker_proxy(model, dbengine):\n proxy = WorkerProxy(model, dbengine)\n try:\n yield proxy\n finally:\n proxy.stop()\n","repo_name":"mmerickel/psycopg2_mq","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"73411504252","text":"\nwith open(\"saved.txt\", \"r\") as f:\n bit_string = f.readlines()[0]\nnumber_of_bits = len(bit_string)\n\npointer = 0\nn = 0\n\ndef read_n_bits(num):\n global pointer\n global number_of_bits\n if num == 0: return 0\n res = int(bit_string[pointer:pointer+num], 2)\n pointer += num\n return res\n\nrows = read_n_bits(16)\ncols = read_n_bits(16)\ncolor_dif_bits = read_n_bits(8)\nn = rows * cols\n\nv = [[False]*cols for i in range(rows)]\n\npath = []\npath_to_index = {}\nk = 0\nfor i in range(rows):\n for j in range(cols):\n path.append((i,j))\n path_to_index[(i,j)] = k\n k += 1\n\nv = [False]*n\nr = v[:]\ng = v[:]\nb = v[:]\n\n\ndef neighbor_cells(r, c, visited):\n neighbors = [\n (r + rd, c + cd)\n for rd, cd in\n 
[(1,1),(1,0),(1,-1),(0,1),(0,-1),(-1,1),(-1,0),(-1,-1)]\n ]\n neighbors = [(r, c) for r, c in neighbors\n if (r >= 0 and r < rows and c >= 0 and c < cols\n and not visited[path_to_index[(r, c)]]\n )]\n\n return neighbors\n\n\n\nfor i in range(n):\n if v[i]: continue\n v[i] = True\n\n r[i], g[i], b[i] = [read_n_bits(8) for _ in range(3)]\n\n queue = [i]\n\n while queue:\n cur = queue.pop(0)\n cur_r, cur_c = path[cur]\n\n neighbors = neighbor_cells(cur_r, cur_c, v)\n bitset = read_n_bits(len(neighbors))\n # print(len(neighbors))\n # if len(neighbors) == 4: exit()\n if bitset == 0: continue\n\n for index, nei in enumerate(neighbors):\n if not (bitset & (1 << index)): continue\n nei_r, nei_c = nei\n nei_index = path_to_index[(nei_r, nei_c)]\n v[nei_index] = True\n\n __ = [None]*3\n for _ in range(3):\n if read_n_bits(1) == 1:\n __[_] = read_n_bits(color_dif_bits - 1)\n else:\n __[_] = -read_n_bits(color_dif_bits - 1)\n\n r[nei_index] = r[cur] + __[0]\n g[nei_index] = g[cur] + __[1]\n b[nei_index] = b[cur] + __[2]\n\n queue.append(nei_index)\n\nprint(pointer, number_of_bits)\nprint((number_of_bits + 7999) // 8000, \"kb\")\n\ncolor_string = f\"{rows} {cols}\\n\"\nfor i in range(n):\n color_string += f\"{r[i]} {g[i]} {b[i]}\\n\"\n\nwith open(\"decompressed_img.txt\", \"w\") as f:\n f.write(color_string)\n\n# for i in range(n):\n# print(r[i], g[i], b[i])\n","repo_name":"ollelapidus/Gymnasiearbete---Complete-Bucket-Tiling","sub_path":"cbt_decompression.py","file_name":"cbt_decompression.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"832110504","text":"from test_framework.test_framework import IoPTestFramework\nfrom test_framework.util import *\n\n# Create one-input, one-output, no-fee transaction:\nclass MempoolSpendCoinbaseTest(IoPTestFramework):\n\n def __init__(self):\n super().__init__()\n self.num_nodes = 1\n self.setup_clean_chain = False\n\n def setup_network(self):\n # Just need one node for this test\n args = [\"-checkmempool\", \"-debug=mempool\"]\n self.nodes = []\n self.nodes.append(start_node(0, self.options.tmpdir, args))\n self.is_network_split = False\n\n def run_test(self):\n chain_height = self.nodes[0].getblockcount()\n assert_equal(chain_height, 200)\n node0_address = self.nodes[0].getnewaddress()\n\n # Coinbase at height chain_height-100+1 ok in mempool, should\n # get mined. Coinbase at height chain_height-100+2 is\n # is too immature to spend.\n b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]\n coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]\n spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]\n\n spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])\n\n # coinbase at height 102 should be too immature to spend\n assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])\n\n # mempool should have just spend_101:\n assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])\n\n # mine a block, spend_101 should get confirmed\n self.nodes[0].generate(1)\n assert_equal(set(self.nodes[0].getrawmempool()), set())\n\n # ... 
and now height 102 can be spent:\n        spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])\n        assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])\n\nif __name__ == '__main__':\n    MempoolSpendCoinbaseTest().main()\n","repo_name":"Internet-of-People/iop-blockchain","sub_path":"qa/rpc-tests/mempool_spendcoinbase.py","file_name":"mempool_spendcoinbase.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"18913343628","text":"import requests\nimport os\nfrom twilio.rest import Client\n\nBERLIN_LAT = \"52.509079\"\nBERLIN_LONG = \"13.298040\"\nTHALASSERY_LAT = \"11.753000\"\nTHALASSERY_LONG = \"75.493400\"\n\nowa_endpoint = os.environ['OPEN_WEATHER_ENDPOINT']\nowa_token = os.environ['OPEN_WEATHER_API_TOKEN']\ntwilio_account_sid = os.environ['TWILIO_ACCOUNT_SID']\ntwilio_auth_token = os.environ['TWILIO_AUTH_TOKEN']\ntwilio_phone_no = os.environ['TWILIO_PHONE_NO']\nclient_phone_no = os.environ['CLIENT_PHONE_NO']\n\n\nparameters = {\n    \"lat\":THALASSERY_LAT,\n    \"lon\":THALASSERY_LONG,\n    \"exclude\":\"current,minutely,daily\",\n    \"appid\":owa_token,\n}\n\nresponse = requests.get(url=owa_endpoint, params=parameters)\nresponse.raise_for_status()\ndata = response.json()[\"hourly\"]\nweather_ids = []\nfor hour in range(12):\n    weather_ids.append(data[hour][\"weather\"][0][\"id\"])\n\nwill_rain = False\nfor weather_id in weather_ids:\n    if weather_id < 700:\n        will_rain = True\nif will_rain:\n    client = Client(twilio_account_sid, twilio_auth_token)\n    message = client.messages \\\n        .create(\n            body=\"It's going to rain today. Carry an umbrella\",\n            from_=twilio_phone_no,\n            to=client_phone_no\n        )\n    print(message.status)","repo_name":"rohitrav33ndran/100daysofcode","sub_path":"day35/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2997119417","text":"'''\nAugust 25, 2020\nSudoku Solver using Backtracking Algorithm\nAngela Zhang\n\nbacktracking algorithm logic\n1. pick an empty box\n2. try all numbers\n3. find one that works\n4. repeat\n5. 
as soon as solution cannot be completed, backtrack\n - erase current, go back to previous one and try others\n - if not, keep going back and try others until it does\n'''\n\n# empty slots denoted by 0\nboard = [\n\n # valid board\n [5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 7, 9]\n\n # invalid board\n # [7,8,0,4,0,0,1,2,0],\n # [6,0,0,0,7,5,0,0,9],\n # [0,0,0,6,0,1,0,7,8],\n # [0,0,7,0,4,0,2,6,0],\n # [0,8,1,0,5,0,9,3,0],\n # [9,0,4,0,6,0,0,0,5],\n # [0,7,0,3,0,0,0,1,2],\n # [1,2,0,0,0,7,4,0,0],\n # [0,4,9,2,0,6,0,0,7]\n\n]\n\ndef solve_board(b):\n # base case\n found = find_empty(b)\n if not found:\n return True # board is solved\n\n row = found[0]\n col = found[1]\n\n for i in range(1, 10): # tries numbers 1-9\n if is_valid(b, i, found):\n b[row][col] = i # update board if valid number\n\n if solve_board(b): # recursive calls solve_board() on new board\n return True\n\n b[row][col] = 0 # backtrack if not viable\n\n return False # cannot solve\n\n\ndef print_board(b): # prints out board for visualization\n n = len(b) # dimension of board\n\n for row in range(n):\n print_row = \"| \" # row to be printed, reset every row\n if row % 3 == 0 and row != 0:\n print(\"-------------------------------------\")\n for column in range(n):\n print_row += (str(b[row][column]) + \" \")\n if (column + 1) % 3 == 0:\n print_row += \"| \"\n print(print_row)\n\ndef is_valid(b, value, pos):\n n = len(b) # dimension of board\n r = pos[0] # row ind of value\n c = pos[1] # column ind of value\n\n # check row\n for i in range(n):\n if b[r][i] == value and i != c:\n return False\n\n # check column\n for i in range(n):\n if b[i][c] == value and i != r:\n return False\n\n # check 3 x 3\n box_x = c // 3\n box_y = r // 3 # gives us x,y coordinates of 3x3 box of value\n for row in range(box_y*3, box_y*3 + 3):\n for column in range(box_x*3, box_x*3 + 3):\n if b[row][column] == value and (row, column) != pos:\n return False\n\n return True\n\n\ndef find_empty(b): # helper function that returns tuple with coordinates of empty slot, false if no empty\n n = len(b) # dimension of board\n\n for row in range(n):\n for column in range(n):\n if b[row][column] == 0:\n return (row, column)\n\n return False\n\n\nprint(\"original board:\")\nprint_board(board)\nsolve_board(board)\nprint(\"\\n\" + \"solution:\")\nif not find_empty(board):\n print_board(board)\nelse:\n print(\"invalid board. 
unable to solve\")","repo_name":"azhang4216/sudoku-solver","sub_path":"sudoku_solver.py","file_name":"sudoku_solver.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"21106535532","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSetup for the SixGill.\n\nSource:: https://github.com/ampledata/sixgill\n\"\"\"\n\n\n__title__ = 'sixgill'\n__version__ = '0.0.1b1'\n__author__ = 'Greg Albrecht '\n__license__ = 'Apache License, Version 2.0'\n__copyright__ = 'Copyright 2016 Orion Labs, Inc.'\n\n\nimport os\nimport setuptools\nimport sys\n\n\ndef publish():\n \"\"\"Function for publishing package to pypi.\"\"\"\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit()\n\n\npublish()\n\n\nsetuptools.setup(\n name='sixgill',\n version=__version__,\n description='SixGill',\n author='Greg Albrecht',\n author_email='gba@orionlabs.io',\n packages=['sixgill'],\n package_data={'': ['LICENSE']},\n license=open('LICENSE').read(),\n long_description=open('README.rst').read(),\n url='https://github.com/ampledata/sixGill',\n setup_requires=[\n 'coverage >= 3.7.1',\n 'httpretty >= 0.8.10',\n 'nose >= 1.3.7'\n ],\n install_requires=[\n 'pynmea2 >= 1.4.2',\n 'pyserial == 2.7',\n 'requests >= 2.7.0'\n ],\n package_dir={'sixgill': 'sixgill'},\n zip_safe=False,\n include_package_data=True,\n entry_points={'console_scripts': ['sixgill = sixgill.cmd:cli']}\n)\n","repo_name":"ampledata/sixgill","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"10815289548","text":"import json\n\nimport jsonpath as jsonpath\nimport requests\n\nfrom config.config import get_env, collection_get\n\n\nclass WeatherUtil:\n env = get_env()\n weather_config_key = 'QWEATHER_' + env\n location_path = collection_get(weather_config_key, 'LOCATION_PATH')\n weather_path = collection_get(weather_config_key, 'WEATHER_PATH')\n air_path = collection_get(weather_config_key, 'AIR_PATH')\n key = collection_get(weather_config_key, 'KEY')\n\n def get_weather_now(self, location_id: int):\n response = requests.get(self.weather_path, params={'location': location_id, 'key': self.key})\n if response.status_code == 200:\n data = response.text\n jsondata = json.loads(data)\n weather = jsondata['daily']\n return weather\n return None\n\n def get_air_now(self, location_id: int):\n response = requests.get(self.air_path, params={'location': location_id, 'key': self.key})\n if response.status_code == 200:\n data = response.text\n jsondata = json.loads(data)\n air = jsondata['daily']\n return air\n return None\n\n def create_weather_str(self, city_name: str, air, weather, day: str):\n weatherStr = f\"\"\n if day == '今天':\n weatherStr = \"今日 \"\n weather = weather[0]\n air = air[0]\n elif day == '明天':\n weatherStr = \"明天 \"\n weather = weather[1]\n air = air[1]\n elif day == '后天':\n weatherStr = \"后天 \"\n weather = weather[2]\n air = air[2]\n if city_name is not None:\n weatherStr = weatherStr + f\"{city_name} 天气:{weather['textDay']}\\n\" \\\n f\" ⬆️ 最高气温:{weather['tempMax']}°C\\n\" \\\n f\" ⬇️ 最低气温:{weather['tempMin']}°C\\n\" \\\n f\" 🌞 紫外线等级:{weather['uvIndex']}\\n\" \\\n f\" 🌪 最高风速:{weather['windScaleDay']}级\\n\" \\\n f\" 🌫 空气质量:{air['category']}\\n\" \\\n f\" 🔢 空气指数:{air['aqi']}\"\n return weatherStr\n\n def get_weather_str(self, city_name: str, day: str):\n location_id = 
self.getLocationId(city_name)\n weather = self.get_weather_now(location_id)\n air = self.get_air_now(location_id)\n\n return self.create_weather_str(weather=weather, air=air, city_name=city_name, day=day)\n\n def getLocationId(self, city):\n response = requests.get(self.location_path, params={'location': city, 'key': self.key})\n if response.status_code == 200:\n return json.loads(response.text)['location'][0]['id']\n","repo_name":"JasonSTong/chatgpt-empower-wechat","sub_path":"util/http_utils.py","file_name":"http_utils.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"78"} +{"seq_id":"26141093798","text":"import speech_recognition as sr\nimport tts\n\ndef listen():\n global recognizer\n try:\n with sr.Microphone() as source:\n tts.speak('You may now speak')\n audio = recognizer.record(source,duration=5)\n text = recognizer.recognize_google(audio)\n except Exception as e:\n print(e)\n return \"stop\"\n return text\n\nrecognizer = sr.Recognizer()","repo_name":"samip-gyawali/gpt-assistant","sub_path":"stt.py","file_name":"stt.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"14068354970","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom Logistic_Regression import *\r\nfrom metrics import *\r\nfrom sklearn.linear_model import LogisticRegression as SKLR\r\nfrom sklearn.datasets import load_breast_cancer\r\n\r\nnp.random.seed(42)\r\n\r\nN = 30\r\nP = 2\r\nX = pd.DataFrame(np.random.randn(N, P))\r\ny = pd.Series(np.random.randint(2, size=N)) #integer because y and y_pred must have same class\r\nX = (X - X.min()) / (X.max() - X.min()) #normalized\r\n\r\nfit_intercept = True\r\n\r\n#L1 type\r\nprint(\"\\nRegularized Logistic Regression (L1 type): \")\r\nLR = LogisticRegression(fit_intercept=fit_intercept)\r\nLR.fit_autograd(X, y, n_iter = 200, batch_size = 5, lr = 1.5, reg_type = \"l1_reg\") \r\ny_hat = LR.predict(X)\r\nprint('theta = ', LR.coef_)\r\nprint('Accuracy = ', accuracy(y_hat, y))\r\nLR.plot_decision_boundary(X,y)\r\n\r\n#L2 type\r\nprint(\"\\nRegularized Logistic Regression (L2 type): \")\r\nLR = LogisticRegression(fit_intercept=fit_intercept)\r\nLR.fit_autograd(X, y, n_iter = 200, batch_size = 5, lr = 1.5, reg_type = \"l2_reg\") \r\ny_hat = LR.predict(X)\r\nprint('theta = ', LR.coef_)\r\nprint('Accuracy = ', accuracy(y_hat, y))\r\nLR.plot_decision_boundary(X,y)","repo_name":"DhruvinPatel31/ML_assignment_3","sub_path":"ML-Assignment-3/q2_reg_lr.py","file_name":"q2_reg_lr.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30874528156","text":"import os\nimport cv2\nfrom glob import glob\nfrom tqdm import tqdm\nfrom joblib import Parallel,delayed\n\nfrom engine import utils\nfrom constants import NIH_DIR,CXRAY_SZ\n\n\ndef resize_nih_images():\n \n def resize_save(src):\n name = src.split('/')[-1].replace('.png','.jpg')\n dst = '%snih_images_320/%s'%(NIH_DIR,name) \n if os.path.exists(dst):\n return True\n utils.create_folder(dst)\n try:\n img = cv2.imread(src,0)\n img = cv2.resize(img,(CXRAY_SZ,CXRAY_SZ))\n status = cv2.imwrite(dst,img)\n return status\n except Exception as e:\n print(src,e)\n return False\n\n paths = glob('%simages/*.png'%NIH_DIR)\n utils.create_folder('%snih_images_320/'%(NIH_DIR))\n results = 
Parallel(n_jobs=-1,prefer='threads')(delayed(resize_save)(path) for path in tqdm(paths,desc='Preparing NIH Data'))\n utils.unique(results)\n\ndef main():\n resize_nih_images()","repo_name":"mcintoshML/Data-Bias-Analysis","sub_path":"data_prep/prep_nih_data.py","file_name":"prep_nih_data.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42864775379","text":"import numpy as np\nimport torch\nfrom pytorch_tabnet.pretraining import TabNetPretrainer\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom torch import nn\nimport wandb\nfrom pytorch_tabnet.callbacks import Callback\n\nfrom augmentations.augmentations import Augmentator\nfrom pipelines.common_pipeline import CommonPipeline\nfrom utils.datasets import CommonDataset\n\n\nclass WandbTabNetCallback(Callback):\n def __init__(self, wandb_obj, params):\n super().__init__()\n self.wandb = wandb_obj\n wandb.config.update({\n 'parameters': params\n })\n\n def set_params(self, params):\n super().set_params(params)\n\n def set_trainer(self, model):\n super().set_trainer(model)\n\n def on_epoch_begin(self, epoch, logs=None):\n super().on_epoch_begin(epoch, logs)\n if logs is not None:\n wandb.log(logs)\n\n def on_epoch_end(self, epoch, logs=None):\n super().on_epoch_end(epoch, logs)\n if logs is not None:\n wandb.log(logs)\n\n def on_batch_begin(self, batch, logs=None):\n super().on_batch_begin(batch, logs)\n\n def on_batch_end(self, batch, logs=None):\n super().on_batch_end(batch, logs)\n\n def on_train_begin(self, logs=None):\n super().on_train_begin(logs)\n\n def on_train_end(self, logs=None):\n super().on_train_end(logs)\n if logs is not None:\n wandb.log(logs)\n\n\nclass TabNetClassificationPipeline(CommonPipeline):\n\n def __init__(self, model, dataset: CommonDataset, wandb_obj, wandb_project_name, BEST_MODELS_FOLDER, **kwargs):\n super().__init__(model, dataset, wandb_obj, wandb_project_name, BEST_MODELS_FOLDER, **kwargs)\n\n def launch_full_cv(self,\n augmentation_types_with_proportions: dict[str, float],\n augmentators: dict[str, Augmentator],\n metric_names_mapping: dict[str, (str, str)],\n n_splits=5,\n seed=7575):\n kf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)\n results = self._launch_cv(kf, augmentation_types_with_proportions, augmentators)\n self._log_summary(augmentation_types_with_proportions, results, metric_names_mapping)\n return results\n\n def _train_model(self, train_idx, test_idx):\n wandb_callback = WandbTabNetCallback(self.wandb_obj, self.model.get_params())\n self.model_copy.fit(\n self.dataset[train_idx][0], self.dataset[train_idx][1],\n eval_set=[(self.dataset[test_idx][0], self.dataset[test_idx][1])],\n loss_fn=nn.CrossEntropyLoss(),\n callbacks=[wandb_callback],\n **self.kwargs\n )\n return self.model_copy.history\n\n def _save_model_copy(self, path):\n self.model_copy.save_model(path)\n\n\nclass TabNetRegressionPipeline(CommonPipeline):\n\n def __init__(self, model, dataset: CommonDataset, wandb_obj, wandb_project_name, BEST_MODELS_FOLDER, **kwargs):\n super().__init__(model, dataset, wandb_obj, wandb_project_name, BEST_MODELS_FOLDER, **kwargs)\n\n def launch_full_cv(self,\n augmentation_types_with_proportions: dict[str, float],\n augmentators: dict[str, Augmentator],\n metric_names_mapping: dict[str, str],\n n_splits=5,\n seed=7575):\n kf = KFold(n_splits=n_splits, random_state=seed, shuffle=True)\n results = self._launch_cv(kf, augmentation_types_with_proportions, 
augmentators)\n        self._log_summary(augmentation_types_with_proportions, results, metric_names_mapping)\n        return results\n\n    def _train_model(self, train_idx, test_idx):\n        wandb_callback = WandbTabNetCallback(self.wandb_obj, self.model.get_params())\n        self.model_copy.fit(\n            self.dataset[train_idx][0], self.dataset[train_idx][1],\n            eval_set=[(self.dataset[test_idx][0], self.dataset[test_idx][1])],\n            loss_fn=nn.MSELoss(),\n            callbacks=[wandb_callback],\n            **self.kwargs\n        )\n        return self.model_copy.history\n\n    def _save_model_copy(self, path):\n        self.model_copy.save_model(path)\n\n\nclass TabNetPretrainingPipeline:\n    def __init__(self, dataset: CommonDataset, wandb_obj,\n                 wandb_project_name, model_save_path, **kwargs):\n        self.model = None\n        self.model_save_path = model_save_path\n        self.wandb_project_name = wandb_project_name\n        self.wandb_obj = wandb_obj\n        self.dataset = dataset\n        self.kwargs = kwargs\n\n    def launch_pretraining(self, pretraining_ratio=0.8, SEED=7575):\n        self.wandb_obj.init(self.wandb_project_name, 'fdcf')\n        np.random.seed(SEED)\n        length = len(self.dataset.X)\n        N_valid = int(length * 0.2)\n        valid_idx = np.random.choice(length, N_valid, replace=False)\n        X_train = np.delete(self.dataset.X, valid_idx, axis=0)\n        X_valid = self.dataset.X[valid_idx]\n        self.model = TabNetPretrainer(\n            optimizer_fn=torch.optim.Adam,\n            optimizer_params=dict(lr=2e-2),\n            mask_type=\"entmax\"\n        )\n        wandb_callback = WandbTabNetCallback(self.wandb_obj, self.model.get_params())\n        self.model.fit(\n            X_train=X_train,\n            eval_set=[X_valid],\n            pretraining_ratio=pretraining_ratio,\n            callbacks=[wandb_callback],\n            **self.kwargs\n        )\n        self.model.save_model(self.model_save_path)\n\n","repo_name":"Zhekuson/TabnetResearch","sub_path":"code/pipelines/tabnet_pipelines.py","file_name":"tabnet_pipelines.py","file_ext":"py","file_size_in_byte":5524,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"21145161240","text":"#!/usr/bin/python\n#-*- encoding: UTF-8 -*-\n\nfrom ctypes import *\nckipws = None\n\ndef initial(main_dll, py_dll, ini):\n    global ckipws\n    c_main_dll = c_wchar_p(main_dll)\n    c_ini = c_wchar_p(ini)\n    ckipws = CDLL(py_dll)\n    ckipws.Initial(c_main_dll,c_ini)\n\ndef segment(inputStr, mode = 0):\n    global ckipws\n    Result = ''\n    try:\n        CResult = ckipws.Segment(inputStr)\n        CResult = cast(CResult,c_wchar_p)\n        Result = CResult.value\n    except:\n        pass\n    finally:\n        if mode == 0:\n            WSResult = []\n            Result = Result.split()\n            for res in Result:\n                re = res.strip()\n                re = re[0:len(re)-1]\n                temp = re.split(u'(')\n                word = temp[0]\n                pos = temp[1]\n                WSResult.append((word,pos))\n            #[('蔡英文', 'Nb'), ('是', 'SHI'), ...]\n            return WSResult\n        else:\n            #蔡英文(Nb) 是(SHI) 中華民國(Nc)...\n            return Result\n\ndef segList(corpus):\n    # Specify the CKIPWS system file; do not modify\n    main_dll = 'CKIPWS.dll'\n    py_dll = 'PY_CKIPWS.dll'\n    # Specify the CKIPWS configuration file\n    ini = 'ws.ini'\n    # Perform CKIPWS initialization\n    initial(main_dll, py_dll, ini)\n\n    ans = []\n    for text in corpus:\n        ans.append(segment(text))\n    # The segmented results are collected in ans\n    return ans\n","repo_name":"666XD/simpleParser","sub_path":"CKIPWS.py","file_name":"CKIPWS.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23927982516","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Client',\n            fields=[\n                ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('client_name', models.CharField(max_length=100)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Product',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('product_name', models.CharField(max_length=200)),\n                ('price', models.IntegerField(default=0)),\n                ('delivery_date', models.DateField(verbose_name=b'date delivered')),\n                ('client', models.ForeignKey(to='sales.Client')),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","repo_name":"sprzedwojski/django-erp","sub_path":"sales/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"31161943966","text":"##chrome dinosaur game##\nimport pygame\nimport random\nfrom pygame.locals import *\n\n\npygame.init()\n\nclock = pygame.time.Clock()\nfps = 100\n\nscreenwidth = 1000\nscreenheight = 936\ntrack_scroll = 0\nmove_speed = 4\nscreen = pygame.display.set_mode((screenwidth, screenheight))\npygame.display.set_caption('chrome dino')\n\nrun = True\nwhile run:\n\t\n\tclock.tick(fps)\n\t\n\t\n\tbg = pygame.image.load('Bg.png')\n\tscreen.blit(bg, (0, 0))\n\ttk = pygame.image.load('Track.png')\n\tscreen.blit(tk, (track_scroll, 1227))\n\ttrack_scroll -= move_speed\n\tif abs(track_scroll) > 500:\n\t\t\ttrack_scroll = 0\n\t\n\t\n\t\n\tpygame.display.update()","repo_name":"Legend891/chrome-trex","sub_path":"chrome diansaur.py","file_name":"chrome diansaur.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4229704183","text":"from flask import Flask, render_template, request, redirect, url_for, flash, Markup, jsonify\nimport datetime\nimport RPi.GPIO as GPIO\nimport Adafruit_ADS1x15\n\nstate = True\n\nadc = Adafruit_ADS1x15.ADS1115()\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    print(\"start webpage\")\n    return render_template('index.html')\n\n\n@app.route('/updateTime')\ndef updateTime():\n    print('update time and button change ads val to perc')\n\n    now = datetime.datetime.now()\n    timeString = \"TIME: \" + now.strftime(\"%H:%M:%S\")\n    global state\n    OutputType = \"value\" if state else \"percent\"\n    OutputValue = adc_val(state)\n    UpdateTimeOnweb = {\n        'type': OutputType,\n        'value': OutputValue,\n        'timeupdate': timeString\n    }\n    return jsonify(**UpdateTimeOnweb)\n\n\n@app.route('/changeState')\ndef changeState():\n    global state\n    state = not state\n    print(state)\n    return jsonify(state=state)\n\n\ndef GPIO_init(GPIO):\n    global state\n    global max_val\n    max_val = 1\n    state = True\n    GPIO.setmode(GPIO.BOARD)\n    GPIO.setwarnings(False)\n    # GPIO.setup(P_OUT, GPIO.OUT)\n\n\ndef adc_val(state):\n    val = max(adc.read_adc(0), 0)\n    global max_val\n    max_val = max(val, max_val)\n\n    return val if state else int(val/max_val*100)\n\n\nif __name__ == '__main__':\n\n    GPIO_init(GPIO)\n    app.run(debug=True, host='0.0.0.0', port=80)\n","repo_name":"etnk125/Raspi","sub_path":"asg09_lab-webserver/lab/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18497243671","text":"import mysql.connector\r\nfrom henryInterfaceClasses import Author, Book, Branch, Price, Category, Cat_Book, Cat_Price, 
Publisher, Pub_Book, Pub_Price\r\n\r\nclass HenryDAO():\r\n def __init__(self):\r\n self.mydb = mysql.connector.connect(\r\n user='root',\r\n passwd='root',\r\n database='comp3421',\r\n host='127.0.0.1')\r\n\r\n self.mycur = self.mydb.cursor()\r\n\r\n def getAuthorData(self):\r\n # Perform the query\r\n sql = \"SELECT distinct(A.author_num), author_first, author_last FROM henry_author A LEFT JOIN henry_wrote W ON A.AUTHOR_NUM = W.AUTHOR_NUM WHERE W.AUTHOR_NUM IS NOT NULL ORDER BY w.author_num\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n # Display the results\r\n for row in self.mycur:\r\n data.append(Author(row[0], row[1], row[2]))\r\n return data\r\n\r\n def getBookData(self, auth_number):\r\n sql = f\"SELECT b.book_code, title, price FROM HENRY_BOOK b JOIN HENRY_WROTE w ON b.book_code = w.book_code JOIN HENRY_AUTHOR a ON w.AUTHOR_NUM = a.AUTHOR_NUM WHERE (w.BOOK_CODE = b.BOOK_CODE) AND (w.AUTHOR_NUM like '{auth_number}')\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Book(row[0], row[1], row[2]))\r\n return data\r\n\r\n def getBranchData(self, book_code):\r\n sql = f\"select B.branch_name, I.on_hand from HENRY_BRANCH B JOIN HENRY_INVENTORY I ON B.BRANCH_NUM = I.BRANCH_NUM where B.BRANCH_NUM=I.BRANCH_NUM and I.BOOK_CODE='{book_code}'\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Branch(row[0], row[1]))\r\n return data\r\n\r\n def getPriceData(self, BOOK_CODE, AUTHOR_NUM):\r\n sql = f\"SELECT price from HENRY_BOOK B JOIN HENRY_WROTE W ON W.BOOK_CODE = B.BOOK_CODE WHERE (W.BOOK_CODE = '{BOOK_CODE}') AND (W.AUTHOR_NUM = '{AUTHOR_NUM}')\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Price(row[0]))\r\n return data\r\n\r\n def getCategory(self):\r\n sql = \"SELECT DISTINCT(type) From HENRY_BOOK\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Category(row[0]))\r\n return data\r\n\r\n def getCategory_Book(self, TYPE):\r\n sql = f\"SELECT title, book_code, price FROM HENRY_BOOK WHERE (type = '{TYPE}')\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Cat_Book(row[0], row[1], row[2]))\r\n return data\r\n\r\n def cat_getPriceData(self, BOOK_CODE):\r\n sql = f\"SELECT price from HENRY_BOOK WHERE BOOK_CODE = '{BOOK_CODE}'\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Cat_Price(row[0]))\r\n return data\r\n\r\n def getPublisher(self):\r\n sql = \"SELECT P.PUBLISHER_NAME, P.PUBLISHER_CODE FROM HENRY_publisher P LEFT OUTER JOIN henry_book B ON P.publisher_code = B.publisher_code GROUP BY B.PUBLISHER_CODE HAVING B.publisher_code is not null\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Publisher(row[0], row[1]))\r\n return data\r\n\r\n def getPublisher_Book(self, PUBLISHER_CODE):\r\n sql = f\"SELECT title, book_code, price FROM HENRY_BOOK WHERE (PUBLISHER_CODE = '{PUBLISHER_CODE}')\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Pub_Book(row[0], row[1], row[2]))\r\n return data\r\n\r\n def pub_getPriceData(self, BOOK_CODE, PUBLISHER_CODE):\r\n sql = f\"SELECT price from HENRY_BOOK WHERE (BOOK_CODE = '{BOOK_CODE}') AND (PUBLISHER_CODE = '{PUBLISHER_CODE}')\"\r\n self.mycur.execute(sql)\r\n\r\n data = []\r\n for row in self.mycur:\r\n data.append(Pub_Price(row[0]))\r\n return data\r\n\r\n def close(self):\r\n self.mydb.commit()\r\n self.mydb.close()\r\nobj = 
HenryDAO()","repo_name":"KAPILDESAI007/Academic-Projects","sub_path":"henryDAO.py","file_name":"henryDAO.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26682633908","text":"#-*- coding = utf-8 -*-\n#@Time: 17:43\n#@Author : floyd\n#@File :.py\n#@Software :PyCharm\nimport requests\nimport json\nfrom fake_useragent import UserAgent\n\nua=UserAgent()\ndef fanyi(word):\n data={\n 'from':'en',\n 'to':'zh',\n 'query':word,\n 'transtype':'translang',\n 'simple_means_flag':'3',\n 'sign':'704513.926512',\n 'token':'2b03bb02439027e78bb0e65809fdd5c5',\n 'domain':'common',\n 'raw_trans':word\n }\n proxies={\n 'HTTP' : '113.194.28.125:9999'\n }\n header={\n\n 'user-agent':str(ua.random),\n 'x-requested-with':'XMLHttpRequest'\n }\n url=\"https://fanyi.baidu.com/multitransapi\"\n res=requests.post(url,data=data,headers=header,proxies=proxies).text\n res=json.loads(res)\n result=res['data']['cands'][0]\n return result\n\nif __name__==\"__main__\":\n word=''\n while word != 'q':\n word = input ('请输入:')\n if word != 'q':\n print(fanyi(word))\n\n\n","repo_name":"saintkobe/bigbug","sub_path":"翻译/百度翻译.py","file_name":"百度翻译.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24122068639","text":"from fastapi import APIRouter, Depends, Response, status\nfrom psycopg import Connection\n\nfrom app.database.connection import get_db_connection\nfrom app.schemas.appointment import AppointmentCreate\nfrom app.usecases.appointment import AppointmentUseCases\n\nrouter = APIRouter(tags=[\"Appointments\"], prefix=\"/appointments\")\n\n\n@router.get('')\ndef list_all(\n db_connection: Connection = Depends(get_db_connection)\n):\n uc = AppointmentUseCases(db_connection)\n appointments = uc.get_all_appointments()\n return appointments\n \n@router.post('')\ndef create_appointment(\n appointment: AppointmentCreate,\n db_connection: Connection = Depends(get_db_connection)\n):\n uc = AppointmentUseCases(db_connection)\n uc.create_appointment(appointment)\n return Response(status_code=status.HTTP_201_CREATED)\n\n@router.get('/{appointment_id}')\ndef get_appointment(\n appointment_id: int,\n db_connection: Connection = Depends(get_db_connection)\n):\n uc = AppointmentUseCases(db_connection)\n appointment = uc.get_appointment(appointment_id)\n return appointment\n\n@router.put('/{appointment_id}')\ndef update_appointment(\n appointment_id: int,\n appointment: AppointmentCreate,\n db_connection: Connection = Depends(get_db_connection)\n):\n uc = AppointmentUseCases(db_connection)\n uc.update_appointment(appointment_id, appointment)\n return Response(status_code=status.HTTP_204_NO_CONTENT)\n\n@router.delete('/{appointment_id}')\ndef delete_appointment(\n appointment_id: int,\n db_connection: Connection = Depends(get_db_connection)\n):\n uc = AppointmentUseCases(db_connection)\n uc.delete_appointment(appointment_id)\n return Response(status_code=status.HTTP_204_NO_CONTENT)\n","repo_name":"Jackson-Vieira/MindMeet-PADB","sub_path":"app/routes/appointment.py","file_name":"appointment.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3774531967","text":"from scipy import stats\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport requests\nimport json\n\nASE_DEFS = {\n '11820001': 'ASE_NL01',\n 
'11820002': 'ASE_NL02',\n '11820003': 'ASE_NL03',\n '11820004': 'ASE_NL04',\n '11820005': 'ASE_NL05',\n}\n\nGAS_DEFS = {\n 'co': 'carbon_monoxide__air_',\n 'no': 'nitrogen_monoxide__air_',\n 'no2': 'nitrogen_dioxide__air_',\n 'o3': 'ozone__air_',\n}\n\nSITE_DEFS = {\n 'breuk-sw': 'breukelen_snelweg',\n 'nijm-ruy': 'nijmegen_ruyterstraat',\n}\n\n# InfluxDB creds\nINFLUX_DB_URL = 'https://test.smartemission.nl/influxdb/query?pretty=true'\nINFLUX_DB = 'smartemission'\nINFLUX_DB_READ_USER = 'smart'\nINFLUX_DB_READ_PW = 'smart'\nMEAS_RIVM = 'rivm'\nMEAS_ASE = 'joserefined'\n\n\n# Get calibrated or reference data from InfluxDB\ndef get_data(db, measurement, component, station, time_start, time_end):\n q = \"SELECT time,component,station,value from %s WHERE time >= '%s' AND time <= '%s' AND station = '%s' AND component = '%s'\" % (measurement, time_start, time_end, station, component)\n payload = {'q': q, 'db': db}\n auth = (INFLUX_DB_READ_USER, INFLUX_DB_READ_PW)\n\n # Remote Influx DB query\n r = requests.get(INFLUX_DB_URL, auth=auth, params=payload)\n json_data = json.loads(r.text)\n\n values_arr = None\n try:\n values_arr = json_data['results'][0]['series'][0]['values']\n except:\n pass\n\n # We need values hashtable indexed by datetime for later skipping\n values_dict = {}\n for value in values_arr:\n values_dict[value[0]] = value\n return values_arr, values_dict\n\n\n# Make regression plots for specific date-period, a site, ASE stations, and gasses\ndef make_plots(date_start, date_end, site, ase_stations, gasses):\n\n # Go through all gasses and then for each gas RIVM and all AirSensEUR stations\n for gas in gasses:\n\n # Time formatting: complete ISO Datetime\n time_start = '%sT%sZ' % (date_start, '00:00:00')\n time_end = '%sT%sZ' % (date_end, '00:00:00')\n\n # Get RIVM (reference) ASE data from InfluxDB\n rivm_val_arr, rivm_val_dict = get_data(INFLUX_DB, MEAS_RIVM, GAS_DEFS[gas], SITE_DEFS[site], time_start, time_end)\n\n for ase in ase_stations:\n # Get calibrated (calculated) ASE data from InfluxDB\n se_val_arr, se_val_dict = get_data(INFLUX_DB, MEAS_ASE, gas, ase, time_start, time_end)\n\n x, y = [], []\n for se_val in se_val_arr:\n try:\n # only use entries where for both hour-values exist, otherwise skip\n rivm_val = rivm_val_dict[se_val[0]]\n except:\n # No RIVM value for this hour, skip\n continue\n\n y.append(se_val[3])\n rivm_val_g = rivm_val[3]\n if gas == 'co':\n # RIVM has CO in mg/m3 so convert to ug/m3\n rivm_val_g *= 1000.0\n x.append(rivm_val_g)\n\n # Make a Pandas Dataframe from the x and y single-dim arrays\n d = {'x': x, 'y': y}\n df = pd.DataFrame(data=d)\n\n # R-square calculator from the two x (Ref) and y (ASE) arrays\n def linregress(x, y):\n # Alternative R2 calc, leads to same value\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n r2 = r_value ** 2.0\n # r2 = stats.pearsonr(x, y)[0] ** 2.0\n return r2, slope\n\n # R-square calculator from the two x (Ref) and y (ASE) arrays\n def r_squared(x, y):\n r2, slope = linregress(x, y)\n return r2\n\n # Plot styling\n sns.set(rc={'axes.grid': True, 'grid.linestyle': '-', \"axes.titlesize\": 9, \"axes.labelsize\": 9, 'grid.color': '.8', 'patch.edgecolor': 'w', \"xtick.major.size\": 4, \"ytick.major.size\": 4})\n sns.set(font_scale=0.8)\n\n # Draw the plot\n # sns.lmplot(x='x', y='y', data=df)\n # sns.scatterplot(x=\"x\", y=\"y\", data=df)\n g = sns.PairGrid(df, y_vars=[\"y\"], x_vars=[\"x\"], height=5)\n g.map(sns.scatterplot, s=50)\n # g.set(ylim=(0,120), yticks=[0, 10, 20, 30, 40, 50, 60, 70, 80, 
90,100,110,120])\n\n # More styling\n r2, slope = linregress(x, y)\n\n plt.title(\"%s - %s - %s - %s to %s - R2=%.3f m=%.2f\" % (gas.upper(), site, ASE_DEFS[ase], date_start, date_end, r2, slope))\n plt.xlabel('RIVM Ref (SOS) - 1h avg - ug/m3')\n plt.ylabel('ASE Calc - 1h avg - ug/m3')\n # plt.show()\n\n # Save to png file\n plt.savefig('airsenseur/%s-%s-%s-%s-%s.png' % (site, gas, ASE_DEFS[ase], date_start, date_end))\n plt.close()\n\n\nif __name__ == '__main__':\n make_plots('2018-09-10', '2018-10-09', 'breuk-sw', ASE_DEFS.keys(), ['no', 'no2', 'o3'])\n make_plots('2018-12-25', '2019-01-24', 'nijm-ruy', ['11820001'], ['co', 'no', 'no2', 'o3'])\n","repo_name":"smartemission/smartemission","sub_path":"etl/calibration/airsenseurplots.py","file_name":"airsenseurplots.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"43323479564","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 27 16:51:25 2017\n\n@author: tobias\n\"\"\"\nimport os\nimport re\nimport random\nimport subprocess\nfrom Bio import SeqIO\nfrom Bio import AlignIO\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Align import MultipleSeqAlignment\nfrom Bio.Alphabet import generic_dna\n\ndef read_fasta(fasta):\n name, seq = None, []\n for line in fasta:\n line = line.rstrip()\n if line.startswith(\">\"):\n if name: yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name: yield (name, ''.join(seq))\n\ndef get_seq_dict(dictionary, fasta_path):\n new_dict = {}\n for locus in dictionary.keys():\n new_dict.setdefault(locus,[])\n alignment = SeqIO.parse(open(fasta_path),'fasta')\n for seq in alignment:\n ref_loc = str(seq.id).split('_')[0]\n new_dict[ref_loc].append(seq)\n seq.name=''\n seq.description=''\n return new_dict\n\n\ndef create_reference_fasta(out_dir,path_to_alignments):\n # Create a list of fasta files from the input directory\n file_list = [fn for fn in os.listdir(path_to_alignments) if fn.endswith(\".fasta\")]\n reference_list = []\n for fasta_alignment in file_list:\n sequence_name = re.sub(\".fasta\",\"\",fasta_alignment)\n orig_aln = os.path.join(path_to_alignments,fasta_alignment)\n sep_reference = \"%s/%s\" %(out_dir,fasta_alignment)\n reference_list.append(sep_reference)\n cons_cmd = \"cons -sequence %s -outseq %s -name %s -plurality 0.1 -setcase 0.1\" %(orig_aln,sep_reference,sequence_name)\n os.system(cons_cmd)\n reference = os.path.join(out_dir,\"joined_fasta_library.fasta\")\n join_fastas = \"cat %s/*.fasta > %s\" %(out_dir,reference)\n os.system(join_fastas)\n return reference\n\n\n\nfasta_file = '/Users/tobias/Desktop/cos2.fasta'\n\nlocus_bait_dict = {}\nwith open(fasta_file) as f:\n for name, seq in read_fasta(f):\n locus_name = re.sub('>','',name.split('_')[0])\n locus_bait_dict.setdefault(locus_name,[])\n locus_bait_dict[locus_name].append(seq)\n\nlocus_fasta_dict = get_seq_dict(locus_bait_dict,fasta_file)\n\n \n\nout_path = '/Users/tobias/Desktop/merging_probes/sequence_files'\nfor locus in locus_fasta_dict:\n filename = '%s_sequences.fasta' %locus\n with open(os.path.join(out_path,filename), \"w\") as out_file:\n seq_list = locus_fasta_dict[locus]\n index = 0\n for sequence in seq_list:\n sequence.id = '%s_%i' %(locus,index)\n sequence.name=''\n sequence.description='' \n index += 1\n out_file.write(sequence.format('fasta'))\n\n# align the sequence fasta files\naln_path = '/Users/tobias/Desktop/merging_probes/alignments'\nfor fasta in 
os.listdir(out_path):\n    fasta_file = os.path.join(out_path,fasta)\n    new_file_name = re.sub('_sequences.fasta','_sequence_alignment.fasta',fasta)\n    aln = os.path.join(aln_path,new_file_name)\n    aln_stdout = open(aln, 'w')\n    # run MAFFT on the temp file\n    cmd = [\"mafft\",\"--maxiterate\", \"1000\", fasta_file]\n    # just pass all ENV params\n    proc = subprocess.Popen(cmd,stderr=subprocess.PIPE,stdout=aln_stdout)\n    # wait for MAFFT to finish before closing the alignment file\n    proc.communicate()\n    aln_stdout.close()\n\n\noutput = '/Users/tobias/Desktop/merging_probes/new_reference'\ncreate_reference_fasta(output,aln_path)\n\n","repo_name":"AntonelliLab/seqcap_processor","sub_path":"src/merge_baits_for_each_locus.py","file_name":"merge_baits_for_each_locus.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"22241933551","text":"from PIL import Image\nfrom threading import Thread\nfrom django.conf import settings\nimport pandas as pd\nimport os\nfrom urllib.request import urlopen\nimport numpy as np\nimport traceback\nimport logging\n\n\ncomp_logger = logging.getLogger(__name__)\n\n\nclass ResultImageDownloaderThread(Thread):\n\n\tdef __init__(self,\n\t\t\t\t thread_name,\n\t\t\t\t input_df,\n\t\t\t\t image_download_dir):\n\n\t\tThread.__init__(self)\n\t\tself.thread_name = thread_name\n\t\tself.input_df = input_df\n\t\tself.image_download_dir = image_download_dir\n\n\n\tdef run(self):\n\t\tself.input_df.apply(self.process, axis=1)\n\n\n\tdef downloadImage(self,url,file_path):\n\t\tif not os.path.exists(file_path):\n\t\t\t# try:\n\t\t\tf = open(file_path,'wb')\n\t\t\tf.write(urlopen(url).read())\n\t\t\tf.close()\n\t\t\t# except:\n\t\t\t# \treturn \n\t\t\timage = Image.open(file_path)\n\t\t\timage.convert('RGB').save(file_path)\n\n\n\tdef process(self, row):\n\t\tproduct_image_url = row['r_image_url'].split(settings.IMAGE_LIST_SEPARATOR)[0].strip()\n\t\t# r_title = str(row['r_item_name'])\n\t\t# image_name = r_title.replace(' ','_').replace('.','_').replace('/','_').replace('\"','_').replace('\\'','_')\n\t\t# image_name = '{}.jpg'.format(image_name)\n\t\timage_name = row['result_image']\n\n\t\timage_path = os.path.join(self.image_download_dir,image_name)\n\t\ttry:\n\t\t\tif not os.path.exists(image_path):\n\t\t\t\tself.downloadImage(product_image_url,image_path)\n\t\texcept Exception:\n\t\t\ttraceback_error = traceback.format_exc()\n\t\t\tcomp_logger.info(traceback_error)\n\n\n\n\ndef getResultImageMapperFileDF(client_df,\n\t\t\t\t\t\t\t result_image_mapper_file_path):\n\t\"\"\"\n\tCreates a result image mapper file that holds result item name to image file mapping\n\treturn the mapping data frame\n\t\"\"\"\n\t\n\tif os.path.exists(result_image_mapper_file_path):\n\t\tresult_df = pd.read_csv(result_image_mapper_file_path,sep='\\t', encoding='ISO-8859-1')\n\telse:\t\n\t\t# Getting unique product images(single)\n\t\tresult_df = client_df[['r_item_name']].copy()\n\t\tresult_df = result_df.drop_duplicates(subset=['r_item_name'], keep='first')\n\t\ttotal_rows_in_client_file = len(result_df)\n\t\tresult_df['result_image'] = list(map(lambda x: settings.PROJECT_RESULT_IMAGE_PATTERN.format(x),range(1,total_rows_in_client_file+1)))\n\t\tresult_df.to_csv(result_image_mapper_file_path, index=False,sep='\\t', encoding='ISO-8859-1')\n\treturn result_df\n\n\n\n\ndef getResultMultiImageMapperFile(client_df, \n\t\t\t\t\t\t\t\t result_image_mapper_file_path):\n\t\"\"\"\n\tCreates a result image mapper file that holds result item name to image file mapping\n\treturn 
the mapping data frame\n\t\"\"\"\n\t\n\tif os.path.exists(result_image_mapper_file_path):\n\t\tresult_df = pd.read_csv(result_image_mapper_file_path,sep='\\t', encoding='ISO-8859-1')\n\n\telse:\n\t\t# split single image list to multiple image listing\n\t\tr_image_stack_df = pd.DataFrame(client_df['r_image_url'].str.split(settings.IMAGE_LIST_SEPARATOR).tolist(), \n\t\t\t\t\t\t\t\tindex=[client_df['r_item_name'],client_df['s_sku']]).stack()\n\n\t\tr_image_stack_df = r_image_stack_df.reset_index()\n\n\t\tdel r_image_stack_df['level_2']\n\n\t\tr_image_stack_df.rename(columns={0:'r_image_url'}, inplace=True)\n\t\tr_image_stack_df['r_image_url'] = r_image_stack_df['r_image_url'].str.strip()\n\n\t\t# unique r_image_url listing and integer name mapping\n\t\tr_image_uniq_df = r_image_stack_df[['r_image_url']]\n\t\tr_image_uniq_df = r_image_uniq_df.drop_duplicates(subset=['r_image_url'], keep='first')\n\t\ttotal_rows_in_client_file = len(r_image_uniq_df)\n\t\t# r_image_uniq_df['result_image'] = list(map(lambda x: '{}.jpg'.format(x),range(1,total_rows_in_client_file+1)))\n\t\tr_image_uniq_df['result_image'] = list(map(lambda x: settings.PROJECT_RESULT_IMAGE_PATTERN.format(x),range(1,total_rows_in_client_file+1)))\n\t\t\n\t\t\"\"\"\n\t\tmerging r_image stacked df with unique integer name mapping\n\t\tExploded r_image_url df with integer name mapping\n\t\t\"\"\"\n\n\t\tresult_df = pd.merge(r_image_stack_df, r_image_uniq_df, how='inner',on=['r_image_url'])\n\t\tresult_df.drop_duplicates(subset=['s_sku','r_item_name','r_image_url'], keep='first', inplace=True)\n\n\t\t# There is no redundant product combination(redundancy may occur due to SERP KEY used) in this df persistance\n\t\tresult_df.to_csv(result_image_mapper_file_path, index=False,sep='\\t', encoding='ISO-8859-1')\n\t\t\n\treturn result_df\n\n\n\n\ndef main(client_input_file_path, \n\t\t image_download_dir,\n\t\t result_image_mapper_file_path):\n\n\tclient_df = pd.read_csv(client_input_file_path, sep='\\t', encoding='ISO-8859-1')\n\tclient_df = client_df[client_df['r_image_url'].notnull()]\n\tclient_df = client_df.fillna(value='')\n\n\n\t# result_df = getResultImageMapperFileDF(client_df, result_image_mapper_file_path)\n\t# client_df = pd.merge(client_df, result_df, how='inner', on=['r_item_name'])\n\n\tresult_df = getResultMultiImageMapperFile(client_df=client_df,\n\t\t\t\t\t\t\t\t\t\t\t result_image_mapper_file_path=result_image_mapper_file_path)\n\n\n\tNUMBER_OF_THREAD = settings.PROJECT_IMAGES_DOWNLOAD_THREADS\n\tdf_list = np.array_split(result_df, NUMBER_OF_THREAD)\n\tthread_list = []\n\n\t# Threaded image downloader\n\tfor item in range(0, NUMBER_OF_THREAD):\n\t\tdf_subset = df_list[item]\n\t\tthread_name = 'Thread{}'.format(item)\n\t\tthread = ResultImageDownloaderThread(thread_name=thread_name,\n\t\t\t\t\t\t\t\t\t\t\t input_df=df_subset,\n\t\t\t\t\t\t\t\t\t\t\t image_download_dir=image_download_dir)\n\t\tthread_list.append(thread)\t\t\n\t\tthread.start()\n\n\tclient_df = client_df.iloc[0:0]\n\n\tfor thread in thread_list:\n\t\tthread.join()\n","repo_name":"rohanneps/django","sub_path":"Django-Rest-Framework/rest_app/core/image_download_helper/downloadResultImageHelper.py","file_name":"downloadResultImageHelper.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27690856730","text":"# -*- coding: utf-8 -*-\r\nimport pandas as pd \r\nimport numpy as np\r\n\r\nfirst = pd.read_pickle(\"customers_more_than_one_product.pkl\")\r\ndata = 
pd.read_pickle(\"customers_more_than_one_product.pkl\")\r\n\r\ndata['reviewerID'] = data[\"reviewerID\"].rank(method='dense').astype(int)\r\ndata['asin'] = data[\"asin\"].rank(method='dense').astype(int)\r\ndata['brand'] = data[\"brand\"].rank(method='dense').astype(int)\r\ndata = data.drop([\"reviewTime\",\"reviewText\",\"title\"],axis=1)\r\n\r\n#we set the sample size for prediction to 15000\r\nX= data[\"asin\"].values[:15000]\r\nx1=data[\"overall\"].values[:15000]\r\nx2=data[\"subjectivity\"].values[:15000]\r\nx3=data[\"polarity\"].values[:15000]\r\nX=np.vstack((X, x1,x2,x3)).T\r\n\r\ny= data[\"brand\"].values[:15000]\r\n\r\n#We first tried teaching the product features and inferring who bought the item;\r\n#the error rate came out as 0.61.\r\n\r\n#Teaching the product features and predicting which brand it is instead\r\n#brought the error rate down to 0.25.\r\n\r\n#to split into train test sets\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test , y_train , y_test = train_test_split(X, y, test_size=0.3, random_state=1)\r\n\r\n\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport time\r\n\r\n#We also experimented with the hidden layer size; this configuration gave the best result.\r\nclf= MLPClassifier(hidden_layer_sizes=(16,16), max_iter=1000)\r\n\r\nstart = time.time()\r\nclf.fit(X_train, y_train)\r\nstop = time.time()\r\n\r\naccuracy=clf.score(X_test, y_test)\r\ny_pred = clf.predict(X_test)\r\nerror_rate = 1 - accuracy\r\n\r\ntraining_time = (stop - start)\r\n\r\nprint(\"\\nTraining Time (in s): \",training_time)\r\nprint(\"Error (cost): \", error_rate)\r\n
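# Hedged editor sketch (not part of the original script): for a per-class view of the\r\n# error rate, sklearn's classification_report could be printed alongside it, e.g.\r\n# from sklearn.metrics import classification_report\r\n# print(classification_report(y_test, y_pred))\r\n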
","repo_name":"almiragurkan/Data-Mining","sub_path":"07-MLPClassifier.py","file_name":"07-MLPClassifier.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9770940093","text":"import unittest2\nfrom sparktestingbase.sqltestcase import SQLTestCase\n\nfrom datetime import datetime\nfrom pyspark.sql import Row\nfrom pyspark.sql.types import StructType\n\n\nclass SimpleSQLTest(SQLTestCase):\n    \"\"\"A simple test.\"\"\"\n\n    def test_empty_expected_equal(self):\n        allTypes = self.sc.parallelize([])\n        df = self.sqlCtx.createDataFrame(allTypes, StructType([]))\n        self.assertDataFrameEqual(df, df)\n\n    def test_simple_expected_equal(self):\n        allTypes = self.sc.parallelize(\n            [\n                Row(\n                    i=1,\n                    s=\"string\",\n                    d=1.0,\n                    lng=1,\n                    b=True,\n                    list=[1, 2, 3],\n                    dict={\"s\": 0},\n                    row=Row(a=1),\n                    time=datetime(2014, 8, 1, 14, 1, 5),\n                )\n            ]\n        )\n        df = allTypes.toDF()\n        self.assertDataFrameEqual(df, df)\n\n    @unittest2.expectedFailure\n    def test_dif_schemas_unequal(self):\n        allTypes1 = self.sc.parallelize([Row(d=1.0)])\n        allTypes2 = self.sc.parallelize([Row(d=\"1.0\")])\n        self.assertDataFrameEqual(allTypes1.toDF(), allTypes2.toDF(), 0.0001)\n\n\nif __name__ == \"__main__\":\n    unittest2.main()\n","repo_name":"alexott/spark-playground","sub_path":"testing/src/python/test-spark-testing-base.py","file_name":"test-spark-testing-base.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"29750331493","text":"import matplotlib.pyplot as plt\nfrom matplotlib import colors as mcolors\nimport numpy as np\nimport pandas as pd\nimport math\ncolors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\n\ndef MontarGraficoCurva(paises, dados, ano1, ano2, cores,titulo,eixo_Y): \n    #Receives the country names and the data to build the line chart\n    print(\"Created the chart for countries \" + str(paises))\n    anos = []\n    \n    #builds the year range for the x axis from the user's input\n    for ano in range(ano1, ano2 + 1):\n    \tanos.append(ano)\n    #draws the chart curves\n    for i in range(0, len(paises)):\n        plt.plot(anos, dados[i], label = str(paises[i]), color = cores[i])\n\n    plt.title(titulo)\n    plt.ylabel(eixo_Y)\n    plt.xlabel(\"Years\")\n    plt.legend()\n    plt.show()\n    return\n\n\ndef MontarGraficoBarra(paises, dados, ano1, ano2, cores, titulo,eixo_Y): \n    #Receives the country names and the data to build the bar chart\n    print(\"Created the chart for countries \" + str(paises))\n    anos = []\n    #builds the year range for the x axis from the user's input\n    for ano in range(ano1, ano2 + 1):\n    \tanos.append(ano)\n    #calls the layout helper for the bar-chart variants and then shows the chart to the user\n    ax = plt.subplot()\n    ax = DefinirAx(ax, np.array(anos), dados, cores, paises)\n\n    plt.title(titulo)\n    plt.ylabel(eixo_Y)\n    plt.xlabel(\"years\")\n    plt.legend(loc = \"best\")\n    plt.show()\n    return\n\ndef DefinirAx(ax, a, dados, cores, paises): #Function that lays out the bar chart\n    width = 0.7 #according to how many data series the user selected\n\n    if(len(dados) == 1):\n        ax.bar(a,dados[0], width/2, align=\"center\", color=cores[0], label = str(paises[0]))\n\n    if(len(dados) == 2):\n        ax.bar(a-width/2, dados[0], width/2, align=\"center\", color=cores[0], label = str(paises[0]))\n        ax.bar(a,dados[1], width/2, align=\"center\", color=cores[1], label = str(paises[1]))\n\n    if(len(dados) == 3):\n        ax.bar(a-width/3, dados[0], width/3, align=\"center\", color=cores[0], label = str(paises[0]))\n        ax.bar(a,dados[1], width/3, align=\"center\", color=cores[1], label = str(paises[1]))\n        ax.bar(a+width/3, dados[2], width/3, align=\"center\", color=cores[2], label = str(paises[2]))\n\n    if(len(dados) == 4):\n        ax.bar(a-width/4, dados[0], width/4, align=\"center\", color=cores[0], label = str(paises[0]))\n        ax.bar(a-width/2, dados[1], width/4, align=\"center\", color=cores[1], label = str(paises[1]))\n        ax.bar(a+width/2, dados[2], width/4, align=\"center\", color=cores[2], label = str(paises[2]))\n        ax.bar(a+width/4, dados[3], width/4, align=\"center\", color=cores[3], label = str(paises[3]))\n\n    if(len(dados) == 5):\n        ax.bar(a-width/5, dados[0], width/5, align=\"center\", color=cores[0], label = str(paises[0]))\n        ax.bar(a-width/2.5, dados[1], width/5, align=\"center\", color=cores[1], label = str(paises[1]))\n        ax.bar(a, dados[2], width/5, align=\"center\", color=cores[2], label = str(paises[2]))\n        ax.bar(a+width/2.5, dados[3], width/5, align=\"center\", color=cores[3], label = str(paises[3]))\n        ax.bar(a+width/5, dados[4], width/5, align=\"center\", color=cores[4], label = str(paises[4]))\n    return ax\n\n","repo_name":"eriklemy/Projeto_CAE","sub_path":"util/MontarGrafico.py","file_name":"MontarGrafico.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74125039932","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# In[22]:\n\n\ndef f(x):\n\tval=x**4-2*x+1\n\treturn val\n\n#setting endpoints and number of values\nN=1000000\na=0.\nb=5.\n
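# Hedged editor note (not part of the original): for f(x)=x**4-2*x+1 the antiderivative\n# is x**5/5 - x**2 + x, so the exact integral is 605.0 on [0,5] and 4.4 on [0,2] (the\n# case printed at the bottom of this file) -- handy check values for the rules below.\n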
def riemann_dan(f,a,b,N):\n#setting bin width\n\tdelta_x=(b-a)/N\n\n#Create empty lists\n\txvals=[]\n\tyvals=[]\n\ts=0\n\n#Loop over all y values\n\tfor i in range(0,N,1):\n#set x value at left edge of bin\n\t\tx=a+i*delta_x\n#set function value of bin\n\t\ty=f(x)\n#append x and y to list\n\t\txvals.append(x)\n\t\tyvals.append(y)\n#increment sum\t\n\t\ts+=y\n\ts*=delta_x\n\treturn s\n\n\ndef trapezoid(f,a,b,N):\n    h = (b-a)/N\n    fa = f(a)\n    fb = f(b)\n    y = 0\n    # composite trapezoid rule: endpoints get weight 1/2, each interior point is summed once\n    for i in range(1,N):\n        y += f(a+i*h)\n    s = h*(.5*fa + .5*fb + y)\n    return s\n    \n    \n    \ndef simpson(f,a,b,N):\n    h = (b-a)/N\n    fa = f(a)\n    fb = f(b)\n    y1 = 0\n    y2 = 0\n    for i in range(1,N,2):\n        y1+= f(a+i*h)\n    for i in range(2,N-1,2):\n        y2 += f(a+i*h)\n    s = 1/3*h*(fa + fb + 4*y1 + 2*y2)\n    return s\n    \n    \n    \n\n\n# In[23]:\n\n\nprint(simpson(f,0,2,1000))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"dgrin1/inclass_hw3_2020","sub_path":"row2ints.py","file_name":"row2ints.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"19737949612","text":"import requests\nfrom http.cookiejar import LWPCookieJar\nfrom bs4 import BeautifulSoup\nimport pprint\n\nflag = ['typeflag Manhwa', 'typeflag Manga', 'typeflag Manhwa']\n\nclass MangaScrapper:\n    def __init__(self, link = 'https://kiryuu.id'):\n        self.link = link\n        self._requestText = None\n        self.session = self._build_session()\n\n    def _request(self, link:str):\n        self._requestText = self.session.get(link).text\n\n    def _build_session(self) -> requests.Session:\n        \"\"\"\n        Build a new session\n        \"\"\"\n\n        session = requests.Session()\n        session.headers[\"User-Agent\"] = \"Mozilla/5.0 (Linux; Android 7.0; 5060 Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/58.0.3029.83 Mobile Safari/537.36\"\n        session.cookies = LWPCookieJar()\n        return session\n\n    def getHome(self, page = None):\n        if page is None:\n            self._request(self.link)\n        else:\n            self._request(self.link+\"/page/{}/\".format(str(page)))\n\n        soup = BeautifulSoup(self._requestText, \"html.parser\")\n        content = soup.find('div', id=\"content\")\n        popular_today = content.find('div', class_='hotslid')\n        post_body = content.find('div', class_='postbody')\n        # sidebar = content.find('div', id=\"sidebar\")\n\n        result = {}\n\n        # postbody\n        for i, post in enumerate(post_body.find_all('div', class_=\"bixbox\")):\n            if not post.find('div', class_=\"series-gen\"):\n                releases = post.find('div', class_=\"releases\").find(\"h2\")\n                # print(releases.text)\n                list_upd = post.find('div', class_=\"listupd\").find_all(\"div\", class_=\"utao\")\n\n                result[releases.text] = []\n                for upd in list_upd: \n                    upd_item = {\n                        \"title\": upd.find('div', class_=\"luf\").a.h4.text,\n                        \"cover\": upd.find('div', class_=\"imgu\").img['src'],\n                        \"link\": upd.find('div', class_=\"imgu\").a['href']\n                    }\n\n                    if upd.find('div', class_=\"luf\").ul is not None:\n                        upd_item['type'] = upd.find('div', class_=\"luf\").ul['class'][0]\n                    else:\n                        upd_item['type'] = 'Belum Rilis'\n\n                    result[releases.text].append(upd_item)\n\n            elif post.find('div', class_=\"series-gen\"):\n                releases = post.find('div', class_=\"releases\").find(\"h2\")\n                series_gen = post.find('div', class_=\"series-gen\")\n\n                result[releases.text] = {}\n                \n                # list_upd = series_gen.find(\"div\", class_=\"listupd\").find_all('div')\n                \n                # print(pprint.pformat(list_upd))\n\n                for i, rekomendasi in enumerate(series_gen.find_all('li')):\n                    try:\n                        list_upd = series_gen.find(\"div\", class_=\"listupd\").find_all('div', class_=\"tab-pane\")[i]\n                    except IndexError as e:\n                        continue\n\n                    result[releases.text][rekomendasi.a.text] = []\n                    for bs in list_upd.find_all('div', class_=\"bs\"):\n                        bsx = bs.find('div', class_=\"bsx\")\n\n                        if bsx:\n                            upd_item = {\n                                \"title\": bsx.a['title'],\n                                \"cover\": bsx.find('div', 
class_=\"limit\").img['src'],\n \"link\": bsx.a['href'],\n \"type\": bsx.find('div', class_=\"limit\").find(\"span\", class_=\"type\")['class'][1]\n }\n\n result[releases.text][rekomendasi.a.text].append(upd_item)\n\n # print(\"jumlah dari \" + releases.text, len(result[releases.text]))\n\n # hotlist\n for i, post in enumerate(popular_today):\n releases = post.find('div', class_=\"releases\").find(\"h2\")\n # print(releases.text)\n list_upd = post.find('div', class_=\"listupd\").find_all(\"div\", class_=\"bs\")\n\n result[releases.text] = []\n for upd in list_upd: \n bsx = upd.find('div', class_=\"bsx\")\n\n if bsx:\n upd_item = {\n \"title\": bsx.a['title'],\n \"cover\": bsx.find('div', class_=\"limit\").img['src'],\n \"link\": bsx.a['href'],\n \"type\": bsx.find('div', class_=\"limit\").find(\"span\", class_=\"type\")['class'][1]\n }\n\n result[releases.text].append(upd_item)\n\n # serial populer\n # incomming\n\n print(pprint.pformat(result))\n\n return result\n\n def mangaInfo(self, manga: str):\n manga = manga.replace(f\"{self.link}\", '')\n self._request(self.link + manga)\n\n soup = BeautifulSoup(self._requestText, \"html.parser\")\n post_body = soup.find('div', class_=\"postbody\")\n\n # information\n information = post_body.find(\"div\", class_=\"seriestucon\")\n genres = information.find('div', class_='seriestucontentr').find('div', class_=\"seriestugenre\").find_all('a')\n information_tables = information.find('div', class_='seriestucontentr').find(\"table\", class_=\"infotable\").find('tbody').find_all('tr')\n sinopsis = information.find('div', class_='seriestucontentr').find('div', class_='entry-content').find_all('p')\n\n # episodes\n chapters = post_body.find(\"div\", class_=\"epcheck\").find('ul', class_='clstyle').find_all('li')\n\n # \"\\n\".join(.find('div', class_='entry-content').find_all('p'))\n result = {\n \"title\": information.find(\"div\", class_=\"seriestuheader\").h1.text,\n \"title_alternatif\": information.find(\"div\", class_=\"seriestuheader\").div.text.strip(),\n \"sinopsis\": '\\n'.join([x.text for x in sinopsis]),\n \"information\": {},\n \"chapters\": [],\n \"genres\": []\n }\n\n # chapters loop\n for chapter in chapters:\n chapter_item = {\n \"title\": chapter.find(\"div\", class_=\"eph-num\").a.find(\"span\", class_=\"chapternum\").text,\n \"link\": chapter.find(\"div\", class_=\"eph-num\").a['href'],\n \"updateat\": chapter.find(\"div\", class_=\"eph-num\").a.find(\"span\", class_=\"chapterdate\").text\n }\n\n result['chapters'].append(chapter_item)\n\n # genre loop\n for genre in genres:\n genre_item = {\n \"title\": genre.text,\n \"link\": genre['href']\n }\n result['genres'].append(genre_item)\n\n # information loop\n for informa in information_tables:\n td = informa.find_all('td')\n result['information'][td[0].text.lower().replace(' ', '')] = td[1].text\n\n return result\n\n def getManga(self, manga: str):\n manga = manga.replace(f\"{self.link}\", '')\n self._request(self.link + manga)\n \n soup = BeautifulSoup(self._requestText, \"html.parser\")\n post_area = soup.find('div', class_=\"postarea\")\n\n result = []\n\n for manga_img in post_area.find(\"div\", id=\"readerarea\").find_all('img'):\n result.append(manga_img['src'])\n\n \n return result\n\n def searchManga(self, query: str, page: str = None):\n query = query.replace(f\"{self.link}\", '')\n if page is not None:\n self._request(\"%s/page/%s/?s=%s\" % (self.link, page, query))\n else:\n self._request(\"%s/?s=%s\" % (self.link, query))\n\n soup = BeautifulSoup(self._requestText, 
\"html.parser\")\n post_body = soup.find(\"div\", class_=\"postbody\")\n \n result = {\n \"manga_result\": []\n }\n\n # scrap for manga\n for manga_search in post_body.find(\"div\", class_=\"listupd\").find_all(\"div\", class_=\"bs\"):\n bsx = manga_search.find(\"div\", class_=\"bsx\")\n\n manga_item = {\n \"title\": bsx.a['title'],\n \"cover\": bsx.find('div', class_=\"limit\").img['src'],\n \"link\": bsx.a['href'],\n \"type\": bsx.find('div', class_=\"limit\").find(\"span\", class_=\"type\")['class'][1],\n \"chapter\": bsx.find('div', class_=\"epxs\").text\n }\n\n result['manga_result'].append(manga_item)\n\n # scrap for pagenation\n pagination = post_body.find(\"div\", class_=\"pagination\")\n max_page = pagination.find_all(\"a\", class_=\"page-numbers\")\n\n if max_page:\n if len(max_page) >= 4:\n max_page = max_page[2]\n result['max_page'] = int(max_page.text)\n elif len(max_page) == 3:\n max_page = max_page[1]\n result['max_page'] = int(max_page.text)\n elif len(max_page) == 2:\n max_page = max_page[0]\n result['max_page'] = int(max_page.text)\n \n return result\n\n def getMangaList(self, option):\n pass\n\n def _isLink(self, link: str):\n print(link)\n return link.startswith('http') or link.startswith('https')\n\nif __name__ == \"__main__\":\n manga = MangaScrapper().getHome()","repo_name":"kevinoctavian/myApp-api","sub_path":"scrapper/mangascrapper.py","file_name":"mangascrapper.py","file_ext":"py","file_size_in_byte":9262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74571467452","text":"# _*_ coding:utf-8 _*_\n\n'''\n使用传统的机器学习的方法进行文本情感分析\n'''\n\nimport codecs\nimport jieba\nimport numpy as np\n\n# from gensim.models.doc2vec import Doc2Vec\nfrom gensim.models.word2vec import Word2Vec\nfrom sklearn.externals import joblib\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import neighbors\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\n\n\n# 对每个句子的所有词向量取均值\n# text 需要是切完词的 词列表\n# size 一般是词向量的维度\n# word_vector_model: 训练好的词向量模型 (一般使用 gensim 中的 WordVector 进行词向量训练)或者是直接加载训练好的模型\ndef buildWordVector(text, size, word_vector_model):\n vec = np.zeros(size).reshape((1, size))\n count = 0.\n for word in text:\n try:\n vec += word_vector_model[word].reshape((1, size))\n count += 1.\n except KeyError:\n continue\n if count != 0:\n vec /= count\n return vec\n\n\n# 计算词向量\ndef get_train_vecs(x_train, x_test, n_dim):\n '''\n x_train: 训练集\n x_test: 测试集\n n_dim: 训练词向量的维度\n '''\n n_dim = n_dim\n # 初始化模型和生成词汇表\n all_text = x_train + x_test\n text_w2v = Word2Vec(size=n_dim, min_count=5, workers=1)\n text_w2v.build_vocab(all_text)\n text_w2v.train(all_text, total_examples=text_w2v.corpus_count, epochs=5)\n\n # 分别得到训练集和测试集文本的词向量合集,这个数据集就很大了\n train_vecs = np.concatenate([buildWordVector(text, n_dim, text_w2v) for text in x_train])\n # 将训练集的词向量进行保存\n np.save(storedpaths + \"train_vecs.npy\", train_vecs)\n print(\"训练集数据的词向量维度:{}\".format(train_vecs.shape))\n\n test_vecs = np.concatenate([buildWordVector(text, n_dim, text_w2v) for text in x_test])\n # 将测试集的词向量进行保存\n np.save(storedpaths + \"test_vecs.npy\", test_vecs)\n print(\"测试集数据的词向量维度:{}\".format(test_vecs.shape))\n\n # 
\n\n# Compute the averaged word vectors for both splits\ndef get_train_vecs(x_train, x_test, n_dim):\n    '''\n    x_train: the training set\n    x_test: the test set\n    n_dim: dimension of the word vectors to train\n    '''\n    n_dim = n_dim\n    # Initialize the model and build the vocabulary\n    all_text = x_train + x_test\n    text_w2v = Word2Vec(size=n_dim, min_count=5, workers=1)\n    text_w2v.build_vocab(all_text)\n    text_w2v.train(all_text, total_examples=text_w2v.corpus_count, epochs=5)\n\n    # Build the word-vector collections for the train and test texts; this dataset gets large\n    train_vecs = np.concatenate([buildWordVector(text, n_dim, text_w2v) for text in x_train])\n    # Save the training-set word vectors\n    np.save(storedpaths + \"train_vecs.npy\", train_vecs)\n    print(\"Word-vector shape of the training data: {}\".format(train_vecs.shape))\n\n    test_vecs = np.concatenate([buildWordVector(text, n_dim, text_w2v) for text in x_test])\n    # Save the test-set word vectors\n    np.save(storedpaths + \"test_vecs.npy\", test_vecs)\n    print(\"Word-vector shape of the test data: {}\".format(test_vecs.shape))\n\n    # Save the word2vec model so later predictions can reuse the pretrained vectors\n    text_w2v.save(storedpaths + \"w2v_model.pkl\")\n\n\n# Load the vectorized texts and labels (the train/test word vectors saved in get_train_vecs)\ndef get_data():\n    train_vecs = np.load(storedpaths + 'train_vecs.npy')\n    y_train = np.load(storedpaths + 'y_train.npy')\n    test_vecs = np.load(storedpaths + 'test_vecs.npy')\n    y_test = np.load(storedpaths + 'y_test.npy')\n    return train_vecs, y_train, test_vecs, y_test\n\n\n# Train the SVM model and save it\ndef svm_train(train_vecs, y_train, test_vecs, y_test):\n    clf = SVC(kernel='rbf', verbose=True)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the naive Bayes model and save it\ndef NB_train(train_vecs, y_train, test_vecs, y_test):\n    gnb = GaussianNB()\n    gnb.fit(train_vecs, y_train)\n    joblib.dump(gnb, storedpaths + 'model_gnb.pkl')\n    test_scores = gnb.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the decision-tree model and save it\ndef decision_tree(train_vecs, y_train, test_vecs, y_test):\n    clf = DecisionTreeClassifier(max_depth=10, min_samples_split=2, random_state=0)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_dtree.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the random-forest model and save it\ndef random_forest(train_vecs, y_train, test_vecs, y_test):\n    clf = RandomForestClassifier(n_estimators=10, max_depth=10, min_samples_split=2, n_jobs=1, random_state=0)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_randomforest.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the ExtraTreesClassifier and save it\ndef extract_tree(train_vecs, y_train, test_vecs, y_test):\n    clf = ExtraTreesClassifier(n_estimators=10, max_depth=10, min_samples_split=2, n_jobs=1, random_state=0)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_extracttree.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the GBDT classifier and save it\ndef gbdt_classifier(train_vecs, y_train, test_vecs, y_test):\n    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=10, random_state=0)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_gbdt.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the k-nearest-neighbour classifier and save it\ndef nn_classifier(n_neighbors, train_vecs, y_train, test_vecs, y_test):\n    clf = neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_nn.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n
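# Hedged editor note (not part of the original): in the LogisticRegression call below, C\n# is scaled by the training-set size; sklearn sums the log-loss over samples while the\n# penalty term stays at 1/C, so C ~ 1/n keeps the l1 strength comparable across corpus sizes.\n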
\n# Train the LogisticRegression classifier and save it\ndef LR_classifier(train_vecs, y_train, test_vecs, y_test):\n    clf = LogisticRegression(C=50. / len(y_train), multi_class='multinomial',penalty='l1', solver='saga', tol=0.1)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_lr.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the stochastic-gradient-descent classifier and save it\ndef SGD_classifier(train_vecs, y_train, test_vecs, y_test):\n    clf = SGDClassifier(alpha=0.001, max_iter=100)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_sgd.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n\n\n# Train the multilayer-perceptron classifier and save it\ndef MP_classifier(train_vecs, y_train, test_vecs, y_test):\n    clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)\n    clf.fit(train_vecs, y_train)\n    joblib.dump(clf, storedpaths + 'model_mp.pkl')\n    test_scores = clf.score(test_vecs, y_test)\n    return test_scores\n
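# Hedged editor note (not part of the original): the train/save helpers above differ only\n# in the estimator and the pickle name, so a generic helper could replace them, e.g.\n#   def train_and_save(clf, model_name, train_vecs, y_train, test_vecs, y_test):\n#       clf.fit(train_vecs, y_train)\n#       joblib.dump(clf, storedpaths + model_name)\n#       return clf.score(test_vecs, y_test)\n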
\n\n# Get the word vector of a single sentence to be predicted\n# the sentence is tokenized first\ndef get_predict_vecs(string, n_dim, w2v_model_path):\n    '''\n    string: the input sentence\n    n_dim: word-vector dimension\n    w2v_model_path: path of the pretrained word-vector model\n    '''\n    n_dim = n_dim\n    text_w2v = Word2Vec.load(w2v_model_path)\n    words = [i for i in jieba.cut(string, cut_all=False)]\n    train_vecs = buildWordVector(words, n_dim, text_w2v)\n\n    return train_vecs\n\n\n# Call the trained model to predict\ndef svm_predict(string, trainmodelpath):\n    words_vecs = get_predict_vecs(string, n_dim, w2v_model_path)\n    clf = joblib.load(trainmodelpath)\n    result = clf.predict(words_vecs)\n\n    return result\n\nif __name__ == \"__main__\":\n    \"\"\"\n    Train and save the models first, then predict with them\n    \"\"\"\n    # datapaths = \"./data/\"\n    # storedpaths = \"./store/\"\n    #\n    # positive_data = []\n    # y_positive = []\n    #\n    # neutral_data = []\n    # y_neutral = []\n    #\n    # negative_data = []\n    # y_negative = []\n    #\n    # print(\"#------------------------------------------------------#\")\n    # print(\"Loading the datasets\")\n    # with codecs.open(datapaths + \"pos.csv\", \"r\", \"utf-8\") as f1, \\\n    #         codecs.open(datapaths + \"neutral.csv\", \"r\", \"utf-8\") as f2, \\\n    #         codecs.open(datapaths + \"neg.csv\", \"r\", \"utf-8\") as f3:\n    #     for line in f1:\n    #         positive_data.append(\" \".join(i for i in jieba.lcut(line.strip(), cut_all=False)))\n    #         # y_positive.append([1,0,0])\n    #         y_positive.append([0])\n    #     for line in f2:\n    #         neutral_data.append(\" \".join(i for i in jieba.lcut(line.strip(), cut_all=False)))\n    #         # y_neutral.append([0,1,0])\n    #         y_neutral.append([1])\n    #     for line in f3:\n    #         negative_data.append(\" \".join(i for i in jieba.lcut(line.strip(), cut_all=False)))\n    #         # y_negative.append([0,0,1])\n    #         y_negative.append([2])\n    #\n    # print(\"positive data:{}\".format(len(positive_data)))\n    # print(\"neutral data:{}\".format(len(neutral_data)))\n    # print(\"negative data:{}\".format(len(negative_data)))\n    #\n    # x_text = positive_data + neutral_data + negative_data\n    # y_label = y_positive + y_neutral + y_negative\n    # print(\"#------------------------------------------------------#\")\n    # print(\"\\n\")\n    #\n    # # Shuffle the dataset\n    # shuffle_indices = np.random.permutation(np.arange(len(y_label)))\n    # train_test_percent = 0.2\n    #\n    # x_train = []\n    # x_test = []\n    #\n    # y_train = []\n    # y_test = []\n    #\n    # for i in shuffle_indices[:-(int(len(shuffle_indices) * train_test_percent))]:\n    #     x_train.append(x_text[i])\n    #     y_train.append(y_label[i])\n    #\n    # for i in shuffle_indices[-(int(len(shuffle_indices) * train_test_percent)):]:\n    #     x_test.append(x_text[i])\n    #     y_test.append(y_label[i])\n    #\n    # x_train_pos = 0\n    # x_train_neu = 0\n    # x_train_neg = 0\n    #\n    # x_test_pos = 0\n    # x_test_neu = 0\n    # x_test_neg = 0\n    #\n    # for i in y_train:\n    #     if i[0] == 0:\n    #         x_train_pos += 1\n    #     elif i[0] == 1:\n    #         x_train_neu += 1\n    #     else:\n    #         x_train_neg += 1\n    #\n    # for i in y_test:\n    #     if i[0] == 0:\n    #         x_test_pos += 1\n    #     elif i[0] == 1:\n    #         x_test_neu += 1\n    #     else:\n    #         x_test_neg += 1\n    #\n    # print(\"#------------------------------------------------------#\")\n    #\n    # print(\"Saving the label data\")  # these labels are already tokenized and can be used directly\n    # np.save(storedpaths + \"y_train.npy\", np.array(y_train))\n    # np.save(storedpaths + \"y_test.npy\", np.array(y_test))\n    #\n    # print(\"Training set total: {}\".format(len(x_train)))\n    # print(\"Training positive samples: {}\".format(x_train_pos))\n    # print(\"Training neutral samples: {}\".format(x_train_neu))\n    # print(\"Training negative samples: {}\".format(x_train_neg))\n    #\n    # print(\"Test set total: {}\".format(len(x_test)))\n    # print(\"Test positive samples: {}\".format(x_test_pos))\n    # print(\"Test neutral samples: {}\".format(x_test_neu))\n    # print(\"Test negative samples: {}\".format(x_test_neg))\n    #\n    # print(\"#------------------------------------------------------#\")\n    # print(\"\\n\")\n    #\n    # # Train model\n    # n_dim = 100  # 100 dimensions by default\n    # n_neighbors = 10\n    #\n    # # Build the word-vector model\n    # get_train_vecs(x_train, x_test, n_dim)\n    #\n    # # Load the vectorized texts and labels\n    # train_vecs, y_train, test_vecs, y_test = get_data()\n\n    # Train each model and check its score on the test set\n    # test_scores = svm_train(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"SVM test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n\n    # test_scores = NB_train(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"NB test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = nn_classifier(n_neighbors, train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"NN test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = LR_classifier(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"LR test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = SGD_classifier(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"SGD test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = decision_tree(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"TREE test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = random_forest(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"Random_Forest test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = extract_tree(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"Extract_Tree test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = gbdt_classifier(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"GBDT_Tree test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n    #\n    # test_scores = MP_classifier(train_vecs, y_train, test_vecs, y_test)\n    # print(\"#----------------------------------------#\")\n    # print(\"MP test-set score: {}\".format(test_scores))\n    # print(\"#----------------------------------------#\")\n\n    
# Predict a single sentence\n    # string = '最大的困扰是穴位有点把不准,不过我牢记中里先生离穴不离经的教诲。'\n    # trainmodelpath = './store/model.pkl'\n    # w2v_model_path = './store/w2v_model.pkl'\n    # n_dim = 300\n    # # n_dim = 100\n    # print(svm_predict(string, trainmodelpath))\n\n    # Predict a batch of sentences from a file\n    trainmodelpath = './store/model.pkl'\n    w2v_model_path = './store/w2v_model.pkl'\n    n_dim = 100\n    with open('./data/predict_test.txt',mode='r',encoding='utf-8') as fr:\n        with open('./out/predict_out_svm.txt',mode='w',encoding='utf-8') as fw:\n            for line in fr.readlines():\n                fw.write(str(svm_predict(line,trainmodelpath)[0]) + '\\n')","repo_name":"7125messi/sentiment_analysis_from_raw_corpus","sub_path":"05_sentiment_analysis_ml.py","file_name":"05_sentiment_analysis_ml.py","file_ext":"py","file_size_in_byte":14273,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"28774800454","text":"N, M, C = [int(i) for i in input().split()]\nBList = [int(i) for i in input().split()]\nALists = [[int(i) for i in input().split()] for _ in range(N)]\n\nans = 0\nfor AList in ALists:\n    point = C\n    for i, A in enumerate(AList):\n        point += A * BList[i]\n    if point > 0:\n        ans += 1\n\nprint(ans)\n\n","repo_name":"yasunariston/atCoder","sub_path":"ABC/121/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71664336585","text":"n=int(input())\r\nc=0\r\nl1=list(map(int,input().split()))\r\nl2=list(map(int,input().split()))\r\nl1.pop(0)\r\nl2.pop(0)\r\nd={}\r\nfor i in l1:\r\n    if i not in d:\r\n        d[i]=1\r\n        c+=1\r\nfor i in l2:\r\n    if i not in d:\r\n        d[i]=1\r\n        c+=1\r\n\r\nf=0\r\nfor i in range(1,n+1):\r\n    if i not in d:\r\n        f=1\r\n        break\r\nif f==1:\r\n    print('Oh, my keyboard!')\r\nelse:\r\n    print('I become the guy.')\r\n","repo_name":"sandeep414-boo/codeforces","sub_path":"I Wanna Be the Guy.py","file_name":"I Wanna Be the Guy.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4268429622","text":"import pytest\n\nfrom django import db\nfrom django.core.management import call_command\nfrom django.db.migrations.executor import MigrationExecutor\nfrom unittest import mock\n\nfrom submission import constants\n\n\n@pytest.fixture\ndef erp_zendesk_payload():\n    return {\n        'meta': {\n            'action_name': constants.ACTION_NAME_ZENDESK,\n            'email_address': 'erp+testform+testform@gmail.com',\n            'full_name': 'test test',\n            'funnel_steps': [],\n            'ingress_url': 'https://erp.service.dev.uktrade.io/triage/',\n            'sender': {'country_code': None,\n                       'email_address': 'erp+testform@jhgk.com'},\n            'service_name': 'erp',\n            'spam_control': {},\n            'subject': 'ERP form was submitted'\n        },\n        'data': {\n            'commodities': '190300 - Other',\n            'company_name': 'TASTE OF FOOD',\n            'company_number': '2387892',\n            'company_type': 'LIMITED',\n            'email': 'erp+testform@gmail.com',\n            'employees': '1-10',\n            'employment_regions': ['NORTH_EAST'],\n            'family_name': 'test',\n            'given_name': 'test',\n            'has_market_price_changed': False,\n            'has_market_size_changed': False,\n            'has_other_changes': False,\n            'has_price_changed': False,\n            'has_volume_changed': False,\n            'market_size_known': False,\n            'other_information': 'test',\n            'quarter_four_2018': '0',\n            'quarter_one_2019': '0',\n            'quarter_three_2019': '0',\n            'quarter_two_2019': '0',\n            'sales_volume_unit': 'KILOGRAM',\n            'sector': 'AEROSPACE',\n            'tariff_quota': 'N/A',\n            'tariff_rate': 'N/A',\n            'turnover': '0-25k'\n        },\n        
}\n\n\n@pytest.fixture\ndef email_action_payload():\n return {\n 'data': {\n 'text_body': 'hello there',\n 'html_body': 'Hello there',\n },\n 'meta': {\n 'action_name': constants.ACTION_NAME_EMAIL,\n 'recipients': ['foo@bar.com', 'foo2@bar.com'],\n 'subject': 'Hello',\n 'reply_to': ['email-user@example.com'],\n }\n }\n\n\n@pytest.fixture\ndef zendesk_action_payload():\n return {\n 'data': {\n 'title': 'hello',\n },\n 'meta': {\n 'action_name': constants.ACTION_NAME_ZENDESK,\n 'subject': 'Hello',\n 'full_name': 'Jim Example',\n 'email_address': 'zendesk-user@example.com',\n 'service_name': 'Market Access',\n 'form_url': '/some/form/',\n 'ingress_url': 'https://www.example.com',\n 'sort_fields_alphabetically': True\n },\n }\n\n\n# This payload is to support backward compatibility with gov-notify action\n# which is replaced by gov-notify-email eventually this can be removed.\n@pytest.fixture\ndef gov_notify_action_payload_old():\n return {\n 'data': {\n 'title': 'hello',\n },\n 'meta': {\n 'action_name': 'gov-notify',\n 'template_id': '213123',\n 'email_address': 'notify-user@example.com',\n }\n }\n\n\n@pytest.fixture\ndef gov_notify_email_action_payload():\n return {\n 'data': {\n 'title': 'hello',\n },\n 'meta': {\n 'action_name': constants.ACTION_NAME_GOV_NOTIFY_EMAIL,\n 'template_id': '213123',\n 'email_address': 'notify-user@example.com',\n 'sender': {\n 'country_code': None,\n 'email_address': 'erp+testform@jhgk.com',\n 'ip_address': '252.252.928.233'\n },\n }\n }\n\n\n@pytest.fixture\ndef gov_notify_letter_action_payload():\n return {\n 'data': {\n 'address_line_1': 'The Occupier',\n 'address_line_2': '123 High Street',\n 'postcode': 'SW14 6BF',\n 'name': 'John Smith',\n },\n 'meta': {\n 'action_name': constants.ACTION_NAME_GOV_NOTIFY_LETTER,\n 'template_id': '21312345',\n }\n }\n\n\n@pytest.fixture\ndef pardot_action_payload():\n return {\n 'data': {\n 'title': 'hello',\n },\n 'meta': {\n 'action_name': constants.ACTION_NAME_PARDOT,\n 'pardot_url': 'http://www.example.com/some/submission/path/',\n }\n }\n\n\n@pytest.fixture()\ndef migration(transactional_db):\n \"\"\"\n This fixture returns a helper object to test Django data migrations.\n The fixture returns an object with two methods;\n - `before` to initialize db to the state before the migration under test\n - `after` to execute the migration and bring db to the state after the\n migration. 
The methods return `old_apps` and `new_apps` respectively; these\n    can be used to initiate the ORM models as in the migrations themselves.\n    For example:\n    def test_foo_set_to_bar(migration):\n        old_apps = migration.before([('my_app', '0001_inital')])\n        Foo = old_apps.get_model('my_app', 'foo')\n        Foo.objects.create(bar=False)\n        assert Foo.objects.count() == 1\n        assert Foo.objects.filter(bar=False).count() == Foo.objects.count()\n        # executing migration\n        new_apps = migration.apply('my_app', '0002_set_foo_bar')\n        Foo = new_apps.get_model('my_app', 'foo')\n        assert Foo.objects.filter(bar=False).count() == 0\n        assert Foo.objects.filter(bar=True).count() == Foo.objects.count()\n    From: https://gist.github.com/asfaltboy/b3e6f9b5d95af8ba2cc46f2ba6eae5e2\n    \"\"\"\n    class Migrator:\n        def before(self, migrate_from):\n            \"\"\" Specify app and starting migration name as in:\n                before(['app', '0001_before']) => app/migrations/0001_before.py\n            \"\"\"\n\n            self.migrate_from = migrate_from\n            self.executor = MigrationExecutor(db.connection)\n            self.executor.migrate(self.migrate_from)\n            self._old_apps = self.executor.loader.project_state(\n                self.migrate_from).apps\n            return self._old_apps\n\n        def apply(self, app, migrate_to):\n            \"\"\" Migrate forwards to the \"migrate_to\" migration \"\"\"\n            self.migrate_to = [(app, migrate_to)]\n            self.executor.loader.build_graph()  # reload.\n            self.executor.migrate(self.migrate_to)\n            self._new_apps = self.executor.loader.project_state(\n                self.migrate_to).apps\n            return self._new_apps\n\n    yield Migrator()\n    call_command('migrate')\n\n\n@pytest.fixture(autouse=False)\ndef mock_middleware_test_sig():\n    yield mock.patch(\n        'client.helpers.RequestSignatureChecker.test_signature',\n        return_value=True,\n    ).start()\n","repo_name":"uktrade/directory-forms-api","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71325910665","text":"from itertools import cycle\n\nfrom individual_circuit import *\n\nclass Population:\n    \n    def __init__(self):\n        self.pop_size = 0\n        self.individuals = []\n        self.fittest = None\n        self.least_fittest = None\n    \n    \n    def fill_population(self, pop_size, target, num_one_gates, num_two_gates, max_moments, n_qubits):\n        added = 0\n        # keep drawing random tensors until pop_size unique circuits have been added\n        while added < pop_size:\n            unique = True\n            random_tensor = generate_random_tensor(n_qubits, num_one_gates, num_two_gates, max_moments)\n            for j in range(self.pop_size):\n                if random_tensor == self.individuals[j].tensor:\n                    unique = False\n                    break\n            if unique:\n                curr_circuit = IndividualCircuit(random_tensor, target, num_one_gates, num_two_gates, n_qubits)\n                self.pop_size += 1\n                self.individuals.append(curr_circuit)\n                added += 1\n        self.update()\n    
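    # Hedged editor sketch (not part of the original; the argument values are invented):\n    # a minimal driver for this class might look like\n    #   pop = Population()\n    #   pop.fill_population(pop_size=50, target=target, num_one_gates=4,\n    #                       num_two_gates=2, max_moments=10, n_qubits=3)\n    #   pop.train_all()\n    #   elites = pop.get_fittests(5)\n    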
\n    \n    def add_circuit(self, circuit, increase_size=True):\n        self.individuals.append(circuit)\n        if increase_size:\n            self.pop_size += 1\n        self.update()\n    \n    \n    def add_circuits(self, circuits, increase_size=True):\n        self.individuals.extend(circuits.individuals)\n        if increase_size:\n            self.pop_size += len(circuits.individuals)\n        self.update()\n    \n    \n    def get_fittest_fitness(self):\n        self.update()\n        return self.fittest.get_fitness()\n    \n    \n    def remove_circuit(self, circuit):\n        self.individuals.remove(circuit)\n        self.pop_size -= 1\n        self.update()\n    \n    \n    def update(self):\n        self.individuals.sort(key=lambda circuit:circuit.fitness, reverse=True)\n        self.fittest = self.individuals[0]\n        self.least_fittest = self.individuals[-1]\n        self.pop_size = len(self.individuals)\n    \n    \n    def train_all(self):\n        start_time = time.time()\n        upper_limit = 500\n        iterations = int(self.pop_size/upper_limit)\n        new_individuals = []\n        \n        for j in range(iterations):\n            args = []\n            gpu_device = cycle(range(8))\n            for i in range(upper_limit):\n                # index into the current batch, not the first 500 individuals every time\n                args.append((self.individuals[j * upper_limit + i], str(next(gpu_device))))\n\n            with mp.get_context(\"spawn\").Pool(processes=upper_limit) as pool:\n                results = pool.starmap(circuit_fitness, args)\n\n            new_individuals += results\n        \n        remainder = self.pop_size % upper_limit\n\n        args = []\n        gpu_device = cycle(range(8))\n        for i in range(iterations * upper_limit, iterations * upper_limit + remainder):\n            args.append((self.individuals[i], str(next(gpu_device))))\n        \n        if remainder > 0:\n            with mp.get_context(\"spawn\").Pool(processes=remainder) as pool:\n                results = pool.starmap(circuit_fitness, args)\n\n            new_individuals += results\n        \n        self.individuals = new_individuals\n        \n\n        end_time = time.time()\n        exec_time = end_time - start_time\n        print(\"Execution time multiprocessing {}\".format(exec_time))\n        self.update()\n    \n    \n    \n    def get_fittests(self, num_fittests):\n        \"\"\"\n        RETURN: copies of IndividualCircuit Objects\n        \"\"\"\n        self.fittest = self.individuals[0]\n        fittests = self.individuals[:num_fittests]\n        return copy.deepcopy(fittests)","repo_name":"yiyiccai/Quantum-Machine-Learning-for-Ansatz-Search","sub_path":"final_code/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41178775038","text":"#Display settings\n\nDEFAULT_IMAGE_SIZE=(300,300)\nFPS=144\nHEIGHT=1000\nWIDTH=1600\nSTART_X,START_Y=0,-300\nX_OFFSET,Y_OFFSET=20,0\n\nBG_IMAGE_PATH='graphics/0/bg.png'\nGAME_INDICES=[1,2,3] #there are 0 and 4 outside of the playing area\nSYMBOLS_PATH='graphics/0/symbols'\n\nFONT_STYLE='graphics/font/RobotoCondensed-LightItalic.ttf'\nFONT_SIZE=32\nBIG_FONT_SIZE=48\n#Slot symbols\nsymbols={\n    'diamond':f\"{SYMBOLS_PATH}/0_diamond.png\",\n    'floppy':f\"{SYMBOLS_PATH}/0_floppy.png\",\n    'hourglass':f\"{SYMBOLS_PATH}/0_hourglass.png\",\n    'seven':f\"{SYMBOLS_PATH}/0_seven.png\",\n    'telephone':f\"{SYMBOLS_PATH}/0_telephone.png\",\n}","repo_name":"destrike00/casino-is-life","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32123826247","text":"# Programmers: Game Map Shortest Distance (level 2)\nfrom collections import deque\ndef solution(maps):\n\n    x_len=len(maps)\n\n    y_len=len(maps[0])\n\n    dx=[1,-1,0,0]\n    dy=[0,0,1,-1]\n    maps[0][0]=0\n    queue=deque()\n    queue.append((0,0,1))\n    while queue:\n        x,y,answer=queue.popleft()\n\n        for i in range(4):\n            next_x = x + dx[i]\n            next_y = y + dy[i]\n            if 0 <= next_x < x_len and 0 <= next_y < y_len:\n                if maps[next_x][next_y] == 1:\n                    maps[next_x][next_y] = answer+1\n                    queue.append((next_x,next_y,maps[next_x][next_y]))\n\n    if maps[x_len-1][y_len-1]==1:\n        return -1\n    else:\n        return maps[x_len-1][y_len-1]\n","repo_name":"imdduoming/hitalgor","sub_path":"최단거리/게임맵최단거리.py","file_name":"게임맵최단거리.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8904163026","text":"#!/usr/bin/env python\r\nimport logging\r\nimport os\r\nimport psutil\r\nimport time\r\nimport datetime\r\nimport urllib\r\nimport urllib2\r\nfrom email.mime.text import MIMEText\r\nfrom Keys import Keys as limskeys\r\nimport smtplib\r\nimport 
traceback\r\nimport subprocess\r\nimport sys\r\nimport glob\r\nimport shutil\r\nimport xml.etree.ElementTree as ET\r\n\r\n# UNIVERSAL\r\nSILENCE_EMAIL = False\r\nTROVAEMON_ID = \"trovaemon\"\r\nTROVAEMON_PWD = \"trovaemon123\"\r\nTROVABASE_JAR = '/mnt/data/Bioinformatics/trovapipe/trovabase/v13/upload.jar'\r\nTROVAPIPE_SCRIPT_LOCATION = '/home/palak/isilon/BI-125-trovapipe-structure-modification/DataAnalysisWrapper_1604v1_0_0.py'\r\nMAX_LOG_SIZE = 500000000 # (500 Mb) \r\n\r\n# TESTING\r\nDATABASE = 'trovabase_dev' # test\r\nUNIFLOW_URL = 'https://trovagene_dev.uniconnect.com:8100/uniflow' # test\r\nREVIEW_CACHE = '/mnt/rnd/Bioinformatics/trovapipe_DEV/review' # test\r\n\r\n# PRODUCTION\r\n#UNIFLOW_URL = 'https://trovagene.uniconnect.com/uniflow' # production\r\n#DATABASE = 'trovabase' # production\r\n#REVIEW_CACHE = '/mnt/prd/review/' # production\r\n\r\n''' Returns a list of paths to MiSeq repositories\r\n'''\r\ndef Repositories() :\r\n\t\r\n\trepositories_file = os.path.join( \"Utilities\" , \"repositories.xml\" )\r\n\t#msg = \"Repo file used \" + repositories_file\r\n\t#logging.info( msg )\r\n\t#msg = \"Repo file used \" + os.path.abspath( repositories_file )\r\n\t#logging.info( msg )\r\n\t\r\n\tif not os.path.exists( repositories_file ) :\r\n\t\tlogging.critical( \"The Utilities/repositories.xml file is missing. Analysis cannot proceed...\" )\r\n\t\tlogging.critical( os.getcwd() )\r\n\t\tsys.exit()\r\n\r\n\ttree = ET.parse( repositories_file )\r\n\troot = tree.getroot()\r\n\t\r\n\trepositories = list()\r\n\t\r\n\tfor child in root:\r\n\t\tif child.tag == \"repository\" :\r\n\t\t\trepositories.append( child.text )\r\n\t\t\t#msg = \"Repo identified \" + child.text\r\n\t\t\t#logging.info( msg )\r\n\t\t\t\r\n\treturn repositories\r\n\t\t\r\n''' Deletes the lock file upon normal exit\r\n'''\r\ndef CleanUp( LOCK_FILE , APP_NAME ) :\r\n\tLogAndEmail('Stopping ' + APP_NAME + ': ' + datetime.datetime.fromtimestamp( time.time() ).strftime( '%Y-%m-%d %H:%M:%S' ) , 0 )\r\n\tif os.path.exists( LOCK_FILE ) : os.remove( LOCK_FILE )\r\n\tlogging.info( 'Complete.\\n')\r\n\r\n''' Central routing for errors and messages in general\r\n Priorities:\r\n\t0: Lowest, just log the message as info\r\n\t1: Low, log the message as info and email bioinformatics\r\n\t2: Medium, log the message as error, email bioinformatics\r\n\t>=3: High, log the message as critical, email bioinformatics & executive level\r\n'''\r\ndef LogAndEmail( message , priority ) :\r\n\r\n\tif priority == 0 :\r\n\t\tlogging.info( message )\r\n\telif priority > 0 :\r\n\t\tlogging.error( message )\r\n\t\tEmail( message , priority )\r\n\t\t\r\n''' Email message to bioinfo\r\n'''\r\ndef Email( message , priority ) :\r\n\r\n\tif SILENCE_EMAIL : return\r\n\t\r\n\tmsg = MIMEText( message )\r\n\r\n\tdistribution_list = os.path.join( \"Utilities\" , \"email.xml\" )\r\n\t\r\n\tpriority_1 = list() # not currently used, mostly for warnings\r\n\tpriority_2 = list() # basic error\r\n\tpriority_3 = list() # moderate error, not used\r\n\tpriority_4 = list() # critical error\r\n\t\r\n\tif not os.path.exists( distribution_list ) :\r\n\t\tlogging.critical( \"Email distribution list is not accessible. 
The message below was not delivered: \" )\r\n\t\tlogging.critical( message )\r\n\t\tsys.exit()\r\n\t\t\r\n\ttree = ET.parse( distribution_list )\r\n\troot = tree.getroot()\r\n\t\t\r\n\tfor child in root:\r\n\t\tif child.tag == \"Priority-1\" :\r\n\t\t\tfor grandchild in child :\r\n\t\t\t\tif grandchild.tag == \"address\" :\r\n\t\t\t\t\tpriority_1.append( grandchild.text )\r\n\t\telif child.tag == \"Priority-2\" :\r\n\t\t\tfor grandchild in child :\r\n\t\t\t\tif grandchild.tag == \"address\" :\r\n\t\t\t\t\tpriority_2.append( grandchild.text )\r\n\t\telif child.tag == \"Priority-3\" :\r\n\t\t\tfor grandchild in child :\r\n\t\t\t\tif grandchild.tag == \"address\" :\r\n\t\t\t\t\tpriority_3.append( grandchild.text )\r\n\t\telif child.tag == \"Priority-4\" :\r\n\t\t\tfor grandchild in child :\r\n\t\t\t\tif grandchild.tag == \"address\" :\r\n\t\t\t\t\tpriority_4.append( grandchild.text )\r\n\t\r\n\tsend_list = \"\"\r\n\t\r\n\tif priority == 1 :\r\n\t\tsend_list = \",\".join( priority_1 )\r\n\telif priority == 2 :\r\n\t\tsend_list = \",\".join( priority_2 )\r\n\telif priority == 3 :\r\n\t\tsend_list = \",\".join( priority_3 )\r\n\telif priority == 4 :\r\n\t\tsend_list = \",\".join( priority_4 )\r\n\t\t\r\n\tme = \"trovaemon@trovagene.com\"\r\n\r\n\t# use this later to automatically generate tickets?\r\n\t#cc = \"support@trovagene.atlassian.net\"\r\n\t\r\n\tme = \"trovaemon@trovagene.com\"\r\n\tmsg['Subject'] = 'AUTOMATED TROVAPIPE MESSAGE: Error'\r\n\tmsg['From'] = me\r\n\tmsg['To'] = send_list\r\n\t\r\n\ts = smtplib.SMTP('localhost') \r\n\t\r\n\t# s.set_debuglevel(1) # debug output 2 screen\r\n\t\r\n\t# if 'Cc' in msg :\r\n\t\t# s.sendmail( msg[ 'From' ] , msg[ 'To' ].split( ',' ) + msg[ 'Cc' ].split( ',' ), msg.as_string() )\r\n\t# else :\r\n\t\r\n\ts.sendmail( msg[ 'From' ] , msg[ 'To' ].split( ',' ) , msg.as_string() )\r\n\ts.quit()\r\n\t\r\n''' JSON comes back as a complex string, this\r\n converts to dictionary\r\n\t\r\n\tALERT: Exceptions not handled, you must catch\r\n\t- work with uniconnect to dev better schema\r\n'''\r\ndef ParseJSONOneRun( JSON ) :\r\n\t\r\n\ttokens1 = JSON.split( '{' )\r\n\ttokens2 = tokens1[ 1 ].split( '}' )\r\n\ttemp = tokens2[ 0 ].replace( '\\n' , '' )\r\n\tresults = dict()\r\n\tcomma_tokens = temp.split( ',' )\r\n\tfor token in comma_tokens :\r\n\t\tbits = token.replace( '\"' , '' ).split( ':' )\r\n\t\tresults[ bits[ 0 ].strip() ] = bits[ 1 ].strip()\r\n\t\r\n\treturn results\r\n\t\r\n''' JSON comes back as a complex string, this\r\n converts to dictionary of dictionaries:\r\n\tone per run \r\n\t\r\n\tALERT: Exceptions not handled, you must catch\r\n\t- work with uniconnect to dev better schema\r\n'''\r\ndef ParseJSONMultipleRuns( JSON ) :\r\n\tresults = dict()\r\n\ttemp = dict()\r\n\tleft_tokens = JSON.split( '{' )\r\n\tfor left_token in left_tokens :\r\n\t\tif left_token.startswith( '\"' ) :\r\n\t\t\tright_tokens = left_token.split( '}' )\r\n\t\t\tfor right_token in right_tokens :\r\n\t\t\t\tgroups = right_token.replace( '\\n,' , '' ).replace( '])' , '' ).strip().replace( '\\n' , '' ).strip().split( ',' )\r\n\t\t\t\ttemp = {}\r\n\t\t\t\tfor group in groups :\r\n\t\t\t\t\titems = group.split( '\":\"' )\r\n\t\t\t\t\tif len( items ) == 2 :\r\n\t\t\t\t\t\ttemp[ items[ 0 ].replace( '\"' , '' ).strip() ] = items[ 1 ].replace( '\"' , '' ).strip()\r\n\t\t\t\tif 'flowcellID' in temp :\r\n\t\t\t\t\tresults[ temp[ 'flowcellID' ] ] = temp\r\n\treturn results\r\n\t\r\n''' Uploads CSV results file to uniflow\r\n'''\r\ndef UploadRunResultsToUniflow( results_cache , flowcellID 
, run_folder_name ) :\r\n\r\n\ttry :\r\n\t\r\n\t\t# Verify all results files exist \r\n\t\t\r\n\t\twide_results = \"\" # Homo sapiens readable\r\n\t\traw_results = \"\"\r\n\t\tsample_sheet = \"\"\r\n\t\tstats_results = \"\"\r\n\t\tdb_results = \"\" # database readable\r\n\t\trunqc_results = \"\"\r\n\t\tlog_file = \"\"\r\n\t\t\r\n\t\toutfiles_dir = os.path.join( results_cache, run_folder_name )\r\n\t\treview_cache = os.path.join( REVIEW_CACHE , run_folder_name )\r\n\t\tif os.path.exists( review_cache ) : shutil.rmtree( review_cache )\r\n\t\tos.mkdir( review_cache )\r\n\t\t\r\n\t\t#results_files = glob.glob( os.path.join( results_cache , '*.csv' ) )\r\n\t\t#log_files = glob.glob( os.path.join( results_cache , '*.txt' ) )\r\n\t\t\r\n\t\tqc_files = glob.glob( os.path.join( results_cache , '*.csv' ) )\r\n\t\tfor file in qc_files :\r\n\t\t\tif 'RunQuality' in file :\r\n\t\t\t\trunqc_results = file\r\n\t\t\t\t#shutil.copy( os.path.join( results_cache , file ) , review_cache )\r\n\t\t\t\tshutil.move( os.path.join( results_cache , file ) , outfiles_dir )\r\n\t\t\r\n\t\tresults_files = glob.glob( os.path.join( outfiles_dir , '*.csv' ) )\r\n\t\tfor file in results_files :\r\n\t\t\tif file.endswith( '_raw.csv' ) :\r\n\t\t\t\traw_results = file\r\n\t\t\t\tshutil.copy( os.path.join( outfiles_dir , file ) , review_cache )\r\n\t\t\telif file.endswith( '_sheet.csv' ) :\r\n\t\t\t\tsample_sheet = file\r\n\t\t\t\tshutil.copy( os.path.join( outfiles_dir , file ) , review_cache )\r\n\t\t\telif file.endswith( '_stats.csv' ) :\r\n\t\t\t\tstats_results = file\r\n\t\t\t\tshutil.copy( os.path.join( outfiles_dir , file ) , review_cache )\r\n\t\t\telif file.endswith( '_summary.csv' ) :\r\n\t\t\t\tdb_results = file\r\n\t\t\t\tshutil.copy( os.path.join( outfiles_dir , file ) , review_cache )\r\n\t\t\telif file.endswith( '_wide.csv' ) :\r\n\t\t\t\twide_results = file\r\n\t\t\t\tshutil.copy( os.path.join( outfiles_dir , file ) , review_cache )\r\n\t\t\telif 'RunQuality' in file :\r\n\t\t\t\trunqc_results = file\r\n\t\t\t\tshutil.copy( os.path.join( outfiles_dir , file ) , review_cache )\r\n\t\t\r\n\t\tlog_files = glob.glob( os.path.join( outfiles_dir , '*.txt' ) )\r\n\t\tfor file in log_files :\r\n\t\t\t#if file.startswith( 'log' ) :\r\n\t\t\tif file.endswith( '.txt' ) :\r\n\t\t\t\tlog_file = file\r\n\t\t\t\tshutil.copy( os.path.join( outfiles_dir , file ) , review_cache )\r\n\t\t\t\t\r\n\t\t#wide_results = db_results.replace( '_summary', '' ) # wide results has no '_' extension\r\n\t\t#wide_results = db_results.replace( '_summary', '_wide' ) # wide results has no '_' extension\r\n\t\t\r\n\t\t#if not os.path.exists( wide_results ) or raw_results == \"\" or sample_sheet == \"\" or stats_results == \"\" or db_results == \"\" or runqc_results == \"\" or log_file == \"\" :\r\n\t\tif wide_results == \"\" or raw_results == \"\" or sample_sheet == \"\" or stats_results == \"\" or db_results == \"\" or runqc_results == \"\" or log_file == \"\" :\r\n\t\t\tLogAndEmail( 'Trovapipe Results missing from results cache: ' + outfiles_dir + '\\nwide_results: ' + wide_results + '\\nraw_results: ' + raw_results + '\\nsample_sheet: ' + sample_sheet + '\\nstats_results: ' + stats_results + '\\ndb_results: ' + db_results + '\\nrunqc_results: ' + runqc_results + '\\nlog_file: ' + log_file, 3 )\r\n\t\t\tsys.exit()\r\n\t\t#else : \r\n\t\t#\tshutil.copy( os.path.join( results_cache , wide_results ) , review_cache )\r\n\t\r\n\t\tLogAndEmail( 'Uploading trovapipe results to uniflow: ' + outfiles_dir , 0 )\r\n\t\r\n\t\tbatchID = 'RB' + 
flowcellID\r\n\t\t\r\n\t\tproc = subprocess.Popen( [ 'curl' , \\\r\n\t\t\t'-F' , 'userId=' + TROVAEMON_ID , \\\r\n\t\t\t'-F' , 'password=' + TROVAEMON_PWD , \\\r\n\t\t\t'-F' , 'stepName=API Result Upload' , \\\r\n\t\t\t'-F' , 'batchId=' + batchID , \\\r\n\t\t\t'-F' , 'flowCellID=' + flowcellID , \\\r\n\t\t\t'-F' , 'status=success' , \\\r\n\t\t\t'-F' , 'formNumber=0' , \\\r\n\t\t\t'-F' , 'Submit=true' , \\\r\n\t\t\t'-F' , 'accountId=Trovagene' , \\\r\n\t\t '-F' , 'csvResultsFile=@' + db_results , \\\r\n\t\t\tUNIFLOW_URL ] , stdout=subprocess.PIPE , stderr=subprocess.PIPE )\r\n\t\t\t\t \r\n\t\tout, err = proc.communicate()\r\n\t\tboth = out + err\r\n\r\n\t\tif \"SYSTEM EXCEPTION\" in both or \"*1\" in both or \"couldn't open file\" in both :\r\n\t\t\tmessage = \"An error occurred while trying to upload a csv results file to UniFlow using curl: \" + db_results + '\\n' + both\r\n\t\t\tLogAndEmail( message , 4 )\r\n\t\t\tsys.exit()\t\r\n\t\t\t\r\n\texcept Exception as detail : # Catch ALL exceptions \r\n\t\tmessage = \"An error occurred while trying to upload a csv results file to UniFlow using curl: \" + db_results + '\\n' + traceback.format_exc()\r\n\t\tLogAndEmail( message , 4 )\r\n\t\r\n''' Gets the state of the run from UNIFlow\r\n by way of a RESTful API\r\n\t- if parsing error return None\r\n\t- if exists return state, if not create and set/return\r\n'''\t\r\ndef GetUNIFlowState( flowcellID ) :\r\n \r\n\tvalues = { \r\n\t'userId' : TROVAEMON_ID, \r\n\t'password' : TROVAEMON_PWD, \r\n\t'stepName' : 'Query Run Status', \r\n\t'Submit' : 'true', \r\n\t'flowcellID' : flowcellID \r\n\t}\r\n\t\r\n\tdata = urllib.urlencode( values )\r\n\trequest = urllib2.Request( UNIFLOW_URL , data )\r\n\tresponse = urllib2.urlopen( request )\r\n\r\n\tJSON = response.read()\r\n\t\t\r\n\tresults = dict()\r\n\t\r\n\ttry :\r\n\t\tresults = ParseJSONOneRun( JSON )\r\n\texcept Exception as detail : # Catch ALL exceptions\r\n\t\tmessage = \"UNIFlow Error - Unable to parse JSON when querying state of flowcellID: \" + flowcellID + \" \" + str( detail ) + \" \" + traceback.format_exc()\r\n\t\tLogAndEmail( message , 3 )\r\n\t\treturn None\r\n\t\t\r\n\tif 'status' not in results or 'flowcellID' not in results :\r\n\t\tmessage = \"UNIFlow Error - Unable to find status or flowcellID results when querying state of run: \" + flowcellID\r\n\t\tLogAndEmail( message , 3 )\r\n\t\treturn None\r\n\t\r\n\telse :\r\n\t\r\n\t\tif results[ 'status' ] == 'NONE' and results[ 'flowcellID' ] == 'NGS_RUN_DOES_NOT_EXIST' :\r\n\t\t\tresponse_code = SetUNIFlowState( flowcellID , limskeys.NGS_RUN_PENDING )\r\n\t\t\tif response_code == 200 : return limskeys.NGS_RUN_PENDING\r\n\t\t\telse :\r\n\t\t\t\tmessage = \"UNIFlow Error - response code was not 200 (not okay) when setting state of run: \" + \\\r\n\t\t\t\tflowcellID + \" to \" + limskeys.NGS_RUN_PENDING\r\n\t\t\t\tLogAndEmail( message , 3 )\t\t\t\t\r\n\t\t\t\treturn None\r\n\t\telse :\r\n\t\t\treturn results[ 'status' ]\r\n\t\r\n''' Returns flowcellID and metadata for runs\r\n with the specified status\r\n'''\r\ndef QueryRunsByStatus( status ) :\r\n\t\r\n\tvalues = {\r\n\t'userId' : TROVAEMON_ID,\r\n\t'password': TROVAEMON_PWD,\r\n\t'stepName': 'Query Runs By Status',\r\n\t'Submit' : 'true', \r\n\t'status' : status\r\n\t}\r\n\t\r\n\turl_encoded_values = urllib.urlencode( values )\r\n\trequest = urllib2.Request( UNIFLOW_URL , url_encoded_values )\r\n\tresponse = urllib2.urlopen( request )\r\n\tJSON = response.read()\r\n\t\r\n\tif \"SYSTEM EXCEPTION\" in JSON :\r\n\t\tmessage = \"Uniflow Error 
- Error when attempting to query UNIFlow stepName: Query Runs By Status.\nUNIFlow output: \" + JSON\r\n\t\tLogAndEmail( message , 4 )\r\n\t\treturn None\r\n\t\t\r\n\ttry :\r\n\t\tresults = ParseJSONMultipleRuns( JSON )\r\n\t\treturn results\r\n\texcept Exception as detail : # Catch ALL exceptions\r\n\t\tmessage = \"Uniflow Error - Unable to retrieve run dictionary when parsing JSON \" + JSON + '\\n' + traceback.format_exc()\r\n\t\tLogAndEmail( message , 4 )\r\n\t\treturn None\t\r\n\t\r\n''' Sets the state of a run. If the run \r\n    does not exist it will be created.\r\n'''\r\ndef SetUNIFlowState( flowcellID , state ) :\r\n\t\r\n\tLogAndEmail( \"Setting \" + flowcellID + \" to \" + state , 0 )\r\n\t\r\n\tvalues = { \r\n\t'userId' : TROVAEMON_ID, \r\n\t'password' : TROVAEMON_PWD, \r\n\t'stepName' : 'Update Run Status', \r\n\t'Submit' : 'true', \r\n\t'flowcellID' : flowcellID , \r\n\t'status' : state \r\n\t}\r\n\t\r\n\turl_encoded_values = urllib.urlencode( values )\r\n\trequest = urllib2.Request( UNIFLOW_URL , url_encoded_values )\r\n\tresponse = urllib2.urlopen( request )\r\n\tcode = response.getcode()\r\n\t\r\n\tif code != 200 :\r\n\t\tmessage = \"TrovapipeUtils~UNIFlow Error - response code was not 200 (not okay) when setting state of run: \" + \\\r\n\t\tflowcellID + \" to \" + state + \" response: \" + str( response.getcode() )\r\n\t\tLogAndEmail( message , 3 )\r\n\t\tsys.exit()\r\n\t\t\r\n\treturn code\r\n\t\r\n''' Determines if this instance is a clone\r\n    if so kills the process\r\n'''\r\ndef NoClone( LOCK_FILE ) :\r\n\tpid = str( os.getpid() )\r\n\tpidfile = LOCK_FILE\r\n\t\t\r\n\tif os.path.isfile( pidfile ) :\r\n\t\tif IsDead( pidfile ) :\r\n\t\t\t file( pidfile , 'w' ).write( pid )\r\n\t\t\t logging.warning( \"A lock.pid file existed with a dead process...\" )\r\n\t\telse :\r\n\t\t\tlogging.info( \"Terminating analysis, a process already exists...\" )\r\n\t\t\tsys.exit()\r\n\telse:\r\n\t\tfile( pidfile , 'w' ).write( pid )\t\r\n\t\t\r\n''' Process ID file still exists suggesting this\r\n    code is already running. Verify that's true.\t\t\r\n'''\r\ndef IsDead( pid_file ) :\r\n\twith open( pid_file , 'rb' ) as input :\r\n\t\tpid = input.readline().strip()\r\n\t\tif psutil.pid_exists( int( pid ) ) : return False\r\n\t\telse : return True\r\n\t\r\n''' Archives logs when they get too large\r\n'''\r\ndef ArchiveLog( APP_LOG ) :\r\n\t\r\n\tif not os.path.exists( APP_LOG ) : open( APP_LOG , 'w' ).close()\r\n\t\t\r\n\tif os.path.getsize( APP_LOG ) > MAX_LOG_SIZE : \r\n\t\r\n\t\tbase = os.path.basename( APP_LOG ).split('.')[0]\r\n\t\tarchive_dir = os.path.join( os.path.dirname( APP_LOG ) , 'archive' )\r\n\t\t\r\n\t\tindices = [ 0 ]\r\n\t\tfiles = glob.glob( os.path.join( archive_dir , base + \"*\" ) )\r\n\t\tfor file in files :\r\n\t\t\tindices.append( int( file.split('.')[ 1 ] ) )\r\n\t\t\t\r\n\t\tnew_index = max( indices ) + 1\r\n\t\tnew_archive_file = os.path.join( archive_dir , base + '.' 
+ str( new_index ) + '.log' )\r\n\tif os.path.exists( APP_LOG ) : shutil.move( APP_LOG , new_archive_file )\r\n","repo_name":"palakpsheth/Trovagene","sub_path":"scripts/RunQC/Utilities/TrovapipeUtils.py","file_name":"TrovapipeUtils.py","file_ext":"py","file_size_in_byte":15558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12378698834","text":"# ergasia 4\r\ndef plus(b):\r\n\treturn '+'+str(b)\r\n\r\ndef minus(b):\r\n\treturn '-'+str(b)\r\n\r\ndef times(b):\r\n\treturn '*'+str(b)\r\n\r\n\r\n\r\ndef zero(a):\r\n\tif a[0]== '+':\r\n\t\treturn int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn -int(a[1])\r\n\telse : \r\n\t\treturn 0\r\n\r\ndef one(a):\r\n\tif a[0]== '+':\r\n\t\treturn 1+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 1-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 1*int(a[1])\r\n\telse:\r\n\t\treturn 1\r\n\r\ndef tow(a):\r\n\tif a[0]== '+':\r\n\t\treturn 2+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 2-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 2*int(a[1])\r\n\telse:\r\n\t\treturn 2\r\n\r\ndef three(a):\r\n\tif a[0]== '+':\r\n\t\treturn 3+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 3-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 3*int(a[1])\r\n\telse:\r\n\t\treturn 3\r\n\r\ndef four(a):\r\n\tif a[0]== '+':\r\n\t\treturn 4+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 4-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 4*int(a[1])\r\n\telse:\r\n\t\treturn 4\r\n\r\ndef five(a):\r\n\tif a[0]== '+':\r\n\t\treturn 5+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 5-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 5*int(a[1])\r\n\telse:\r\n\t\treturn 5\r\n\r\ndef six(a):\r\n\tif a[0]== '+':\r\n\t\treturn 6+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 6-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 6*int(a[1])\r\n\telse:\r\n\t\treturn 6\r\ndef seven(a):\r\n\tif a[0]== '+':\r\n\t\treturn 7+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 7-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 7*int(a[1])\r\n\telse:\r\n\t\treturn 7\r\n\r\ndef eight(a):\r\n\tif a[0]== '+':\r\n\t\treturn 8+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 8-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 8*int(a[1])\r\n\telse:\r\n\t\treturn 8\r\n\r\ndef nine(a):\r\n\tif a[0]== '+':\r\n\t\treturn 9+int(a[1])\r\n\telif a[0]== '-':\r\n\t\treturn 9-int(a[1])\r\n\telif a[0]==\"*\" : \r\n\t\treturn 9*int(a[1])\r\n\telse:\r\n\t\treturn 9\r\nanswer='1'\r\nwhile answer == '1':\r\n\t\r\n\tc=input('kaleste thn sinaryisi edo : ')\r\n\tleftbrackets=[]\r\n\trightbrackets=[]\r\n\t# the positions of the right and left parentheses of the variable c are stored in the two lists above\r\n\tfor i in range(0,len(c)):\r\n\t\tif c[i] == '(':\r\n\t\t\tleftbrackets.append(i)\r\n\t\telif c[i] == ')' :\r\n\t\t\trightbrackets.append(i)\r\n\r\n\tflag1=0\r\n\tflag2=0\r\n\t# flag1 and flag2 are used below to check whether the function was called correctly by the user\r\n\tch=''\r\n\r\n\tif len(leftbrackets) ==1 and len(rightbrackets) ==1:\r\n\t\tif c[0:leftbrackets[0]] =='zero':\r\n\t\t\tprint(zero(\" \"))\r\n\t\t\tflag1=1\r\n\t\telif c[0:leftbrackets[0]] =='one':\r\n\t\t\tprint(one(\" \"))\r\n\t\t\tflag1=1\r\n\t\telif c[0:leftbrackets[0]] =='tow':\r\n\t\t\tflag1=1\r\n\t\t\tprint(tow(\" \"))\r\n\t\telif c[0:leftbrackets[0]] =='three':\r\n\t\t\tflag1=1\r\n\t\t\tprint(three(\" \"))\r\n\t\telif c[0:leftbrackets[0]] =='four':\r\n\t\t\tflag1=1\r\n\t\t\tprint(four(\" \"))\r\n\t\telif c[0:leftbrackets[0]] =='five':\r\n\t\t\tflag1=1\r\n\t\t\tprint(five(\" 
\"))\r\n\t\telif c[0:leftbrackets[0]] =='six':\r\n\t\t\tflag1=1\r\n\t\t\tprint(six(\" \"))\r\n\t\telif c[0:leftbrackets[0]] =='seven':\r\n\t\t\tflag1=1\r\n\t\t\tprint(seven(\" \"))\r\n\t\telif c[0:leftbrackets[0]] =='eight':\r\n\t\t\tflag1=1\r\n\t\t\tprint(eight(\" \"))\r\n\t\telif c[0:leftbrackets[0]] =='nine':\r\n\t\t\tflag1=1\r\n\t\t\tprint(nine(\" \"))\r\n\t\tflag2=1\r\n\r\n\tif len(leftbrackets) ==3 and len(rightbrackets) ==3:\r\n\t\tif c[leftbrackets[0]+1:leftbrackets[1]]=='plus':\r\n\t\t\tch='+'\r\n\t\telif c[leftbrackets[0]+1:leftbrackets[1]]=='minus':\r\n\t\t\tch='-'\r\n\t\telif c[leftbrackets[0]+1:leftbrackets[1]]=='times':\r\n\t\t\tch='*'\r\n\r\n\t\tif c[leftbrackets[1]+1:leftbrackets[2]]=='zero':\r\n\t\t\tch = ch+'0'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='one':\r\n\t\t\tch = ch+'1'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='tow':\r\n\t\t\tch = ch+'2'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='three':\r\n\t\t\tch = ch+'3'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='four':\r\n\t\t\tch = ch+'4'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='five':\r\n\t\t\tch = ch+'5'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='six':\r\n\t\t\tch = ch+'6'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='seven':\r\n\t\t\tch = ch+'7'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='eight':\r\n\t\t\tch = ch+'8'\r\n\t\telif c[leftbrackets[1]+1:leftbrackets[2]]=='nine':\r\n\t\t\tch = ch+'9'\r\n\r\n\t\tif len(ch)==2:\r\n\t\t\tif (ch[0]=='+' or ch[0]=='-' or ch[0]=='*') and int(ch[1])<=9 and int(ch[1])>=0:\r\n\t\t\t\tflag1=1\r\n\r\n\t\tif flag1==1:\r\n\t\t\tif c[0:leftbrackets[0]] =='zero':\r\n\t\t\t\tprint(zero(ch))\r\n\t\t\t\tflag2=1\r\n\t\t\telif c[0:leftbrackets[0]] =='one':\r\n\t\t\t\tprint(one(ch))\r\n\t\t\t\tflag2=1\r\n\t\t\telif c[0:leftbrackets[0]] =='tow':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(tow(ch))\r\n\t\t\telif c[0:leftbrackets[0]] =='three':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(three(ch))\r\n\t\t\telif c[0:leftbrackets[0]] =='four':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(four(ch))\r\n\t\t\telif c[0:leftbrackets[0]] =='five':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(five(ch))\r\n\t\t\telif c[0:leftbrackets[0]] =='six':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(six(ch))\r\n\t\t\telif c[0:leftbrackets[0]] =='seven':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(seven(ch))\r\n\t\t\telif c[0:leftbrackets[0]] =='eight':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(eight(ch))\r\n\t\t\telif c[0:leftbrackets[0]] =='nine':\r\n\t\t\t\tflag2=1\r\n\t\t\t\tprint(nine(ch))\t\r\n\r\n\r\n\tif flag1 == 0 or flag2 == 0:\r\n\t\tprint('h sinartisi den klithike sosta')\r\n\tanswer=input(\"an thelete na epanalavete tin diadikasia patiste to 1 allios patiste kati allo\")\r\n\tpass\r\naa=input(\"press enter to finish\")\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"stathisp18056/-1-","sub_path":"ergasia4.py","file_name":"ergasia4.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5108263672","text":"import textwrap\n\nimport synthtool as s\nfrom synthtool import gcp\nfrom synthtool.languages import python\n\ncommon = gcp.CommonTemplates()\n\ndefault_version = \"v2\"\n\nfor library in s.get_staging_dirs(default_version):\n    # Do not expose ModelServiceClient and ModelServiceAsyncClient, as there\n    # is no public API endpoint for the models service.\n    s.replace(\n        library / f\"google/cloud/bigquery_{library.name}/__init__.py\",\n        
r\"from \\.services\\.model_service import ModelServiceClient\",\n \"\",\n )\n\n s.replace(\n library / f\"google/cloud/bigquery_{library.name}/__init__.py\",\n r\"from \\.services\\.model_service import ModelServiceAsyncClient\",\n \"\",\n )\n\n s.replace(\n library / f\"google/cloud/bigquery_{library.name}/__init__.py\",\n r\"\"\"[\"']ModelServiceClient[\"'],\"\"\",\n \"\",\n )\n\n s.replace(\n library / f\"google/cloud/bigquery_{library.name}/__init__.py\",\n r\"\"\"[\"']ModelServiceAsyncClient[\"'],\"\"\",\n \"\",\n )\n\n # Adjust Model docstring so that Sphinx does not think that \"predicted_\" is\n # a reference to something, issuing a false warning.\n s.replace(\n library / f\"google/cloud/bigquery_{library.name}/types/model.py\",\n r'will have a \"predicted_\"',\n \"will have a `predicted_`\",\n )\n\n # Avoid breaking change due to change in field renames.\n # https://github.com/googleapis/python-bigquery/issues/319\n s.replace(\n library / f\"google/cloud/bigquery_{library.name}/types/standard_sql.py\",\n r\"type_ \",\n \"type \",\n )\n\n s.move(\n library,\n excludes=[\n \"*.tar.gz\",\n \".coveragerc\",\n \"docs/index.rst\",\n f\"docs/bigquery_{library.name}/*_service.rst\",\n f\"docs/bigquery_{library.name}/services.rst\",\n \"README.rst\",\n \"noxfile.py\",\n \"setup.py\",\n f\"scripts/fixup_bigquery_{library.name}_keywords.py\",\n \"google/cloud/bigquery/__init__.py\",\n \"google/cloud/bigquery/py.typed\",\n # There are no public API endpoints for the generated ModelServiceClient,\n # thus there's no point in generating it and its tests.\n f\"google/cloud/bigquery_{library.name}/services/**\",\n f\"tests/unit/gapic/bigquery_{library.name}/**\",\n ],\n )\n\ns.remove_staging_dirs()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(\n cov_level=100,\n samples=True,\n microgenerator=True,\n split_system_tests=True,\n intersphinx_dependencies={\n \"pandas\": \"http://pandas.pydata.org/pandas-docs/dev\",\n \"geopandas\": \"https://geopandas.org/\",\n },\n)\n\n# BigQuery has a custom multiprocessing note\ns.move(\n templated_files,\n excludes=[\n \"noxfile.py\",\n \"docs/multiprocessing.rst\",\n \".coveragerc\",\n # Include custom SNIPPETS_TESTS job for performance.\n # https://github.com/googleapis/python-bigquery/issues/191\n \".kokoro/presubmit/presubmit.cfg\",\n # Group all renovate PRs together. 
If this works well, remove this and\n        #   update the shared templates (possibly with configuration option to\n        #   py_library.)\n        \"renovate.json\",\n    ],\n)\n\n# ----------------------------------------------------------------------------\n# Samples templates\n# ----------------------------------------------------------------------------\n\npython.py_samples()\n\ns.replace(\n    \"docs/conf.py\",\n    r'\\{\"members\": True\\}',\n    '{\"members\": True, \"inherited-members\": True}',\n)\n\n# Tell Sphinx to ignore autogenerated docs files.\ns.replace(\n    \"docs/conf.py\",\n    r'\"samples/snippets/README\\.rst\",',\n    '\\\\g<0>\\n    \"bigquery_v2/services.rst\",  # generated by the code generator',\n)\n\n# ----------------------------------------------------------------------------\n# pytype-related changes\n# ----------------------------------------------------------------------------\n\n# Add .pytype to .gitignore\ns.replace(\".gitignore\", r\"\\.pytest_cache\", \"\\\\g<0>\\n.pytype\")\n\n# Add pytype config to setup.cfg\ns.replace(\n    \"setup.cfg\",\n    r\"universal = 1\",\n    textwrap.dedent(\n        \"\"\"    \\\\g<0>\n\n    [pytype]\n    python_version = 3.8\n    inputs =\n        google/cloud/\n    exclude =\n        tests/\n        google/cloud/bigquery_v2/\n    output = .pytype/\n    disable =\n        # There's some issue with finding some pyi files, thus disabling.\n        # The issue https://github.com/google/pytype/issues/150 is closed, but the\n        # error still occurs for some reason.\n        pyi-error\"\"\"\n    ),\n)\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n","repo_name":"suryaprakashreddy034/python_bq","sub_path":"owlbot.py","file_name":"owlbot.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13012516805","text":"import discord\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport json\n\nclient = discord.Client()\njson_key = 'Gnor Bot json_key.json'\nscope = ['https://spreadsheets.google.com/feeds',\n         'https://www.googleapis.com/auth/drive']\nglobal all_sheet_values\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(json_key, scope)\ngc = gspread.authorize(credentials)\nsheet = gc.open_by_url(\"https://docs.google.com/spreadsheets/d/1KrRiRQ_J9laMaZyf9orBZvedbepJXI3LEbaUylMZ1TM\").sheet1\nall_sheet_values = sheet.get_all_values()\n\n@client.event\nasync def on_ready():\n    print('Logged in as')\n    print(client.user.name)\n    print(client.user.id)\n    print('------')\n\n@client.event\nasync def on_member_join(member):\n    defaultchannel = discord.utils.get(member.server.channels, name = settings['custom']['default_channel'])\n    defaultrole = discord.utils.get(member.server.roles, name = settings['custom']['default_role'])\n    if defaultchannel is not None and defaultrole is not None:\n        await client.add_roles(member, defaultrole)\n        await client.send_message(defaultchannel, member.name + ' has joined the server')\n    else:\n        print('Default channel or default role not found for join notification')\n\n@client.event\nasync def on_member_remove(member):  # discord.py fires on_member_remove when a member leaves\n    defaultchannel = discord.utils.get(member.server.channels, name = settings['custom']['default_channel'])\n    if defaultchannel is not None:\n        await client.send_message(defaultchannel, member.name + ' has left the server')\n    else:\n        print('Default channel not found for leave notification')\n\n@client.event\nasync def on_message(message):\n    global all_sheet_values  # needed so the '?refresh matchups' command updates the module-level cache\n    if message.content.startswith('uwu'):\n        await client.add_reaction(message, 'gnaruwu:389219155288653837')\n\n    elif 
message.content.startswith('?role '):\n newrole = discord.utils.get(message.author.server.roles, name = message.content[6:])\n if newrole is None:\n await client.send_message(message.channel, 'No such role')\n elif getrolelayer(newrole) > 0:\n await client.send_message(message.channel, 'You do not have the permit to join/leave this role')\n elif(not newrole in message.author.roles):\n await client.add_roles(message.author, newrole)\n await client.send_message(message.channel, 'You joined ' + newrole.name)\n else:\n await client.remove_roles(message.author, newrole)\n await client.send_message(message.channel, 'You left ' + newrole.name)\n \n elif message.content.startswith('?matchup '):\n i = find_sheet_row_by_matchup(message.content[9:])\n if i is not None:\n message_string = ('```Matchup: Gnar vs. ' + all_sheet_values[i][0] +\n '\\nDifficulty: ' + all_sheet_values[i][1] +\n '\\nStat Shards: ' + all_sheet_values[i][8] +\n '\\nStarting Items: ' + all_sheet_values[i][9] +\n '\\nCore Items: ' + all_sheet_values[i][10] +\n '\\n\\nInformation: ' + all_sheet_values[i][11] + '```')\n await client.send_message(message.channel, message_string)\n else:\n await client.send_message(message.channel, 'Could not find matchup info for ' + message.content[9:])\n\n elif message.content.startswith('?refresh matchups') and \\\n 'Mega Gnar' in [role.name for role in message.author.roles]:\n all_sheet_values = sheet.get_all_values()\n await client.send_message(message.channel, 'Updated matchups!')\n\n elif message.content.startswith('?ranks'):\n roles = message.author.server.roles\n member_count = []\n for index in roles:\n member_count.append(0)\n for index1 in message.author.server.members:\n i = 0\n for index2 in roles:\n if index2 in index1.roles:\n member_count[i]+=1\n i+=1\n string = 'Server ranks:\\n'\n i = 0\n for index in roles:\n if not index.is_everyone and getrolelayer(index) == 0:\n string += index.name + \": \" + str(member_count[i]) + \" members\\n\"\n i+=1\n await client.send_message(message.channel, string)\n\n elif message.content.startswith('?assignrole '):\n if getperm(message.author) < 3:\n await client.send_message(message.channel, 'You do not have the permit to access this command')\n return\n arguments = message.content.split(' ', 2)\n role = discord.utils.get(message.author.server.roles, name = arguments[2])\n if role is None:\n await client.send_message(message.channel, 'No such role')\n elif not message.mentions:\n await client.send_message(message.channel, 'No user mentioned')\n elif getperm(message.author) < getrolelayer(role):\n await client.send_message(message.channel, 'You do not have the permit to assign this role')\n elif getperm(message.channel.server.me) <= getrolelayer(role):\n await client.send_message(message.channel, 'The bot does not have the permit to assign this role')\n elif role not in message.mentions[0].roles:\n await client.add_roles(message.mentions[0], role)\n await client.send_message(message.channel, message.mentions[0].name + ' has joined ' + role.name)\n else:\n await client.remove_roles(message.mentions[0], role)\n await client.send_message(message.channel, message.mentions[0].name + ' has left ' + role.name)\n\n else:\n arguments = message.content.split(' ')\n output = ''\n for customcommand in settings['custom_commands']:\n if arguments[0] == customcommand:\n output = settings['custom_commands'][customcommand]\n output = output.replace('{user}', message.author.name)\n output = output.replace('{server}', message.author.server.name)\n output = 
output.replace('{channel}', message.channel.name)\n        if output != '':\n            await client.send_message(message.channel, output)\n\n    \n    \ndef find_sheet_row_by_matchup(champion):\n    for index, sublist in enumerate(all_sheet_values, start=3):\n        if sublist[0].lower().replace('\\'', '').replace(' ', '') \\\n        == champion.lower().replace('\\'', '').replace(' ', ''):\n            return index - 3\n\ndef getperm(member):\n    maxprio = 0\n    j = 1\n    while j <= len(settings['perms']):\n        for index in member.roles:\n            if index.name in settings['perms']['layer' + str(j)]:\n                maxprio = j\n        j+=1\n    return maxprio\n\ndef getrolelayer(role):\n    layer = 0\n    j = 1\n    while j <= len(settings['perms']):\n        if role.name in settings['perms']['layer' + str(j)]:\n            layer = j\n        j+=1\n    return layer\n\nwith open('bot_info.txt') as file:\n    lines = file.readlines()\n    bot = lines[1].strip()\n\nwith open('settings.json') as set:\n    settings = json.load(set)\n\nclient.run(bot)\n","repo_name":"nabibyte427/GnorBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33042673756","text":"\n\"\"\"\n# Created on Tue Mar 21 15:46:39 2017\n\n# @author: keriabermudez\n\n# This script is an example of how to use the cell tracking \n\n\"\"\"\n\nimport cell_segmentation as cellseg\nfrom skimage import io\nimport numpy as np\n\n\n# %%\n#Reading Image Stack\n\n\npath= '/Users/keriabermudez/Dropbox/Projects/Gregory_Brittingham/ERK_KTR/'\n\nimage_dapi = 'ERK_DAPI2 (Converted).mov'\nimage_erk = 'ERK_KTR1 (Converted).mov'\n\npath_results = path +'Results/'\n\n\n#%%\n#crop\ndapi = cellseg.video_to_tif(path+image_dapi,0)\nerk = cellseg.video_to_tif(path+image_erk,1)\ndapi = dapi[:,500:,500:]\nerk = erk[:,500:,500:]\n\n\n# %% \n# Creating zstack of intensity image and color zstack\n\ndapi_color = np.zeros((dapi.shape[0], dapi.shape[1], dapi.shape[2],3), dtype = np.uint8)\ndapi_color[:,:,:,2 ] = dapi.copy()\n\nerk_color = np.zeros((erk.shape[0], erk.shape[1], erk.shape[2],3), dtype = np.uint8)\nerk_color[:,:,:,1 ] = erk.copy()\n#%%\n# 2) Try parameters in one slice or zlevel\n\nz = 0\nzlevel_image_dapi = dapi[z].copy()\nzlevel_image_erk = erk[z].copy()\n\nzlevel_color_erk = erk_color[z].copy()\nzlevel_color_dapi = dapi_color[z].copy()\n\n#%%\n# Enhance, Blur, and Segment \n\ncl1, gaussian_blur_cl1, segmented_zlevel, centers = cellseg.enhance_blur_segment(zlevel_image_dapi,enhance = False, blur = False, kernel = 7, n_intensities = 2)\nio.imshow(cl1)\nio.imshow(gaussian_blur_cl1)\nio.imshow(segmented_zlevel)\ncenters\n \n#%%\n# Test Region Detection parameters\n\nlabeled = cellseg.watershedsegment(segmented_zlevel,smooth_distance = True,kernel = 3) ##increase kernel size if it is oversegmenting\n\n\n#%% Draw Contours \n\n\nzlevel_image_color_regions_d = cellseg.draw_contours(labeled,zlevel_color_dapi.copy(), with_labels = True, color = (255,0,0),width = 2 )\nzlevel_image_color_regions_e = cellseg.draw_contours(labeled,zlevel_color_erk.copy(), with_labels = True, color = (255,0,0),width = 2 )\n\nio.imshow(zlevel_image_color_regions_d)\nio.imshow(zlevel_image_color_regions_e)\n\nio.imsave(path_results+'Z-level_Regions_'+str(z)+'dapi.tif',zlevel_image_color_regions_d)\nio.imsave(path_results+'Z-level_Regions_'+str(z)+'erk.tif',zlevel_image_color_regions_e)\n\n# Get Measurements\npositions_regions = cellseg.regions_measure(labeled,zlevel_image_erk)\n\n#%%\n# Test Blob Detection parameters\n\nzlevel_image_color_marked = 
cellseg.draw_blob_log(cl1,zlevel_color_dapi.copy(), with_labels = True,max_sigma=30, min_sigma=20,num_sigma=10,threshold=.01,overlap=0.6,color_blobs = (255,0,0),width =3)\nio.imshow(zlevel_image_color_marked)\nio.imsave(path_results+'Z-level_Blobs_'+str(z)+'.tif',zlevel_image_color_marked)\n\n #Get Measurements\npositions_blobs = cellseg.blob_log_measure(cl1,zlevel_image_erk, max_sigma=30, min_sigma=20,num_sigma=10,threshold=.01,overlap=0.6 )\n\n\n#%%\n# 2) Cell Tracking With Blob Detection, Note: This can take a long time to run\n\nct = cellseg.cell_tracking(erk,dapi,erk_color)\n\n# Setting Segmentation Parameters\nct.set_segment_param(enhance = False, blur = False, n_intensities = 2)\n\n# Setting Blob Parameters\nct.set_blob_param(max_sigma=30,min_sigma=20,num_sigma=10,threshold=.01,overlap=0.6)\n\n# Track with Blob\nct.track_with_blob(min_slices =1,color_blobs= (255,0,0))\nct.draw_trajectories(color_trajectory= (255,255,0) )\n\ntable_positions = ct.positions_table\nio.imsave(path_results+'Blobs.tif',ct.zstack_color)\n\n#table_positions.to_csv(path_results+image[:-4]+'Positions_Table.csv')\n#%%\n# 3) Cell Tracking With Regions\n\nct2 = cellseg.cell_tracking(erk,dapi,dapi_color.copy())\n\n# Setting Segmentation Parameters\nct2.set_segment_param(enhance = False, blur = False, n_intensities = 2)\nct2.set_watershed_param(smooth_distance = True, kernel = 3)\n\n# Track with Blob\nct2.track_with_regions(min_slices =1, color_contours= (255,0,0))\nct2.draw_trajectories(color_trajectory= (255,255,0) )\n\n\n#table_positions.to_csv(path_results+image[:-4]+'Positions_Table.csv')\nio.imsave(path_results+'Regions.tif',ct2.zstack_color)\n\n\n","repo_name":"FenyoLab/cell-segmentation-analysis","sub_path":"video_tracking_greg.py","file_name":"video_tracking_greg.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"43027905336","text":"import misc.helpers as helpers\n\n# A 2d list that simply stores True if a tile has been seen, or False if it has not\nclass FieldOfView(list):\n def __init__(self, width, height):\n self.width = width\n self.height = height\n for _ in range(width):\n col = []\n for _ in range(height):\n col.append(False)\n self.append(col)\n \n # What each player can currently see, to darken tiles outside of FOV\n self.current = []\n self.reset_current()\n\n def reset_current(self):\n self.current = []\n for _ in range(self.width):\n col = []\n for _ in range(self.height):\n col.append(False)\n self.current.append(col)\n\n def contains(self, x, y):\n return self[x][y]\n \n def can_see(self, x, y):\n return self.current[x][y]\n\n # Add new tiles to the remembered FOV, and then recalculate the current tiles each player can see\n def update(self, world, creature):\n self.update_total(world, creature)\n self.refresh_current(world.players)\n \n def update_all(self, world, players):\n for p in players:\n self.update_total(world, p)\n self.refresh_current(world.players)\n\n def update_total(self, world, creature):\n for x in range(-creature.vision_radius, creature.vision_radius + 1):\n for y in range(-creature.vision_radius, creature.vision_radius + 1):\n to_x, to_y = creature.x + x, creature.y + y\n if to_x < 0 or to_y < 0 or to_x >= self.width or to_y >= self.height:\n continue\n\n if self[to_x][to_y]:\n continue\n\n # If a player can see a floor they can see the walls adjacent to it, so we dont get weird visual errors of \"floating\" walls\n if creature.can_see(to_x, to_y):\n if 
world.is_floor(to_x, to_y):\n self[to_x][to_y] = True\n if world.is_wall(to_x - 1, to_y):\n self[to_x-1][to_y] = True\n if world.is_wall(to_x, to_y - 1):\n self[to_x][to_y-1] = True\n if world.is_wall(to_x - 1, to_y - 1):\n self[to_x-1][to_y-1] = True\n\n def refresh_current(self, players):\n self.reset_current()\n for p in players:\n self.update_current(p)\n \n def update_current(self, creature):\n for x in range(-creature.vision_radius, creature.vision_radius + 1):\n for y in range(-creature.vision_radius, creature.vision_radius + 1):\n to_x, to_y = creature.x + x, creature.y + y\n if to_x < 0 or to_y < 0 or to_x >= self.width or to_y >= self.height:\n continue\n\n if self.current[to_x][to_y]:\n continue\n \n if creature.can_see(to_x, to_y):\n self.current[to_x][to_y] = True\n\n # If you see a non-active creature, activate all enemies in the room\n c = creature.world.get_creature_at_location(to_x, to_y)\n if c and c.can_be_activated() and not c.is_active():\n if c.home_room:\n c.world.activate_room_enemies(c.home_room, creature)\n else:\n c.activate(creature)\n s = \"You see a \" + c.name\n w = c.equipment.slot(\"Main\")\n if w:\n s += \" wielding a \" + w.name\n creature.notify(s)\n \n def print(self):\n for y in range(self.height):\n for x in range(self.width):\n if self[x][y] == True:\n c = '0'\n else:\n c = '1'\n print(c, end='')\n print()\n\ndef can_see(world, sx, sy, dx, dy, radius):\n if (sx - dx) * (sx - dx) + (sy - dy) * (sy - dy) > radius * radius:\n return False\n\n l = helpers.get_line(sx, sy, dx, dy)\n for p_x, p_y in l:\n if not world.block_sight(p_x, p_y) or (p_x == dx and p_y == dy):\n continue\n return False\n return True","repo_name":"alexs2112/isometric-demo","sub_path":"world/fov.py","file_name":"fov.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31899189730","text":"from __future__ import annotations\n\nimport json\nfrom typing import TYPE_CHECKING\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\nif TYPE_CHECKING:\n from ..models import SiteProfile\n\n\ndef get_or_create_site_profile_obj(single_site, site_obj, apps) -> SiteProfile | None:\n site_profile_model_cls = apps.get_model(\"edc_sites\", \"SiteProfile\")\n opts = dict(\n title=single_site.description,\n country=single_site.country,\n country_code=single_site.country_code,\n languages=json.dumps(single_site.languages) if single_site.languages else None,\n )\n try:\n site_profile = site_profile_model_cls.objects.get(site=site_obj)\n except ObjectDoesNotExist:\n site_profile = site_profile_model_cls.objects.create(site=site_obj, **opts)\n else:\n for k, v in opts.items():\n setattr(site_profile, k, v)\n site_profile.save()\n return site_profile\n","repo_name":"clinicedc/edc-sites","sub_path":"edc_sites/utils/get_or_create_site_profile_obj.py","file_name":"get_or_create_site_profile_obj.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36986824302","text":"from FlickrCaptor import FlickrCaptor\nfrom FileManager import FileManager\nfrom datetime import datetime, timedelta\nfrom time import sleep\nimport json\nfrom LoggerBuilder import LoggerBuilder\nfrom ExitLogger import ExitLogger\nimport logging\nimport os\nimport sys\nimport requests\n\nsource = 'flickr'\nlogger = LoggerBuilder(source, logging.WARNING, logging.INFO).get_logger()\n# Limit for Flickr API calls: 3600 queries / h 
(http://www.flickr.com/services/developer/api/)\n# Limit for flickr.photos.search: Flickr will return at most the first 4,000 results for any given search query.\n\n# min_date should be written using the YYYY-MM-DD format\nmin_date_format = '%Y-%m-%d'\nmin_date_file = \"min_date\"\n\nclass InitError(Exception):\n\tpass\n\ndef main():\t\n\tlast_date = datetime.now() - timedelta(days=2)\n\n\tcaptor = FlickrCaptor(logger)\n\tfile_manager = FileManager()\n\t\n\ttry:\n\t\ttry:\n\t\t\twoe_ids = file_manager.get_locations(source)\n\t\texcept IOError:\n\t\t\traise InitError(\"File %s is missing\"% file_manager.get_locations_path(source))\n\t\texcept ValueError:\n\t\t\traise InitError(\"The %s file does not contain any correct JSON object\"% file_manager.get_locations_path(source))\n\t\t\n\t\tmin_date_file_path = file_manager.get_path(source, None, min_date_file)\n\t\ttry:\n\t\t\tmin_date_json = file_manager.read_json(source, min_date_file)\n\t\t\tmin_date = datetime.strptime(min_date_json['min_date'], min_date_format)\n\t\texcept IOError:\n\t\t\traise InitError(\"File %s is missing. You should create this file and set {'min_date':YYYY-MM-DD} in it.\"% min_date_file_path)\n\t\texcept (ValueError, KeyError):\n\t\t\traise InitError(\"You need to set {\\\"min_date\\\":\\\"YYYY-MM-DD\\\"} in file %s\"% min_date_file_path)\n\t\t\n\t\tzero_day = timedelta(days=0) \n\t\tone_day = timedelta(days=1)\n\n\t\tif (last_date - min_date) < zero_day:\n\t\t\traise InitError(\"The date set as min_date in %s is more recent than 2 days ago.\"% min_date_file_path)\n\t\t\n\t\twhile (last_date - min_date) >= zero_day:\n\t\t\tlogger.warning(\"---- Capturing for %s\" % min_date.strftime('%y-%m-%d'))\n\t\t\tfor city, woe_id in woe_ids.iteritems():\n\t\t\t\tmax_date = min_date + one_day\n\t\t\t\tlogger.warning(\"Capturing for %s\" % city)\n\t\t\t\tloaded_json = captor.get_data(min_date, max_date, woe_id)\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tfile_manager.write_json(loaded_json, source, city, min_date.strftime('%y-%m-%d'))\n\t\t\t\texcept IOError:\n\t\t\t\t\traise InitError(\"Folder %s is missing\"% file_manager.get_folder_path(source, city))\n\n\t\t\t\tlogger.warning(\"New JSON written for %s \" % city)\n\t\t\t\n\t\t\tmin_date = max_date\n\t\t\tf = open(min_date_file_path, 'w+')\r\n\t\t\tjson.dump({'min_date':min_date.strftime(min_date_format)}, f)\r\n\t\t\tf.close()\r\n\r\n\t\t\tlogger.warning(\"... 
Sleeping for 5 s\")\n\t\t\tsleep(5)\n\t\treturn 0\n\t\t\n\texcept InitError as e:\n\t\tlogger.critical(\"%s: %s\"% (type(e).__name__, e))\n\t\treturn 1\t\n\texcept requests.exceptions.RequestException as e:\n\t\tlogger.critical(\"%s: %s\"% (type(e).__name__, e))\n\t\treturn 2\n\texcept FlickrCaptor.FlickrApiError as e:\n\t\tlogger.critical(\"%s: %s\"% (type(e).__name__, e))\n\t\treturn 3\n\texcept Exception as e:\n\t\tlogger.critical(e, exc_info=True)\n\t\treturn 4\n\nif __name__ == \"__main__\":\n\tstart = datetime.now().strftime('%y-%m-%d %H:%M:%S')\n\tlogger.critical(\"START\")\n\tres = main()\n\tlogger.critical(\"STOP (%d)\"% res)\n\tstop = datetime.now().strftime('%y-%m-%d %H:%M:%S')\n\tExitLogger(source).log(start, stop, res)\n\tsys.exit(res)","repo_name":"ComplexCity/capture","sub_path":"FlickrCapture.py","file_name":"FlickrCapture.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"41197328053","text":"from django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\n\nregister = template.Library()\n\n\n@register.filter\ndef rating_score(obj, user):\n \"\"\"\n Returns the score a user has given an object\n \"\"\"\n if not user.is_authenticated or not hasattr(obj, \"_ratings_field\"):\n return False\n\n ratings_descriptor = getattr(obj, obj._ratings_field)\n try:\n rating = ratings_descriptor.get(user=user).score\n except ratings_descriptor.model.DoesNotExist:\n rating = None\n\n return rating\n\n\n@register.filter\ndef has_rated(user, obj):\n \"\"\"\n Returns whether or not the user has rated the given object\n \"\"\"\n return rating_score(obj, user) is not None\n\n\n@register.filter\ndef rate_url(obj, score=1):\n \"\"\"\n Generates a link to \"rate\" the given object with the provided score - this\n can be used as a form target or for POSTing via Ajax.\n \"\"\"\n return reverse(\n \"ratings_rate_object\",\n args=(\n ContentType.objects.get_for_model(obj).pk,\n obj.pk,\n score,\n ),\n )\n\n\n@register.filter\ndef unrate_url(obj):\n \"\"\"\n Generates a link to \"un-rate\" the given object - this\n can be used as a form target or for POSTing via Ajax.\n \"\"\"\n return reverse(\n \"ratings_unrate_object\",\n args=(\n ContentType.objects.get_for_model(obj).pk,\n obj.pk,\n ),\n )\n","repo_name":"django/djangosnippets.org","sub_path":"ratings/templatetags/ratings_tags.py","file_name":"ratings_tags.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":408,"dataset":"github-code","pt":"81"} +{"seq_id":"18124485031","text":"import logging\nimport math\nimport os\n\nimport paddle\nfrom paddle.amp import GradScaler, auto_cast\nfrom paddle.optimizer import AdamW\nfrom paddlenlp.transformers import (\n BertForQuestionAnswering,\n BertTokenizer,\n ErnieForQuestionAnswering,\n ErnieTokenizer,\n MPNetForQuestionAnswering,\n MPNetTokenizer,\n)\nfrom tqdm import tqdm\n\nfrom args import parse_args\nfrom data import get_dev_dataloader, get_train_dataloader\nfrom metric import compute_prediction, squad_evaluate\nfrom utils import (\n CrossEntropyLossForSQuAD,\n get_scheduler,\n get_writer,\n save_json,\n set_seed,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CLASSES = {\n \"bert\": (BertForQuestionAnswering, BertTokenizer, True),\n \"ernie\": (ErnieForQuestionAnswering, ErnieTokenizer, True),\n \"mpnet\": (MPNetForQuestionAnswering, MPNetTokenizer, 
False),\n}\n\n\n@paddle.no_grad()\ndef evaluate(model, data_loader, args, output_dir=\"./\"):\n model.eval()\n all_start_logits = []\n all_end_logits = []\n\n for batch in data_loader:\n input_ids, token_type_ids = batch\n start_logits_tensor, end_logits_tensor = (\n model(input_ids, token_type_ids=token_type_ids)\n if args.need_token_type_ids\n else model(input_ids)\n )\n all_start_logits.extend(start_logits_tensor.numpy().tolist())\n all_end_logits.extend(end_logits_tensor.numpy().tolist())\n\n all_predictions, all_nbest_json, scores_diff_json = compute_prediction(\n data_loader.dataset.data,\n data_loader.dataset.new_data,\n (all_start_logits, all_end_logits),\n args.version_2_with_negative,\n args.n_best_size,\n args.max_answer_length,\n args.null_score_diff_threshold,\n )\n\n save_json(all_predictions, os.path.join(output_dir, \"all_predictions.json\"))\n if args.save_nbest_json:\n save_json(all_nbest_json, os.path.join(output_dir, \"all_nbest_json.json\"))\n\n eval_results = squad_evaluate(\n examples=data_loader.dataset.data,\n preds=all_predictions,\n na_probs=scores_diff_json,\n )\n return eval_results\n\n\ndef main(args):\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n handlers=[\n logging.FileHandler(\n os.path.join(os.path.dirname(args.output_dir), \"run.log\"),\n mode=\"w\",\n encoding=\"utf-8\",\n )\n ],\n )\n logger.info(\"********** Configuration Arguments **********\")\n for arg, value in sorted(vars(args).items()):\n logger.info(f\"{arg}: {value}\")\n logger.info(\"**************************************************\")\n paddle.set_device(args.device)\n set_seed(args)\n writer = get_writer(args)\n\n # get model and tokenizer\n model_class, tokenizer_class, args.need_token_type_ids = MODEL_CLASSES[\n args.model_type\n ]\n model = model_class.from_pretrained(args.model_name_or_path)\n if args.use_huggingface_tokenizer and args.model_type == \"mpnet\":\n from transformers import MPNetTokenizerFast\n\n tokenizer = MPNetTokenizerFast.from_pretrained(\"./\")\n else:\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n\n # get dataloader\n train_dataloader = get_train_dataloader(tokenizer, args)\n dev_dataloader = get_dev_dataloader(tokenizer, args)\n\n num_update_steps_per_epoch = math.ceil(\n len(train_dataloader) / args.gradient_accumulation_steps\n )\n if args.max_train_steps > 0:\n args.num_train_epochs = math.ceil(\n args.max_train_steps / num_update_steps_per_epoch\n )\n else:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n\n # get lr_scheduler\n lr_scheduler = get_scheduler(\n learning_rate=args.learning_rate,\n scheduler_type=args.scheduler_type,\n num_warmup_steps=args.warmup_steps\n if args.warmup_steps > 0\n else args.warmup_radio,\n num_training_steps=args.max_train_steps,\n )\n\n total_batch_size = args.train_batch_size * args.gradient_accumulation_steps\n\n decay_params = [\n p.name\n for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ]\n\n optimizer = AdamW(\n learning_rate=lr_scheduler,\n beta1=0.9,\n beta2=0.98,\n epsilon=args.adam_epsilon,\n parameters=model.parameters(),\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in decay_params,\n )\n\n loss_fn = CrossEntropyLossForSQuAD()\n\n if args.use_amp:\n scaler = GradScaler(init_loss_scaling=args.scale_loss)\n\n logger.info(\"********** Running training **********\")\n logger.info(f\" Num examples = 
{len(train_dataloader.dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous train batch size = {args.train_batch_size}\")\n logger.info(f\" Instantaneous eval batch size = {args.eval_batch_size}\")\n logger.info(f\" Total train batch size (w. accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n\n save_json(vars(args), os.path.join(args.output_dir, \"args.json\"))\n progress_bar = tqdm(range(args.max_train_steps))\n\n global_steps = 0\n tr_loss, logging_loss = 0.0, 0.0\n\n for _ in range(args.num_train_epochs):\n for step, batch in enumerate(train_dataloader):\n model.train()\n with auto_cast(\n args.use_amp, custom_white_list=[\"layer_norm\", \"softmax\", \"gelu\"]\n ):\n input_ids, token_type_ids, start_positions, end_positions = batch\n logits = (\n model(input_ids, token_type_ids=token_type_ids)\n if args.need_token_type_ids\n else model(input_ids)\n )\n loss = (\n loss_fn(logits, (start_positions, end_positions))\n / args.gradient_accumulation_steps\n )\n tr_loss += loss.item()\n\n if args.use_amp:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n if (\n step % args.gradient_accumulation_steps == 0\n or step == len(train_dataloader) - 1\n ):\n if args.use_amp:\n scaler.minimize(optimizer, loss)\n else:\n optimizer.step()\n\n lr_scheduler.step()\n optimizer.clear_grad()\n progress_bar.update(1)\n global_steps += 1\n\n if args.logging_steps > 0 and global_steps % args.logging_steps == 0:\n writer.add_scalar(\"lr\", lr_scheduler.get_lr(), global_steps)\n writer.add_scalar(\n \"loss\",\n (tr_loss - logging_loss) / args.logging_steps,\n global_steps,\n )\n logger.info(\n \"global_steps {} - lr: {:.10f} loss: {:.8f}\".format(\n global_steps,\n lr_scheduler.get_lr(),\n (tr_loss - logging_loss) / args.logging_steps,\n )\n )\n logging_loss = tr_loss\n\n if args.save_steps > 0 and global_steps % args.save_steps == 0 and global_steps>=11000:\n logger.info(\"********** Running evaluating **********\")\n logger.info(f\"********** Step {global_steps} **********\")\n output_dir = os.path.join(args.output_dir, f\"step-{global_steps}\")\n os.makedirs(output_dir, exist_ok=True)\n eval_results = evaluate(model, dev_dataloader, args, output_dir)\n for k, v in eval_results.items():\n if \"exact\" in k or \"f1\" in k:\n writer.add_scalar(f\"eval/{k}\", v, global_steps)\n logger.info(f\" {k} = {v}\")\n model.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n logger.info(\"********** Evaluating Done **********\")\n\n if global_steps >= args.max_train_steps:\n logger.info(\"********** Training Done **********\")\n return\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n","repo_name":"JunnYu/paddle-mpnet","sub_path":"clean_squad代码/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"72196344906","text":"from tkinter import Tk # from tkinter import Tk for Python 3.x\nfrom tkinter.filedialog import askopenfilename\n\nimport numpy as np\nimport pandas as pd\nimport scipy.io as sio\nimport plot_extra as pe\n\nimport matplotlib.pyplot as plt\nplt.rcParams.update(plt.rcParamsDefault)\nplt.rcParams['mathtext.fontset'] = 'cm' # 'cm' Computer modern # 'dejavuserif', 'dejavusans'\nplt.rcParams['font.family'] = 'serif'\n# plt.rcParams['font.serif'] = 
'cmr10' # 'https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/font_file.html\nplt.rc('axes', unicode_minus=False)\n#https://stackoverflow.com/questions/29188757/matplotlib-specify-format-of-floats-for-tick-labels\n\n#https://matplotlib.org/stable/tutorials/text/usetex.html\n# Matplotlib's LaTeX support requires a working LaTeX installation\n# Text handling through LaTeX is slower than Matplotlib's very capable mathtext,\n# but is more flexible, since different LaTeX packages (font packages, math packages, etc.) can be used.\nplt.rcParams['text.usetex'] = 'True'\n\n\n#############\n# solves a warning with a previous syntax\n#https://stackoverflow.com/questions/65645194/warning-set-it-to-a-single-string-instead\nplt.rcParams['text.latex.preamble'] = r'\\usepackage{amsmath} \\usepackage{crimson} \\usepackage{siunitx}'\n# from matplotlib.ticker import FormatStrFormatter\n# from matplotlib.offsetbox import AnchoredText\n\n\ndef plot_charts(figure_type='.pdf'):\n print(\"#####################\")\n print(\"Function name: \", plot_charts.__name__)\n\n ###############################################################\n # CASES - file names - chart limits\n ###############################################################\n csv_full_path = '../Modelos/Intro/SimpleMass_RawData.csv'\n figure_full_path = '../Modelos/Intro/PhaseFreqControl' + figure_type\n\n ###############################################################\n # Opening csv file\n ###############################################################\n print('###########################')\n print(' CSV ')\n print('Chosen file: ', csv_full_path)\n\n # https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html\n csv_df = pd.read_csv(csv_full_path, header=0)\n\n ###############################################################\n # size of the figure (in inches), and linestyles\n ###############################################################\n figsizex = 5\n figsizey = 5\n fig, axes = plt.subplots(3, 1, sharex=True,\n figsize=(figsizex, figsizey),\n num='Charts')\n\n axes[0].plot(csv_df['time'],\n csv_df['pload'],\n color='black',\n linewidth=1,\n linestyle='dashed',\n label=r'Electric load (consumption)')\n\n axes[0].plot(csv_df['time'],\n csv_df['pmec'],\n color=pe.cor_dalt['gray'],\n linewidth=1,\n linestyle='solid',\n label=r'Mechanical power (generation)')\n\n axes[1].plot(csv_df['time'],\n csv_df['f'],\n color='black',\n linewidth=1,\n linestyle='solid',\n label=r'Frequency')\n\n axes[2].plot(csv_df['time'],\n csv_df['pinert'],\n color=pe.cor_dalt['red'],\n linewidth=1,\n linestyle='solid',\n label=r'Inertial')\n\n axes[2].plot(csv_df['time'],\n csv_df['pprim'],\n color=pe.cor_dalt['blue'],\n linewidth=1,\n linestyle='solid',\n label=r'Primary')\n\n axes[2].plot(csv_df['time'],\n csv_df['psec'],\n color=pe.cor_dalt['green'],\n linewidth=1,\n linestyle='solid',\n label=r'Secondary')\n\n # axes[2].plot(csv_df['time'],\n # csv_df['pinert'] + csv_df['pprim'] + csv_df['psec'],\n # color=pe.cor_dalt['gray'],\n # linewidth=1,\n # linestyle='dotted',\n # label=r'Total reserve')\n\n ##########################################################################\n # axis limits\n ##########################################################################\n axes[2].set_xticks(np.arange(0, 100, 10))\n axes[2].set_xlim([0, 80])\n\n axes[0].set_yticks(np.arange(0.50, 0.60, 0.01))\n axes[0].set_ylim([0.499, 0.56])\n\n axes[1].set_yticks(np.arange(0.98, 1.01, 0.002))\n axes[1].set_ylim([0.9875, 1.0005])\n\n axes[2].set_yticks(np.arange(0, 0.08, 
0.01))\n axes[2].set_ylim([-0.01, 0.06])\n\n ##########################################################################\n # axis names\n ##########################################################################\n axes[2].set_xlabel(r'Time (\\si{\\second})')\n\n axes[0].set_ylabel(r'Total power (\\si{pu})')\n axes[1].set_ylabel(r'Frequency (\\si{pu})')\n axes[2].set_ylabel(r'Reserve power (\\si{pu})')\n\n ##########################################################################\n # chart identification - legend - abcdefghi\n ##########################################################################\n # https://matplotlib.org/stable/gallery/color/named_colors.html\n # colors lightgray gray aliceblue whitesmoke\n corlegenda = 'whitesmoke'\n #\n axes[0].annotate(r'a', xy=(0.05, 0.25), xycoords='axes fraction',\n bbox=dict(boxstyle='circle', fc=corlegenda))\n\n axes[1].annotate(r'b', xy=(0.05, 0.25), xycoords='axes fraction',\n bbox=dict(boxstyle='circle', fc=corlegenda))\n\n axes[2].annotate(r'c', xy=(0.05, 0.25), xycoords='axes fraction',\n bbox=dict(boxstyle='circle', fc=corlegenda))\n\n ##########################################################################\n # annotations\n ##########################################################################\n axes[0].annotate(r'Nadir happens when powers equalize',\n xy=(12.02, 0.55), xycoords='data',\n xytext=(15, 0.53), textcoords='data',\n ha='left',\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n axes[1].annotate(r'RoCoF',\n xy=(10.0, 0.99888), xycoords='data',\n xytext=(12.0, 0.995), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",connectionstyle=\"arc3\"))\n\n axes[1].annotate(r'Nadir',\n xy=(12.02, 0.9888), xycoords='data',\n xytext=(20, 0.9885), textcoords='data',\n va='center',\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n axes[1].annotate(r'Level defined by primary response',\n xy=(20.0, 0.99), xycoords='data',\n xytext=(15.0, 0.999), textcoords='data',\n ha='left',\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n axes[1].annotate(r'Secondary response',\n xy=(40.0, 0.995255), xycoords='data',\n xytext=(50.0, 0.9950), textcoords='data',\n ha='left',\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n axes[2].annotate(r'Proportional response',\n xy=(26.0, 0.05), xycoords='data',\n xytext=(12.0, 0.0275), textcoords='data',\n ha='left',\n va='center',\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n axes[2].annotate(r'Integral response',\n xy=(45.0, 0.032), xycoords='data',\n xytext=(60.0, 0.05), textcoords='data',\n ha='right',\n va='center',\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n ##########################################################################\n # axis legends\n ##########################################################################\n axes[0].legend(loc='best', frameon=False, prop={'size': 10})\n axes[1].legend(loc='best', frameon=False, prop={'size': 10})\n axes[2].legend(loc='center right', frameon=False, prop={'size': 10})\n\n ##########################################################################\n # align, tighten, shown and save\n ##########################################################################\n fig.align_ylabels(axes[:])\n fig.tight_layout()\n # fig.show()\n\n if figure_type == '.pdf':\n plt.savefig(figure_full_path, format=\"pdf\", bbox_inches=\"tight\")\n elif figure_type == '.eps':\n plt.savefig(figure_full_path, format='eps')\n\n plt.show()\n\n\ndef main():\n 
print(\"#####################\")\n    print(\"Function name: \", main.__name__)\n\n    figure_type = '.pdf'\n    # figure_type = ''\n\n    plot_charts(figure_type=figure_type)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"santosmota/PhD_Daniel_Mota_Data","sub_path":"PythonCharts/Intro_FreqRespPhases.py","file_name":"Intro_FreqRespPhases.py","file_ext":"py","file_size_in_byte":9004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10356989069","text":"import json\nimport os\n\nimport boto3\n\nREGION = os.environ.get('REGION', 'us-east-1')\nDOMAIN_NAME = os.environ.get(\"DOMAIN_NAME\", \"*\")\nAWS_ENVIRONMENT = os.environ.get('AWSENV', 'AWS')\n\ndef check_request(message,allowed_method):\n    \"\"\"\n    Returns 400 if message does not validate with correct HTTP Method\n    \"\"\"\n    if ('pathParameters' not in message or\n            message['httpMethod'] != allowed_method):\n        \n        return jsonresponse({'msg': 'Bad Request'}, 400) \n        \ndef get_dynamodb_table(table_name):\n    if AWS_ENVIRONMENT == 'AWS_SAM_LOCAL':\n        dynamodb_table = boto3.resource(\n            'dynamodb',\n            endpoint_url='http://dynamodb:8000'\n        )\n    else:\n        dynamodb_table = boto3.resource(\n            'dynamodb',\n            region_name=REGION\n        )\n\n    return dynamodb_table.Table(table_name)\n\ndef jsonresponse(message, status_code=200):\n    \"\"\"\n    Return a properly formatted response for AWS Lambda proxy integration\n    including CORS\n    \"\"\"\n    return {\n        \"statusCode\": status_code,\n        \"headers\": {\n            \"Access-Control-Allow-Headers\": \"Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with\",\n            \"Access-Control-Allow-Origin\": \"{}\".format(DOMAIN_NAME),\n            \"Access-Control-Allow-Methods\": \"POST,GET,OPTIONS\"\n        },\n        \"body\": json.dumps(message)\n    }\n","repo_name":"chrishollinworth/awschallenge","sub_path":"challenge1/three-tier-example/backend/common/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23041286035","text":"details = {\n    \"name\" : \"Torikus sadik\",\n    \"age\" : 22,\n    \"favs\": [\"c\",\"c++\",\"js\",\"python\"]\n}\n\n\n# get method\nname = details.get(\"name\")\n# print(name)\n\n# copy method\ncopy = details.copy()\n# print(copy)\n\n\n# items method\nitems = copy.items()\n# print(items)\n\n# keys method\nkeys = copy.keys()\nprint(keys)\n\n# values method\nvalues = copy.values()\nprint(values)\n\n\n# pop method\npoped = details.pop(\"age\")\n# print(poped)\n\n\n# popitem method\npop = details.popitem()\nprint(pop)","repo_name":"shakil-babu/Python-Basic-Intermediate","sub_path":"Tutorial/12.Dictionaries/08.methods.py","file_name":"08.methods.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"19250785344","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches\n\nimport skimage\nimport skimage.measure\nimport skimage.color\nimport skimage.restoration\nimport skimage.io\nimport skimage.filters\nimport skimage.morphology\nimport skimage.segmentation\nfrom skimage.transform import resize\nimport copy \nimport math\nfrom skimage.morphology import erosion\nfrom skimage.morphology import square\n\nfrom nn import *\nfrom q4 import *\n# do not include any more libraries here!\n# no opencv, no sklearn, etc!\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', 
category=UserWarning)\n\nfor img in os.listdir('../images'):\n \n im1 = skimage.img_as_float(skimage.io.imread(os.path.join('../images',img)))\n \n bboxes, bw = findLetters(im1)\n \n plt.imshow(bw, cmap='gray')\n\n for bbox in bboxes:\n minr, minc, maxr, maxc = bbox\n rect = matplotlib.patches.Rectangle((minc, minr), maxc - minc, maxr - minr,\n fill=False, edgecolor='red', linewidth=2)\n plt.gca().add_patch(rect)\n plt.show()\n \n # sort rows (the same rows are nearby)\n bboxes.sort(key=lambda x:x[0])\n\n grey_image = skimage.color.rgb2gray(im1)\n\n # find the rows using..RANSAC, counting, clustering, etc.\n y_1, x_1, y_2, x_2 = bboxes[0]\n origin_row = (y_1 + y_2) // 2\n err = abs(origin_row - y_1) \n sort_temp = []\n sorted_bboxes = []\n for bbox in bboxes:\n minr, minc, maxr, maxc = bbox\n row = (minr + maxr) // 2\n if abs(origin_row - row) < err:\n sort_temp.append(bbox)\n else:\n sort_temp.sort(key=lambda x:x[1])\n sorted_bboxes.append(sort_temp)\n sort_temp = []\n sort_temp.append(bbox)\n origin_row = row\n sort_temp.sort(key=lambda x:x[1])\n sorted_bboxes.append(sort_temp)\n data_x_temp = []\n data_x = []\n # crop the bounding boxes\n # note.. before you flatten, transpose the image (that's how the dataset is!)\n # consider doing a square crop, and even using np.pad() to get your images looking more like the dataset\n for same_line in sorted_bboxes:\n for i, sort_loc in enumerate(same_line):\n minr, minc, maxr, maxc = sort_loc\n patch_image = grey_image[minr : maxr + 1, minc : maxc + 1]\n patch_image = erosion(patch_image, selem = square(8))\n r_diff = maxr - minr\n c_diff = maxc - minc\n pad_r = r_diff // 8\n pad_c = c_diff // 8\n patch_image = np.pad(patch_image, ((pad_r,pad_r),(pad_c,pad_c)), mode='maximum')\n patch_image = resize(patch_image, (32, 32))\n patch_image_T = patch_image.T\n data_x_temp.append(patch_image_T.flatten())\n data_x.append(np.array(data_x_temp))\n data_x_temp = []\n \n # load the weights\n # run the crops through your neural network and print them out\n \n import pickle\n import string\n letters = np.array([_ for _ in string.ascii_uppercase[:26]] + [str(_) for _ in range(10)])\n params = pickle.load(open('q3_weights.pickle','rb'))\n\n for group in data_x:\n h_1 = forward(group, params, 'layer1')\n predict_y = forward(h_1, params, 'output',softmax)\n max_output = np.amax(predict_y, axis = 1, keepdims = True)\n pred_class = np.ones(predict_y.shape) * (predict_y//max_output == 1)\n pred = np.nonzero(pred_class)[1]\n print(letters[pred])\n\n\n\n","repo_name":"ChangNeinei/Text_Extracting_NN","sub_path":"run_q4.py","file_name":"run_q4.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17347747706","text":"from transformers import AutoModelForCausalLM, AutoTokenizer\nfrom transformers.generation import GenerationConfig\nfrom datetime import datetime\n# 可选的模型包括: \"Qwen/Qwen-7B-Chat\", \"Qwen/Qwen-14B-Chat\"\nmodel_path=\"/opt/data/private/ytw/LLM/Qwen-14B-Chat\"\ntokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n\n# 打开bf16精度,A100、H100、RTX3060、RTX3070等显卡建议启用以节省显存\n# model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen-7B-Chat\", device_map=\"auto\", trust_remote_code=True, bf16=True).eval()\n# 打开fp16精度,V100、P100、T4等显卡建议启用以节省显存\n# model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen-7B-Chat\", device_map=\"auto\", trust_remote_code=True, fp16=True).eval()\n# 使用CPU进行推理,需要约32GB内存\n# model = 
AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen-7B-Chat\", device_map=\"cpu\", trust_remote_code=True).eval()\n# 默认使用自动模式,根据设备自动选择精度\nmodel = AutoModelForCausalLM.from_pretrained(model_path, device_map=\"auto\", trust_remote_code=True).eval()\n\n# 可指定不同的生成长度、top_p等相关超参\nmodel.generation_config = GenerationConfig.from_pretrained(model_path, trust_remote_code=True)\n\n# 第一轮对话\ns_time =datetime.now()\nfor i in range(10):\n \n response, history = model.chat(tokenizer, \"给我讲一个年轻人奋斗创业最终取得成功的故事。\", history=None)\n print(response)\n \ne_time =datetime.now()\nall_time = e_time-s_time\nprint(\"All time cost {}, per conversation cost {}\".format(all_time,all_time/100))\n# 你好!很高兴为你提供帮助。\n\n# 第二轮对话\n# response, history = model.chat(tokenizer, \"给我讲一个年轻人奋斗创业最终取得成功的故事。\", history=history)\n# print(response)\n# 这是一个关于一个年轻人奋斗创业最终取得成功的故事。\n# 故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。从小,李明就立下了一个目标:要成为一名成功的企业家。\n# 为了实现这个目标,李明勤奋学习,考上了大学。在大学期间,他积极参加各种创业比赛,获得了不少奖项。他还利用课余时间去实习,积累了宝贵的经验。\n# 毕业后,李明决定开始自己的创业之路。他开始寻找投资机会,但多次都被拒绝了。然而,他并没有放弃。他继续努力,不断改进自己的创业计划,并寻找新的投资机会。\n# 最终,李明成功地获得了一笔投资,开始了自己的创业之路。他成立了一家科技公司,专注于开发新型软件。在他的领导下,公司迅速发展起来,成为了一家成功的科技企业。\n# 李明的成功并不是偶然的。他勤奋、坚韧、勇于冒险,不断学习和改进自己。他的成功也证明了,只要努力奋斗,任何人都有可能取得成功。\n\n# 第三轮对话\n# response, history = model.chat(tokenizer, \"给这个故事起一个标题\", history=history)\n# print(response)s\n# 《奋斗创业:一个年轻人的成功之路》","repo_name":"augusyan/LLM_Eval","sub_path":"demo_qwen_chat.py","file_name":"demo_qwen_chat.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23042878430","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom tqdm import tqdm, trange\nimport torch\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\nimport itertools\nimport argparse\nimport os\nimport torch.nn as nn\nfrom transformers import BertConfig, BertForSequenceClassification, BertTokenizer\nfrom bm25_custom import get_bm25_weights\nimport nltk\nnltk.download('stopwords')\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\nimport string\nimport re\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n\ndef get_values(file):\n \"\"\"\n get label context and response.\n :param file: filel name\n :param get_c_d:\n :return:\n \"\"\"\n data = open(file, 'r').readlines()\n data = [sent.split('\\n')[0].split('\\t') for sent in data]\n cr = [\"\\n\".join([sen for sen in a[1:]]) for a in data]\n return cr\n\n\ndef makedocs():\n sample_to_doc = []\n all_docs_str = []\n all_docs_list = []\n doc = []\n corpus_lines = 0\n doc_cnt = 0\n with open(\"../tf-idf/post_train_ver2.txt\", \"r\", encoding='utf-8') as f:\n for line in tqdm(f, desc=\"Loading Dataset\", total=corpus_lines):\n line = line.strip()\n if line == \"\":\n if (len(doc) != 0):\n all_docs_str.append(\"\\n\".join(doc))\n all_docs_list.append(doc)\n doc_cnt = 0\n doc = []\n # remove last added sample because there won't be a subsequent line anymore in the doc\n sample_to_doc.pop()\n else:\n p = re.compile(\"[^0-9.]\")\n line=line.split()\n newline=[]\n for word in line:\n if \"\".join(p.findall(word)):\n newline.append(word)\n line=\" \".join(newline)\n # store as one sample\n if doc_cnt >= 10:\n continue\n sample = {\"doc_id\": len(all_docs_list),\n \"line\": len(doc)}\n sample_to_doc.append(sample)\n if line==\"\":\n continue\n doc.append(line.lower())\n doc_cnt += 1\n corpus_lines = corpus_lines + 1\n\n # if last row in file is not empty\n if all_docs_list[-1] != doc:\n all_docs_list.append(doc)\n 
all_docs_str.append(\"\\n\".join(doc))\n sample_to_doc.pop()\n\n for doc in all_docs_list:\n if len(doc) == 0:\n print(doc)\n num_docs = len(all_docs_list)\n return all_docs_str, all_docs_list\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batch_size\",\n default=0,\n type=int,\n help=\"The batch size.\")\n parser.add_argument(\"--model_type\", default=\"bert\", type=str,\n help=\"Model type selected in the list: bert\")\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-uncased\", type=str,\n help=\"bert file location\")\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--cache_dir\", default=\"bert_cache\", type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--do_lower_case\", action='store_true', default=True,\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--load\", default=False, type=bool)\n parser.add_argument(\"--save_path\",\n default=\"../tf-idf/checkpoint/ubuntu.bert.pt\",\n type=str,\n help=\"The path to save model.\")\n\n args = parser.parse_args()\n # load bert\n print(args)\n\n # load the vocab file\n targetlist = get_values('../tf-idf/valid.txt')\n l = 50000\n targetlist = targetlist[:l]\n all_docs_str, all_docs_list = makedocs()\n\n wholestring = \"\"\n\n #tiv = TfidfVectorizer(stop_words=\"english\").fit(all_docs_str)\n #tokenizer = tiv.build_tokenizer()\n #all_docs_numpy = tiv.transform(all_docs_str)\n #all_docs_text_numpy = np.array(all_docs_list)\n # way to find relative docs\n stop = stopwords.words('english') + list(string.punctuation)\n corpus=[]\n strcorpus=[]\n for doc in all_docs_str:\n for sent in doc.split('\\n'):\n strcorpus.append(sent)\n corpus.append(list(set([i for i in word_tokenize(sent.lower()) if i not in stop])))\n target = []\n target_part=[]\n for i,doc in enumerate(tqdm(targetlist)):\n if i % 1000==0 and target_part:\n target.append(target_part)\n target_part=[]\n target_part.append(list(set([i for i in word_tokenize(doc.split(\"\\n\")[-1].lower()) if i not in stop])))\n if target_part:\n target.append(target_part)\n\n strcorpus=np.array(strcorpus)\n cnt=0\n\n for target_part in tqdm(target):\n relevant_str_idx_part = get_bm25_weights(corpus,target_part, n_jobs=-1)\n\n for relevant_str_idx_single in (relevant_str_idx_part):\n with torch.no_grad():\n top10doc=strcorpus[relevant_str_idx_single]\n prev = \"\"\n sencount=0\n for i, sen in enumerate(top10doc):\n if sencount==10:\n break\n\n if prev != sen and len(sen.split())>2:\n wholestring += sen + '\\n'\n prev = sen\n sencount+=1\n\n if cnt == 440:\n print(\"0\")\n if prev==\"\":\n wholestring+='[empty]\\n'\n print(\"empty\")\n wholestring += '\\n'\n cnt+=1\n\n file = open(\"bm25_only_dev1_r.txt\", \"w\", encoding='utf-8')\n file.write(wholestring)\n\n # pickle.dump(dataset, open('ubuntu_data/dataset_1M.pkl', 'wb'))","repo_name":"hanjanghoon/NLP_Dialogue_BERT-Knowledge-IR","sub_path":"IR/bm25/onlybm25.py","file_name":"onlybm25.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37647127930","text":"#Upwind for convection term. 
CD (central differencing) for diffusion term\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef tdma(a,b,c,d,T):\n    \"\"\" a: ap(main diagonal), b: -ae, c: -aw, d: Su, T: variable vector \"\"\"\n    N=len(a)\n    P=np.ones(N)\n    Q=np.ones(N)\n    P[0]=-b[0]/a[0]\n    Q[0]= d[0]/a[0]\n    for i in range(1,N):\n        denom=1/(a[i]+c[i]*P[i-1])\n        P[i] = -b[i]*denom\n        Q[i] = (d[i]-c[i]*Q[i-1])*denom\n    T[N-1]=Q[N-1]\n    for i in range(N-2,-1,-1):\n        T[i]=P[i]*T[i+1]+Q[i]\n\n#Case\ncase=\"case1\"\n\n#geometric\nlength = 1 #m\n\n#fluid properties\nrho=1\nk=0.1\n\nif(case==\"case1\"):\n    nx=5\n    velocity=-0.1\nelif(case==\"case2\"):\n    nx=5\n    velocity=2.5\nelse:#case3 or junk case :)\n    nx=20\n    velocity=2.5\n\ndx = length/nx\n\n#bc and initial conditions\n\nTa=1\nTb=0\n\nF = rho*velocity\nD = k/dx\n\n#declaring and initialising arrays\nu = np.ones(nx)*velocity\nT=np.ones(nx)\naw=np.ones(nx)*(D+F)\nae=np.ones(nx)*D\nSp=np.zeros(nx)\nSu=np.zeros(nx)\n\n#leftmost node\naw[0]=0\nSp[0]=-(2*D+F)\nSu[0]=(2*D+F)*Ta\n#rightmost node\nae[nx-1]=0\nSp[nx-1]=-2*D\nSu[nx-1]=2*D*Tb\nap=aw+ae-Sp\n\n#solve\ntdma(ap,-ae,-aw,Su,T)\n\n#exact solution\nx=np.linspace(0,length,nx+1)+dx/2\nx=x[:-1]\nTexact = Ta+(Tb-Ta)* (np.exp(rho*velocity*x/k) -1) / (np.exp(rho*velocity*length/k) -1)\n\nprint(T)\nprint(Texact)\n\n#plot\nplt.plot(x,T,linestyle='--' ,marker='o',color='b',label='Numerical Solution [Upwind]')\nplt.plot(x,Texact,'-gD',label='Exact Solution')\n\nplt.title(\"Comparison of distribution of property\")\nplt.xlabel(\"Position\")\nplt.ylabel(\"Property\")\nplt.legend()\nplt.grid()\nfigname=\"comparison_5.2_\"+case\nplt.savefig(figname+\".png\")\nplt.show()\n","repo_name":"nipinl/codingVersteegMalalasekera","sub_path":"07_1dConvectionDifffusion_upwind_5.2.py","file_name":"07_1dConvectionDifffusion_upwind_5.2.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"25782686384","text":"from selenium import webdriver\nimport time\nimport pandas as pd\nimport os\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport psycopg2\n\nDIR_PATH = os.path.abspath(os.path.dirname(__file__))\nfolder_name = \"_Output\"\nfile_name='Jodi_data.csv'\n\ndef download_file(chromedriver_url):\n    path = DIR_PATH\n    options = webdriver.ChromeOptions()\n    prefs = {\"download.default_directory\": path}\n    options.add_experimental_option(\"prefs\", prefs)\n    # pass the driver binary via executable_path (service_log_path expects a log file)\n    driver = webdriver.Chrome(executable_path=chromedriver_url, options=options)\n    driver.get(\"http://www.jodidb.org/TableViewer/tableView.aspx?ReportId=93906\")\n    driver.maximize_window()\n    driver.find_element(By.XPATH,\"/html/body/table/tbody/tr[1]/td[2]/table[2]/tbody/tr[1]/td/div/table/tbody/tr/td[5]/a/img\").click()\n    driver.find_element(By.XPATH,\"/html/body/table/tbody/tr[1]/td[2]/table[2]/tbody/tr[1]/td/div/table/tbody/tr/td[6]/div/div/p[3]/nobr/a\").click()\n    driver.switch_to.window(driver.window_handles[1])\n    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '/html/body/form/table/tbody/tr[1]/td[2]/input[1]')))\n    driver.find_element(By.XPATH, \"/html/body/form/table/tbody/tr[1]/td[2]/input[1]\").click()\n    time.sleep(5)\n    driver.close()\n\ndef process_file():\n    path = os.path.abspath(os.path.dirname(__file__))\n    files = os.listdir(path)\n    # join with the directory so getmtime works regardless of the current working directory\n    time_sorted_list = sorted(files, key=lambda f: os.path.getmtime(os.path.join(path, f)))\n    file_name = time_sorted_list[len(time_sorted_list) - 1]\n    df = 
pd.read_csv(os.path.join(path, file_name), header=3)\n    df.drop(0, axis=0, inplace=True)\n    # slice the country columns (not rows) and melt them into long format\n    melted_df = df.melt(id_vars=['Time'], value_vars=df.loc[:, 'Algeria':'Total Africa NG'].columns, var_name='Country', value_name='Value')\n    melted_df.rename(columns = {'Time':'Country','Country':'Time'}, inplace = True)\n    path = os.path.join(DIR_PATH, folder_name)\n    try:\n        os.mkdir(path)\n    except OSError as error:\n        print(error)\n    date(melted_df,path)\n\ndef date(melted_df,path):\n    df = melted_df\n    df['Time'] = pd.to_datetime(df['Time'], format='mixed', dayfirst=True).dt.strftime('%d-%m-%Y')\n    # re-parse with an explicit format so day and month are not swapped\n    df[\"Time\"] = pd.to_datetime(df[\"Time\"], format='%d-%m-%Y')\n    print(df['Time'])\n    df.to_csv(path+\"/Jodi_data.csv\",header=df.columns,index=False,encoding='utf-8')\n    print(df)\n\ndef table():\n    conn = psycopg2.connect(host='localhost',dbname='JodiDB',user='postgres',password='1234',port=5432)\n    cur = conn.cursor()\n    path = os.path.join(DIR_PATH,folder_name, file_name)\n\n    #create table\n    cur.execute('''CREATE TABLE IF NOT EXISTS jodi(\n    country VARCHAR(40),\n    Time DATE NOT NULL,\n    value FLOAT)''')\n\n    print(\"table created\")\n    #copy data from csv\n    sql2 = '''COPY jodi(country,Time,value)\n    FROM 'E:/jodidb/venv/_Output/Jodi_data.csv'\n    DELIMITER ','\n    CSV HEADER;'''\n\n    cur.execute(sql2)\n    conn.commit()\n    cur.close()\n    conn.close()\n\n\ndownload_file(\"E:/selenium/chromedriver.exe\")\nprocess_file()\ntable()\n","repo_name":"KHemanth2001/jodidb","sub_path":"jodi_postgres.py","file_name":"jodi_postgres.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"14969532073","text":"import os\nimport pandas as pd\nimport cv2\nimport numpy as np\n\n\ndef image_label(number, label): \n    y = 0 # Unknown\n    for index, row in label.iterrows():\n        start = row['Start']\n        end = row['End']\n        if number >= start and number <= end:\n            y = row[\"Label\"]\n            return y\n    return y\n\ndef create_training_csv_multiclass(data_path, label_path, sheet_name, writer):\n    # Policy: Discard frames that don't meet the 12 sliding window requirement \n    df = pd.read_excel(label_path, sheet_name=sheet_name)\n    subjects = list(df['Subject'].unique())\n    print(\"Subjects:\", subjects)\n    \n    for subject in subjects:\n        print(\"Processing Subject\", subject)\n\n        subject_data_path = os.path.join(data_path, subject) \n        subject_df = df[df['Subject'] == subject]\n\n        numbers = sorted([int(f[3:-4]) for f in os.listdir(subject_data_path)]) \n        list_filename = []\n        current_label = -1\n        past_label = -1\n        for number in numbers: \n            filename = \"img\" + str(number).zfill(5) + \".jpg\"\n            path_filename = os.path.join(subject_data_path, filename)\n            \n            # Label\n            current_label = image_label(number, subject_df) \n\n            # Discard if the image is in excepted image\n            # RISK: if we allow the model to train on blank image, it could disrupt training process\n            img = cv2.imread(path_filename)\n            all_zeros = not np.any(img)\n            if all_zeros:\n                current_label = 0\n\n            # Write to CSV\n            if current_label == past_label:\n                list_filename.append(path_filename)\n                if len(list_filename) == 12: \n                    for filename in list_filename:\n                        writer.write(filename)\n                        writer.write(',')\n                    writer.write(str(past_label))\n                    writer.write('\\n')\n                    list_filename = []\n            else:\n                # POLICY DISCARD\n                list_filename = []\n                list_filename.append(path_filename)\n            past_label = current_label \n    \n\nif __name__ == \"__main__\":\n    # Training\n    filewriter = open('training_pubspeak_25042023_face_detection.csv', 'w')\n    filewriter.write(\"1,2,3,4,5,6,7,8,9,10,11,12,Label\\n\")\n    create_training_csv_multiclass( 
\"D:\\Dataset Skripsi Batch Final Image Face Detection\",\n \"D:\\CodeProject2\\SKRIPSI_FINAL\\pubspeak_label_25042023.xlsx\",\n \"Training\",\n filewriter)\n filewriter.close()\n\n # Testing\n filewriter = open('testing_pubspeak_25042023_face_detection.csv', 'w')\n filewriter.write(\"1,2,3,4,5,6,7,8,9,10,11,12,Label\\n\")\n create_training_csv_multiclass( \"D:\\Dataset Skripsi Batch Final Image Face Detection\",\n \"D:\\CodeProject2\\SKRIPSI_FINAL\\pubspeak_label_25042023.xlsx\",\n \"Testing\",\n filewriter)\n filewriter.close()\n \n\n","repo_name":"vincentdar/SKRIPSI_FINAL","sub_path":"training/create_training_csv_binary.py","file_name":"create_training_csv_binary.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41155428446","text":"class Solution:\n def subdomainVisits(self, cpdomains: List[str]) -> List[str]:\n \n hmap = collections.defaultdict(int)\n \n for d in cpdomains:\n count, domain = d.split(\" \")\n sub_domain = domain.split(\".\")\n for i in range(len(sub_domain)):\n hmap[\".\".join(sub_domain[i:])] += int(count)\n \n return [str(v)+ \" \" + k for k, v in hmap.items()]\n \n \n \n \n \n \n ","repo_name":"tejeshreddy/competitive-programming","sub_path":"811-subdomain-visit-count/811-subdomain-visit-count.py","file_name":"811-subdomain-visit-count.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7825680324","text":"from flask import redirect, url_for\n\nimport db\nfrom models import Usuario, Proveedor, Producto\nclass ValidarLogin:\n#Clase ValidarLogin\n#Recibe un correo y una contrasena\n#Tiene un método llamado valida_usuario_login\n def __init__(self, correo, contrasena):\n self.correo = correo\n self.contrasena = contrasena\n\n\n def valida_usuario_login(self):\n #Método que valida que el inicio de sesión de un usuario, retorna el tipo de acceso si coinciden los datos\n usuario = db.session.query(Usuario).filter_by(correo_electronico=self.correo).first()\n if self.contrasena == \"\":\n return False\n elif usuario is None:\n return False\n elif usuario.contrasena == self.contrasena:\n return usuario\n else:\n return False\n\n\n @classmethod\n def get_by_id(self, id):\n usuario = db.session.query(Usuario).filter_by(id_usuario=id).first()\n if usuario != None:\n return usuario\n else:\n return None\n\n def __str__(self):\n return \"{} -> {}\".format(self.correo, self.contrasena)\n\nclass ValidarUsuario:\n#Clase validarUsuario valida los formularios de ingreso de usuarios y modificacion de usuario\n#args:\n#nombre hace referencia al nombre del usuario\n#apellido ahce referencia al apellido del usuario\n#correo_electronico hace referencia al correo del usuario (no se puede repetir)\n#contrasena hace referencia a la contraseña del usuario\n#tipo_acceso hace referencia al tipo de acceso del usuario (ADMINISTRADOR, CLIENTE, PROVEEDOR)\n def __init__(self, nombre, correo_electronico, contrasena, tipo_acceso, apellido=None):\n self.nombre = nombre\n self.apellido = apellido\n self.correo_electronico = correo_electronico\n self.contrasena = contrasena\n self.tipo_acceso = tipo_acceso\n\n\n def validar_formulario_nUsuario(self):\n #Método que valida el formulario de ingreso de un nuevo usuario\n\n #Se consulta en la base de datos si existe un usuario con el correo electronico del nuevo usuario\n validacion_correo = 
db.session.query(Usuario).filter_by(correo_electronico=self.correo_electronico).first()\n\n #Se validan que los campos obligatorios tengan informacion\n if self.nombre != \"\" and self.contrasena != \"\" and self.tipo_acceso != \"\" and self.correo_electronico != \"\":\n if validacion_correo is not None:#Si la variable contiene informacion de un usuario, es porque el correo electronico del nuevo usuario ya existe\n # print(\"error correo\")\n return \"errorCorreo\"\n else:\n return True\n else:#Si no se cumplen las condiciones anteriores es porque uno o mas campos han quedado vacíos\n return \"camposVacios\"\n\n def modifica_usuario(self, id_usuario):\n #Funcion que modifica los datos de un usuario\n\n #Se obtiene el usuario a modificar\n usuario = db.session.query(Usuario).filter_by(id_usuario=id_usuario).first()\n\n if self.nombre == \"\" and self.apellido == \"\" and self.correo_electronico == \"\" and self.contrasena == \"\":\n #Si se deja en blanco el formulario\n return False\n elif self.nombre != \"\" and self.apellido != \"\" and self.correo_electronico != \"\" and self.contrasena != \"\" and self.tipo_acceso != \"\":\n if usuario.correo_electronico == self.correo_electronico:\n #Se valida que no ingrese un correo repetido\n return False\n else:\n #Si se rellenan todos los campos\n usuario.nombre = self.nombre\n usuario.apellido = self.apellido\n usuario.correo_electronico = self.correo_electronico\n usuario.contrasena = self.contrasena\n usuario.tipo_acceso = self.tipo_acceso\n\n db.session.commit()\n return True\n else:\n #Si se llena solo algunos campos del formulario\n if self.nombre != \"\":\n # Si el nombre no esta vacio\n usuario.nombre = self.nombre\n if self.apellido != \"\":\n # Si el apellido no esta vacio\n usuario.apellido = self.apellido\n if self.correo_electronico != \"\":\n # Si el correo electronico no esta vacio\n usuario.correo_electronico = self.correo_electronico\n if self.contrasena != \"\":\n # Si la contraseña no esta vacia\n usuario.contrasena = self.contrasena\n if self.tipo_acceso != \"\":\n # Si el tipo de acceso no esta vacio\n usuario.tipo_acceso = self.tipo_acceso\n #Se guardan los cambios en la bd\n db.session.commit()\n return True\n\nclass ValidaProveedor:\n#Función que valida los formularios de ingreso de un nuevo proveedor y el formulario de modificaciones de proveedores\n def __init__(self, nombre_empresa, cif, correo_electronico, telefono, direccion, facturacion, descuento, iva):\n self.nombre_empresa = nombre_empresa\n self.cif = cif\n self.correo_electronico = correo_electronico\n self.telefono = telefono\n self.dirreccion = direccion\n self.facturacion = facturacion\n self.descuento = descuento\n self.iva = iva\n\n def __str__(self):\n return \"{} -> {} -> {} -> {} -> {} -> {} -> {} -> {}\".format(self.nombre_empresa, self.cif, self.telefono, self.direccion, self.correo_electronico,\n self.facturacion, self.descuento, self.iva)\n def validar_proveedor(self):\n #valida el formulario de ingreso de un nuevo proveedor\n\n #Se consulta a la base de datos si existe algun proveedor con el cif ingresado por el usuario\n proveedor = db.session.query(Proveedor).filter_by(cif=self.cif).first()\n\n #Se validan que los campos obligatorios no queden vacíos\n if self.nombre_empresa == \"\" or self.cif == \"\" or self.telefono == \"\" or self.dirreccion == \"\" or self.facturacion == \"\" or self.iva == \"\":\n return False\n elif proveedor != None:\n if proveedor.cif == self.cif: #Se valida que no exista un proveedor con el cif del nuevo proveedor\n 
return \"cifRepetido\"\n else:\n return True\n def modifica_proveedor(self):\n #Funcion que modifica los datos de un producto\n p = db.session.query(Proveedor).filter_by(cif=self.cif).first()\n\n if self.nombre_empresa == \"\" and self.correo_electronico == \"\" and self.telefono == \"\" and self.dirreccion == \"\" and self.facturacion == \"\" and self.descuento == \"\" and self.iva == \"\":\n #Si se deja en blanco el formulario\n return False\n elif self.nombre_empresa != \"\" and self.correo_electronico != \"\" and self.telefono != \"\" and self.dirreccion != \"\" and self.facturacion != \"\" and self.descuento != \"\" and self.iva != \"\":\n #Si se rellenan todos los campos\n p.nombre_empresa = self.nombre_empresa\n p.correo_electronico = self.correo_electronico\n p.direccion = self.dirreccion\n p.telefono = self.telefono\n p.facturacion = self.facturacion\n p.descuento = self.descuento\n p.iva = self.iva\n\n db.session.commit()\n return True\n else:\n #Si se llena solo algunos campos del formulario\n if self.nombre_empresa != \"\":\n # Si el nombre no esta vacio\n p.nombre_empresa = self.nombre_empresa\n if self.correo_electronico != \"\":\n # Si la descripcion no esta vacia\n p.correo_electronico = self.correo_electronico\n if self.dirreccion != \"\":\n # Si la marca no esta vacia\n p.direccion = self.dirreccion\n if self.telefono != \"\":\n # Si el stock no esta vacio\n p.telefono = self.telefono\n if self.facturacion != \"\":\n # Si el precio no esta vacio\n p.facturacion = self.facturacion\n if self.descuento != \"\":\n # Si la ubicacion no esta vacia\n p.descuento = self.descuento\n if self.iva != \"\":\n # Si la ubicacion no esta vacia\n p.iva = self.iva\n\n db.session.commit()\n return True\n\n def __str__(self):\n return \"{} -> {} -> {} -> {} -> {}\".format(self.nombre, self.apellido, self.correo_electronico, self.contrasena, self.tipo_usuario)\n\n\nclass ValidaProducto:\n#Clase que valida el ingreso de un nuevo producto o la modificación de uno existente\n\n def __init__(self, numero_referencia, nombre_producto, descripcion, marca, stock, precio, ubicacion):\n self.numero_referencia = numero_referencia\n self.nombre_producto = nombre_producto\n self.descripcion = descripcion\n self.marca = marca\n self.stock = stock\n self.precio = precio\n self.ubicacion = ubicacion\n\n def validar_producto(self):\n # Valida producto es una funcion que valida si los campos obligatorios del formulario crear producto han quedado vacios o no\n\n # Con el número de referencia se busca en la base de datos si hay algún otro producto con el mismo número\n producto = db.session.query(Producto).filter_by(numero_referencia=self.numero_referencia).first()\n\n if self.numero_referencia == \"\" or self.nombre_producto == \"\" or self.descripcion == \"\" or self.stock == \"\" or self.precio == \"\" or self.ubicacion == \"\":\n # Si alguno de los campos obligatorios queda vacío retorna una respuesta\n return False\n elif producto != None:\n # Si al consultar a la base de datos por ese numero de referencia retorna un valor, se retorna una respuesta de numero repetido\n if producto.numero_referencia == self.numero_referencia:\n return \"referencia_repetida\"\n else:\n # En caso contrario, está correcto\n return True\n\n def modifica_producto(self, nombre_imagen):\n #Funcion que modifica los datos de un producto\n p = db.session.query(Producto).filter_by(numero_referencia=self.numero_referencia).first()\n\n if self.nombre_producto == \"\" and self.stock == \"\" and self.marca == \"\" and self.precio == \"\" 
and self.ubicacion == \"\":\n #Si se deja en blanco el formulario\n return False\n elif self.nombre_producto != \"\" and self.descripcion != \"\" and self.marca != \"\" and self.stock != \"\" and self.precio != \"\" and self.ubicacion != \"\" :\n #Si se rellenan todos los campos\n p.nombre_producto = self.nombre_producto\n p.descripcion = self.descripcion\n p.marca = self.marca\n p.stock = self.stock\n p.precio = self.precio\n p.ubicacion = self.ubicacion\n p.imagen = nombre_imagen\n db.session.commit()\n return True\n else:\n #Si se llena solo algunos campos del formulario\n if self.nombre_producto != \"\":\n # Si el nombre no esta vacio\n p.nombre_producto = self.nombre_producto\n if self.descripcion != \"\":\n # Si la descripcion no esta vacia\n p.descripcion = self.descripcion\n if self.marca != \"\":\n # Si la marca no esta vacia\n p.marca = self.marca\n if self.stock != \"\":\n # Si el stock no esta vacio\n p.stock = self.stock\n if self.precio != \"\":\n # Si el precio no esta vacio\n p.precio = self.precio\n if self.ubicacion != \"\":\n # Si la ubicacion no esta vacia\n p.ubicacion = self.ubicacion\n\n if nombre_imagen != \"\":\n # Si el nombre de la imagen no está vacio\n p.imagen = nombre_imagen\n\n db.session.commit()\n return True\n\ndef __str__(self):\n return \"{} -> {} -> {} -> {} -> {} -> {} -> {} -> {}\".format(self.nombre_empresa, self.cif, self.correo_electronico, self.telefono, self.dirreccion, self.facturacion,\n self.descuento, self.iva)","repo_name":"jptamayoa/suministrosInformaticos","sub_path":"validaciones.py","file_name":"validaciones.py","file_ext":"py","file_size_in_byte":12378,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42669602628","text":"import glob\nimport math\nimport numpy as np\nimport os\nimport pathlib\n\nimport pandas as pd\nimport tensorflow as tf\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nPATHBASE = '/home/xandao/Imagens'\nPATCHES = [3]\n\n\ndef next_patch(spec, n):\n step = math.floor(spec.shape[0] / n)\n for i in range(n):\n yield spec[i * step:(i + 1) * step, :, :]\n\n\ndef get_model(model, **kwargs):\n if model == 'vgg16':\n return tf.keras.applications.vgg16.VGG16(**kwargs), tf.keras.applications.vgg16.preprocess_input\n if model == 'resnet50v2':\n return tf.keras.applications.resnet_v2.ResNet50V2(**kwargs), tf.keras.applications.resnet_v2.preprocess_input\n if model == 'mobilenetv2':\n return tf.keras.applications.mobilenet_v2.MobileNetV2(\n **kwargs), tf.keras.applications.mobilenet_v2.preprocess_input\n\n raise ValueError\n\n\n\ndef extract_features(cnn, color, dataset, gpuid, folds, image_size, input_path, level, minimum_image, output_path, patches, region):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpuid)\n spec_height = image_size[0]\n spec_width = image_size[1]\n input_path_proto = os.path.join(input_path, 'f%d', '*.jpeg')\n\n for n_patches in patches:\n print(\"Slicing images into %d non-overlapping patches...\" % (n_patches))\n tf.keras.backend.clear_session()\n\n input_shape = (math.floor(spec_height / n_patches), spec_width, 3)\n\n model, preprocess_input = get_model(cnn, weights='imagenet', include_top=False,\n input_shape=input_shape, pooling='avg')\n total_samples=0\n imgs_sliced = []\n for fold in folds:\n print(\"Extracting features for fold %d...\" % (fold))\n if len(glob.glob(input_path_proto % (fold))) == 0:\n raise RuntimeError(\"No files found in: %s\" % (input_path_proto % (fold)))\n\n features = []\n for fname in sorted(glob.glob(input_path_proto % 
(fold))):\n print('fname: %s' % fname)\n img = tf.keras.preprocessing.image.load_img(fname)\n spec = tf.keras.preprocessing.image.img_to_array(img)\n for p in next_patch(spec, n_patches):\n p = preprocess_input(p)\n imgs_sliced.append(tf.keras.preprocessing.image.array_to_img(p))\n p = np.expand_dims(p, axis=0)\n features.append(model.predict(p))\n\n features = np.concatenate(features)\n save_file('npy', features, fold, n_patches, output_path)\n save_file('npz', features, fold, n_patches, output_path)\n n_samples, n_features = features.shape\n total_samples+=n_samples\n print(total_samples)\n save_information(color, cnn, dataset, image_size, input_path, level, minimum_image, n_features, output_path, n_patches, region, total_samples)\n\n\ndef save_file(extension, features, fold, n_patches, output_path):\n output_path = os.path.join(output_path, extension)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n output_path = os.path.join(output_path, 'fold-%d_patches-%d.' + extension)\n output_filename = output_path % (fold, n_patches)\n if extension == 'npy':\n np.save(output_filename, features, allow_pickle=True)\n else:\n np.savez_compressed(output_filename, x=features, y=np.repeat(fold, features.shape[0]))\n\n\ndef save_information(color, cnn, dataset, image_size, input_path, level, minimum_image, n_features, output_path, patch, region, total_samples):\n height = image_size[0]\n width = image_size[1]\n index = ['color', 'cnn', 'dataset', 'height', 'width', 'level', 'minimum_image', 'input_path', 'output_path',\n 'patch', 'n_features', 'total_samples']\n data = [cnn, color, dataset, height, width, level, minimum_image, input_path, output_path, patch, n_features, total_samples]\n\n if region:\n index.append('region')\n data.append(region)\n\n df = pd.DataFrame(data, index=index)\n filename = os.path.join(output_path, 'info.csv')\n df.to_csv(filename, sep=';', index=index, header=None, lineterminator='\\n', doublequote=True)\n\n\ndef prepare(cnn, color, dataset, image_size, level, minimum_image, input_path, region=None):\n if not os.path.exists(input_path):\n raise SystemError('path (%s) not exists' % input_path)\n\n list_folders = [f for f in pathlib.Path(input_path).glob('*') if f.is_dir()]\n folds = len(list_folders)\n folds = list(range(1, folds + 1))\n image_size = (int(image_size), int(image_size))\n gpuid = 0\n patches = PATCHES\n patches = list(patches)\n\n features_folder = dataset + '_features'\n if region:\n output_path = os.path.join(PATHBASE, features_folder, color, str(image_size[0]), level, region, minimum_image, cnn)\n else:\n output_path = os.path.join(PATHBASE, features_folder, color, str(image_size[0]), level, minimum_image, cnn)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n print(\"Feature Extraction Parameters\")\n print(\"Pre-trained model: %s\" % cnn)\n print(\"Non-overlapping patches per image: %s\" % str(patches))\n print(\"Folds: %s\" % str(folds))\n print(\"Image Dimensions h=%s, w=%s \" % (image_size, image_size))\n print(\"Format string for input: %s \" % input_path)\n print(\"Format string for output: %s \" % output_path)\n print(\"GPU ID: %d\" % gpuid)\n\n extract_features(cnn, color, dataset, gpuid, folds, image_size, input_path, level, minimum_image, output_path, patches, region)\n\n\ndef main():\n list_color = ['RGB', 'GRAYSCALE']\n list_size = ['256', '400', '512']\n list_minimum_image = ['20', '10', '5']\n list_cnn = ['vgg16', 'resnet50v2', 'mobilenetv2']\n list_dataset = ['pr_dataset', 'br_dataset', 
'region_dataset']\n list_level = ['specific_epithet_trusted']\n list_region = ['Norte', 'Nordeste', 'Sul', 'Sudeste', 'Centro-Oeste']\n for cnn in list_cnn:\n for color in list_color:\n for image_size in list_size:\n for minimum_image in list_minimum_image:\n for level in list_level:\n for dataset in list_dataset:\n print('cnn: %s color: %s dataset: %s image_size: %s level: %s minimum_image: %s '\n % (cnn, color, dataset, image_size, level, minimum_image))\n if 'region_dataset' == dataset:\n for region in list_region:\n path = os.path.join(PATHBASE, dataset, color, level, region, image_size,\n minimum_image)\n prepare(cnn, color, dataset, image_size, level, minimum_image, path, region=region)\n else:\n path = os.path.join(PATHBASE, dataset, color, level, image_size, minimum_image)\n prepare(cnn, color, dataset, image_size, level, minimum_image, path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xaaaandao/deep_feature","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21108101229","text":"import numbers\nimport typing\nfrom typing import Any, Callable, Iterable, List, Optional, Tuple, Union\n\nfrom pyglove.core import geno\nfrom pyglove.core import object_utils\nfrom pyglove.core import symbolic\nfrom pyglove.core import typing as pg_typing\nfrom pyglove.core.hyper import base\nfrom pyglove.core.hyper import object_template\n\n\n@symbolic.members([\n ('num_choices', pg_typing.Int(min_value=0).noneable(),\n ('Number of choices to make. If set to None, any number of choices is '\n 'acceptable.')),\n ('candidates', pg_typing.List(pg_typing.Any()),\n ('Candidate values, which may contain nested hyper values.'\n 'Candidate can customize its display value (literal) by implementing the '\n '`pg.Formattable` interface.')),\n ('choices_distinct', pg_typing.Bool(True), 'Whether choices are distinct.'),\n ('choices_sorted', pg_typing.Bool(False), 'Whether choices are sorted.'),\n ('where', pg_typing.Callable([pg_typing.Object(base.HyperPrimitive)],\n returns=pg_typing.Bool()).noneable(),\n ('Callable object to filter nested hyper values. If None, all nested '\n 'hyper value will be included in the encoding/decoding process. '\n 'Otherwise only the hyper values on which `where` returns True will be '\n 'included. `where` can be useful to partition a search space into '\n 'separate optimization processes. 
'\n 'Please see `ObjectTemplate` docstr for details.'))\n])\nclass Choices(base.HyperPrimitive):\n \"\"\"Categorical choices from a list of candidates.\n\n Example::\n\n # A single categorical choice:\n v = pg.oneof([1, 2, 3])\n\n # A multiple categorical choice as a list:\n vs = pg.manyof(2, [1, 2, 3])\n\n # A hierarchical categorical choice:\n v2 = pg.oneof([\n 'foo',\n 'bar',\n pg.manyof(2, [1, 2, 3])\n ])\n\n See also:\n\n * :class:`pyglove.hyper.OneOf`\n * :class:`pyglove.hyper.ManyOf`\n * :func:`pyglove.oneof`\n * :func:`pyglove.manyof`\n * :func:`pyglove.permutate`\n \"\"\"\n\n def _on_bound(self):\n \"\"\"On members are bound.\"\"\"\n super()._on_bound()\n if self.num_choices > len(self.candidates) and self.choices_distinct:\n raise ValueError(\n f'{len(self.candidates)} candidates cannot produce '\n f'{self.num_choices} distinct choices.')\n self._candidate_templates = [\n object_template.ObjectTemplate(c, where=self.where)\n for c in self.candidates]\n # ValueSpec for candidate.\n self._value_spec = None\n\n def _update_children_paths(\n self, old_path: object_utils.KeyPath, new_path: object_utils.KeyPath):\n \"\"\"Customized logic to update children paths.\"\"\"\n super()._update_children_paths(old_path, new_path)\n for t in self._candidate_templates:\n t.root_path = self.sym_path\n\n @property\n def candidate_templates(self):\n \"\"\"Returns candidate templates.\"\"\"\n return self._candidate_templates\n\n @property\n def is_leaf(self) -> bool:\n \"\"\"Returns whether this is a leaf node.\"\"\"\n for t in self._candidate_templates:\n if not t.is_constant:\n return False\n return True\n\n def dna_spec(self,\n location: Optional[object_utils.KeyPath] = None) -> geno.Choices:\n \"\"\"Returns corresponding DNASpec.\"\"\"\n return geno.Choices(\n num_choices=self.num_choices,\n candidates=[ct.dna_spec() for ct in self._candidate_templates],\n literal_values=[self._literal_value(c)\n for i, c in enumerate(self.candidates)],\n distinct=self.choices_distinct,\n sorted=self.choices_sorted,\n hints=self.hints,\n name=self.name,\n location=location or object_utils.KeyPath())\n\n def _literal_value(\n self, candidate: Any, max_len: int = 120) -> Union[int, float, str]:\n \"\"\"Returns literal value for candidate.\"\"\"\n if isinstance(candidate, numbers.Number):\n return candidate\n\n literal = object_utils.format(candidate, compact=True,\n hide_default_values=True,\n hide_missing_values=True,\n strip_object_id=True)\n if len(literal) > max_len:\n literal = literal[:max_len - 3] + '...'\n return literal\n\n def _decode(self) -> List[Any]:\n \"\"\"Decode a DNA into a list of object.\"\"\"\n dna = self._dna\n if self.num_choices == 1:\n # Single choice.\n if not isinstance(dna.value, int):\n raise ValueError(\n object_utils.message_on_path(\n f'Did you forget to specify values for conditional choices?\\n'\n f'Expect integer for {self.__class__.__name__}. '\n f'Encountered: {dna!r}.', self.sym_path))\n if dna.value >= len(self.candidates):\n raise ValueError(\n object_utils.message_on_path(\n f'Choice out of range. Value: {dna.value!r}, '\n f'Candidates: {len(self.candidates)}.', self.sym_path))\n choices = [self._candidate_templates[dna.value].decode(\n geno.DNA(None, dna.children))]\n else:\n # Multi choices.\n if len(dna.children) != self.num_choices:\n raise ValueError(\n object_utils.message_on_path(\n f'Number of DNA child values does not match the number of '\n f'choices. 
Child values: {dna.children!r}, '\n f'Choices: {self.num_choices}.', self.sym_path))\n if self.choices_distinct or self.choices_sorted:\n sub_dna_values = [s.value for s in dna]\n if (self.choices_distinct\n and len(set(sub_dna_values)) != len(dna.children)):\n raise ValueError(\n object_utils.message_on_path(\n f'DNA child values should be distinct. '\n f'Encountered: {sub_dna_values}.', self.sym_path))\n if self.choices_sorted and sorted(sub_dna_values) != sub_dna_values:\n raise ValueError(\n object_utils.message_on_path(\n f'DNA child values should be sorted. '\n f'Encountered: {sub_dna_values}.', self.sym_path))\n choices = []\n for i, sub_dna in enumerate(dna):\n if not isinstance(sub_dna.value, int):\n raise ValueError(\n object_utils.message_on_path(\n f'Choice value should be int. '\n f'Encountered: {sub_dna.value}.',\n object_utils.KeyPath(i, self.sym_path)))\n if sub_dna.value >= len(self.candidates):\n raise ValueError(\n object_utils.message_on_path(\n f'Choice out of range. Value: {sub_dna.value}, '\n f'Candidates: {len(self.candidates)}.',\n object_utils.KeyPath(i, self.sym_path)))\n choices.append(self._candidate_templates[sub_dna.value].decode(\n geno.DNA(None, sub_dna.children)))\n return choices\n\n def encode(self, value: List[Any]) -> geno.DNA:\n \"\"\"Encode a list of values into DNA.\n\n Example::\n\n # DNA of an object containing a single OneOf.\n # {'a': 1} => DNA(0)\n {\n 'a': one_of([1, 2])\n }\n\n\n # DNA of an object containing multiple OneOfs.\n # {'b': 1, 'c': bar} => DNA([0, 1])\n {\n 'b': pg.oneof([1, 2]),\n 'c': pg.oneof(['foo', 'bar'])\n }\n\n # DNA of an object containing conditional space.\n # {'a': {'b': 1} => DNA(0, 0, 0)])\n # {'a': {'b': [4, 7]} => DNA(1, [(0, 1), 2])\n # {'a': {'b': 'bar'} => DNA(2)\n {\n 'a': {\n 'b': pg.oneof([\n pg.oneof([\n pg.oneof([1, 2]),\n pg.oneof(3, 4)]),\n pg.manyof(2, [\n pg.oneof([4, 5]),\n 6,\n 7\n ]),\n ]),\n 'bar',\n ])\n }\n }\n\n Args:\n value: A list of value that can match choice candidates.\n\n Returns:\n Encoded DNA.\n\n Raises:\n ValueError if value cannot be encoded.\n \"\"\"\n if not isinstance(value, list):\n raise ValueError(\n object_utils.message_on_path(\n f'Cannot encode value: value should be a list type. '\n f'Encountered: {value!r}.', self.sym_path))\n choices = []\n if self.num_choices is not None and len(value) != self.num_choices:\n raise ValueError(\n object_utils.message_on_path(\n f'Length of input list is different from the number of choices '\n f'({self.num_choices}). Encountered: {value}.', self.sym_path))\n for v in value:\n choice_id = None\n child_dna = None\n for i, b in enumerate(self._candidate_templates):\n succeeded, child_dna = b.try_encode(v)\n if succeeded:\n choice_id = i\n break\n if child_dna is None:\n raise ValueError(\n object_utils.message_on_path(\n f'Cannot encode value: no candidates matches with '\n f'the value. 
Value: {v!r}, Candidates: {self.candidates}.',\n self.sym_path))\n choices.append(geno.DNA(choice_id, [child_dna]))\n return geno.DNA(None, choices)\n\n\n@symbolic.members(\n [],\n init_arg_list=[\n 'num_choices', 'candidates', 'choices_distinct',\n 'choices_sorted', 'hints'\n ],\n # TODO(daiyip): Change to 'ManyOf' once existing code migrates to ManyOf.\n serialization_key='hyper.ManyOf',\n additional_keys=['pyglove.generators.genetic.ChoiceList']\n)\nclass ManyOf(Choices):\n \"\"\"N Choose K.\n\n Example::\n\n # Chooses 2 distinct candidates.\n v = pg.manyof(2, [1, 2, 3])\n\n # Chooses 2 non-distinct candidates.\n v = pg.manyof(2, [1, 2, 3], distinct=False)\n\n # Chooses 2 distinct candidates sorted by their indices.\n v = pg.manyof(2, [1, 2, 3], sorted=True)\n\n # Permutates the candidates.\n v = pg.permutate([1, 2, 3])\n\n # A hierarchical categorical choice:\n v2 = pg.manyof(2, [\n 'foo',\n 'bar',\n pg.oneof([1, 2, 3])\n ])\n\n See also:\n\n * :func:`pyglove.manyof`\n * :func:`pyglove.permutate`\n * :class:`pyglove.hyper.Choices`\n * :class:`pyglove.hyper.OneOf`\n * :class:`pyglove.hyper.Float`\n * :class:`pyglove.hyper.CustomHyper`\n \"\"\"\n\n def custom_apply(\n self,\n path: object_utils.KeyPath,\n value_spec: pg_typing.ValueSpec,\n allow_partial: bool,\n child_transform: Optional[Callable[\n [object_utils.KeyPath, pg_typing.Field, Any], Any]] = None\n ) -> Tuple[bool, 'Choices']:\n \"\"\"Validate candidates during value_spec binding time.\"\"\"\n # Check if value_spec directly accepts `self`.\n if value_spec.value_type and isinstance(self, value_spec.value_type):\n return (False, self)\n\n if self._value_spec:\n src_spec = self._value_spec\n dest_spec = value_spec\n if not dest_spec.is_compatible(src_spec):\n raise TypeError(\n object_utils.message_on_path(\n f'Cannot bind an incompatible value spec {dest_spec} '\n f'to {self.__class__.__name__} with bound spec {src_spec}.',\n path))\n return (False, self)\n\n list_spec = typing.cast(\n pg_typing.List,\n pg_typing.ensure_value_spec(\n value_spec, pg_typing.List(pg_typing.Any()), path))\n if list_spec:\n for i, c in enumerate(self.candidates):\n list_spec.element.value.apply(\n c,\n self._allow_partial,\n root_path=path + f'candidates[{i}]')\n self._value_spec = list_spec\n return (False, self)\n\n\n@symbolic.members(\n [\n ('num_choices', 1)\n ],\n init_arg_list=['candidates', 'hints', 'where'],\n serialization_key='hyper.OneOf',\n additional_keys=['pyglove.generators.genetic.ChoiceValue']\n)\nclass OneOf(Choices):\n \"\"\"N Choose 1.\n\n Example::\n\n # A single categorical choice:\n v = pg.oneof([1, 2, 3])\n\n # A hierarchical categorical choice:\n v2 = pg.oneof([\n 'foo',\n 'bar',\n pg.oneof([1, 2, 3])\n ])\n\n See also:\n\n * :func:`pyglove.oneof`\n * :class:`pyglove.hyper.Choices`\n * :class:`pyglove.hyper.ManyOf`\n * :class:`pyglove.hyper.Float`\n * :class:`pyglove.hyper.CustomHyper`\n \"\"\"\n\n def _on_bound(self):\n \"\"\"Event triggered when members are bound.\"\"\"\n super()._on_bound()\n assert self.num_choices == 1\n\n def _decode(self) -> Any:\n \"\"\"Decode a DNA into an object.\"\"\"\n return super()._decode()[0]\n\n def encode(self, value: Any) -> geno.DNA:\n \"\"\"Encode a value into a DNA.\"\"\"\n # NOTE(daiyip): Single choice DNA will automatically be pulled\n # up from children to current node. 
Thus we simply returns\n # encoded DNA from parent node.\n return super().encode([value])\n\n def custom_apply(\n self,\n path: object_utils.KeyPath,\n value_spec: pg_typing.ValueSpec,\n allow_partial: bool,\n child_transform: Optional[Callable[\n [object_utils.KeyPath, pg_typing.Field, Any], Any]] = None\n ) -> Tuple[bool, 'OneOf']:\n \"\"\"Validate candidates during value_spec binding time.\"\"\"\n # Check if value_spec directly accepts `self`.\n if value_spec.value_type and isinstance(self, value_spec.value_type):\n return (False, self)\n\n if self._value_spec:\n if not value_spec.is_compatible(self._value_spec):\n raise TypeError(\n object_utils.message_on_path(\n f'Cannot bind an incompatible value spec {value_spec} '\n f'to {self.__class__.__name__} with bound '\n f'spec {self._value_spec}.', path))\n return (False, self)\n\n for i, c in enumerate(self.candidates):\n value_spec.apply(\n c,\n self._allow_partial,\n root_path=path + f'candidates[{i}]')\n self._value_spec = value_spec\n return (False, self)\n\n#\n# Helper methods for creating hyper values.\n#\n\n\ndef oneof(candidates: Iterable[Any],\n *,\n name: Optional[str] = None,\n hints: Optional[Any] = None) -> Any:\n \"\"\"N choose 1.\n\n Example::\n\n @pg.members([\n ('x', pg.typing.Int())\n ])\n class A(pg.Object):\n pass\n\n # A single categorical choice:\n v = pg.oneof([1, 2, 3])\n\n # A complex type as candidate.\n v1 = pg.oneof(['a', {'x': 1}, A(1)])\n\n # A hierarchical categorical choice:\n v2 = pg.oneof([\n 'foo',\n 'bar',\n A(pg.oneof([1, 2, 3]))\n ])\n\n See also:\n\n * :class:`pyglove.hyper.OneOf`\n * :func:`pyglove.manyof`\n * :func:`pyglove.floatv`\n * :func:`pyglove.permutate`\n * :func:`pyglove.evolve`\n\n .. note::\n\n Under symbolic mode (by default), `pg.oneof` returns a ``pg.hyper.OneOf``\n object. Under dynamic evaluation mode, which is called under the context of\n :meth:`pyglove.hyper.DynamicEvaluationContext.collect` or\n :meth:`pyglove.hyper.DynamicEvaluationContext.apply`, it evaluates to\n a concrete candidate value.\n\n To use conditional search space in dynamic evaluation mode, the candidate\n should be wrapped with a `lambda` function, which is not necessary under\n symbolic mode. For example::\n\n pg.oneof([lambda: pg.oneof([0, 1], name='sub'), 2], name='root')\n\n Args:\n candidates: Candidates to select from. Items of candidate can be any type,\n therefore it can have nested hyper primitives, which forms a hierarchical\n search space.\n name: A name that can be used to identify a decision point in the search\n space. 
This is needed when the code to instantiate the same hyper\n primitive may be called multiple times under a\n `pg.DynamicEvaluationContext.collect` context or under a\n `pg.DynamicEvaluationContext.apply` context.\n hints: An optional value which acts as a hint for the controller.\n\n Returns:\n In symbolic mode, this function returns a `ChoiceValue`.\n In dynamic evaluation mode, this function returns one of the items in\n `candidates`.\n If evaluated under a `pg.DynamicEvaluationContext.apply` scope,\n this function will return the selected candidate.\n If evaluated under a `pg.DynamicEvaluationContext.collect`\n scope, it will return the first candidate.\n \"\"\"\n return OneOf(candidates=list(candidates), name=name, hints=hints)\n\n\ndef manyof(k: int,\n candidates: Iterable[Any],\n distinct: bool = True,\n sorted: bool = False, # pylint: disable=redefined-builtin\n *,\n name: Optional[str] = None,\n hints: Optional[Any] = None,\n **kwargs) -> Any:\n \"\"\"N choose K.\n\n Example::\n\n @pg.members([\n ('x', pg.typing.Int())\n ])\n class A(pg.Object):\n pass\n\n # Chooses 2 distinct candidates.\n v = pg.manyof(2, [1, 2, 3])\n\n # Chooses 2 non-distinct candidates.\n v = pg.manyof(2, [1, 2, 3], distinct=False)\n\n # Chooses 2 distinct candidates sorted by their indices.\n v = pg.manyof(2, [1, 2, 3], sorted=True)\n\n # A complex type as candidate.\n v1 = pg.manyof(2, ['a', {'x': 1}, A(1)])\n\n # A hierarchical categorical choice:\n v2 = pg.manyof(2, [\n 'foo',\n 'bar',\n A(pg.oneof([1, 2, 3]))\n ])\n\n .. note::\n\n Under symbolic mode (by default), `pg.manyof` returns a ``pg.hyper.ManyOf``\n object. Under dynamic evaluation mode, which is called under the context of\n :meth:`pyglove.hyper.DynamicEvaluationContext.collect` or\n :meth:`pyglove.hyper.DynamicEvaluationContext.apply`, it evaluates to\n a concrete candidate value.\n\n To use conditional search space in dynamic evaluate mode, the candidate\n should be wrapped with a `lambda` function, which is not necessary under\n symbolic mode. For example::\n\n pg.manyof(2, [\n lambda: pg.oneof([0, 1], name='sub_a'),\n lambda: pg.floatv(0.0, 1.0, name='sub_b'),\n lambda: pg.manyof(2, ['a', 'b', 'c'], name='sub_c')\n ], name='root')\n\n See also:\n\n * :class:`pyglove.hyper.ManyOf`\n * :func:`pyglove.manyof`\n * :func:`pyglove.floatv`\n * :func:`pyglove.permutate`\n * :func:`pyglove.evolve`\n\n Args:\n k: number of choices to make. Should be no larger than the length of\n `candidates` unless `choice_distinct` is set to False,\n candidates: Candidates to select from. Items of candidate can be any type,\n therefore it can have nested hyper primitives, which forms a hierarchical\n search space.\n distinct: If True, each choice needs to be unique.\n sorted: If True, choices are sorted by their indices in the\n candidates.\n name: A name that can be used to identify a decision point in the search\n space. 
This is needed when the code to instantiate the same hyper\n primitive may be called multiple times under a\n `pg.DynamicEvaluationContext.collect` context or a\n `pg.DynamicEvaluationContext.apply` context.\n hints: An optional value which acts as a hint for the controller.\n **kwargs: Keyword arguments for backward compatibility.\n `choices_distinct`: Old name for `distinct`.\n `choices_sorted`: Old name for `sorted`.\n\n Returns:\n In symbolic mode, this function returns a `Choices`.\n In dynamic evaluate mode, this function returns a list of items in\n `candidates`.\n If evaluated under a `pg.DynamicEvaluationContext.apply` scope,\n this function will return a list of selected candidates.\n If evaluated under a `pg.DynamicEvaluationContext.collect`\n scope, it will return a list of the first valid combination from the\n `candidates`. For example::\n\n # Evaluates to [0, 1, 2].\n manyof(3, range(5))\n\n # Evaluates to [0, 0, 0].\n manyof(3, range(5), distinct=False)\n \"\"\"\n choices_distinct = kwargs.pop('choices_distinct', distinct)\n choices_sorted = kwargs.pop('choices_sorted', sorted)\n return ManyOf(\n num_choices=k,\n candidates=list(candidates),\n choices_distinct=choices_distinct,\n choices_sorted=choices_sorted,\n name=name,\n hints=hints)\n\n\ndef permutate(candidates: Iterable[Any],\n name: Optional[str] = None,\n hints: Optional[Any] = None) -> Any:\n \"\"\"Permuatation of candidates.\n\n Example::\n\n @pg.members([\n ('x', pg.typing.Int())\n ])\n class A(pg.Object):\n pass\n\n # Permutates the candidates.\n v = pg.permutate([1, 2, 3])\n\n # A complex type as candidate.\n v1 = pg.permutate(['a', {'x': 1}, A(1)])\n\n # A hierarchical categorical choice:\n v2 = pg.permutate([\n 'foo',\n 'bar',\n A(pg.oneof([1, 2, 3]))\n ])\n\n .. note::\n\n Under symbolic mode (by default), `pg.manyof` returns a ``pg.hyper.ManyOf``\n object. Under dynamic evaluate mode, which is called under the context of\n :meth:`pyglove.hyper.DynamicEvaluationContext.collect` or\n :meth:`pyglove.hyper.DynamicEvaluationContext.apply`, it evaluates to\n a concrete candidate value.\n\n To use conditional search space in dynamic evaluate mode, the candidate\n should be wrapped with a `lambda` function, which is not necessary under\n symbolic mode. For example::\n\n pg.permutate([\n lambda: pg.oneof([0, 1], name='sub_a'),\n lambda: pg.floatv(0.0, 1.0, name='sub_b'),\n lambda: pg.manyof(2, ['a', 'b', 'c'], name='sub_c')\n ], name='root')\n\n See also:\n\n * :class:`pyglove.hyper.ManyOf`\n * :func:`pyglove.oneof`\n * :func:`pyglove.manyof`\n * :func:`pyglove.floatv`\n * :func:`pyglove.evolve`\n\n Args:\n candidates: Candidates to select from. Items of candidate can be any type,\n therefore it can have nested hyper primitives, which forms a hierarchical\n search space.\n name: A name that can be used to identify a decision point in the search\n space. 
This is needed when the code to instantiate the same hyper\n primitive may be called multiple times under a\n `pg.DynamicEvaluationContext.collect` context or a\n `pg.DynamicEvaluationContext.apply` context.\n hints: An optional value which acts as a hint for the controller.\n\n Returns:\n In symbolic mode, this function returns a `Choices`.\n In dynamic evaluate mode, this function returns a permutation from\n `candidates`.\n If evaluated under an `pg.DynamicEvaluationContext.apply` scope,\n this function will return a permutation of candidates based on controller\n decisions.\n If evaluated under a `pg.DynamicEvaluationContext.collect`\n scope, it will return the first valid permutation.\n For example::\n\n # Evaluates to [0, 1, 2, 3, 4].\n permutate(range(5), name='numbers')\n \"\"\"\n candidates = list(candidates)\n return manyof(\n len(candidates), candidates,\n choices_distinct=True, choices_sorted=False, name=name, hints=hints)\n","repo_name":"google/pyglove","sub_path":"pyglove/core/hyper/categorical.py","file_name":"categorical.py","file_ext":"py","file_size_in_byte":22595,"program_lang":"python","lang":"en","doc_type":"code","stars":306,"dataset":"github-code","pt":"81"} +{"seq_id":"11139197940","text":"\nimport torch.nn as nn\n\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision.ops import nms\nfrom tortoise.models.retinanet.utils import BasicBlock, Bottleneck, BBoxTransform, ClipBoxes\nfrom tortoise.models.retinanet.anchors import Anchors\nfrom tortoise.models.retinanet import losses\nfrom tortoise.models.retinanet.dataloader import CocoDataset, Resizer, AspectRatioBasedSampler, Normalizer, Augmenter, collater\nfrom tortoise.utils import timetaken, download_file\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport torch\n\nfrom pycocotools.cocoeval import COCOeval\nimport os, json, time, copy, yaml\nfrom tqdm import tqdm\n\n# https://github.com/Delgan/loguru\nfrom loguru import logger\n# logger.info(\"If you're using Python {}, prefer {feature} of course!\", 3.6, feature=\"f-strings\")\n\nclass PyramidFeatures(nn.Module):\n def __init__(self, C3_size, C4_size, C5_size, feature_size=256):\n super(PyramidFeatures, self).__init__()\n\n # upsample C5 to get P5 from the FPN paper\n self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')\n self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # add P5 elementwise to C4\n self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')\n self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # add P4 elementwise to C3\n self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # \"P6 is obtained via a 3x3 stride-2 conv on C5\"\n self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)\n\n # \"P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6\"\n self.P7_1 = nn.ReLU()\n self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)\n\n def forward(self, inputs):\n C3, C4, C5 = inputs\n\n P5_x = self.P5_1(C5)\n P5_upsampled_x = self.P5_upsampled(P5_x)\n P5_x = self.P5_2(P5_x)\n\n P4_x = self.P4_1(C4)\n P4_x = P5_upsampled_x + P4_x\n P4_upsampled_x = 
self.P4_upsampled(P4_x)\n P4_x = self.P4_2(P4_x)\n\n P3_x = self.P3_1(C3)\n P3_x = P3_x + P4_upsampled_x\n P3_x = self.P3_2(P3_x)\n\n P6_x = self.P6(C5)\n\n P7_x = self.P7_1(P6_x)\n P7_x = self.P7_2(P7_x)\n\n return [P3_x, P4_x, P5_x, P6_x, P7_x]\nclass RegressionModel(nn.Module):\n def __init__(self, num_features_in, num_anchors=9, feature_size=256):\n super(RegressionModel, self).__init__()\n\n self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)\n self.act1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act2 = nn.ReLU()\n\n self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act3 = nn.ReLU()\n\n self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act4 = nn.ReLU()\n\n self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=3, padding=1)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.act1(out)\n\n out = self.conv2(out)\n out = self.act2(out)\n\n out = self.conv3(out)\n out = self.act3(out)\n\n out = self.conv4(out)\n out = self.act4(out)\n\n out = self.output(out)\n\n # out is B x C x W x H, with C = 4*num_anchors\n out = out.permute(0, 2, 3, 1)\n\n return out.contiguous().view(out.shape[0], -1, 4)\nclass ClassificationModel(nn.Module):\n def __init__(self, num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256):\n super(ClassificationModel, self).__init__()\n\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n\n self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)\n self.act1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act2 = nn.ReLU()\n\n self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act3 = nn.ReLU()\n\n self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act4 = nn.ReLU()\n\n self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1)\n self.output_act = nn.Sigmoid()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.act1(out)\n\n out = self.conv2(out)\n out = self.act2(out)\n\n out = self.conv3(out)\n out = self.act3(out)\n\n out = self.conv4(out)\n out = self.act4(out)\n\n out = self.output(out)\n out = self.output_act(out)\n\n # out is B x C x W x H, with C = n_classes + n_anchors\n out1 = out.permute(0, 2, 3, 1)\n\n batch_size, width, height, channels = out1.shape\n\n out2 = out1.view(batch_size, width, height, self.num_anchors, self.num_classes)\n\n return out2.contiguous().view(x.shape[0], -1, self.num_classes)\nclass ResNet(nn.Module):\n\n def __init__(self, num_classes, block, layers):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n if block == BasicBlock:\n fpn_sizes = [self.layer2[layers[1] - 1].conv2.out_channels, self.layer3[layers[2] - 1].conv2.out_channels,\n self.layer4[layers[3] - 1].conv2.out_channels]\n elif block == Bottleneck:\n fpn_sizes = [self.layer2[layers[1] - 1].conv3.out_channels, 
self.layer3[layers[2] - 1].conv3.out_channels,\n self.layer4[layers[3] - 1].conv3.out_channels]\n else:\n raise ValueError(f\"Block type {block} not understood\")\n\n self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])\n self.regressionModel = RegressionModel(256)\n self.classificationModel = ClassificationModel(256, num_classes=num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n prior = 0.01\n self.classificationModel.output.weight.data.fill_(0)\n self.classificationModel.output.bias.data.fill_(-math.log((1.0 - prior) / prior))\n self.regressionModel.output.weight.data.fill_(0)\n self.regressionModel.output.bias.data.fill_(0)\n self.freeze_bn()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion)\n )\n layers = [block(self.inplanes, planes, stride, downsample)]\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def freeze_bn(self):\n '''Freeze BatchNorm layers.'''\n for layer in self.modules():\n if isinstance(layer, nn.BatchNorm2d):\n layer.eval()\n\n def forward(self, inputs):\n img_batch = inputs\n x = self.conv1(img_batch)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x1 = self.layer1(x)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n\n features = self.fpn([x2, x3, x4])\n regression = torch.cat([self.regressionModel(feature) for feature in features], dim=1)\n classification = torch.cat([self.classificationModel(feature) for feature in features], dim=1)\n return regression, classification\n\nclass RetinanetModel:\n def __init__(self):\n self.model_dir = \"weights\"\n self.exp_name = \"experiment\"\n self.model_config = {\n \"resnet18\": {\n \"block\": BasicBlock,\n \"layer\": [2, 2, 2, 2],\n \"resnet_url\": \"https://download.pytorch.org/models/resnet18-5c106cde.pth\",\n \"retinanet_url\": \"/media/nk/PortableSSD1/RnD/pytorch-multimodels/weights/resnet18/experiment_7/best.pth\",\n \"config\": \"/media/nk/PortableSSD1/RnD/pytorch-multimodels/data/hyps/retinanet/hyp.scratch-low.yaml\",\n \"sha256\": \"\"\n },\n \"resnet34\": {\n \"block\": BasicBlock,\n \"layer\": [3, 4, 6, 3],\n \"resnet_url\": \"https://download.pytorch.org/models/resnet34-333f7ec4.pth\",\n \"retinanet_url\": \"https://download.pytorch.org/models/resnet34-333f7ec4.pth\",\n \"sha256\": \"\"\n },\n \"resnet50\": {\n \"block\": Bottleneck,\n \"layer\": [3, 4, 6, 3],\n \"resnet_url\": \"https://download.pytorch.org/models/resnet50-19c8e357.pth\",\n \"retinanet_url\": \"https://download.pytorch.org/models/resnet50-19c8e357.pth\",\n \"sha256\": \"\"\n },\n \"resnet101\": {\n \"block\": Bottleneck,\n \"layer\": [3, 4, 23, 3],\n \"resnet_url\": \"https://download.pytorch.org/models/resnet101-5d3b4d8f.pth\",\n \"retinanet_url\": \"https://download.pytorch.org/models/resnet101-5d3b4d8f.pth\",\n \"sha256\": \"\"\n },\n \"resnet152\": {\n \"block\": Bottleneck,\n \"layer\": [3, 8, 36, 3],\n \"resnet_url\": \"https://download.pytorch.org/models/resnet152-b121ed2d.pth\",\n \"retinanet_url\": 
\"https://download.pytorch.org/models/resnet152-b121ed2d.pth\",\n \"sha256\": \"\"\n }\n }\n \n @classmethod\n def load_config(self, backbone, filename, model_dir):\n from munch import DefaultMunch\n if not os.path.exists(f\"{model_dir}/{filename}\"):\n download_file(self.model_config[backbone][\"config\"], filename, model_dir)\n with open(f\"{model_dir}/{filename}\", \"r\") as stream:\n try: config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc); exit()\n # https://stackoverflow.com/a/24852544\n return DefaultMunch.fromDict(config) \n \n def get_info(self, backbone): return self.model_config[backbone][\"retinanet_url\"]\n @classmethod\n def from_scratch(self, backbone, model_dir=None, device=\"cuda\"):\n self.__init__(self)\n if model_dir: self.model_dir = model_dir \n if backbone not in self.model_config.keys(): \n logger.debug(f\"Backbone not Found: {backbone}\")\n assert False, \"backbone Not found\" \n \n self.args = self.load_config(backbone, \"config.yaml\", os.path.join(self.model_dir, backbone))\n self.args.device = device\n os.makedirs(os.path.join(self.model_dir, backbone), exist_ok=True)\n self.args.backbone = backbone\n self.model = ResNet(self.args.num_classes, self.model_config[backbone][\"block\"], self.model_config[backbone][\"layer\"])\n self.model.load_state_dict(model_zoo.load_url(self.model_config[backbone]['resnet_url'], model_dir=os.path.join(self.model_dir, backbone)), strict=False)\n self.args.exp_name = f\"{os.path.join(self.model_dir, backbone)}/{self.exp_name}_{len(os.listdir(os.path.join(self.model_dir, backbone)))}\"\n os.makedirs(f\"{self.args.exp_name }/\", exist_ok=True)\n logger.info(f\"Export directory: {self.args.exp_name}\")\n return self()\n \n @classmethod\n def from_pretrained(self, backbone, model_dir=None, exp_name=None, filename=\"best.pth\", device=\"cuda\"):\n self.__init__(self)\n if model_dir: self.model_dir = model_dir \n if backbone not in self.model_config.keys(): \n logger.debug(f\"Backbone not Found: {backbone}\")\n assert False, \"backbone Not found\"\n\n self.args = self.load_config(backbone, \"config.yaml\", os.path.join(self.model_dir, backbone))\n self.args.device = device\n os.makedirs(os.path.join(self.model_dir, backbone), exist_ok=True)\n self.args.backbone = backbone\n self.fun_exp_name = exp_name\n self.model = ResNet(self.args.num_classes, self.model_config[backbone][\"block\"], self.model_config[backbone][\"layer\"])\n if exp_name:\n self.model.load_state_dict(torch.load(os.path.join(self.model_dir, backbone, exp_name, filename))[\"state_dict\"])\n else:\n # self.model.load_state_dict(model_zoo.load_url(self.model_config[backbone]['retinanet_url'])[\"state_dict\"], model_dir=os.path.join(self.model_dir, backbone))\n self.model.load_state_dict(torch.load(self.model_config[backbone]['retinanet_url'])[\"state_dict\"])\n self.args.exp_name = f\"{os.path.join(self.model_dir, backbone)}/{exp_name}\" if exp_name else \\\n f\"{os.path.join(self.model_dir, backbone)}/{self.exp_name}_{len(os.listdir(os.path.join(self.model_dir, backbone)))}\"\n os.makedirs(f\"{self.args.exp_name }/\", exist_ok=True)\n logger.info(f\"Export directory: {self.args.exp_name}\")\n return self()\n \n @timetaken\n def run_oneepoch(self, dataloader_train, scaler, epoch):\n epoch_loss = torch.zeros(len(dataloader_train))\n pbar = tqdm(enumerate(dataloader_train), total=len(dataloader_train), desc=\"Epoch: {0}\".format(epoch))\n self.focalLoss = losses.FocalLoss()\n self.anchors = Anchors()\n for iteration, data in pbar:\n 
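# one AMP step (per the pattern below): autocast forward, scaled backward, unscale + clip, then scaler-managed optimizer step\n            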
self.optimizer.zero_grad()\n            with torch.cuda.amp.autocast():\n                regressions, classifications = self.model(data['img'].to(self.args.device).float())\n                classification_loss, regression_loss = self.focalLoss(classifications, regressions, self.anchors(data['img'].to(self.args.device).float()), data['annot']) \n            classification_loss, regression_loss = classification_loss.mean(), regression_loss.mean()\n            loss = classification_loss + regression_loss\n            loss = loss * 2\n            scaler.scale(loss).backward()\n            # unscale before clipping so the max-norm applies to the true gradients\n            scaler.unscale_(self.optimizer)\n            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.1)\n            scaler.step(self.optimizer)\n            scaler.update()\n\n            epoch_loss[iteration] = loss.cpu().item()\n            tb_lr = [x['lr'] for x in self.optimizer.param_groups][0]\n            pbar.set_postfix_str(\"Loss: {:.6f} lr: {:.10f}\".format(epoch_loss.sum()/(iteration + 1), tb_lr))\n        return epoch_loss.mean().item()\n    \n    @timetaken\n    def train(self, best_acc=0.0):\n        dataset_train = CocoDataset(self.args.coco_path, set_name='train2017',transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))\n        sampler = AspectRatioBasedSampler(dataset_train, batch_size=self.args.train_batchsize, drop_last=False)\n        dataloader_train = DataLoader(dataset_train, num_workers=8, collate_fn=collater, batch_sampler=sampler)\n        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay, betas=(0.9, 0.95))\n        if self.fun_exp_name: self.optimizer.load_state_dict(torch.load(os.path.join(self.args.exp_name, \"best.pth\"))[\"optimizer\"])\n        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=3, verbose=True)\n        scaler = torch.cuda.amp.GradScaler()\n        self.model = torch.nn.DataParallel(self.model); self.training = True\n        for epoch in range(self.args.start_epoch, self.args.total_epoch):\n            self.model.train()\n            self.model.module.freeze_bn()\n            epoch_loss = self.run_oneepoch(dataloader_train, scaler, epoch)\n            ap50_95, ap50 = self.validation()\n            # ReduceLROnPlateau needs the monitored metric\n            self.scheduler.step(ap50_95)\n            if ap50_95 > best_acc: \n                torch.save({\n                    \"iterations\": epoch * len(dataloader_train),\n                    \"state_dict\": self.model.module.state_dict(),\n                    \"optimizer\": self.optimizer.state_dict(),\n                    \"acc\": [ap50_95, ap50]\n                }, f\"{self.args.exp_name}/best.pth\")\n                best_acc = copy.deepcopy(ap50_95)\n            torch.save({\n                \"iterations\": epoch * len(dataloader_train),\n                \"state_dict\": self.model.module.state_dict(),\n                \"optimizer\": self.optimizer.state_dict(),\n                \"acc\": [ap50_95, ap50]\n            }, f\"{self.args.exp_name}/last.pth\")\n            time.sleep(300)\n\n    @torch.no_grad()\n    def validation(self, model=None, threshold=0.05):\n        self.model = model if isinstance(model, torch.nn.Module) else self.model\n        self.model.eval().to(self.args.device)\n        dataset = CocoDataset(self.args.coco_path, set_name='val2017', transform=transforms.Compose([Normalizer(), Resizer()]))\n        results, image_ids = [], []\n        self.anchors = Anchors()\n        self.regressBoxes = BBoxTransform()\n        self.clipBoxes = ClipBoxes()\n        for index in tqdm(range(len(dataset)), desc=\"Validation \"):\n            if index == 500 and not threshold: break\n            data = dataset[index]\n            scale = data['scale']\n            img_batch = data['img'].permute(2, 0, 1).to(self.args.device).float().unsqueeze(dim=0)\n            anchors = self.anchors(img_batch)\n            regression, classification = self.model(img_batch)\n            transformed_anchors = self.regressBoxes(anchors, regression)\n            transformed_anchors = self.clipBoxes(transformed_anchors, img_batch)\n\n            finalResult = [[], [], []]\n            finalScores = torch.Tensor([])\n            finalAnchorBoxesIndexes = torch.Tensor([]).long()\n            finalAnchorBoxesCoordinates 
= torch.Tensor([])\n if torch.cuda.is_available():\n finalScores = finalScores.cuda()\n finalAnchorBoxesIndexes = finalAnchorBoxesIndexes.cuda()\n finalAnchorBoxesCoordinates = finalAnchorBoxesCoordinates.cuda()\n\n for i in range(classification.shape[2]):\n scores = torch.squeeze(classification[:, :, i])\n scores_over_thresh = (scores > 0.05)\n if scores_over_thresh.sum() == 0:\n # no boxes to NMS, just continue\n continue\n\n scores = scores[scores_over_thresh]\n anchorBoxes = torch.squeeze(transformed_anchors)\n anchorBoxes = anchorBoxes[scores_over_thresh]\n anchors_nms_idx = nms(anchorBoxes, scores, 0.5)\n\n finalResult[0].extend(scores[anchors_nms_idx])\n finalResult[1].extend(torch.tensor([i] * anchors_nms_idx.shape[0]))\n finalResult[2].extend(anchorBoxes[anchors_nms_idx])\n\n finalScores = torch.cat((finalScores, scores[anchors_nms_idx]))\n finalAnchorBoxesIndexesValue = torch.tensor([i] * anchors_nms_idx.shape[0])\n if torch.cuda.is_available():\n finalAnchorBoxesIndexesValue = finalAnchorBoxesIndexesValue.cuda()\n finalAnchorBoxesIndexes = torch.cat((finalAnchorBoxesIndexes, finalAnchorBoxesIndexesValue))\n finalAnchorBoxesCoordinates = torch.cat((finalAnchorBoxesCoordinates, anchorBoxes[anchors_nms_idx]))\n scores, labels, boxes = [finalScores, finalAnchorBoxesIndexes, finalAnchorBoxesCoordinates]\n scores, labels, boxes = scores.cpu(), labels.cpu(), boxes.cpu()\n boxes /= scale\n if boxes.shape[0] > 0:\n boxes[:, 2] -= boxes[:, 0]\n boxes[:, 3] -= boxes[:, 1]\n for box_id in range(boxes.shape[0]):\n score = float(scores[box_id])\n label = int(labels[box_id])\n box = boxes[box_id, :]\n if score < 0.05: break\n image_result = {\n 'image_id' : dataset.image_ids[index],\n 'category_id' : dataset.label_to_coco_label(label),\n 'score' : float(score),\n 'bbox' : box.tolist(),\n }\n results.append(image_result)\n image_ids.append(dataset.image_ids[index])\n if not len(results):\n logger.debug(\"No Object is detected...!!!\"); return 0.0, 0.0\n json.dump(results, open(f'{self.args.exp_name}/{dataset.set_name}_bbox_results.json', 'w'), indent=4)\n coco_true = dataset.coco\n coco_pred = coco_true.loadRes(f'{self.args.exp_name}/{dataset.set_name}_bbox_results.json')\n coco_eval = COCOeval(coco_true, coco_pred, 'bbox')\n coco_eval.params.imgIds = image_ids\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n \n # ap50_95, ap50\n return coco_eval.stats[0], coco_eval.stats[1]\n\n @timetaken\n def export(self, dummy_input, file_path):\n self.model.eval()\n torch.onnx.export(self.model, dummy_input, file_path, opset_version=12)\n\n # @timetaken\n def quantize(self, dummy_input: torch.nn.Module):\n from aimet_torch.model_preparer import prepare_model\n from aimet_common.defs import QuantScheme\n from aimet_torch.quantsim import QuantizationSimModel\n device = dummy_input.device\n # dummy_input = torch.randn(dummy_input)\n model = prepare_model(self.model).to(device)\n download_file(\n \"https://raw.githubusercontent.com/quic/aimet/develop/TrainingExtensions/common/src/python/aimet_common/quantsim_config/default_config_per_channel.json\",\n \"pcq_config.json\", os.path.join(self.model_dir, self.args.backbone)\n )\n quant_sim = QuantizationSimModel(model, dummy_input=dummy_input,\n quant_scheme=QuantScheme.post_training_tf_enhanced,\n default_param_bw=8, default_output_bw=8,\n config_file=os.path.join(self.model_dir, self.args.backbone, \"pcq_config.json\"))\n quant_sim.compute_encodings(self.validation, forward_pass_callback_args=(None))\n self.validation(quant_sim.model)\n 
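# export the quantization-sim model and its encodings so they can be consumed offline\n        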
os.makedirs(os.path.join(self.args.exp_name, \"quant\"), exist_ok=True)\n quant_sim.model.cpu()\n quant_sim.export(path=os.path.join(self.args.exp_name, \"quant\"), filename_prefix=f'quantized_{self.args.backbone}', dummy_input=dummy_input.cpu())\n \n \n @timetaken\n def predict(img, save_image=True):\n return []\n \n ","repo_name":"nkdatascientist/tortoise-models","sub_path":"tortoise/models/retinanet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":23706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3774007533","text":"import datetime as dt\nimport plotly.graph_objects as go\nimport pandas as pd\n\n\nclass PriceIndependentIndicator:\n\n def __init__(self, df: pd.DataFrame, base_indicator: str):\n self.df_plot = df.copy()\n self.fig = go.Figure()\n self.base_indicator = base_indicator\n\n self.add_timestring()\n self.create_line_fig(base_indicator)\n\n\n def add_timestring(self):\n \"\"\"Hack for removing weekend candles\"\"\"\n self.df_plot['sTime'] = [dt.datetime.strftime(x, \"s%y-%m-%d %H:%M\") for x in self.df_plot.time]\n\n\n def create_line_fig(self, base_indicator):\n self.fig.add_trace(go.Scatter(\n x=self.df_plot.sTime,\n y=self.df_plot[f'{base_indicator}'],\n mode='lines',\n name=f'{base_indicator}'\n ))\n\n\n def add_secondary_line_based_indicators(self, indicators: list):\n for indicator in indicators:\n self.fig.add_trace(go.Scatter(\n x=self.df_plot.sTime,\n y=self.df_plot[f\"{indicator}\"],\n mode='lines',\n name=f'{indicator}'\n ))\n\n def add_bar_based_indicators(self, indicators: list):\n for indicator in indicators:\n self.fig.add_trace(go.Bar(\n x=self.df_plot.sTime,\n y=self.df_plot[f\"{indicator}\"],\n name=f'{indicator}'\n ))\n\n\n # Show and Customise plot #\n\n\n def update_layout(self, width, height, ticks):\n self.fig.update_xaxes(\n gridcolor=\"#1f292f\",\n nticks=ticks,\n )\n\n self.fig.update_yaxes(\n gridcolor=\"#1f292f\"\n )\n\n self.fig.update_layout(\n width=width,\n height=height,\n paper_bgcolor=\"#2c303c\",\n plot_bgcolor=\"#2c303c\",\n margin=dict(l=10, r=10, b=10, t=10),\n font=dict(size=8, color=\"#e1e1e1\")\n )\n\n def show_plot(self, width=1600, height=900, ticks=5):\n self.update_layout(width, height, ticks)\n self.fig.show()","repo_name":"LMBroadhurst/algobot_statistical_finance_dashboard","sub_path":"oanda_trading_bot/charting/price_independent_indicator.py","file_name":"price_independent_indicator.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69910862984","text":"import pandas as pd\n\nimport math\n\n\nnumber_of_members = 0\nsm =['Sheet1','Sheet3','Sheet4','Sheet5'] # make sure to add the names of all the sheets here under sheetname variable sm.\ncn = ['HQ','P BTY','Q BTY','R BTY'] # make sure to add all the names of the companies.\ncount=0 \nm=0\n\nwriter = pd.ExcelWriter('final_attendence.xlsx', engine='xlsxwriter') # instead of 'final.xlsx', enter path of the new file to be created (This is where the data has to be saved)\nfor i in sm:\n \n status=[]\n df = pd.read_excel('test_final_1.xlsx', sheet_name= i) # instead of 'attendence.xlsx', enter path of the attendance sheet from which data has to be taken.\n \n\n #convert each column to list, remove NAN values \n name = df.columns[3]\n filtered_column_names = df.loc[9:,name].loc[~df[name].isin(['PARBAT ALI','Name'])].tolist()\n ecode = df.columns[2]\n filtered_column_code = 
df.loc[9:,ecode].loc[~df[ecode].isin(['E. Code'])].tolist()\n TD_time = df.columns[10] #TD Time is same as A in time\n filtered_column_TD = df.loc[9:,TD_time].loc[~df[TD_time].isin(['A. InTime'])].fillna(-1).tolist()\n SE_time = df.columns[8] #SE time is same as S out time\n filtered_column_SE = df.loc[9:,SE_time].loc[~df[SE_time].isin(['S. OutTime'])].fillna(-1).tolist()\n Sin = df.columns[6]\n filtered_column_Sin = df.iloc[9:df[Sin].last_valid_index() + 1, df.columns.get_loc(Sin)].fillna(-1).tolist()\n\n\n\n size = len(filtered_column_names)\n members =[]\n x=0\n while x = 0 else len(items_copy) + index\n indices.append(normalized_index)\n indices.sort(reverse=True)\n\n if self.whitelist:\n index = len(items)-1\n while index >= 0:\n if index not in indices:\n del items_copy[index]\n index -= 1\n else:\n for index in indices:\n if index < len(items_copy):\n del items_copy[index]\n\n return items_copy\n","repo_name":"narranoid/Blender-Texture-Grapher","sub_path":"stringfiddle/format/filter/index_filter.py","file_name":"index_filter.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18661656687","text":"import unittest\nfrom unittest.mock import patch\nfrom io import StringIO\nfrom gradescope_utils.autograder_utils.decorators import weight, visibility\ntry:\n import hw11 as hw11\nexcept Exception:\n raise Exception(f'Could not process your file, remember to code under the main() function, not call the main() function, and set the file name correctly.')\nimport solution.hw11 as solution\nimport random\n\nclass TestHW(unittest.TestCase):\n def setUpAns(self):\n solution.main()\n ansFile = open(\"[Solution]StudentSubmissions.txt\", \"r\")\n ansData = ansFile.readlines()\n return ansData\n \n def setUpSub(self):\n hw11.main()\n subFile = open(\"StudentSubmissions.txt\", \"r\")\n subData = subFile.readlines()\n return subData\n \n # @visibility(\"hidden\")\n @weight(9) \n def test_hw(self):\n ans = self.setUpAns()\n submission = self.setUpSub()\n self.assertEqual(ans, submission)\n \n","repo_name":"pnngocdoan/autograder_SUA","sub_path":"Week 4/hw11/tests/test_hw.py","file_name":"test_hw.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14905076895","text":"from tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Conv2D, Dropout, BatchNormalization, Input, SeparableConv2D, GlobalAveragePooling2D, Dense, MaxPooling2D, Activation, Flatten\nfrom tensorflow.keras.layers import add as add_concat\nfrom keras import backend as K\nfrom constants import strategy, bnmomemtum, SHAPE\nwith strategy.scope():\n def fire(x, filters, kernel_size):\n if not isinstance(filters, list):\n filters = [filters, filters]\n x = SeparableConv2D(filters[0], kernel_size, padding='same', use_bias=False)(x)\n x = BatchNormalization(axis=channel_axis, center=True, scale=False, momentum=bnmomemtum)(x)\n x = Activation('relu')(x)\n x = SeparableConv2D(filters[1], kernel_size, padding='same', use_bias=False)(x)\n return BatchNormalization(axis=channel_axis, center=True, scale=False, momentum=bnmomemtum)(x)\n\n def fire_module_separable_conv(filters, kernel_size=(3, 3)):\n return lambda x: fire(x, filters, kernel_size)\n\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n img_input = Input(shape=SHAPE)\n\n x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)\n x = 
BatchNormalization(axis=channel_axis, center=True, scale=False, momentum=bnmomemtum)(x)\n x = Activation('relu')(x)\n\n x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)\n x = BatchNormalization(axis=channel_axis, center=True, scale=False, momentum=bnmomemtum)(x)\n x = Activation('relu')(x)\n\n residual = Conv2D(128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)\n residual = BatchNormalization(axis=channel_axis, center=True, scale=False, momentum=bnmomemtum)(residual)\n\n x = fire_module_separable_conv(128)(x)\n\n x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)\n x = add_concat([x, residual])\n\n residual = Conv2D(256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)\n residual = BatchNormalization(axis=channel_axis, center=True, scale=False, momentum=bnmomemtum)(residual)\n\n x = Activation('relu')(x)\n x = fire_module_separable_conv(256)(x)\n\n x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)\n x = add_concat([x, residual])\n\n for i in range(4):\n residual = x\n\n x = Activation('relu')(x)\n x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)\n x = BatchNormalization(axis=channel_axis, center=True, scale=False, momentum=bnmomemtum)(x)\n x = Activation('relu')(x)\n x = fire_module_separable_conv(256)(x)\n\n x = add_concat([x, residual])\n\n\n x = fire_module_separable_conv([728, 1024])(x)\n x = Activation('relu')(x)\n y = GlobalAveragePooling2D()(x)\n\n y = Dense(3096)(y)\n y = Activation('relu')(y)\n y = Dropout(0.3)(y)\n\n y = Dense(1548)(y)\n y = Activation('relu')(y)\n y = Dropout(0.3)(y)\n\n head_root = Dense(168, activation = 'softmax', name='head_root')(y)\n head_vowel = Dense(11, activation = 'softmax', name='head_vowel')(y)\n head_consonant = Dense(7, activation = 'softmax', name='head_consonant')(y)\n\n model = Model(inputs=img_input, outputs=[head_root, head_vowel, head_consonant])\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n","repo_name":"MaximDrobchak/ML-Bengali_Handwritten_Grapheme_Classification","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20252199710","text":"from typing import Sequence\n\nimport attrs\n\nfrom apischema import deserialize, serialize, settings\nfrom apischema.json_schema import deserialization_schema\nfrom apischema.objects import ObjectField\n\nprev_default_object_fields = settings.default_object_fields\n\n\ndef attrs_fields(cls: type) -> Sequence[ObjectField] | None:\n if hasattr(cls, \"__attrs_attrs__\"):\n return [\n ObjectField(\n a.name, a.type, required=a.default == attrs.NOTHING, default=a.default\n )\n for a in getattr(cls, \"__attrs_attrs__\")\n ]\n else:\n return prev_default_object_fields(cls)\n\n\nsettings.default_object_fields = attrs_fields\n\n\n@attrs.define\nclass Foo:\n bar: int\n\n\nassert deserialize(Foo, {\"bar\": 0}) == Foo(0)\nassert serialize(Foo, Foo(0)) == {\"bar\": 0}\nassert deserialization_schema(Foo) == {\n \"$schema\": \"http://json-schema.org/draft/2020-12/schema#\",\n \"type\": \"object\",\n \"properties\": {\"bar\": {\"type\": \"integer\"}},\n \"required\": [\"bar\"],\n \"additionalProperties\": False,\n}\n","repo_name":"wyfo/apischema","sub_path":"examples/examples/attrs_support.py","file_name":"attrs_support.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"81"} 
+{"seq_id":"15881092364","text":"from tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado import gen\nfrom tornado.websocket import websocket_connect\n\nimport numpy as np\nimport h5py\n\nimport json\nimport random\nimport string\n\nimport sys\n\nclass Client(object):\n    def __init__(self, url, timeout):\n        self.f = h5py.File('pollen_scraped.h5', 'a')\n        self.day = np.zeros((1, 24), dtype=np.int32)\n        self.dataset = np.zeros((0, 24), dtype=np.int32)\n\n        self.stations = ['albacete', 'alcazar', 'alicante', 'almeria', 'avila', 'badajoz',\n        'barcelona', 'barcelona-uab', 'bejar', 'bilbao', 'burgos', 'burjassot', 'caceres',\n        'cadiz', 'cartagena', 'castellon-de-la-plana', 'ciudad-real', 'cordoba', 'coruña',\n        'cuenca', 'elche', 'gerona', 'granada', 'gijon', 'guadalajara', 'huelva', 'huesca',\n        'jaen-hospital', 'jaen', 'jativa', 'las-palmas', 'leon', 'lerida', 'logroño',\n        'madrid-subiza', 'madrid-hospital', 'malaga', 'murcia', 'oviedo', 'palencia',\n        'palma-mallorca', 'pamplona', 'ponferrada', 'pontevedra', 'salamanca', 'san-sebastian',\n        'santa-cruz-tenerife', 'santander', 'santiago-compostela', 'segovia', 'sevilla-macarena',\n        'sevilla-tomillar', 'soria', 'talavera', 'tarragona', 'teruel', 'toledo', 'torrelavega',\n        'tudela', 'valencia', 'valladolid', 'vitoria', 'zamora', 'zaragoza']\n\n\n        self.ids = [1, 2, 3, 4, 5, 7, 6, 50, 8, 9, 11, 10, 12, 58, 16, 13, 15, 62, 14, 17, 18,\n        19, 65, 20, 21, 60, 59, 23, 22, 24, 28, 25, 26, 27, 29, 57, 30, 31, 32, 33, 35, 34, 36,\n        37, 38, 43, 45, 39, 64, 40, 41, 47, 42, 61, 44, 46, 48, 49, 63, 51, 54, 53, 55, 56]\n\n        self.s = 5\n        self.counter = 0\n\n        self.url = url\n        self.timeout = timeout\n        self.ioloop = IOLoop.instance()\n        self.ws = None\n        self.connect()\n        self.ioloop.start()\n\n    @gen.coroutine\n    def connect(self):\n        print(\"trying to connect\")\n        try:\n            self.ws = yield websocket_connect(self.url)\n        except Exception as e:\n            print(\"connection error:\", e)\n        else:\n            print(\"connected\")\n            self.run()\n\n    @gen.coroutine\n    def run(self):\n        while True:\n            payload = yield self.ws.read_message()\n            if payload is None:\n                print(\"connection closed\")\n                self.ws = None\n                break\n\n            if payload[0] == \"o\":\n                msg = json.dumps(['{\"msg\":\"connect\", \"version\":\"1\", \"support\":[\"1\",\"pre2\",\"pre1\"]}'])\n                self.ws.write_message(msg.encode('utf8'))\n            elif payload[0] == 'a':\n                frame = json.loads(payload[1:])\n                body = json.loads(frame[0])\n\n                if 'msg' in body:\n                    if body['msg'] == 'connected':\n                        self.day = np.zeros((1, 
24), dtype=np.int32)\n self.dataset = np.zeros((0, 24), dtype=np.int32)\n self.counter = 0\n\n stringmsg = \"{\\\"msg\\\":\\\"unsub\\\",\\\"id\\\":\\\"die7LBp7mEbzW3ffQ\\\"}\"\n msg = json.dumps([stringmsg])\n self.ws.write_message(msg.encode('utf8'));\n\n print('scraping from', self.stations[self.s])\n\n stringmsg = '{{\"msg\":\"sub\",\"id\":\"die7LBp7mEbzW3ffQ\",\"name\":\"polenes2015\",\"features\":[{{\"selector\":{{\"$and\":[{{\"fecha\":{{\"$gte\":19700101,\"$lte\":20201231}}}},{{\"idEstacion\":{}}}]}},\"options\":{{\"sort\":{{\"fecha\":1}}}},\"jump\":1}}]}}'.format(self.ids[self.s]);\n #print(stringmsg)\n msg = json.dumps([stringmsg])\n self.ws.write_message(msg.encode('utf8'));\n\n else:\n sys.exit()\n elif body['msg'] == 'added':\n self.day[0][0] = body['fields']['fecha'];\n\n self.counter += 1\n if self.counter % 365 == 0:\n print(self.counter//365, 'years scraped')\n\n for i in range(1, 23):\n if str(i) in body['fields'] and isinstance(body['fields'][str(i)], (int, float)):\n self.day[0][i] = body['fields'][str(i)]\n else:\n if str(i) in body['fields']:\n print('type error:', body['fields'][str(i)])\n self.day[0][i] = 0;\n self.dataset = np.append(self.dataset, self.day, axis = 0);\n\n\nif __name__ == \"__main__\":\n client = Client(\"wss://www.polenes.com/sockjs/490/j6ieo28e/websocket\", 5)\n","repo_name":"bolito2/pollen-forecast","sub_path":"tornado_scraper.py","file_name":"tornado_scraper.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"27872611963","text":"import os, sys\nimport numpy as np\nimport argparse\nfrom time import time\nfrom meta_model import *\nfrom utils import *\nimport pdb\nfrom pydoc import locate\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--settings_path', type=str)\nFLAGS = parser.parse_args()\n\n\n# main function\ndef main():\n settings = get_settings(settings_path=FLAGS.settings_path)\n model = IDK(settings=settings)\n\n # if the model has already been trained, load it.\n # else, train it\n new_run = 1\n if model.retrain:\n print('Begin training!')\n model.train()\n model.loadModel() # need to do this because last validation run needs to be cleared\n else:\n try:\n model.loadModel()\n print('Skip training!')\n new_run = 0\n except:\n print('Begin training!')\n model.train()\n model.loadModel() # need to do this because last validation run needs to be cleared\n\n # test the saved model\n if new_run or model.retest:\n print('Begin testing!')\n model.test()\n\n\ndef get_settings(settings_path):\n # read in settings\n settings = file_to_dict(settings_path)\n saving_path = os.path.dirname(settings_path)\n\n if \"experimentType\" not in settings:\n settings[\"experimentType\"] = 'pathak'\n\n # add settings\n settings[\"saving_path\"] = saving_path\n settings[\"train_data_path\"] = settings[\"data_pathname\"]\n settings[\"test_data_path\"] = settings[\"data_pathname\"]\n settings[\"delta_t\"] = settings[\"dt\"]\n ODE = locate('odelibrary.{}'.format(settings[\"f0_name\"]))\n if settings['f0_name']=='L96M':\n if settings[\"usef0\"]:\n physics = ODE(slow_only=settings['slowOnly'])\n settings[\"f0\"] = lambda t, y: physics.rhs(y, t)\n if settings[\"diff\"]==\"TrueDeriv\":\n raise ValueError(\"True Derivatives not yet setup for L96MS case\")\n else:\n if settings[\"usef0\"]:\n if settings[\"experimentType\"]=='pathak':\n physics1 = ODE()\n eps = settings['f0eps']\n physics1.b = physics1.b*(1+eps)\n elif settings[\"experimentType\"]=='GPerror':\n 
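# 'GPerror': approximate physics f0 built with a random closure whose scale is f0eps\n                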
physics1 = ODE(random_closure=True, epsGP=settings['f0eps'])\n elif settings[\"experimentType\"]=='wrongModel':\n physics1 = ODE()\n settings[\"f0\"] = lambda t, y: physics1.rhs(y, t)\n\n if settings[\"diff\"]==\"TrueDeriv\":\n physics2 = ODE()\n settings[\"fTRUE\"] = lambda t, y: physics2.rhs(y, t)\n return settings\n\nif __name__ == '__main__':\n start_time = time()\n main()\n total_time = time() - start_time\n print(\"Total run time is {:2.2f} minutes\".format(total_time/60))\n","repo_name":"mattlevine22/ode_model_error","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30146011534","text":"from django.contrib.auth import models, backends\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.conf import settings\n#from user import models as models_user\nfrom django.db.models.query_utils import Q\n\n\nclass FacebookBackend(backends.ModelBackend):\n def authenticate(self, facebook_id=None, facebook_email=None):\n '''\n Authenticate the facebook user by id OR facebook_email\n '''\n filter_clause = False\n if facebook_id:\n filter_clause = Q(facebook_id=facebook_id)\n\n if facebook_email:\n email_filter = Q(user__email=facebook_email)\n if filter_clause:\n filter_clause |= email_filter\n else:\n filter_clause = email_filter\n\n if filter_clause:\n try:\n profile_string = settings.AUTH_PROFILE_MODULE\n except AttributeError:\n profile_string = None\n if profile_string:\n profile_model = profile_string.split('.')[-1]\n profile_class = ContentType.objects.get(model=profile_model.lower())\n profile = profile_class.get_object_for_this_type(filter_clause)\n #profiles = profile_class.objects.filter(filter_clause).order_by('user')[:1]\n if profile:\n user = profile.user\n return user\n else:\n raise KeyError\n\n\n\n","repo_name":"MyaThandarKyaw/Django-facebook","sub_path":"django_facebook/auth_backends.py","file_name":"auth_backends.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"23218478869","text":"import argparse\nimport os\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import Sampler\nimport matplotlib.pyplot as plt\nfrom predict import *\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom dataset import ECGDataset\nfrom resnet import resnet34\nfrom utils import cal_f1s, cal_aucs, split_data, accuracy_score\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-dir', type=str, default=r'D:\\ECG_omri_maor\\Data\\WFDB', help='Directory for data dir')\n parser.add_argument('--leads', type=str, default='all', help='ECG leads to use')\n parser.add_argument('--seed', type=int, default=42, help='Seed to split data')\n parser.add_argument('--num-classes', type=int, default=int, help='Num of diagnostic classes')\n parser.add_argument('--lr', '--learning-rate', type=float, default=0.0001, help='Learning rate')\n parser.add_argument('--batch-size', type=int, default=32, help='Batch size')\n parser.add_argument('--num-workers', type=int, default=4, help='Num of workers to load data')\n parser.add_argument('--phase', type=str, default='train', help='Phase: train or test')\n parser.add_argument('--epochs', type=int, default=28, help='Training epochs')\n parser.add_argument('--resume', default=False, action='store_true', help='Resume')\n 
parser.add_argument('--use-gpu', default=True, action='store_true', help='Use GPU')\n    parser.add_argument('--model-path', type=str, default='', help='Path to saved model')\n    return parser.parse_args()\n\n\ndef train(dataloader, net, args, criterion, epoch, scheduler, optimizer, device):\n    print('Training epoch %d:' % epoch)\n    net.train()\n    running_loss = 0\n    output_list, labels_list = [], []\n    for _, (data, labels) in enumerate(tqdm(dataloader)):\n        data, labels = data.to(device), labels.to(device)\n        output = net(data)\n        loss = criterion(output, labels)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n        running_loss += loss.item()\n        output_list.append(output.data.cpu().numpy())\n        labels_list.append(labels.data.cpu().numpy())\n    # scheduler.step()\n    print(f'Avg Loss per epoch:{running_loss / len(dataloader)}')\n    y_trues = np.vstack(labels_list)\n    y_scores = np.vstack(output_list)\n    f1s = cal_f1s(y_trues, y_scores)\n    avg_f1 = np.mean(f1s)\n    # y_train_accuracy = y_scores > 0.5 # Ask Ariel if OK\n    # accuracies = accuracy_score(y_trues, y_train_accuracy)\n    # avg_accuracy = np.mean(accuracies)\n    output_loss = running_loss / len(output_list)\n    return output_loss, avg_f1 # , avg_accuracy\n\n\ndef evaluate(dataloader, net, args, criterion, device):\n    print('Validating...')\n    net.eval()\n    running_loss = 0\n    output_list, labels_list = [], []\n    for _, (data, labels) in enumerate(tqdm(dataloader)):\n        data, labels = data.to(device), labels.to(device)\n        output = net(data)\n        loss = criterion(output, labels)\n        running_loss += loss.item()\n        output = torch.sigmoid(output)\n        output_list.append(output.data.cpu().numpy())\n        labels_list.append(labels.data.cpu().numpy())\n    print(f'Avg Loss per epoch:{running_loss / (len(output_list))}')\n    y_trues = np.vstack(labels_list)\n    y_scores = np.vstack(output_list)\n    f1s = cal_f1s(y_trues, y_scores)\n    avg_f1 = np.mean(f1s)\n    output_loss = running_loss / len(output_list)\n    print('F1s:', f1s)\n    print('Avg F1: %.4f' % avg_f1)\n    if args.phase == 'train' and avg_f1 > args.best_metric:\n        args.best_metric = avg_f1\n        torch.save(net.state_dict(), args.model_path)\n    else:\n        aucs = cal_aucs(y_trues, y_scores)\n        avg_auc = np.mean(aucs)\n        print('AUCs:', aucs)\n        print('Avg AUC: %.4f' % avg_auc)\n    return output_loss, avg_f1\n\n\ndef calc_weights(df, dataloader, lambda_param=0.01):\n    df = df.labels.iloc[:, 1:8]\n    label_counts = df.sum()\n    label_weights = np.array(1 / label_counts)\n    sample_weights = [0] * len(dataloader)\n    for idx, (data, label) in enumerate(tqdm(dataloader)):\n        sample_weights[idx] = np.dot(label_weights, label)\n    # regularization_term = lambda_param * sum(sample_weights ** 2)\n    # sample_weights = [1/label_counts[i] for i in range(len(df.columns))]\n    return sample_weights\n\n\ndef plot_metrics(all_metrics_dict):\n    epoch_vec = np.arange(args.epochs)\n\n    # F1\n    fig, ax = plt.subplots(nrows=1, ncols=1) # create figure & 1 axis\n    ax.plot(epoch_vec, all_metrics_dict[\"train_f1\"], epoch_vec, all_metrics_dict[\"val_f1\"])\n    ax.set_xlabel(\"epoch\")\n    ax.set_ylabel(\"F1 score\")\n    ax.set_title('F1 as a function of epochs')\n    ax.legend(['train', 'val'])\n    fig.savefig(\"plots/f1.png\")\n\n    # Loss\n    fig, ax = plt.subplots(nrows=1, ncols=1) # create figure & 1 axis\n    ax.plot(epoch_vec, all_metrics_dict[\"train_loss\"], epoch_vec, all_metrics_dict[\"val_loss\"])\n    ax.set_xlabel(\"epoch\")\n    ax.set_ylabel(\"Loss\")\n    ax.legend(['train', 'val'])\n    ax.set_title('Loss as a function of epochs')\n    fig.savefig(\"plots/loss.png\")\n\n\nif __name__ == \"__main__\":\n    
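# driver: split folds, build train/val/test loaders, fit ResNet-34 with BCEWithLogitsLoss, then tune per-class thresholds and evaluate\n    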
args = parse_args()\n args.best_metric = 0\n data_dir = os.path.normpath(args.data_dir)\n database = os.path.basename(data_dir)\n\n if not args.model_path:\n args.model_path = f'models/resnet34_{database}_{args.leads}_{args.seed}.pth'\n\n if args.use_gpu and torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = 'cpu'\n\n if args.leads == 'all':\n leads = 'all'\n nleads = 12\n else:\n leads = args.leads.split(',')\n nleads = len(leads)\n label_csv = os.path.join(data_dir, 'labels.csv')\n train_folds, val_folds, test_folds = split_data(seed=args.seed)\n folds_df = pd.DataFrame({'train': [train_folds, val_folds, test_folds]})\n folds_df.to_csv('folds_val.csv')\n train_dataset = ECGDataset('train', data_dir, label_csv, train_folds, leads)\n # train_weights = calc_weights(train_dataset, train_dataset)\n # train_sampler = torch.utils.data.sampler.WeightedRandomSampler(train_weights, len(train_weights), replacement=True)\n train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers,\n pin_memory=True, shuffle=True) # , sampler=train_sampler)\n val_dataset = ECGDataset('val', data_dir, label_csv, val_folds, leads)\n val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers,\n pin_memory=True)\n test_dataset = ECGDataset('test', data_dir, label_csv, test_folds, leads)\n test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers,\n pin_memory=True)\n net = resnet34(input_channels=nleads).to(device)\n optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10, gamma=0.1)\n\n criterion = nn.BCEWithLogitsLoss()\n # train_accuracy = []\n train_loss = []\n val_loss = []\n train_f1 = []\n val_f1 = []\n if args.phase == 'train':\n if args.resume:\n net.load_state_dict(torch.load(args.model_path, map_location=device))\n for epoch in range(args.epochs):\n curr_train_loss, curr_train_f1 = train(train_loader, net, args, criterion, epoch,\n scheduler, optimizer, device)\n curr_val_loss, curr_val_f1 = evaluate(val_loader, net, args, criterion, device)\n train_loss.append(curr_train_loss)\n val_loss.append(curr_val_loss)\n # train_accuracy.append(curr_train_accuracy)\n train_f1.append(curr_train_f1)\n val_f1.append(curr_val_f1)\n\n # Testing and documentation phase\n args.threshold_path = f'models/{database}-threshold.pkl'\n thresholds = get_thresholds(val_loader, net, device, threshold_path=args.threshold_path)\n print('Thresholds:', thresholds)\n\n print('Results on validation data:')\n apply_thresholds(val_loader, net, device, thresholds, 'val')\n\n print('Results on test data:')\n apply_thresholds(test_loader, net, device, thresholds, 'test')\n net.load_state_dict(torch.load(args.model_path, map_location=device))\n test_loss, test_f1 = evaluate(test_loader, net, args, criterion, device)\n all_metrics_dict = {'train_f1': train_f1,\n 'val_f1': val_f1, 'train_loss': train_loss, 'val_loss': val_loss}\n plot_metrics(all_metrics_dict)\n print(f'Test results: Loss:{test_loss}, F1:{test_f1}')\n","repo_name":"OmriDan/ecg-diagnosis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"40341157390","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPDU generic module for commands in state PDU.\n\"\"\"\n\nimport six\nimport abc\nfrom moler.cmd.commandtextualgeneric import 
CommandTextualGeneric\n\n__author__ = 'Marcin Usielski'\n__copyright__ = 'Copyright (C) 2020, Nokia'\n__email__ = 'marcin.usielski@nokia.com'\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass GenericPduAten(CommandTextualGeneric):\n def __init__(self, connection, prompt=None, newline_chars=None, runner=None):\n \"\"\"\n Base class for Aten PDU commands in all states.\n\n :param connection: connection to device.\n :param prompt: expected prompt sending by device after command execution. Maybe String or compiled re.\n :param runner: runner to run command.\n \"\"\"\n\n super(GenericPduAten, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,\n runner=runner)\n","repo_name":"nokia/moler","sub_path":"moler/cmd/pdu_aten/generic_pdu_aten.py","file_name":"generic_pdu_aten.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"14782098118","text":"# %%\nfrom aocd.models import Puzzle \npuzzle = Puzzle(year = 2016, day=21)\n# %%\nX = [x.split() for x in puzzle.input_data.split('\\n')]\n\ndef swap_pos(pwd, instr):\n x = int(instr[2])\n y = int(instr[5])\n tmp = pwd[x]\n pwd[x] = pwd[y]\n pwd[y] = tmp\n return pwd\n\ndef swap_let(pwd, instr):\n x = pwd.index(instr[2])\n y = pwd.index(instr[5])\n tmp = pwd[x]\n pwd[x] = pwd[y]\n pwd[y] = tmp\n return pwd\n\ndef swap(pwd, instr):\n return swap_pos(pwd, instr) if instr[1] == 'position' else swap_let(pwd, instr)\n\ndef rotate_lr(pwd, d, s):\n if d == 'right':\n s = len(pwd) - (s % len(pwd))\n return pwd[s%len(pwd):] + pwd[:s%len(pwd)]\n\ndef rotate(pwd, instr):\n if instr[1] == 'based':\n n = pwd.index(instr[6])\n n += (1 if n < 4 else 2)\n return rotate_lr(pwd, 'right', n)\n else:\n return rotate_lr(pwd, instr[1], int(instr[2]))\n\ndef reverse(pwd, instr):\n x = int(instr[2])\n y = int(instr[4])\n head = pwd[:x]\n mid = pwd[x:y+1]\n tail = pwd[y+1:]\n return head + mid[::-1] + tail\n\ndef move(pwd, instr):\n x = int(instr[2])\n y = int(instr[5])\n tmp = pwd.pop(x)\n pwd.insert(y, tmp)\n return pwd\n\nfmap = {'swap': swap, 'rotate': rotate, 'reverse': reverse, 'move': move}\n\n\n# %%\npwd = list('abcdefgh')\nfor instr in X:\n res = fmap[instr[0]](pwd, instr)\n if len(res) != len(pwd):\n print(\"error\")\n print(pwd)\n print(res)\n print(instr)\n pwd = res\nres = ''.join(pwd)\nres\n# %%\npuzzle.answer_a = res\n# %%\n\ndef r_rotate_lr(pwd, d, s):\n if d == 'left':\n s = len(pwd) - (s % len(pwd))\n return pwd[s%len(pwd):] + pwd[:s%len(pwd)]\n\ndef r_rotate(pwd, instr):\n if instr[1] == 'based':\n n = pwd.index(instr[6])\n n += (1 if n < 4 else 2)\n for i in range(1, len(pwd)+5):\n attempt = rotate_lr(pwd.copy(), 'left', i)\n if rotate(attempt, instr) == pwd:\n return attempt\n else:\n return r_rotate_lr(pwd, instr[1], int(instr[2]))\n\ndef r_move(pwd, instr):\n x = int(instr[5])\n y = int(instr[2])\n tmp = pwd.pop(x)\n pwd.insert(y, tmp)\n return pwd\n\nfmap = {'swap': swap, 'rotate': r_rotate, 'reverse': reverse, 'move': r_move}\n\npwd = list('fbgdceah')\nfor instr in reversed(X):\n pwd = fmap[instr[0]](pwd, instr)\nres = ''.join(pwd)\nres\n# %%\npuzzle.answer_b = res\n# %%\n","repo_name":"elibaldwin/aoc2020","sub_path":"practice/aoc2016day21.py","file_name":"aoc2016day21.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"423133729","text":"#!/usr/bin/python3\n# coding=utf-8\n\n\nimport threading\nimport time\n\ndef sing():\n for i 
in range(3):\n print(\"singing...%d\" % i)\n time.sleep(1)\n\ndef dance():\n for i in range(3):\n print(\"dancing...%d\" % i)\n time.sleep(1)\n\nif __name__ == '__main__':\n \n sing_thread = threading.Thread(target = sing)\n dance_thread = threading.Thread(target = dance)\n\n sing_thread.start()\n dance_thread.start()\n\n","repo_name":"zhangshengasdf/python-notes","sub_path":"Multitask/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21885546152","text":"#Keep entering pizza toppings until active is False.\n\nprompt = \"\\nWhat sort of toppings would you like?\"\nprompt += \"\\nEnter 'quit' when you have added all of your toppings! \"\n\nactive = True\n\nwhile active == True:\n toppings = input(prompt)\n \n if toppings == 'quit':\n active = False\n else:\n print(toppings.title()+\"? Nice! I'll throw it on your pizza.\")\n\n","repo_name":"jaoist/pycc","sub_path":"Inputs_and_Whiles/7-6active.py","file_name":"7-6active.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70858383612","text":"import os\nimport discord\nfrom commands import *\nimport random\nfrom replit import db\nfrom keep_alive import keep_alive\nimport utils\nfrom random import seed\nfrom random import randint\nfrom vector2 import Vector2\nfrom game import Game\n\nseed(1)\n\nlast_game_state_id = 0\n\nclient = discord.Client()\ngames = {}\n\n@client.event\nasync def on_ready():\n print(\"We have logged in as {0.user}\".format(client))\n\n@client.event\nasync def on_message(message):\n msg = message.content\n if message.author == client.user:\n if msg.endswith('move?'):\n await controls(message, client)\n elif msg.startswith('Guess'):\n await guesses(message, client)\n else:\n\n if msg.startswith('!play'):\n global last_game_state_id\n last_game_state_id = message.id\n games[message.author] = Game(Vector2(0,0), Vector2(0,0), Vector2(0,0))\n games[message.author].new_level()\n await play(message, client, games[message.author].get_board())\n\n if msg.startswith('!help'):\n await help(message, client)\n\n if msg.startswith('!guess'):\n await guess(message, client)\n\n@client.event\nasync def on_reaction_add(reaction, user):\n if user == client.user:\n #print(\"Failed because user is bot\")\n return\n\n if reaction.message.author != client.user:\n #print(\"Failed because the message wasnt the message created by this bot\")\n return\n\n #if reaction.message.id == last_game_state_id:\n #print(\"Failed because the message reacted to wasnt the game state message\")\n #return\n\n if reaction.message.content.endswith('move?'):\n user_input = 0\n if reaction.emoji == utils.up_arrow_emoji:\n user_input = 0\n elif reaction.emoji == utils.down_arrow_emoji:\n user_input = 2\n elif reaction.emoji == utils.left_arrow_emoji:\n user_input = 3\n elif reaction.emoji == utils.right_arrow_emoji:\n user_input = 1\n\n users = await reaction.users().flatten()\n users = users[0]\n if games[user].update_state(user_input):\n await update_game(reaction.message.channel, user.name, games[user].get_board())\n else:\n await reaction.message.channel.send('Cannot move in that direction.')\n await update_game(reaction.message.channel, user.name, games[user].get_board())\n\n elif reaction.message.content.startswith('Guess'):\n guess = 0\n answer = randint(0, 10)\n print(answer)\n if reaction.emoji == utils.one_emoji:\n guess = 
1\n        elif reaction.emoji == utils.two_emoji:\n            guess = 2\n        elif reaction.emoji == utils.three_emoji:\n            guess = 3\n        elif reaction.emoji == utils.four_emoji:\n            guess = 4\n        elif reaction.emoji == utils.five_emoji:\n            guess = 5\n\n        if guess == answer:\n            await reaction.message.channel.send(\"Ding ding ding correct it was {0}.\".format(answer))\n        else:\n            await reaction.message.channel.send(\"Incorrect, the correct answer was {0}.\".format(answer))\n\n\nkeep_alive()\n\nmy_secret = os.environ['TOKEN']\nclient.run(my_secret)","repo_name":"CedarBishop/GameBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11460252085","text":"# 6.0001/6.00 Problem Set 5 - RSS Feed Filter\n# Name:\n# Collaborators:\n# Time:\n\nimport feedparser\nimport string\nimport time\nimport threading\nfrom project_util import translate_html\nfrom datetime import datetime, timezone\nimport pytz\n\n\n#-----------------------------------------------------------------------\n\n#======================\n# Code for retrieving and parsing\n# Google and Yahoo News feeds\n# Do not change this code\n#======================\n\ndef process(url):\n    \"\"\"\n    Fetches news items from the rss url and parses them.\n    Returns a list of NewsStory-s.\n    \"\"\"\n    feed = feedparser.parse(url)\n    entries = feed.entries\n    ret = []\n\n    for entry in entries:\n        guid = entry.guid\n        title = translate_html(entry.title)\n        link = entry.link\n        description = \"\"\n        try:\n            description = translate_html(entry.description)\n        except:\n            description = translate_html(entry.title_detail)\n        pubdate = translate_html(entry.published)\n\n        try:\n            pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %Z\")\n            pubdate.replace(tzinfo=pytz.timezone(\"GMT\"))\n            # pubdate = pubdate.astimezone(pytz.timezone('EST'))\n            # pubdate.replace(tzinfo=None)\n        except ValueError:\n            pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %z\")\n\n        newsStory = NewsStory(guid, title, description, link, pubdate)\n        ret.append(newsStory)\n    return ret\n\n#======================\n# Data structure design\n#======================\n\n# Problem 1\n\nclass NewsStory(object):\n\n    # defining the data attributes for class NewsStory\n    def __init__(self, guid, title, description, link, pubdate):\n        self.guid = guid\n        self.title = title\n        self.description = description\n        self.link = link\n        self.pubdate = pubdate\n\n    # creating different ways to manipulate the class NewsStory\n    def get_guid(self):\n        return self.guid\n\n    def get_title(self):\n        return self.title\n\n    def get_description(self):\n        return self.description\n\n    def get_link(self):\n        return self.link\n\n    def get_pubdate(self):\n        return self.pubdate\n#======================\n# Triggers\n#======================\n\nclass Trigger(object):\n    def evaluate(self, story):\n        \"\"\"\n        Returns True if an alert should be generated\n        for the given news item, or False otherwise.\n        \"\"\"\n        # DO NOT CHANGE THIS!\n        raise NotImplementedError\n\n# PHRASE TRIGGERS\n\n# Problem 2\n# TODO: PhraseTrigger\nclass PhraseTrigger(Trigger):\n\n    def __init__(self, phrase):\n        self.phrase = phrase.upper()\n\n    # returning true if the phrase exists in the string argument text, false otherwise\n    def is_phrase_in(self, string_argument):\n\n        # converting string_argument to upper so matching is case-insensitive, 
then splitting the word in the phrase with the space\n self.string_argument = string_argument.upper()\n phrase_split = self.phrase.split()\n word_list = \"\"\n\n # iterating through the string_argument text and if the char is withing the list of string.pronunciatio(!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~),\n # replacing that with space and if not in the list, just adding the char in the word_list\n # for char in string_argument:\n for char in self.string_argument:\n if char in string.punctuation:\n word_list += \" \"\n else:\n word_list += char\n\n # splitting the word_list with the space and iterating over word_list to remove the '' if any in the word_list\n word_list = word_list.split()\n while \"\" in word_list:\n word_list.remove(\"\")\n\n # iterating over phrase_split and if words in the word_list and the phrase mathes, returning true else returning false\n for word in phrase_split:\n word_list_join = \" \".join(word_list)\n if word in word_list and self.phrase in word_list_join:\n result = True\n else:\n result = False\n\n return result\n\n\n# Problem 3\n# TODO: TitleTrigger\n# returning true if the title of the story contains a valid phrase, false otherwise\nclass TitleTrigger(PhraseTrigger):\n\n def __init__(self, phrase):\n PhraseTrigger.__init__(self, phrase)\n\n def evaluate(self, story):\n return self.is_phrase_in(story.get_title())\n\n\n# Problem 4\n# TODO: DescriptionTrigger\n# returning true if the description of the story contains a valid phrase, false otherwise\nclass DescriptionTrigger(PhraseTrigger):\n\n def __init__(self, phrase):\n PhraseTrigger.__init__(self, phrase)\n\n def evaluate(self, story):\n return self.is_phrase_in(story.get_description())\n\n\n\n# TIME TRIGGERS\n\n# Problem 5\n# TODO: TimeTrigger\n# Constructor:\n# Input: Time has to be in EST and in the format of \"%d %b %Y %H:%M:%S\".\n# Convert time from string to a datetime before saving it as an attribute.\nclass TimeTrigger(Trigger):\n\n def __init__(self, time):\n time = datetime.strptime(time, \"%d %b %Y %H:%M:%S\")\n time = time.replace(tzinfo=pytz.timezone(\"EST\"))\n self.time = time\n\n# Problem 6\n# TODO: BeforeTrigger and AfterTrigger\n# BeforeTrigger fires when a story is published strictly before the trigger’s time,\nclass BeforeTrigger(TimeTrigger):\n\n def evaluate(self, story):\n pub_date = story.get_pubdate().replace(tzinfo=pytz.timezone(\"EST\"))\n return self.time > pub_date\n\n# AfterTrigger fires when a story is published strictly after the trigger’s time\nclass AfterTrigger(TimeTrigger):\n\n def evaluate(self, story):\n pub_date = story.get_pubdate().replace(tzinfo=pytz.timezone(\"EST\"))\n return self.time < pub_date\n\n\n# COMPOSITE TRIGGERS\n\n# Problem 7\n# TODO: NotTrigger\nclass NotTrigger(Trigger):\n\n # trigger should take this other trigger as an argument to its constructor\n def __init__(self, trigger):\n self.trigger = trigger\n\n #This trigger should produce its output by inverting the output of another trigger.\n def evaluate(self, story):\n return not self.trigger.evaluate(story)\n\n# Problem 8\n# TODO: AndTrigger\nclass AndTrigger(Trigger):\n\n #This trigger should take two triggers as arguments to its constructor\n def __init__(self, trigger_1, trigger_2):\n self.trigger1 = trigger_1\n self.trigger2 = trigger_2\n\n #should fire if either one (or both) of its inputted triggers would fire on that item.\n def evaluate(self, story):\n return self.trigger1.evaluate(story) and self.trigger2.evaluate(story)\n\n# Problem 9\n# TODO: OrTrigger\nclass OrTrigger(Trigger):\n\n #This 
trigger should take two triggers as arguments to its constructor\n def __init__(self, trigger_1, trigger_2):\n self.trigger1 = trigger_1\n self.trigger2 = trigger_2\n\n #should fire if either one (or both) of its inputted triggers would fire on that item\n def evaluate(self, story):\n return self.trigger1.evaluate(story) or self.trigger2.evaluate(story)\n\n#======================\n# Filtering\n#======================\n\n# Problem 10\n#returning a list of only the stories for which a trigger fires.\ndef filter_stories(stories, triggerlist):\n \"\"\"\n Takes in a list of NewsStory instances.\n\n Returns: a list of only the stories for which a trigger in triggerlist fires.\n \"\"\"\n # TODO: Problem 10\n # This is a placeholder\n # (we're just returning all the stories, with no filtering)\n triggered_stories = []\n for story in stories:\n for trigger in triggerlist:\n if trigger.evaluate(story):\n if story not in triggered_stories:\n triggered_stories.append(story)\n return triggered_stories\n\n\n\n#======================\n# User-Specified Triggers\n#======================\n# Problem 11\ndef read_trigger_config(filename):\n \"\"\"\n filename: the name of a trigger configuration file\n\n Returns: a list of trigger objects specified by the trigger configuration\n file.\n \"\"\"\n # We give you the code to read in the file and eliminate blank lines and\n # comments. You don't need to know how it works for now!\n trigger_file = open(filename, 'r')\n lines = []\n for line in trigger_file:\n line = line.rstrip()\n if not (len(line) == 0 or line.startswith('//')):\n lines.append(line)\n\n # TODO: Problem 11\n # line is the list of lines that you need to parse and for which you need\n # to build triggers\n\n print(lines) # for now, print it so you see what it contains!\n\n\n\nSLEEPTIME = 120 #seconds -- how often we poll\n\n\nif __name__ == '__main__':\n # A sample trigger list - you might need to change the phrases to correspond\n # to what is currently in the news\n try:\n #t1 = TitleTrigger(\"election\")\n #t2 = DescriptionTrigger(\"Trump\")\n #t3 = DescriptionTrigger(\"Biden\")\n #t4 = AndTrigger(t2, t3)\n #triggerlist = [t1, t4]\n\n # Problem 11\n # TODO: After implementing read_trigger_config, uncomment this line\n #triggerlist = read_trigger_config('triggers.txt')s\n\n # HELPER CODE - you don't need to understand this!\n # Reads and writes Newsstories to stories.txt in specified format\n # Retrieves and filters the stories from the RSS feeds\n guidShown = []\n def get_cont(newstory):\n if newstory.get_guid() not in guidShown:\n guidShown.append(newstory.get_guid())\n\n while True:\n\n\n print(\"Polling . . 
.\", end=' ')\n # Get stories from BBC's Top Stories RSS news feed\n stories = process(\"http://feeds.bbci.co.uk/news/rss.xml\")\n\n # Get stories from Yahoo's Top Stories RSS news feed\n stories.extend(process(\"http://news.yahoo.com/rss/topstories\"))\n\n stories = filter_stories(stories, triggerlist)\n\n\n #@ISMAMA\n file = open('stories.txt', 'w')\n for s in stories:\n file.write(s.title.strip())\n file.write(\"\\n\")\n for i in range(len(s.title)):\n file.write(\"-\")\n file.write(\"\\n\")\n file.write(s.description.strip())\n file.write(\"\\n\")\n file.write(s.link.strip())\n file.write(\"\\n\")\n file.write(\"_\"*60)\n for s in range(2):\n file.write(\"\\n\")\n file.close()\n\n #Do not uncomment these lines\n #Dlist(map(get_cont, stories))\n #Dscrollbar.config(command=cont.yview)\n\n\n print(\"Sleeping...\")\n time.sleep(SLEEPTIME)\n\n except Exception as e:\n print(e)","repo_name":"thapaSujit/CS50","sub_path":"OOP/ps5.py","file_name":"ps5.py","file_ext":"py","file_size_in_byte":10780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74724104252","text":"#infinite loop\r\n\r\n\r\n\r\n#i=0\r\n#while i<=10:\r\n # print(\"hello world\",i)\r\n # i=i+1\r\n #\r\n#while True:\r\n # print(\"deepanshu\")\r\n # # for loop\r\n# name=\"deepanshu\"\r\n# for i in range( 1, 10):\r\n# print(f\"your name is {name}\",i) \r\n\r\n# from playsound import playsound\r\n# # playsound('audio.mp3') \r\n\r\nnum=input(\"Enter the number\")\r\nnum1=int(num)\r\nfor i in range(1,11):\r\n print(num1, 'X' ,i, '=' , num1*i)","repo_name":"deepanshu136/Python-Tutorials","sub_path":"python tutorial/tutorial4/tutorial6/basic5.py","file_name":"basic5.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5270836403","text":"from PIL import ImageFile\nfrom copy import deepcopy\nimport collections\nfrom datasets import load_from_disk, set_caching_enabled\nfrom utils import data_utils, utils\nfrom utils.args_helper import (\n DataArguments,\n ModelArguments,\n TrainingArguments\n)\nfrom tqdm import tqdm\nfrom torchvision.transforms import (\n CenterCrop,\n ColorJitter,\n Compose,\n Normalize,\n RandomHorizontalFlip,\n RandomVerticalFlip,\n RandomResizedCrop,\n RandomRotation,\n Resize,\n ToTensor,\n)\nfrom trainer.detr_trainer import DetrTrainer\nfrom transformers import HfArgumentParser\nfrom transformers.trainer_utils import get_last_checkpoint, is_main_process\nfrom typing import Dict, Union, Any, Optional, List, Tuple\n\nimport datasets\nimport json\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nimport sys\nimport torch\nimport torch.nn as nn\nimport transformers\n\nfrom simmc2.model.utils import ambiguous_candidates_evaluation as eval_utils\nfrom trainer.detr_trainer import DetrTrainer \nfrom tqdm import tqdm\n\nfrom torch.utils.data import DataLoader\n \nset_caching_enabled(True)\nlogger = logging.getLogger(__name__)\n\n#####\n# Main Functions\n#####\ndef run(model_args, data_args, training_args):\n training_args.output_dir=\"{}/{}_{}_lr{}_bs{}\".format(\n training_args.output_dir,\n model_args.model_name_or_path.replace(\"/\", \"_\"),\n training_args.lr_scheduler_type,\n training_args.learning_rate,\n training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps\n )\n os.makedirs(training_args.output_dir, exist_ok=True)\n cache_dir_path = \"{}/{}_{}_lr{}_bs{}\".format(\n data_args.cache_dir_name,\n 
model_args.model_name_or_path.replace(\"/\", \"_\").replace('.',''),\n training_args.lr_scheduler_type,\n training_args.learning_rate,\n training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps\n )\n os.makedirs(cache_dir_path, exist_ok=True)\n\n # Data loading\n eval_dset, meta_dset, gold_data = data_utils.load_image_text_eval_dataset(data_path=data_args.devtest_dataset_path)\n # eval_dset = eval_dset.train_test_split(0.05)['test']\n \n if (data_args.prediction_path is None or not os.path.exists(data_args.prediction_path)):\n eval_dset = eval_dset.map(\n data_utils.convert_dialogue_to_caption,\n num_proc=data_args.preprocessing_num_workers,\n desc=\"convert object attributes to caption\",\n load_from_cache_file=True,\n cache_file_name=os.path.join(cache_dir_path, \"ds_converted.arrow\"),\n fn_kwargs={\"num_utterances\": data_args.num_utterances},\n remove_columns=[\"dialogue\"]\n )\n \n # Preprocessing\n tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path)\n if data_args.additional_special_token_path is not None and os.path.isfile(data_args.additional_special_token_path):\n with open(data_args.additional_special_token_path, \"rb\") as handle:\n special_tokens_dict = json.load(handle)\n num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n logger.info(f\"Added {num_added_toks} tokens\")\n logger.info(f\"All special tokens: {tokenizer.all_special_tokens}\")\n feature_extractor = transformers.AutoFeatureExtractor.from_pretrained(model_args.model_name_or_path)\n processor = transformers.CLIPProcessor(feature_extractor, tokenizer)\n\n eval_dset = eval_dset.map(\n data_utils.tokenize_captions,\n num_proc=data_args.preprocessing_num_workers,\n desc=\"tokenize captions\",\n fn_kwargs={\n \"tokenizer\": tokenizer,\n \"max_seq_length\": data_args.max_seq_length,\n }\n )\n \n normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)\n eval_transforms = Compose(\n [\n Resize(feature_extractor.size),\n # CenterCrop(feature_extractor.size),\n ToTensor(),\n # normalize,\n ]\n )\n\n def eval_image_preprocess(example_batch): \n images = [\n eval_transforms(\n image.convert(\"RGB\").crop((\n bbox[0], bbox[1], bbox[0]+max(5, bbox[3]), bbox[1]+max(5, bbox[2])\n ))\n )\n for image, bbox in zip(example_batch[\"image\"], example_batch[\"bbox\"])\n ]\n captions = [caption for caption in example_batch[\"caption\"]]\n example_batch[\"pixel_values\"] = feature_extractor(\n images=images, text=captions, return_tensors=\"pt\")[\"pixel_values\"]\n return example_batch\n\n eval_dset = eval_dset.with_transform(eval_image_preprocess)\n\n # Training and evaluation\n model = transformers.CLIPModel.from_pretrained(model_args.model_name_or_path)\n\n def collate_fn(examples):\n pixel_values = torch.stack([example[\"pixel_values\"] for example in examples])\n input_ids = torch.tensor([example[\"input_ids\"] for example in examples], dtype=torch.long)\n attention_mask = torch.tensor([example[\"attention_mask\"] for example in examples], dtype=torch.long)\n return {\n \"pixel_values\": pixel_values,\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"return_loss\": True,\n }\n\n trainer = DetrTrainer(\n model=model,\n args=training_args,\n data_collator=collate_fn,\n train_dataset=None,\n eval_dataset=None,\n tokenizer=processor\n )\n\n # Evaluation\n # predictions = trainer.predict(eval_dset)\n\n dataloader = DataLoader(\n eval_dset, shuffle=False,\n batch_size=training_args.per_device_train_batch_size, 
\n num_workers=training_args.dataloader_num_workers,\n collate_fn=collate_fn\n )\n\n print('Performing inference on test data...')\n model = model.cuda()\n logits_batch = []\n for batch in tqdm(dataloader):\n batch[\"pixel_values\"] = batch[\"pixel_values\"].cuda()\n batch[\"input_ids\"] = batch[\"input_ids\"].cuda()\n batch[\"attention_mask\"] = batch[\"attention_mask\"].cuda()\n outputs = model(**batch)\n logits_batch.append(outputs.logits_per_image.diagonal().cpu().detach().numpy())\n logits = np.concatenate(logits_batch)\n\n data_args.prediction_path = f'{cache_dir_path}/prediction_logits.pt'\n torch.save(logits, open(data_args.prediction_path, 'wb'))\n else:\n logits = torch.load(open(data_args.prediction_path, 'rb'))\n\n # Compute Metrics\n def compute_metrics(logits):\n \"\"\"Aggregate predictions & compute evaluation metric per utterance\"\"\"\n\n print('Collecting metadata for predictions...')\n pred_dict = {'dialog_id': [], 'turn_id': [], 'object_id': [], 'logit': [], 'num_labels': []}\n for row, logit in tqdm(zip(meta_dset, logits)):\n pred_dict['dialog_id'].append(row['dialog_id'])\n pred_dict['turn_id'].append(row['turn_id'])\n pred_dict['object_id'].append(row['object_id'])\n pred_dict['num_labels'].append(len(row['labels']))\n pred_dict['logit'].append(logit)\n\n print('Aggregating predictions...')\n df = pd.DataFrame(pred_dict)\n agg_preds = df.groupby(['dialog_id','turn_id','num_labels']).agg({'object_id': list, 'logit': list})\n agg_preds = agg_preds.reset_index().to_dict(orient='records')\n\n print('Filtering per utterance predictions...')\n results = collections.defaultdict(list)\n for agg_pred in agg_preds:\n dialog_id, turn_id, num_labels = agg_pred['dialog_id'], agg_pred['turn_id'], agg_pred['num_labels']\n object_ids, logits = np.array(agg_pred['object_id']), np.array(agg_pred['logit'])\n\n # ALL\n # indexes = range(len(logits))\n\n # ORACLE\n indexes = np.argpartition(logits, -num_labels)[-num_labels:] if num_labels != 0 else []\n\n # Top-k\n # indexes = np.argpartition(logits, -min(len(logits), 15))[-min(len(logits), 15):]\n\n # THRESHOLD\n # indexes = np.where(logits > np.mean(logits))[0]\n # indexes = np.where(logits > np.min(logits))[0]\n # indexes = np.where(logits > np.median(logits))[0]\n # print(logits)\n # logits = torch.sigmoid(torch.from_numpy(logits))\n # indexes = np.where(logits >= 0.5)[0]\n acc_object_ids = object_ids[indexes].tolist()\n\n new_instance = {\n \"turn_id\": turn_id,\n \"disambiguation_candidates\": acc_object_ids\n }\n results[dialog_id].append(new_instance)\n\n # Restructure results JSON and save.\n print('Comparing predictions with ground truths...')\n results = [{\n \"dialog_id\": dialog_id,\n \"predictions\": predictions,\n } for dialog_id, predictions in results.items()]\n\n # print(\"results\", results[0])\n # print()\n # print(\"gold_data\", gold_data[\"dialogue_data\"][0])\n # print()\n if \"coref_candidates\" in data_args.devtest_dataset_path:\n metrics = eval_utils.evaluate_ambiguous_candidates(gold_data, results, is_actually_coref=True)\n else:\n metrics = eval_utils.evaluate_ambiguous_candidates(gold_data, results, is_actually_coref=False)\n\n print('== Eval Metrics ==')\n print('Recall: ', metrics[\"recall\"])\n print('Precision: ', metrics[\"precision\"])\n print('F1-Score: ', metrics[\"f1\"])\n\n return metrics\n\n print('Calculating evaluation metrics...')\n metrics = compute_metrics(logits)\n \n # Report Metrics\n trainer.log_metrics(\"test\", metrics)\n trainer.save_metrics(\"test\", metrics) \n\ndef main():\n ###\n # 
Parsing & Initialization\n ###\n # Parse argument\n parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # Set random seed\n utils.init_env(training_args.seed)\n \n # Detect last checkpoint\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty.\"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n ###\n # Prepare logger\n ###\n # Init logging\n os.makedirs(\"./log\", exist_ok=True)\n \n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout), logging.FileHandler(\n \"./log/log_{}_{}_lr{}_bs{}\".format(\n model_args.model_name_or_path.replace(\"/\", \"_\"),\n training_args.lr_scheduler_type,\n training_args.learning_rate,\n training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps\n ), mode=\"w\")],\n )\n logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # Set the verbosity to warn of the Transformers logger (on main process only):\n if is_main_process(training_args.local_rank):\n transformers.utils.logging.set_verbosity(transformers.logging.WARNING)\n logger.info(\"Training/evaluation parameters %s\", training_args)\n \n ###\n # RUN RUN RUN!!!\n ###\n run(model_args, data_args, training_args)\n \nif __name__ == '__main__':\n main()","repo_name":"holylovenia/multimodal-object-identification","sub_path":"text_image_alignment_prediction.py","file_name":"text_image_alignment_prediction.py","file_ext":"py","file_size_in_byte":12847,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"29474073874","text":"import numpy as np\nimport cv2\n\n\ndef binary_to_multi_object_mask(binary_masks):\n\n \"\"\"\n Encode multiple 2d binary masks into a single 2d multi-object segmentation mask\n\n Parameters\n ----------\n binary_masks: numpy.ndarray of shape (n_objects, height, width)\n 2d binary masks\n\n Returns\n -------\n multi_object_mask: numpy.ndarray of shape (height, width)\n 2d multi-object mask\n \"\"\"\n\n multi_object_mask = np.zeros((binary_masks.shape[1], binary_masks.shape[2]))\n for i, binary_mask in enumerate(binary_masks):\n non_zero_idx = binary_mask == 1\n multi_object_mask[non_zero_idx] = i + 1\n\n return multi_object_mask\n\n\ndef polygon_to_mask(polygon, shape):\n\n \"\"\"\n Create binary segmentation mask from polygon\n\n 
Parameters\n ----------\n polygon: list of shape (n_polygons, n_points, 2)\n List of polygons\n\n shape: tuple of shape (2)\n Height and width of the mask\n\n Returns\n -------\n mask: numpy.ndarray of shape (height, width)\n 2d segmentation mask\n \"\"\"\n\n mask = np.zeros(shape)\n # Convert list of points to tuple pairs of X and Y coordinates\n points = np.array(polygon).reshape(-1, 2)\n # Draw mask from the polygon\n cv2.fillPoly(mask, [points], 1, lineType=cv2.LINE_8, shift=0)\n mask = np.array(mask).astype(np.uint8)\n\n return mask\n\n\ndef mask_to_bounding_box(mask):\n\n \"\"\"\n Get bounding box from a binary segmentation mask\n\n Parameters\n ----------\n mask: numpy.ndarray of shape (height, width)\n 2d binary mask\n\n Returns\n -------\n bounding_box: list of shape (4)\n Bounding box\n \"\"\"\n\n non_zero_idx = np.where(mask == 1)\n bounding_box = [\n int(np.min(non_zero_idx[1])),\n int(np.min(non_zero_idx[0])),\n int(np.max(non_zero_idx[1])),\n int(np.max(non_zero_idx[0]))\n ]\n\n return bounding_box\n\n\ndef coco_to_voc_bounding_box(bounding_box):\n\n \"\"\"\n Convert bounding box annotation from COCO to VOC format\n\n Parameters\n ----------\n bounding_box: list of shape (4)\n Bounding box with x1, y1, width, height values\n\n Returns\n -------\n bounding_box: list of shape (4)\n Bounding box with x1, y1, x2, y2 values\n \"\"\"\n\n x1 = bounding_box[0]\n y1 = bounding_box[1]\n x2 = x1 + bounding_box[2]\n y2 = y1 + bounding_box[3]\n\n return x1, y1, x2, y2\n\n\ndef coco_to_yolo_bounding_box(bounding_box):\n\n \"\"\"\n Convert bounding box annotation from COCO to YOLO format\n\n Parameters\n ----------\n bounding_box: list of shape (4)\n Bounding box with x1, y1, width, height values\n\n Returns\n -------\n bounding_box: list of shape (4)\n Bounding box with x_center, y_center, width, height values\n \"\"\"\n\n x1 = bounding_box[0]\n y1 = bounding_box[1]\n width = bounding_box[2]\n height = bounding_box[3]\n x_center = x1 + (width // 2)\n y_center = y1 + (height // 2)\n\n return x_center, y_center, width, height\n","repo_name":"gunesevitan/rsna-2023-abdominal-trauma-detection","sub_path":"src/annotation_utilities.py","file_name":"annotation_utilities.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"17598197179","text":"import random\n\nfrom die import Die\nfrom player import Player\n\nMAX_PLAYERS = 4\nMIN_PLAYERS = 2\n\nscreens = {\n 'START': 1,\n 'TURNS': 2,\n 'BOARD': 3,\n 'PROPS': 4,\n 'CARDS': 5\n}\n\n\nclass Game:\n def __init__(self, id):\n self.game_id = id\n self.players = []\n self.player_turn = 0 # The index of the players array that holds the player whose turn it is\n self.dice_values = [1, 1]\n self.is_playing = False\n self.current_screen = screens.get('START')\n self.available_icons = [True, True, True, True]\n self.turn_rolls = []\n\n def get_id(self):\n return self.game_id\n\n def get_players(self):\n return self.players\n\n def get_num_players(self):\n return len(self.players)\n\n def roll(self):\n self.dice_values = [random.randint(0, 5), random.randint(0, 5)]\n
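\n # Reader-added note (an assumption about intent): randint(0, 5) stores\n # zero-based face indices rather than pip counts 1-6; presumably the Die\n # class imported above maps an index to a face. To store pip values\n # directly one would write, e.g.:\n # self.dice_values = [random.randint(1, 6), random.randint(1, 6)]\n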
\n    def done_roll(self, last_roll):\n        if self.current_screen == screens.get('TURNS'):\n            self.turn_rolls.append(last_roll)\n            if len(self.turn_rolls) == len(self.players):\n                unset_players = self.players.copy()\n                for j in range(0, len(unset_players)):\n                    largest = self.turn_rolls.index(max(self.turn_rolls))\n                    self.players[j] = unset_players[largest]  # seat players in descending roll order\n                    self.turn_rolls[largest] = -1  # get rid of the largest element in the list\n                self.current_screen = screens.get('BOARD')\n        self.players[self.player_turn].last_roll = last_roll\n\n    def next_player(self):\n        self.player_turn = (self.player_turn + 1) % self.get_num_players()\n        if self.get_curr_player().bankrupt:\n            self.next_player()\n\n    def set_screen(self, screen_name):\n        self.current_screen = screens.get(screen_name)\n\n    def screen_is(self, screen_name):\n        return self.current_screen == screens.get(screen_name)\n\n    def get_curr_player(self):\n        return self.players[self.player_turn]\n\n    def add_player(self):\n        icon_num = 0\n        while not self.available_icons[icon_num]:\n            icon_num = (icon_num + 1) % len(self.available_icons)\n        self.players.append(Player(False, icon_num, 'Player ' + str(self.get_num_players() + 1), 1, []))\n        self.available_icons[icon_num] = False\n\n    def set_icon(self, player_num, icon_num):\n        \"\"\"\n        Changes the given player's icon\n        :param player_num: index of the player in self.players\n        :param icon_num: index of the new icon\n        :return: None\n        \"\"\"\n        player = self.players[player_num]\n\n        # Make old icon available for other players\n        self.available_icons[player.icon_num] = True\n        player.icon_num = icon_num\n        # Make new icon unavailable\n        self.available_icons[icon_num] = False\n","repo_name":"eobrie17/ski-monopoly","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26362443205","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\n\nlist_county_a = [['Ahrensfelde', '0'],\n ['Altlandsberg', '1'],\n ['Am Mellensee', '0'],\n ['Altdöbern', '0'],\n ['Alt Tucheband', '0'],\n ['Angermünde', '0'],\n ['Althüttendorf', '0'],\n ['Alt Zauche-Wußwerk', '0']\n ]\n\n","repo_name":"hyacinth0213/spider_project","sub_path":"plot_spider/myPlotSpider/landinfo/brandenburg_county_list.py","file_name":"brandenburg_county_list.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25627244688","text":"import sys\nimport hashlib\nimport argparse\nimport os\n\nPATH_TO_CACHE = \"/tmp/nginx_cache\"\n\n\ndef get_request_uri(command_args):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--request_uri', help='request uri')\n    # parse the argv that was passed in (skipping the program name)\n    # instead of implicitly re-reading sys.argv\n    args = parser.parse_args(command_args[1:])\n    request_uri = args.request_uri\n    return request_uri\n\n\ndef format_uri(uri):\n    uri_parts = filter(lambda uri_item: uri_item != 'purge', uri.split('/'))\n    formatted_uri = '/'.join(uri_parts)\n    return formatted_uri\n\n\ndef hash_uri(uri):\n    hashed_uri = hashlib.md5(uri.encode('utf-8')).hexdigest()\n    return hashed_uri\n\n\ndef get_all_cache_files():\n    return os.listdir(PATH_TO_CACHE)\n\n\ndef get_cache_by_request_uri(uri):\n    formatted_uri = format_uri(uri)\n    hashed_uri = hash_uri(formatted_uri)\n    all_files = get_all_cache_files()\n    cache_file = next((file for file in all_files if file == hashed_uri), None)\n    return cache_file\n\n\ndef purge_cache(cache_file):\n    path = os.path.join(PATH_TO_CACHE, cache_file)\n    if os.path.isfile(path):\n        os.remove(path)\n    else:\n        print(\"Error: %s file not found\" % path)\n\n\ndef main(command_args):\n    request_uri = get_request_uri(command_args)\n    print('Request cache for purge %s' % request_uri)\n    cache = get_cache_by_request_uri(request_uri)\n    if cache:\n        purge_cache(cache)\n        print('Cache successfully purged for uri %s' % request_uri)\n\n\n\nif __name__ == '__main__':\n    print('Python %s on %s' % (sys.version, sys.platform))\n
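 # Example invocation (reader-added; path is hypothetical). The cache key is\n # the MD5 of the request path with any 'purge' segment stripped, so:\n # python cache_cleaner.py --request_uri /purge/images/logo.png\n # would remove /tmp/nginx_cache/<md5 of '/images/logo.png'> if it exists.\n 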
main(sys.argv)","repo_name":"T1dehunter/home-work","sub_path":"lesson-7-nginx-customization/nginx-cache/cache_cleaner.py","file_name":"cache_cleaner.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74376130812","text":"\"\"\"Module to init the application\"\"\"\nfrom flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom extract.extraction import Extract\nfrom models.engine import db, ma\nfrom models.models import Data, DataSchema\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(\"DATABASE_URL\")\n\ndb.init_app(app)\nma.init_app(app)\nwith app.app_context():\n db.create_all()\n\n\n@app.route('/extract', methods=['POST'])\ndef add_extraction():\n \"\"\"Add extract information from path document to database\"\"\"\n pdf_path = request.args.get('doc_path')\n doc_data = Extract()\n try:\n image_path = doc_data.convert_PDF_to_image(pdf_path)\n except:\n return jsonify({\"error_msg\": \"Couldn't find path to the pdf file\"})\n image = doc_data.cv_image(image_path)\n text = doc_data.ocr_reading(image, image_path)\n data = doc_data.get_data_from_text(text)\n data['doc_path'] = pdf_path\n try:\n save_data = Data(\n vendor_name=data['Vendor name'],\n fiscal_number=data['Fiscal number'], contract=data['Contract number'],\n comments=data['Comments'],\n start_date=data['Start date'],\n end_date=data['End date'],\n doc_path=data['doc_path']\n )\n db.session.add(save_data)\n db.session.commit()\n result = [[True, save_data.id], data]\n except:\n return jsonify({\"error_msg\": \"Couldn't store data in database\"})\n return jsonify(result)\n\n\n@app.route('/db_data/', methods=['GET'])\ndef get_extraction_data():\n \"\"\"Query all data from database\"\"\"\n request_data = request.args.get('table_name')\n if request_data == Data.__tablename__:\n data = Data.query.order_by(Data.id.desc()).all()\n data_schema = DataSchema(many=True)\n result = data_schema.dump(data)\n return jsonify(result)\n else:\n return jsonify({\"error_msg\": \"Table name '{}\\' doesn't exist\".format(request_data)})\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"el-dani-cortes/PRGX_technical_challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11526396562","text":"# Задать список из n чисел последовательности (1+1/n)**n и вывести на экран их сумму\n\namount = int(input('Введите количество элементов последовательности: '))\nsumma = 0\n\n# ----------- Вариант 1 -----------------\n# for i in range(1, amount): \n# summa = summa + ((1 + 1 / i) ** i)\n# print(summa)\n\n\n# ----------- Вариант 2 -----------------\nsummas = [1 + (1 / i) ** i for i in range(1, amount)]\nprint(sum(summas))\n","repo_name":"netonblpb/PythonTasks","sub_path":"Task16.py","file_name":"Task16.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17163926267","text":"n = int(input())\ncard = list(map(int, input().split()))\ncard.sort()\nm= int(input())\nnum = list(map(int, input().split()))\n\ndef binary(target):\n start =0\n end = n-1\n while start <= end:\n mid = (start + end) //2\n if target == card[mid]:\n return 1\n elif 
for i in num:\n    print(binary(i),end = \" \")\nprint()","repo_name":"twodf78/coding_test","sub_path":"SearchAlgorithm/binary/10815.py","file_name":"10815.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34580457770","text":"import numpy as np\nfrom PIL import Image\n\n\nPATH = \"data/5\"\nRESOLUTION = 75\n\nVALUE_MATRIX = np.zeros((RESOLUTION, RESOLUTION, RESOLUTION), dtype=int)\n\n\n\n\n# read matrix from file\n\nfile = open(PATH+\"/value_matrix\", \"r\")\nraw_text = file.read()  # renamed from 'input' to avoid shadowing the builtin\nfile.close()\nslices = raw_text.split(\"]], [[\")\nslices[0] = slices[0][3:]\nslices[-1] = slices[-1][:-3]\nfor x in range(RESOLUTION):\n    print(x)\n    rows = slices[x].split(\"], [\")\n    for y in range(RESOLUTION):\n        row = rows[y]\n        row = row.split(\",\")\n        for z in range(RESOLUTION):\n            VALUE_MATRIX[x][y][z] = int(row[z])\n\n\n\n\n\n\nfor i in range(RESOLUTION):\n    img = Image.fromarray(np.uint8(VALUE_MATRIX[i]), 'L')\n    img.save(f\"{PATH}/vals/image{i}.png\")","repo_name":"cle4rly/extreme-image-segmentation_rpml","sub_path":"src/show_values.py","file_name":"show_values.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20668851079","text":"import gc\r\n#exercisey\r\nclass Node:\r\n    def __init__(self, data):\r\n        self.data = data\r\n        self.next = None\r\n\r\n\r\nclass CircularLinkedList:\r\n    def __init__(self):\r\n        self.last = None\r\n\r\n    def addToEmpty(self, data):\r\n\r\n        if self.last != None:\r\n            return self.last\r\n\r\n        newNode = Node(data)\r\n\r\n        self.last = newNode\r\n\r\n        self.last.next = self.last\r\n        return self.last\r\n\r\n    def addFront(self, data):\r\n\r\n        if self.last == None:\r\n            return self.addToEmpty(data)\r\n\r\n        newNode = Node(data)\r\n\r\n        newNode.next = self.last.next\r\n\r\n        self.last.next = newNode\r\n\r\n        return self.last\r\n\r\n    def addEnd(self, data):\r\n        if self.last == None:\r\n            return self.addToEmpty(data)\r\n\r\n        newNode = Node(data)\r\n\r\n        newNode.next = self.last.next\r\n        self.last.next = newNode\r\n\r\n        self.last = newNode\r\n\r\n        return self.last\r\n\r\n    def addAfter(self, data, item):\r\n\r\n        if self.last == None:\r\n            return None\r\n\r\n        newNode = Node(data)\r\n        p = self.last.next\r\n        while p:\r\n\r\n            if p.data == item:\r\n\r\n                newNode.next = p.next\r\n                p.next = newNode\r\n\r\n                if p == self.last:\r\n                    self.last = newNode\r\n                    return self.last\r\n                else:\r\n                    return self.last\r\n            p = p.next\r\n            if p == self.last.next:\r\n                print(item, \"The given node is not present in the list\")\r\n                break\r\n\r\n    def deleteNode(self, last, key):\r\n\r\n        if last == None:\r\n            return None\r\n\r\n        # the only node in the list holds the key: the list becomes empty\r\n        if (last).data == key and (last).next == last:\r\n            return None\r\n\r\n        temp = last\r\n        d = None\r\n\r\n        if (last).data == key:\r\n            while temp.next != last:\r\n                temp = temp.next\r\n\r\n            temp.next = (last).next\r\n            last = temp.next\r\n\r\n        while temp.next != last and temp.next.data != key:\r\n            temp = temp.next\r\n\r\n        if temp.next.data == key:\r\n            d = temp.next\r\n            temp.next = d.next\r\n\r\n        return last\r\n\r\n    def traverse(self):\r\n        if self.last == None:\r\n            print(\"The list is empty\")\r\n            return\r\n\r\n        newNode = self.last.next\r\n        while newNode:\r\n            print(newNode.data, end=\" \")\r\n            newNode = newNode.next\r\n            if newNode == self.last.next:\r\n                break\r\n
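\r\n\r\n# Reader-added usage sketch for the circular list (hypothetical values):\r\n# cll = CircularLinkedList()\r\n# cll.addEnd(3)\r\n# cll.addEnd(5)\r\n# cll.addFront(1)\r\n# cll.traverse()  # prints: 1 3 5\r\n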
\r\n#exercise function\r\n\r\n\r\ndef exercise(bmi,bmr):\r\n    \r\n    print(\"\\n\\n\\t\\t\\t\\t\\tJOGGING or CYCLING or SKIPPING\")\r\n    print(\"\\nBased on BMI\")\r\n    if bmi<18:\r\n        print(\"Underweight\")\r\n    elif bmi>=18 and bmi<=24:\r\n        print(\"Healthy\")\r\n    else:\r\n        print(\"Overweight\")\r\n    print(\"\\nMaintenance Factor : \",1800)\r\n    ex=input(\"\\nENTER 1 for jogging, 2 for cycling, 3 for skipping: \")\r\n    exer=abs((bmr-1700)+(bmr-2000))\r\n    val=0\r\n    if ex=='1':\r\n        jogging=75\r\n        val=exer//jogging\r\n        print(\"Jog for at least \",val,\"km today\")\r\n\r\n    elif ex=='2':\r\n        cycle=100\r\n        val=exer//cycle\r\n        print(\"Do cycling for at least \",val,\"km today\")\r\n\r\n    else:\r\n        # per min\r\n        skip=20\r\n        val=exer//skip\r\n        print(\"Skipping for at least \",val,\"mins today\")\r\n    \r\n    return val\r\n#food\r\ndef food(bmi,bmr):\r\n    # node creation\r\n\r\n    class Node:\r\n\r\n        def __init__(self, data,calorie):\r\n            self.data = data\r\n            self.calorie=calorie\r\n            self.next = None\r\n            self.prev = None\r\n\r\n\r\n    class DoublyLinkedList:\r\n\r\n        def __init__(self):\r\n            self.head = None\r\n        def insert_front(self, data,cal):\r\n            new_node = Node(data,cal)\r\n            new_node.next = self.head\r\n            if self.head is not None:\r\n                self.head.prev = new_node\r\n            self.head = new_node\r\n        def insert_after(self, prev_node, data,cal):\r\n            if prev_node is None:\r\n                print(\"previous node cannot be null\")\r\n                return\r\n            new_node = Node(data,cal)\r\n            new_node.next = prev_node.next\r\n            prev_node.next = new_node\r\n            new_node.prev = prev_node\r\n            if new_node.next:\r\n                new_node.next.prev = new_node\r\n        def insert_end(self, data,cal):\r\n            new_node = Node(data,cal)\r\n            if self.head is None:\r\n                self.head = new_node\r\n                return\r\n            temp = self.head\r\n            while temp.next:\r\n                temp = temp.next\r\n            temp.next = new_node\r\n            new_node.prev = temp\r\n\r\n            return\r\n        def deleteNode(self, dele):\r\n            if self.head is None or dele is None:\r\n                return\r\n            if self.head == dele:\r\n                self.head = dele.next\r\n            if dele.next is not None:\r\n                dele.next.prev = dele.prev\r\n            if dele.prev is not None:\r\n                dele.prev.next = dele.next\r\n            gc.collect()\r\n        def display_list(self, node):\r\n\r\n            while node:\r\n                print(node.data,node.calorie)\r\n                last = node\r\n                node = node.next\r\n        #search\r\n        def search(self,head,data):\r\n            temp=head\r\n            while temp:\r\n                if temp.data==data:\r\n                    return temp.calorie\r\n                \r\n                temp = temp.next\r\n                if temp==None:\r\n                    print(\"The given data doesn't exist:\")\r\n                    return 0\r\n            \r\n\r\n    # initialize the empty lists\r\n    d_linked_list = DoublyLinkedList()\r\n    m_linked_list = DoublyLinkedList()\r\n    l_linked_list = DoublyLinkedList()\r\n\r\n    d_linked_list.insert_end(\"Chaapati\",71)\r\n    d_linked_list.insert_end(\"Dosai\",100)\r\n    d_linked_list.insert_end(\"Idli\",39)\r\n    d_linked_list.insert_end(\"bread\",70)\r\n    d_linked_list.insert_end(\"dhal\",120)\r\n    d_linked_list.insert_end(\"chicken\",250)\r\n    d_linked_list.insert_end(\"mutton\",154)\r\n    d_linked_list.insert_end(\"chenna\",719)\r\n    d_linked_list.insert_end(\"soya\",300)\r\n    d_linked_list.insert_end(\"egg\",78)\r\n\r\n\r\n    #lunch\r\n    l_linked_list.insert_end(\"chicken\",250)\r\n    l_linked_list.insert_end(\"mutton\",154)\r\n    l_linked_list.insert_end(\"fish\",200)\r\n    l_linked_list.insert_end(\"meals\",150)\r\n    l_linked_list.insert_end(\"veg_side_dish\",120)\r\n    l_linked_list.insert_end(\"egg\",78)\r\n\r\n\r\n\r\n    # morning\r\n    m_linked_list.insert_end(\"chenna\",719)\r\n    m_linked_list.insert_end(\"cheese\",402)\r\n    m_linked_list.insert_end(\"Chaapati\",71)\r\n    m_linked_list.insert_end(\"Dosai\",100)\r\n    m_linked_list.insert_end(\"Idli\",39)\r\n    m_linked_list.insert_end(\"oats\",300)\r\n    m_linked_list.insert_end(\"dhal\",120)\r\n
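\r\n    # Reader-added worked example of the accumulation loop below (hypothetical\r\n    # order): two chapatis and one dosa at breakfast would add\r\n    # q += 71 * 2   # running total 142 kcal\r\n    # q += 100 * 1  # running total 242 kcal, still under the 2000 kcal cap\r\n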
m_linked_list.insert_end(\"Dosai\",100)\r\n m_linked_list.insert_end(\"Idli\",39)\r\n m_linked_list.insert_end(\"oats\",300)\r\n m_linked_list.insert_end(\"dhal\",120)\r\n\r\n\r\n\r\n print()\r\n food=input(\"\\t\\t\\t\\t\\t DIET PLAN\\n\\n\\t\\t\\t\\t\\tBreakfast(1)\\n\\t\\t\\t\\t\\t Lunch(2)\\n\\t\\t\\t\\t\\t dinner(3)\\nEnter options(1,2,3):\")\r\n \r\n\r\n print(\"Calories are described for 100grms\")\r\n\r\n print(\"ENTER CALORIE INTAKE:\",2000)\r\n q=0\r\n print()\r\n tt='y'\r\n items=[]\r\n ee='n'\r\n #2300d_linked_list.display_list(d_linked_list.head)\r\n print(\"ENTER THE FOODS U WANT IN AND THE COUNT for a day :\")\r\n while q<2000 and tt==\"y\" and ee=='n':\r\n \r\n if food=='1':\r\n print(\"Breakfast\")\r\n m_linked_list.display_list(m_linked_list.head) \r\n elif food==\"2\":\r\n print(\"Lunch\")\r\n l_linked_list.display_list(l_linked_list.head)\r\n else:\r\n print(\"Dinner\")\r\n d_linked_list.display_list(d_linked_list.head) \r\n v=input()\r\n qty=int(input(\"How many:=\"))\r\n \r\n if food==1:\r\n s=m_linked_list.search(m_linked_list.head,v)\r\n elif food==2:\r\n s=l_linked_list.search(l_linked_list.head,v)\r\n else:\r\n s=d_linked_list.search(d_linked_list.head,v)\r\n temp=q+(s*qty)\r\n if temp>bmr:\r\n t=input(\"Remove an food from list(Y/N): \")\r\n if t=='y':\r\n k=input()\r\n if food=='1':\r\n s=m_linked_list.search(k)\r\n elif food==\"2\":\r\n s=l_linked_list.search(k)\r\n else:\r\n s=d_linked_list.search(k)\r\n print()\r\n ty=input(\"How much you wana remove: \")\r\n q-=s*ty\r\n else:\r\n tt=input(\"if Insuffiecency in nutrient(Y/N): \")\r\n \r\n else:\r\n q+=s*qty\r\n items.append(v)\r\n print(\"calories=\",[q,qty])\r\n ee=input(\"Wanna stop (y/n):\")\r\n food=input(\"\\nbreakfast lunch dinner(1,2,3):\")\r\n print(\"TOTAL CALORIE OF FOOD:=\",q)\r\n print(\"food list:- \",items) \r\nclass TreeNode:\r\n def __init__(self, data,password,bmr):\r\n self.data = data\r\n self.password=password\r\n self.bmr=bmr\r\n self.parent = None\r\n self.left = None\r\n self.right = None\r\n\r\n\r\nclass SplayTree:\r\n def __init__(self):\r\n self.root = None\r\n\r\n def leftRotate(self, x):\r\n y = x.right\r\n x.right = y.left\r\n if y.left != None:\r\n y.left.parent = x\r\n\r\n y.parent = x.parent\r\n # x is root\r\n if x.parent == None:\r\n self.root = y\r\n # x is left child\r\n elif x == x.parent.left:\r\n x.parent.left = y\r\n # x is right child\r\n else:\r\n x.parent.right = y\r\n\r\n y.left = x\r\n x.parent = y\r\n\r\n def rightRotate(self, x):\r\n y = x.left\r\n x.left = y.right\r\n if y.right != None:\r\n y.right.parent = x\r\n\r\n y.parent = x.parent\r\n # x is root\r\n if x.parent == None:\r\n self.root = y\r\n # x is right child\r\n elif x == x.parent.right:\r\n x.parent.right = y\r\n # x is left child\r\n else:\r\n x.parent.left = y\r\n\r\n y.right = x\r\n x.parent = y\r\n\r\n def splay(self, n):\r\n # node is not root\r\n while n.parent != None:\r\n # node is child of root, one rotation\r\n if n.parent == self.root:\r\n if n == n.parent.left:\r\n self.rightRotate(n.parent)\r\n else:\r\n self.leftRotate(n.parent)\r\n\r\n else:\r\n p = n.parent\r\n g = p.parent # grandparent\r\n\r\n if n.parent.left == n and p.parent.left == p: # both are left children\r\n self.rightRotate(g)\r\n self.rightRotate(p)\r\n\r\n elif n.parent.right == n and p.parent.right == p: # both are right children\r\n self.leftRotate(g)\r\n self.leftRotate(p)\r\n\r\n elif n.parent.right == n and p.parent.left == p:\r\n self.leftRotate(p)\r\n self.rightRotate(g)\r\n\r\n elif n.parent.left == n and 
\r\nclass SplayTree:\r\n    def __init__(self):\r\n        self.root = None\r\n\r\n    def leftRotate(self, x):\r\n        y = x.right\r\n        x.right = y.left\r\n        if y.left != None:\r\n            y.left.parent = x\r\n\r\n        y.parent = x.parent\r\n        # x is root\r\n        if x.parent == None:\r\n            self.root = y\r\n        # x is left child\r\n        elif x == x.parent.left:\r\n            x.parent.left = y\r\n        # x is right child\r\n        else:\r\n            x.parent.right = y\r\n\r\n        y.left = x\r\n        x.parent = y\r\n\r\n    def rightRotate(self, x):\r\n        y = x.left\r\n        x.left = y.right\r\n        if y.right != None:\r\n            y.right.parent = x\r\n\r\n        y.parent = x.parent\r\n        # x is root\r\n        if x.parent == None:\r\n            self.root = y\r\n        # x is right child\r\n        elif x == x.parent.right:\r\n            x.parent.right = y\r\n        # x is left child\r\n        else:\r\n            x.parent.left = y\r\n\r\n        y.right = x\r\n        x.parent = y\r\n\r\n    def splay(self, n):\r\n        # node is not root\r\n        while n.parent != None:\r\n            # node is child of root, one rotation\r\n            if n.parent == self.root:\r\n                if n == n.parent.left:\r\n                    self.rightRotate(n.parent)\r\n                else:\r\n                    self.leftRotate(n.parent)\r\n\r\n            else:\r\n                p = n.parent\r\n                g = p.parent  # grandparent\r\n\r\n                if n.parent.left == n and p.parent.left == p:  # both are left children\r\n                    self.rightRotate(g)\r\n                    self.rightRotate(p)\r\n\r\n                elif n.parent.right == n and p.parent.right == p:  # both are right children\r\n                    self.leftRotate(g)\r\n                    self.leftRotate(p)\r\n\r\n                elif n.parent.right == n and p.parent.left == p:\r\n                    self.leftRotate(p)\r\n                    self.rightRotate(g)\r\n\r\n                elif n.parent.left == n and p.parent.right == p:\r\n                    self.rightRotate(p)\r\n                    self.leftRotate(g)\r\n\r\n    def insert(self, n):\r\n        y = None\r\n        temp = self.root\r\n        while temp != None:\r\n            y = temp\r\n            if n.data < temp.data:\r\n                temp = temp.left\r\n            else:\r\n                temp = temp.right\r\n\r\n        n.parent = y\r\n\r\n        if y == None:  # newly added node is root\r\n            self.root = n\r\n        elif n.data < y.data:\r\n            y.left = n\r\n        else:\r\n            y.right = n\r\n\r\n        self.splay(n)\r\n\r\n    def bstSearch(self, n, x, y):\r\n        # guard: fell off the tree, so this user/password pair does not exist\r\n        if n is None:\r\n            return None\r\n        if x == n.data and y==n.password:\r\n            self.splay(n)\r\n            print(\"Logged In\")\r\n            # return the node itself so the caller can read both bmr and bmi\r\n            return n\r\n        elif x < n.data:\r\n            return self.bstSearch(n.left, x, y)\r\n        elif x > n.data:\r\n            return self.bstSearch(n.right, x, y)\r\n        else:\r\n            return None\r\n\r\n    def preOrder(self, n):\r\n        if n != None:\r\n            print(n.data,n.password,n.bmr)\r\n            self.preOrder(n.left)\r\n            self.preOrder(n.right)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    tree = SplayTree()\r\n    cll = CircularLinkedList()\r\n\r\n    u=\"yes\"\r\n    print(\"\\t\\t\\t\\tWELCOME TO SSN MINI FITNESS APP\")\r\n    while(u==\"yes\"):\r\n        new=input(\"IF NEW USER (Y/N): \")\r\n        if new=='y':\r\n            user_name=input(\"ENTER NAME: \")\r\n            password=input(\"ENTER PASSWORD: \")\r\n            recheck=input(\"ENTER PASSWORD AGAIN: \")\r\n            while password!=recheck:\r\n                print(\"WRONG PASSWORD\")\r\n                password=input(\"ENTER PASSWORD: \")\r\n                recheck=input(\"ENTER PASSWORD AGAIN: \")\r\n            height=int(input(\"ENTER HEIGHT(cm): \"))\r\n            weight=int(input(\"ENTER WEIGHT(kg): \"))\r\n            age=int(input(\"ENTER AGE: \"))\r\n            bmi=weight//(height*height*0.0001)\r\n            sex=input(\"ENTER SEX (MALE/FEMALE): \")\r\n            print(\"YOUR BMI IS \",bmi)\r\n            pound_weight=int(weight*2.20462)\r\n\r\n            # Harris-Benedict formulas; compare case-insensitively so 'FEMALE' works too\r\n            if sex.lower()==\"female\":\r\n                bmr=int(655+(9.563*weight )+(1.850*height)-(4.676*age))\r\n            else:\r\n                bmr=int(66.47+(13.75*weight) + (5.003*height)-(6.755*age))\r\n            print(\"YOUR BMR: \",bmr)\r\n            a = TreeNode(user_name,password,bmr,bmi)\r\n            tree.insert(a)\r\n            f=input(\"Diet plan(d) / Exercise(e)\")\r\n            if f=='d':\r\n                food(bmi,bmr)\r\n            else:\r\n                goma=exercise(bmi,bmr)\r\n                if goma!=0:\r\n                    cll.addFront(goma)\r\n                vk=input(\"Want to trace your routine? (y/n): \")\r\n                if vk=='y':\r\n                    cll.traverse()\r\n\r\n        else:\r\n            \r\n            user_name=input(\"ENTER NAME: \")\r\n            password=input(\"ENTER PASSWORD: \")\r\n            node=tree.bstSearch(tree.root, user_name,password)\r\n            while node is None:\r\n                print(\"Wrong password or wrong user name\")\r\n                user_name=input(\"ENTER NAME: \")\r\n                password=input(\"ENTER PASSWORD: \")\r\n                node=tree.bstSearch(tree.root, user_name,password)\r\n            bmr=node.bmr\r\n            bmi=node.bmi  # previously undefined here, which broke food()/exercise()\r\n            f=input(\"Diet plan(d) / Exercise(e)\")\r\n            if f=='d':\r\n                food(bmi,bmr)\r\n            else:\r\n                goma=exercise(bmi,bmr)\r\n                if goma!=0:\r\n                    cll.addFront(goma)\r\n                vk=input(\"Want to trace your routine? (y/n): \")\r\n                if vk=='y':\r\n                    cll.traverse()\r\n        u=input(\"\\n\\n\\t\\t\\t\\tif you want to continue (yes/no):\")\r\n\r\n","repo_name":"JDeepak45/ADS-Mini-project","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":14606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71850451453","text":"###########################################\n# Author : Bastien Girardet, Deborah De Wolff\n# Date : 13.05.2018\n# Course : Applications in Object-oriented Programming and Databases\n# Teachers : Binswanger Johannes, Zürcher Ruben\n# Project : Bibliotek\n# Name : book.py SQL Alchemy Model\n# #########################################\n\nfrom db import db\n\nclass BookModel(db.Model):\n    \"\"\"SQLAlchemy Book Model\"\"\"\n\n    # We assign the correct table\n    __tablename__ = 'books'\n    \n    # 
Table columns\n bookId = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(300))\n publisher = db.Column(db.String(150))\n published_date = db.Column(db.String(15))\n description = db.Column(db.Text())\n isbn10 = db.Column(db.String(20))\n isbn13 = db.Column(db.String(20))\n booktype = db.Column(db.String(30))\n language = db.Column(db.String(30))\n thumbnail = db.Column(db.String(200))\n page_count = db.Column(db.Integer)\n md5 = db.Column(db.String(32))\n url_info = db.Column(db.String(200))\n dl_link1 = db.Column(db.String(200))\n dl_link2 = db.Column(db.String(200))\n chosen_url = db.Column(db.String(200))\n filepath = db.Column(db.String(200))\n\n # Foreign key a book belongs to a category and an author\n categoryId = db.Column('categoryId', db.Integer, db.ForeignKey('categories.categoryId'))\n authorId = db.Column('authorId', db.Integer, db.ForeignKey('authors.authorId'))\n\n # We reference the parents\n category = db.relationship('CategoryModel',cascade=\"save-update\")\n author = db.relationship('AuthorModel',cascade=\"save-update\")\n \n # A book might many portfolio book relations\n portfolios_books = db.relationship(\"PortfolioBookModel\", cascade=\"save-update, merge, delete\")\n\n def __init__(self, title, authorId, categoryId,publisher,published_date,description,isbn10,isbn13,booktype,language,thumbnail,page_count,md5,url_info,dl_link1,dl_link2,chosen_url,filepath):\n \"\"\"[summary]\n \n Arguments:\n title {string} -- title of the book\n authorId {int} -- id of the author of the book\n categoryId {int} -- id of the category of the book\n publisher {string} -- name of the publisher of the book\n published_date {string} -- publishing date\n description {string} -- description of the book\n isbn10 {string} -- isbn10 of the book\n isbn13 {string} -- isbn13 of the book\n booktype {string} -- type of the book (extension)\n language {string} -- Language of the book\n thumbnail {string} -- Url of the book's thumbnail\n page_count {int} -- Page count of the book\n md5 {string} -- libgen.io identifier\n url_info {string} -- google book api book info url\n dl_link1 {string} -- download link 1\n dl_link2 {string} -- download link 2\n chosen_url {string} -- Chosen url for the download (Whether dl_link1 or dl_link2)\n filepath {string} -- file path where the book is digitally contained\n \"\"\"\n # Instance variables\n self.title = title\n self.authorId = authorId\n self.categoryId = categoryId\n self.publisher = publisher\n self.published_date = published_date\n self.description = description\n self.isbn10 = isbn10\n self.isbn13 = isbn13\n self.booktype = booktype\n self.language = language\n self.thumbnail = thumbnail\n self.page_count = page_count\n self.md5 = md5\n self.url_info = url_info\n self.dl_link1 = dl_link1\n self.dl_link2 = dl_link2\n self.filepath = filepath\n self.chosen_url = chosen_url\n\n def json(self):\n \"\"\"Return a JSON data of the instance variables\"\"\"\n\n return {\n 'bookId' : self.bookId,\n 'title' : self.title,\n 'authorId' : self.authorId,\n 'categoryId' : self.categoryId,\n 'publisher' : self.publisher,\n 'published_date' : self.published_date,\n 'description' : self.description,\n 'isbn10' : self.isbn10,\n 'isbn13' : self.isbn13,\n 'booktype' : self.booktype,\n 'language' : self.language,\n 'thumbnail' : self.thumbnail,\n 'page_count' : self.page_count,\n 'md5' : self.md5,\n 'url_info' : self.url_info,\n 'dl_link1' : self.dl_link1,\n 'dl_link2' : self.dl_link2,\n 'filepath' : self.filepath,\n 'chosen_url' : self.chosen_url}\n\n # 
Important methods used to retrieve data through SQL Alchemy\n @classmethod\n def find_by_title(cls, title):\n \"\"\"Retrieve the book provided its title\"\"\"\n \n return cls.query.filter_by(title=title).first()\n\n @classmethod\n def find_by_id(cls, bookId):\n \"\"\"Retrieve the book provided its bookId\"\"\"\n\n return cls.query.filter_by(bookId=bookId).first()\n\n @classmethod\n def find_by_author_id(cls, authorId):\n \"\"\"Retrieve the book provided its authorId\"\"\"\n\n return cls.query.filter_by(authorId=authorId).all()\n \n @classmethod\n def find_by_category_id(cls, categoryId):\n \"\"\"Retrieve the book provided its categoryId\"\"\"\n\n return cls.query.filter_by(categoryId=categoryId).all()\n\n def save_to_db(self):\n \"\"\"Methods used to push and commit to the database\"\"\"\n\n db.session.add(self)\n db.session.commit()\n\n def delete_from_db(self):\n \"\"\"Methods used to delete and commit to the database\"\"\" \n\n db.session.delete(self)\n db.session.commit()\n","repo_name":"basgir/bibliotek","sub_path":"models/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10876847576","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom RedditUtils import RedditUtils\nfrom subprocess import call\nimport datetime\nimport random\n\ndef daysSinceEpoch():\n epoch = datetime.datetime.utcfromtimestamp(0)\n today = datetime.datetime.today()\n d = today - epoch\n return str(d.days)\n\n\n\ndef runMain(utilsInstance, subReddit):\n call([\"./reset.sh\"], shell=True)\n\n submissionsList = utilsInstance.returnTop()\n #postNos = input(\"enter post numbers (separated by a space): \")\n numTopComments = 14 #int(input(\"how many top comments: \"))\n globalCount = 0\n \n top = [0,1,2,3,4]\n #for post in postNos.split(\" \"):\n for post in top:\n globalCount += 1\n title = submissionsList[int(post)].title\n postHtml = utilsInstance.generatePostTitle(submissionsList[int(post)])\n utilsInstance.genVideoClip(postHtml, submissionsList[int(post)].title, globalCount)\n\n topComments = utilsInstance.getTopComments(numTopComments, submissionsList[int(post)].comments.list()) \n # commentsList = submissionsList[int(post)].comments.list()\n for commentList in topComments:\n \n # first comment, create image & audioFile\n globalCount += 1\n html = utilsInstance.generateOneComment(commentList[0])\n utilsInstance.genVideoClip(html, commentList[0].body, globalCount)\n \n # create image and audiofile for second comment if one exists\n if(len(commentList) == 2):\n globalCount += 1\n html = utilsInstance.generateTwoComments(commentList[0], commentList[1])\n utilsInstance.genVideoClip(html, commentList[1].body, globalCount)\n fileName = subReddit + \"_\" + daysSinceEpoch() + \"_\" + str(random.randint(1,1000000))\n # combine the individual video files into one file and put it in /output\n call([\"./cleanCombine.sh \" + fileName], shell=True)\n # create a thumbnail\n try:\n utilsInstance.createThumbnail(title, fileName)\n youtubeUpload(title, submissionsList[int(post)].url, subReddit, fileName)\n except:\n pass\n\ndef youtubeUpload(videoTitle, videoDesc, board, fileName):\n #upload video to youtube\n videoParams = '--title \"' + videoTitle + '\" --client-secrets=\"./yt_creds_file.json\" --category=Entertainment --description=\"' + videoDesc + '\" --tags=\"reddit,' + board + ',trending\" --default-language=\"en\" --privacy=\"public\" --default-audio-language=\"en\" --embeddable=True 
--thumbnail ./output/' + fileName + '.jpg ./output/' + fileName + '_vid.mp4'\n    print(\"calling...\")\n    print(\"youtube-upload \" + videoParams)\n    call([\"youtube-upload \" + videoParams], shell=True)\n","repo_name":"forbesjon2/redditYT","sub_path":"Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20221165067","text":"import threading\n\ndef first_func():\n    global x\n\n    while(x < 300):\n        x += 1\n\n    print(threading.current_thread())\n    if x == 300:\n        print(x)\n    \ndef second_func():\n    global x\n    \n    x = 450\n    while(x < 600):\n        x += 1\n    \n    print(threading.current_thread())\n    print(x)\n\ndef main():\n    global x\n    x = 0\n\n    first_thread = threading.Thread(target = first_func, name = 'first thread function')\n    first_thread.start()\n    #first_thread.join()\n    ''' The main thread will wait for its child threads to complete before finishing its own execution.\n    Wherever a thread's join() is invoked, the calling (parent) thread blocks at that point and waits for that child thread to complete.\n    Only then does the next command/instruction of the parent thread execute.'''\n\n    second_thread = threading.Thread(target = second_func, name = 'second thread function')\n    second_thread.start()\n\n    print(threading.enumerate())\n    print(threading.current_thread())\n\n\nif (__name__ == '__main__'):\n    main()","repo_name":"SharanJaiswal/py","sub_path":"multithreading_multiprocessing/02_mt.py","file_name":"02_mt.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33660162967","text":"from m5.SimObject import *\nfrom m5.params import *\nfrom m5.proxy import *\n\nfrom m5.objects.ClockedObject import ClockedObject\nfrom m5.objects.IndexingPolicies import *\nfrom m5.objects.ReplacementPolicies import *\n\n\nclass HWPProbeEvent(object):\n    def __init__(self, prefetcher, obj, *listOfNames):\n        self.obj = obj\n        self.prefetcher = prefetcher\n        self.names = listOfNames\n\n    def register(self):\n        if self.obj:\n            for name in self.names:\n                self.prefetcher.getCCObject().addEventProbe(\n                    self.obj.getCCObject(), name\n                )\n\n\nclass BasePrefetcher(ClockedObject):\n    type = \"BasePrefetcher\"\n    abstract = True\n    cxx_class = \"gem5::prefetch::Base\"\n    cxx_header = \"mem/cache/prefetch/base.hh\"\n    cxx_exports = [PyBindMethod(\"addEventProbe\"), PyBindMethod(\"addMMU\")]\n    sys = Param.System(Parent.any, \"System this prefetcher belongs to\")\n\n    # Get the block size from the parent (system)\n    block_size = Param.Int(Parent.cache_line_size, \"Block size in bytes\")\n\n    on_miss = Param.Bool(False, \"Only notify prefetcher on misses\")\n    on_read = Param.Bool(True, \"Notify prefetcher on reads\")\n    on_write = Param.Bool(True, \"Notify prefetcher on writes\")\n    on_data = Param.Bool(True, \"Notify prefetcher on data accesses\")\n    on_inst = Param.Bool(True, \"Notify prefetcher on instruction accesses\")\n    prefetch_on_access = Param.Bool(\n        Parent.prefetch_on_access,\n        \"Notify the hardware prefetcher on every access (not just misses)\",\n    )\n    prefetch_on_pf_hit = Param.Bool(\n        Parent.prefetch_on_pf_hit,\n        \"Notify the hardware prefetcher on hit on prefetched lines\",\n    )\n    use_virtual_addresses = Param.Bool(\n        False, \"Use virtual addresses for prefetching\"\n    )\n    page_bytes = Param.MemorySize(\n        \"4KiB\", \"Size of pages for virtual addresses\"\n    )\n\n    def __init__(self, 
**kwargs):\n super().__init__(**kwargs)\n self._events = []\n self._mmus = []\n\n def addEvent(self, newObject):\n self._events.append(newObject)\n\n # Override the normal SimObject::regProbeListeners method and\n # register deferred event handlers.\n def regProbeListeners(self):\n for mmu in self._mmus:\n self.getCCObject().addMMU(mmu.getCCObject())\n for event in self._events:\n event.register()\n self.getCCObject().regProbeListeners()\n\n def listenFromProbe(self, simObj, *probeNames):\n if not isinstance(simObj, SimObject):\n raise TypeError(\"argument must be of SimObject type\")\n if len(probeNames) <= 0:\n raise TypeError(\"probeNames must have at least one element\")\n self.addEvent(HWPProbeEvent(self, simObj, *probeNames))\n\n def registerMMU(self, simObj):\n if not isinstance(simObj, SimObject):\n raise TypeError(\"argument must be a SimObject type\")\n self._mmus.append(simObj)\n\n\nclass MultiPrefetcher(BasePrefetcher):\n type = \"MultiPrefetcher\"\n cxx_class = \"gem5::prefetch::Multi\"\n cxx_header = \"mem/cache/prefetch/multi.hh\"\n\n prefetchers = VectorParam.BasePrefetcher([], \"Array of prefetchers\")\n\n\nclass QueuedPrefetcher(BasePrefetcher):\n type = \"QueuedPrefetcher\"\n abstract = True\n cxx_class = \"gem5::prefetch::Queued\"\n cxx_header = \"mem/cache/prefetch/queued.hh\"\n latency = Param.Int(1, \"Latency for generated prefetches\")\n queue_size = Param.Int(32, \"Maximum number of queued prefetches\")\n max_prefetch_requests_with_pending_translation = Param.Int(\n 32,\n \"Maximum number of queued prefetches that have a missing translation\",\n )\n queue_squash = Param.Bool(True, \"Squash queued prefetch on demand access\")\n queue_filter = Param.Bool(True, \"Don't queue redundant prefetches\")\n cache_snoop = Param.Bool(\n False, \"Snoop cache to eliminate redundant request\"\n )\n\n tag_prefetch = Param.Bool(\n True, \"Tag prefetch with PC of generating access\"\n )\n\n # The throttle_control_percentage controls how many of the candidate\n # addresses generated by the prefetcher will be finally turned into\n # prefetch requests\n # - If set to 100, all candidates can be discarded (one request\n # will always be allowed to be generated)\n # - Setting it to 0 will disable the throttle control, so requests are\n # created for all candidates\n # - If set to 60, 40% of candidates will generate a request, and the\n # remaining 60% will be generated depending on the current accuracy\n throttle_control_percentage = Param.Percent(\n 0,\n \"Percentage of requests \\\n that can be throttled depending on the accuracy of the prefetcher.\",\n )\n\n\nclass StridePrefetcherHashedSetAssociative(SetAssociative):\n type = \"StridePrefetcherHashedSetAssociative\"\n cxx_class = \"gem5::prefetch::StridePrefetcherHashedSetAssociative\"\n cxx_header = \"mem/cache/prefetch/stride.hh\"\n\n\nclass StridePrefetcher(QueuedPrefetcher):\n type = \"StridePrefetcher\"\n cxx_class = \"gem5::prefetch::Stride\"\n cxx_header = \"mem/cache/prefetch/stride.hh\"\n\n # Do not consult stride prefetcher on instruction accesses\n on_inst = False\n\n confidence_counter_bits = Param.Unsigned(\n 3, \"Number of bits of the confidence counter\"\n )\n initial_confidence = Param.Unsigned(\n 4, \"Starting confidence of new entries\"\n )\n confidence_threshold = Param.Percent(\n 50, \"Prefetch generation confidence threshold\"\n )\n\n use_requestor_id = Param.Bool(True, \"Use requestor id based history\")\n\n degree = Param.Int(4, \"Number of prefetches to generate\")\n\n table_assoc = Param.Int(4, \"Associativity 
of the PC table\")\n table_entries = Param.MemorySize(\"64\", \"Number of entries of the PC table\")\n table_indexing_policy = Param.BaseIndexingPolicy(\n StridePrefetcherHashedSetAssociative(\n entry_size=1, assoc=Parent.table_assoc, size=Parent.table_entries\n ),\n \"Indexing policy of the PC table\",\n )\n table_replacement_policy = Param.BaseReplacementPolicy(\n RandomRP(), \"Replacement policy of the PC table\"\n )\n\n\nclass TaggedPrefetcher(QueuedPrefetcher):\n type = \"TaggedPrefetcher\"\n cxx_class = \"gem5::prefetch::Tagged\"\n cxx_header = \"mem/cache/prefetch/tagged.hh\"\n\n degree = Param.Int(2, \"Number of prefetches to generate\")\n\n\nclass IndirectMemoryPrefetcher(QueuedPrefetcher):\n type = \"IndirectMemoryPrefetcher\"\n cxx_class = \"gem5::prefetch::IndirectMemory\"\n cxx_header = \"mem/cache/prefetch/indirect_memory.hh\"\n pt_table_entries = Param.MemorySize(\n \"16\", \"Number of entries of the Prefetch Table\"\n )\n pt_table_assoc = Param.Unsigned(16, \"Associativity of the Prefetch Table\")\n pt_table_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1,\n assoc=Parent.pt_table_assoc,\n size=Parent.pt_table_entries,\n ),\n \"Indexing policy of the pattern table\",\n )\n pt_table_replacement_policy = Param.BaseReplacementPolicy(\n LRURP(), \"Replacement policy of the pattern table\"\n )\n max_prefetch_distance = Param.Unsigned(16, \"Maximum prefetch distance\")\n num_indirect_counter_bits = Param.Unsigned(\n 3, \"Number of bits of the indirect counter\"\n )\n ipd_table_entries = Param.MemorySize(\n \"4\", \"Number of entries of the Indirect Pattern Detector\"\n )\n ipd_table_assoc = Param.Unsigned(\n 4, \"Associativity of the Indirect Pattern Detector\"\n )\n ipd_table_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1,\n assoc=Parent.ipd_table_assoc,\n size=Parent.ipd_table_entries,\n ),\n \"Indexing policy of the Indirect Pattern Detector\",\n )\n ipd_table_replacement_policy = Param.BaseReplacementPolicy(\n LRURP(), \"Replacement policy of the Indirect Pattern Detector\"\n )\n shift_values = VectorParam.Int([2, 3, 4, -3], \"Shift values to evaluate\")\n addr_array_len = Param.Unsigned(4, \"Number of misses tracked\")\n prefetch_threshold = Param.Unsigned(\n 2, \"Counter threshold to start the indirect prefetching\"\n )\n stream_counter_threshold = Param.Unsigned(\n 4, \"Counter threshold to enable the stream prefetcher\"\n )\n streaming_distance = Param.Unsigned(\n 4, \"Number of prefetches to generate when using the stream prefetcher\"\n )\n\n\nclass SignaturePathPrefetcher(QueuedPrefetcher):\n type = \"SignaturePathPrefetcher\"\n cxx_class = \"gem5::prefetch::SignaturePath\"\n cxx_header = \"mem/cache/prefetch/signature_path.hh\"\n\n signature_shift = Param.UInt8(\n 3, \"Number of bits to shift when calculating a new signature\"\n )\n signature_bits = Param.UInt16(12, \"Size of the signature, in bits\")\n signature_table_entries = Param.MemorySize(\n \"1024\", \"Number of entries of the signature table\"\n )\n signature_table_assoc = Param.Unsigned(\n 2, \"Associativity of the signature table\"\n )\n signature_table_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1,\n assoc=Parent.signature_table_assoc,\n size=Parent.signature_table_entries,\n ),\n \"Indexing policy of the signature table\",\n )\n signature_table_replacement_policy = Param.BaseReplacementPolicy(\n LRURP(), \"Replacement policy of the signature table\"\n )\n\n num_counter_bits = Param.UInt8(\n 3, \"Number of bits of the 
saturating counters\"\n    )\n    pattern_table_entries = Param.MemorySize(\n        \"4096\", \"Number of entries of the pattern table\"\n    )\n    pattern_table_assoc = Param.Unsigned(\n        1, \"Associativity of the pattern table\"\n    )\n    strides_per_pattern_entry = Param.Unsigned(\n        4, \"Number of strides stored in each pattern entry\"\n    )\n    pattern_table_indexing_policy = Param.BaseIndexingPolicy(\n        SetAssociative(\n            entry_size=1,\n            assoc=Parent.pattern_table_assoc,\n            size=Parent.pattern_table_entries,\n        ),\n        \"Indexing policy of the pattern table\",\n    )\n    pattern_table_replacement_policy = Param.BaseReplacementPolicy(\n        LRURP(), \"Replacement policy of the pattern table\"\n    )\n\n    prefetch_confidence_threshold = Param.Float(\n        0.5, \"Minimum confidence to issue prefetches\"\n    )\n    lookahead_confidence_threshold = Param.Float(\n        0.75, \"Minimum confidence to continue exploring lookahead entries\"\n    )\n\n\nclass SignaturePathPrefetcherV2(SignaturePathPrefetcher):\n    type = \"SignaturePathPrefetcherV2\"\n    cxx_class = \"gem5::prefetch::SignaturePathV2\"\n    cxx_header = \"mem/cache/prefetch/signature_path_v2.hh\"\n\n    signature_table_entries = \"256\"\n    signature_table_assoc = 1\n    pattern_table_entries = \"512\"\n    pattern_table_assoc = 1\n    num_counter_bits = 4\n    prefetch_confidence_threshold = 0.25\n    lookahead_confidence_threshold = 0.25\n\n    global_history_register_entries = Param.MemorySize(\n        \"8\", \"Number of entries of global history register\"\n    )\n    global_history_register_indexing_policy = Param.BaseIndexingPolicy(\n        SetAssociative(\n            entry_size=1,\n            assoc=Parent.global_history_register_entries,\n            size=Parent.global_history_register_entries,\n        ),\n        \"Indexing policy of the global history register\",\n    )\n    global_history_register_replacement_policy = Param.BaseReplacementPolicy(\n        LRURP(), \"Replacement policy of the global history register\"\n    )\n\n\nclass AccessMapPatternMatching(ClockedObject):\n    type = \"AccessMapPatternMatching\"\n    cxx_class = \"gem5::prefetch::AccessMapPatternMatching\"\n    cxx_header = \"mem/cache/prefetch/access_map_pattern_matching.hh\"\n\n    block_size = Param.Unsigned(\n        Parent.block_size,\n        \"Cacheline size used by the prefetcher using this object\",\n    )\n\n    limit_stride = Param.Unsigned(\n        0, \"Limit the strides checked up to -X/X, if 0, disable the limit\"\n    )\n    start_degree = Param.Unsigned(\n        4, \"Initial degree (Maximum number of prefetches generated)\"\n    )\n    hot_zone_size = Param.MemorySize(\"2KiB\", \"Memory covered by a hot zone\")\n    access_map_table_entries = Param.MemorySize(\n        \"256\", \"Number of entries in the access map table\"\n    )\n    access_map_table_assoc = Param.Unsigned(\n        8, \"Associativity of the access map table\"\n    )\n    access_map_table_indexing_policy = Param.BaseIndexingPolicy(\n        SetAssociative(\n            entry_size=1,\n            assoc=Parent.access_map_table_assoc,\n            size=Parent.access_map_table_entries,\n        ),\n        \"Indexing policy of the access map table\",\n    )\n    access_map_table_replacement_policy = Param.BaseReplacementPolicy(\n        LRURP(), \"Replacement policy of the access map table\"\n    )\n    high_coverage_threshold = Param.Float(\n        0.25, \"A prefetch coverage factor bigger than this is considered high\"\n    )\n    low_coverage_threshold = Param.Float(\n        0.125, \"A prefetch coverage factor smaller than this is considered low\"\n    )\n    high_accuracy_threshold = Param.Float(\n        0.5, \"A prefetch accuracy factor bigger than this is considered high\"\n    )\n    low_accuracy_threshold = Param.Float(\n        0.25, \"A prefetch accuracy factor smaller than this is considered low\"\n    )\n    
high_cache_hit_threshold = Param.Float(\n 0.875, \"A cache hit ratio bigger than this is considered high\"\n )\n low_cache_hit_threshold = Param.Float(\n 0.75, \"A cache hit ratio smaller than this is considered low\"\n )\n epoch_cycles = Param.Cycles(256000, \"Cycles in an epoch period\")\n offchip_memory_latency = Param.Latency(\n \"30ns\", \"Memory latency used to compute the required memory bandwidth\"\n )\n\n\nclass AMPMPrefetcher(QueuedPrefetcher):\n type = \"AMPMPrefetcher\"\n cxx_class = \"gem5::prefetch::AMPM\"\n cxx_header = \"mem/cache/prefetch/access_map_pattern_matching.hh\"\n ampm = Param.AccessMapPatternMatching(\n AccessMapPatternMatching(), \"Access Map Pattern Matching object\"\n )\n\n\nclass DeltaCorrelatingPredictionTables(SimObject):\n type = \"DeltaCorrelatingPredictionTables\"\n cxx_class = \"gem5::prefetch::DeltaCorrelatingPredictionTables\"\n cxx_header = \"mem/cache/prefetch/delta_correlating_prediction_tables.hh\"\n deltas_per_entry = Param.Unsigned(\n 20, \"Number of deltas stored in each table entry\"\n )\n delta_bits = Param.Unsigned(12, \"Bits per delta\")\n delta_mask_bits = Param.Unsigned(\n 8, \"Lower bits to mask when comparing deltas\"\n )\n table_entries = Param.MemorySize(\"128\", \"Number of entries in the table\")\n table_assoc = Param.Unsigned(128, \"Associativity of the table\")\n table_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1, assoc=Parent.table_assoc, size=Parent.table_entries\n ),\n \"Indexing policy of the table\",\n )\n table_replacement_policy = Param.BaseReplacementPolicy(\n LRURP(), \"Replacement policy of the table\"\n )\n\n\nclass DCPTPrefetcher(QueuedPrefetcher):\n type = \"DCPTPrefetcher\"\n cxx_class = \"gem5::prefetch::DCPT\"\n cxx_header = \"mem/cache/prefetch/delta_correlating_prediction_tables.hh\"\n dcpt = Param.DeltaCorrelatingPredictionTables(\n DeltaCorrelatingPredictionTables(),\n \"Delta Correlating Prediction Tables object\",\n )\n\n\nclass IrregularStreamBufferPrefetcher(QueuedPrefetcher):\n type = \"IrregularStreamBufferPrefetcher\"\n cxx_class = \"gem5::prefetch::IrregularStreamBuffer\"\n cxx_header = \"mem/cache/prefetch/irregular_stream_buffer.hh\"\n\n num_counter_bits = Param.Unsigned(\n 2, \"Number of bits of the confidence counter\"\n )\n chunk_size = Param.Unsigned(\n 256, \"Maximum number of addresses in a temporal stream\"\n )\n degree = Param.Unsigned(4, \"Number of prefetches to generate\")\n training_unit_assoc = Param.Unsigned(\n 128, \"Associativity of the training unit\"\n )\n training_unit_entries = Param.MemorySize(\n \"128\", \"Number of entries of the training unit\"\n )\n training_unit_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1,\n assoc=Parent.training_unit_assoc,\n size=Parent.training_unit_entries,\n ),\n \"Indexing policy of the training unit\",\n )\n training_unit_replacement_policy = Param.BaseReplacementPolicy(\n LRURP(), \"Replacement policy of the training unit\"\n )\n\n prefetch_candidates_per_entry = Param.Unsigned(\n 16, \"Number of prefetch candidates stored in a SP-AMC entry\"\n )\n address_map_cache_assoc = Param.Unsigned(\n 128, \"Associativity of the PS/SP AMCs\"\n )\n address_map_cache_entries = Param.MemorySize(\n \"128\", \"Number of entries of the PS/SP AMCs\"\n )\n ps_address_map_cache_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1,\n assoc=Parent.address_map_cache_assoc,\n size=Parent.address_map_cache_entries,\n ),\n \"Indexing policy of the Physical-to-Structural Address Map 
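`delta_mask_bits` above says the low-order bits are masked when two deltas are compared. A minimal sketch of that comparison (the shift-based masking is an assumption read off the parameter description, not the C++ implementation):

```python
# Compare two address deltas while ignoring their low 8 bits.
def deltas_match(d1: int, d2: int, mask_bits: int = 8) -> bool:
    return (d1 >> mask_bits) == (d2 >> mask_bits)

print(deltas_match(0x1234, 0x12FF))  # True: they differ only in the masked byte
```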
Cache\",\n    )\n    ps_address_map_cache_replacement_policy = Param.BaseReplacementPolicy(\n        LRURP(),\n        \"Replacement policy of the Physical-to-Structural Address Map Cache\",\n    )\n    sp_address_map_cache_indexing_policy = Param.BaseIndexingPolicy(\n        SetAssociative(\n            entry_size=1,\n            assoc=Parent.address_map_cache_assoc,\n            size=Parent.address_map_cache_entries,\n        ),\n        \"Indexing policy of the Structural-to-Physical Address Map Cache\",\n    )\n    sp_address_map_cache_replacement_policy = Param.BaseReplacementPolicy(\n        LRURP(),\n        \"Replacement policy of the Structural-to-Physical Address Map Cache\",\n    )\n\n\nclass SlimAccessMapPatternMatching(AccessMapPatternMatching):\n    start_degree = 2\n    limit_stride = 4\n\n\nclass SlimDeltaCorrelatingPredictionTables(DeltaCorrelatingPredictionTables):\n    table_entries = \"256\"\n    table_assoc = 256\n    deltas_per_entry = 9\n\n\nclass SlimAMPMPrefetcher(QueuedPrefetcher):\n    type = \"SlimAMPMPrefetcher\"\n    cxx_class = \"gem5::prefetch::SlimAMPM\"\n    cxx_header = \"mem/cache/prefetch/slim_ampm.hh\"\n\n    ampm = Param.AccessMapPatternMatching(\n        SlimAccessMapPatternMatching(), \"Access Map Pattern Matching object\"\n    )\n    dcpt = Param.DeltaCorrelatingPredictionTables(\n        SlimDeltaCorrelatingPredictionTables(),\n        \"Delta Correlating Prediction Tables object\",\n    )\n\n\nclass BOPPrefetcher(QueuedPrefetcher):\n    type = \"BOPPrefetcher\"\n    cxx_class = \"gem5::prefetch::BOP\"\n    cxx_header = \"mem/cache/prefetch/bop.hh\"\n    score_max = Param.Unsigned(31, \"Max. score to update the best offset\")\n    round_max = Param.Unsigned(100, \"Max. round to update the best offset\")\n    bad_score = Param.Unsigned(10, \"Score at which the HWP is disabled\")\n    rr_size = Param.Unsigned(64, \"Number of entries of each RR bank\")\n    tag_bits = Param.Unsigned(12, \"Bits used to store the tag\")\n    offset_list_size = Param.Unsigned(\n        46, \"Number of entries in the offsets list\"\n    )\n    negative_offsets_enable = Param.Bool(\n        True,\n        \"Initialize the offsets list also with negative values \\\n        (i.e. the table will have half of the entries with positive \\\n        offsets and the other half with negative ones)\",\n    )\n    delay_queue_enable = Param.Bool(True, \"Enable the delay queue\")\n    delay_queue_size = Param.Unsigned(\n        15, \"Number of entries in the delay queue\"\n    )\n    delay_queue_cycles = Param.Cycles(\n        60,\n        \"Cycles to delay a write in the left RR table from the delay \\\n        queue\",\n    )\n\n\nclass SBOOEPrefetcher(QueuedPrefetcher):\n    type = \"SBOOEPrefetcher\"\n    cxx_class = \"gem5::prefetch::SBOOE\"\n    cxx_header = \"mem/cache/prefetch/sbooe.hh\"\n    latency_buffer_size = Param.Int(32, \"Entries in the latency buffer\")\n    sequential_prefetchers = Param.Int(9, \"Number of sequential prefetchers\")\n    sandbox_entries = Param.Int(1024, \"Size of the address buffer\")\n    score_threshold_pct = Param.Percent(\n        25,\n        \"Min. threshold to issue a \\\n        prefetch. 
The value is the percentage of sandbox entries to use\",\n )\n\n\nclass STeMSPrefetcher(QueuedPrefetcher):\n type = \"STeMSPrefetcher\"\n cxx_class = \"gem5::prefetch::STeMS\"\n cxx_header = \"mem/cache/prefetch/spatio_temporal_memory_streaming.hh\"\n\n spatial_region_size = Param.MemorySize(\n \"2KiB\", \"Memory covered by a hot zone\"\n )\n active_generation_table_entries = Param.MemorySize(\n \"64\", \"Number of entries in the active generation table\"\n )\n active_generation_table_assoc = Param.Unsigned(\n 64, \"Associativity of the active generation table\"\n )\n active_generation_table_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1,\n assoc=Parent.active_generation_table_assoc,\n size=Parent.active_generation_table_entries,\n ),\n \"Indexing policy of the active generation table\",\n )\n active_generation_table_replacement_policy = Param.BaseReplacementPolicy(\n LRURP(), \"Replacement policy of the active generation table\"\n )\n\n pattern_sequence_table_entries = Param.MemorySize(\n \"16384\", \"Number of entries in the pattern sequence table\"\n )\n pattern_sequence_table_assoc = Param.Unsigned(\n 16384, \"Associativity of the pattern sequence table\"\n )\n pattern_sequence_table_indexing_policy = Param.BaseIndexingPolicy(\n SetAssociative(\n entry_size=1,\n assoc=Parent.pattern_sequence_table_assoc,\n size=Parent.pattern_sequence_table_entries,\n ),\n \"Indexing policy of the pattern sequence table\",\n )\n pattern_sequence_table_replacement_policy = Param.BaseReplacementPolicy(\n LRURP(), \"Replacement policy of the pattern sequence table\"\n )\n\n region_miss_order_buffer_entries = Param.Unsigned(\n 131072, \"Number of entries of the Region Miss Order Buffer\"\n )\n add_duplicate_entries_to_rmob = Param.Bool(\n True, \"Add duplicate entries to RMOB\"\n )\n reconstruction_entries = Param.Unsigned(\n 256, \"Number of reconstruction entries\"\n )\n\n\nclass HWPProbeEventRetiredInsts(HWPProbeEvent):\n def register(self):\n if self.obj:\n for name in self.names:\n self.prefetcher.getCCObject().addEventProbeRetiredInsts(\n self.obj.getCCObject(), name\n )\n\n\nclass PIFPrefetcher(QueuedPrefetcher):\n type = \"PIFPrefetcher\"\n cxx_class = \"gem5::prefetch::PIF\"\n cxx_header = \"mem/cache/prefetch/pif.hh\"\n cxx_exports = [PyBindMethod(\"addEventProbeRetiredInsts\")]\n\n prec_spatial_region_bits = Param.Unsigned(\n 2, \"Number of preceding addresses in the spatial region\"\n )\n succ_spatial_region_bits = Param.Unsigned(\n 8, \"Number of subsequent addresses in the spatial region\"\n )\n compactor_entries = Param.Unsigned(2, \"Entries in the temp. 
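`HWPProbeEventRetiredInsts` above defers probe registration until `regProbeListeners` fires. A config-script sketch of how a PIF prefetcher would subscribe to a core's retired-instruction probe through the `listenFromProbeRetiredInstructions` helper defined just below (`icache` and `cpu` are assumed to be SimObjects built elsewhere in a real script):

```python
# Hypothetical gem5 configuration fragment.
def attach_pif(icache, cpu):
    icache.prefetcher = PIFPrefetcher()
    icache.prefetcher.listenFromProbeRetiredInstructions(cpu)
```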
compactor\")\n    stream_address_buffer_entries = Param.Unsigned(7, \"Entries in the SAB\")\n    history_buffer_size = Param.Unsigned(16, \"Entries in the history buffer\")\n\n    index_entries = Param.MemorySize(\"64\", \"Number of entries in the index\")\n    index_assoc = Param.Unsigned(64, \"Associativity of the index\")\n    index_indexing_policy = Param.BaseIndexingPolicy(\n        SetAssociative(\n            entry_size=1, assoc=Parent.index_assoc, size=Parent.index_entries\n        ),\n        \"Indexing policy of the index\",\n    )\n    index_replacement_policy = Param.BaseReplacementPolicy(\n        LRURP(), \"Replacement policy of the index\"\n    )\n\n    def listenFromProbeRetiredInstructions(self, simObj):\n        if not isinstance(simObj, SimObject):\n            raise TypeError(\"argument must be of SimObject type\")\n        self.addEvent(\n            HWPProbeEventRetiredInsts(self, simObj, \"RetiredInstsPC\")\n        )\n","repo_name":"gem5/gem5","sub_path":"src/mem/cache/prefetch/Prefetcher.py","file_name":"Prefetcher.py","file_ext":"py","file_size_in_byte":23971,"program_lang":"python","lang":"en","doc_type":"code","stars":1196,"dataset":"github-code","pt":"78"} +{"seq_id":"41759965483","text":"from django.urls import path\nfrom single_pages import views\n\nurlpatterns = [\n    path('', views.home),  # home page: serverIP/\n    path('best/', views.best),  # best products page: serverIP/best/\n    path('mypage/', views.mypage),  # my page: serverIP/mypage/\n    path('company/', views.company),  # company info page: serverIP/company/\n    path('cart/', views.cart),  # shopping cart page: serverIP/cart/\n    path('check_order/', views.check_order),  # order stock check: serverIP/check_order/\n    path('order/', views.OrderCreate.as_view()),  # order page: serverIP/order/\n]\n","repo_name":"soohyxn/S-Market","sub_path":"single_pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5470106291","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5 import QtWidgets,QtCore\n\n\nclass TestForm(QMainWindow): # inherits from PyQt5.QtWidgets\n    # constructor\n    def __init__(self):\n        super().__init__() # parent class constructor\n        self.setupUI() # call the UI setup function\n    def setupUI(self):\n        self.setWindowTitle(\"PyQt test\")\n        self.setGeometry(800,400,500,500) # first two args are the window position, last two are the window size\n\n        label1=QLabel(\"input test\",self)\n        label1.move(20,20)\n\n        label2=QLabel(\"output test\",self)\n        label2.move(20,60)\n\n        self.lineEdit=QLineEdit(\"\",self) # default value\n        self.plainEdit=QtWidgets.QPlainTextEdit(self)\n        self.plainEdit.setReadOnly(True) # prevent writing\n\n        self.lineEdit.move(100,20)\n        self.plainEdit.setGeometry(QtCore.QRect(20,90,260,200))\n\n        self.lineEdit.textChanged.connect(self.lineEditChanged)\n        self.lineEdit.returnPressed.connect(self.lineEditEnter)\n\n        # status bar\n        self.statusBar=QStatusBar(self)\n        self.setStatusBar(self.statusBar)\n\n    def lineEditChanged(self):\n        self.statusBar.showMessage(self.lineEdit.text())\n\n    def lineEditEnter(self):\n        self.plainEdit.appendPlainText(self.lineEdit.text())\n        self.lineEdit.clear() # clear the input field\n\n\nif __name__ == \"__main__\":\n    app=QApplication(sys.argv)\n    window=TestForm()\n    window.show()\n    app.exec_()\n","repo_name":"dreamer9107/MINIPROJECT2","sub_path":"example/1-3.py","file_name":"1-3.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37872735384","text":"import base64, datetime, html, json, os\n\nimport requests\n\nfrom commands.CommandTemplate import CommandTemplate\nimport Constants, GlobalStore, PermissionLevel\nfrom util import DateTimeUtil, 
IrcFormattingUtil, StringUtil\nfrom IrcMessage import IrcMessage\nfrom CustomExceptions import CommandInputException, WebRequestException\nfrom StringWithSuffix import StringWithSuffix\n\n\nclass Command(CommandTemplate):\n\ttriggers = ['twitterwatcher', 'twitterwatch']\n\thelptext = \"Automatically says new tweets of watched accounts. Use parameter 'add' to add an account to watch and 'remove' to stop watching an account. 'latest' shows latest tweet. \" \\\n\t\t\t \"Use 'setname' and 'removename' to set and remove a display name. These parameters need to be followed by a Twitter name. 'list' lists all accounts being watched\"\n\tscheduledFunctionTime = 300.0 #Check every 5 minutes\n\trunInThread = True\n\n\twatchData = {} #keys are Twitter usernames, contains fields with highest ID and which channel(s) to report new tweets to, and a display name if specified\n\tMAX_TWEETS_TO_MENTION = 3\n\tSECONDS_AGE_FOR_FULL_DATE = 604800 # After 7 days, don't list a tweet as '6 days, 7 hours ago', but as the full date\n\n\tdef onLoad(self):\n\t\tGlobalStore.commandhandler.addCommandFunction(__file__, 'getTweetDescription', self.getTweetDescription)\n\n\t\t#First retrieve which Twitter accounts we should follow, if that file exists\n\t\twatchedFilepath = os.path.join(GlobalStore.scriptfolder, 'data', 'WatchedTwitterAccounts.json')\n\t\tif os.path.exists(watchedFilepath):\n\t\t\twith open(watchedFilepath, 'r', encoding='utf-8') as watchedFile:\n\t\t\t\tself.watchData = json.load(watchedFile)\n\t\t#If we can't identify to Twitter, disable the automatic check for new messages\n\t\tif not GlobalStore.commandhandler.getApiKey('key', 'twitter') or not GlobalStore.commandhandler.getApiKey('secret', 'twitter'):\n\t\t\tself.logWarning(\"[TwitterWatcher] Twitter API credentials not found!\")\n\t\t\tself.scheduledFunctionTime = None\n\n\tdef executeScheduledFunction(self):\n\t\tself.checkForNewTweets()\n\n\tdef execute(self, message):\n\t\t\"\"\"\n\t\t:type message: IrcMessage\n\t\t\"\"\"\n\t\tif message.messagePartsLength == 0:\n\t\t\tmessage.reply(self.helptext)\n\t\t\treturn\n\n\t\tparameter = message.messageParts[0].lower()\n\t\tserverChannelPair = [message.bot.serverfolder, message.source] #List not tuple, because JSON can't save tuples and converts them to a list\n\n\t\t#Start with the commands that don't need a username parameter\n\t\tif parameter == 'help':\n\t\t\tmessage.reply(self.helptext)\n\t\t\treturn\n\t\tif parameter == 'list':\n\t\t\t#List all the accounts we're watching for this channel\n\t\t\twatchlist = []\n\t\t\tfor username, usernameData in self.watchData.items():\n\t\t\t\tif serverChannelPair in usernameData['targets']:\n\t\t\t\t\twatchlist.append(self.getDisplayName(username))\n\t\t\twatchlistLength = len(watchlist)\n\t\t\tif watchlistLength == 0:\n\t\t\t\treply = \"I'm not watching any Twitter users for this channel\"\n\t\t\telif watchlistLength == 1:\n\t\t\t\treply = \"I just watch {} for the people here\".format(watchlist[0])\n\t\t\telse:\n\t\t\t\twatchlist.sort()\n\t\t\t\treply = \"I watch {:,} Twitter users for this channel: {}\".format(watchlistLength, \"; \".join(watchlist))\n\t\t\tmessage.reply(reply)\n\t\t\treturn\n\t\t#'update' forces an update check, but it's only available to admins. Also doesn't need a username\n\t\tif parameter == 'update':\n\t\t\tif not message.doesSenderHavePermission(PermissionLevel.BOT):\n\t\t\t\treply = \"Only my bot admin(s) can force an update, sorry!\"\n\t\t\telif self.scheduledFunctionIsExecuting:\n\t\t\t\treply = \"I was updating already! 
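Reading `onLoad` together with the `add` handler below, the persisted `WatchedTwitterAccounts.json` has roughly this shape (every name and ID here is invented for illustration):

```python
# Invented example of the watchData structure this command saves and loads.
watch_data_example = {
    "nasa": {                                     # lowercased Twitter name
        "targets": [["ServerFolder", "#space"]],  # [serverfolder, channel] pairs
        "displayname": "NASA",                    # optional display name
        "highestId": 1234567890123456789,         # highest tweet ID seen so far
    }
}
```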
Lucky you, now it'll be done quicker\"\n\t\t\telse:\n\t\t\t\tself.checkForNewTweets()\n\t\t\t\tself.resetScheduledFunctionGreenlet()\n\t\t\t\treply = \"Finished forced TwitterWatcher update check\"\n\t\t\tmessage.reply(reply)\n\t\t\treturn\n\n\t\t#All the other parameters need an account name, so check for that now\n\t\tif message.messagePartsLength == 1:\n\t\t\tmessage.reply(\"Please add a Twitter account name too, so I know where to look\")\n\t\t\treturn\n\n\t\taccountName = message.messageParts[1]\n\t\taccountNameLowered = accountName.lower()\n\t\tisUserBeingWatchedHere = accountNameLowered in self.watchData and serverChannelPair in self.watchData[accountNameLowered]['targets']\n\n\t\tif parameter == 'add':\n\t\t\tif isUserBeingWatchedHere:\n\t\t\t\treply = \"I'm already keeping a close eye on {}. On their tweets, I mean\".format(self.getDisplayName(accountNameLowered, accountName))\n\t\t\telif not message.doesSenderHavePermission(PermissionLevel.CHANNEL):\n\t\t\t\traise CommandInputException(\"Sorry, only my channel admins are allowed to add people to my watch list\")\n\t\t\telse:\n\t\t\t\t#New account\n\t\t\t\tif accountNameLowered not in self.watchData:\n\t\t\t\t\tself.watchData[accountNameLowered] = {'targets': [serverChannelPair]}\n\t\t\t\t#Existing account\n\t\t\t\telse:\n\t\t\t\t\tself.watchData[accountNameLowered]['targets'].append(serverChannelPair)\n\t\t\t\t#If a display name was provided, add that too\n\t\t\t\tif message.messagePartsLength > 2:\n\t\t\t\t\tself.watchData[accountNameLowered]['displayname'] = \" \".join(message.messageParts[2:])\n\t\t\t\telif accountName != accountNameLowered:\n\t\t\t\t\tself.watchData[accountNameLowered]['displayname'] = accountName\n\t\t\t\t#Save the whole thing\n\t\t\t\tself.saveWatchData()\n\t\t\t\tself.checkForNewTweets([accountNameLowered], False)\n\t\t\t\treply = \"Ok, I'll keep you informed about any new tweets {}... makes? Tweets? What's the verb here?\".format(self.getDisplayName(accountNameLowered))\n\t\telif parameter == 'remove':\n\t\t\tif not isUserBeingWatchedHere:\n\t\t\t\treply = \"I already wasn't watching {}! Not even secretly\".format(accountName)\n\t\t\telif not message.doesSenderHavePermission(PermissionLevel.CHANNEL):\n\t\t\t\traise CommandInputException(\"Only my channel admins are allowed to remove people from my watch list, sorry\")\n\t\t\telse:\n\t\t\t\tself.watchData[accountNameLowered]['targets'].remove(serverChannelPair)\n\t\t\t\t#If this channel was the only place we were reporting this user's tweets to, remove it all together\n\t\t\t\tif len(self.watchData[accountNameLowered]['targets']) == 0:\n\t\t\t\t\tdel self.watchData[accountNameLowered]\n\t\t\t\tself.saveWatchData()\n\t\t\t\treply = \"Ok, I won't keep you updated on whatever {} posts. Tweets. Messages? I don't know the proper verb\".format(accountName)\n\t\telif parameter == 'latest':\n\t\t\t#Download the latest tweet for the provided username\n\t\t\ttry:\n\t\t\t\ttweets = self.downloadTweets(accountNameLowered, 1)\n\t\t\texcept WebRequestException as wre:\n\t\t\t\tself.logError(\"[TwitterWatcher] Error occured while downloading single tweet for user {}: {}\".format(accountName, wre))\n\t\t\t\treply = \"Woops, something went wrong there. Tell my owner(s), maybe it's something they can fix. Or maybe it's Twitter's fault, in which case all we can do is wait\"\n\t\t\telse:\n\t\t\t\tif not tweets:\n\t\t\t\t\treply = \"Sorry, I couldn't find any tweets by {}. 
Maybe they haven't tweeted yet, or maybe you made a typo?\".format(accountName)\n\t\t\t\telse:\n\t\t\t\t\treply = self.formatNewTweetText(accountName, tweets[0], addTweetAge=True)\n\t\telif parameter == 'setname':\n\t\t\t#Allow users to set a display name\n\t\t\tif not isUserBeingWatchedHere:\n\t\t\t\treply = \"I'm not watching {}, so I can't change the display name. Add them with the 'add' parameter first\".format(accountName)\n\t\t\telif not message.doesSenderHavePermission(PermissionLevel.CHANNEL):\n\t\t\t\traise CommandInputException(\"Sorry, only my channel admins are allowed to set nicknames for the people on my watch list\")\n\t\t\telif message.messagePartsLength < 3:\n\t\t\t\t#Parts 0 and 1 are 'setname' and the account name, so a display name needs at least three parts\n\t\t\t\treply = \"Please add a display name for '{}' too. You don't want me thinking up nicknames for people\".format(accountName)\n\t\t\telse:\n\t\t\t\tself.watchData[accountNameLowered]['displayname'] = \" \".join(message.messageParts[2:])\n\t\t\t\tself.saveWatchData()\n\t\t\t\treply = \"Ok, I will call {} '{}' from now on\".format(accountName, self.watchData[accountNameLowered]['displayname'])\n\t\telif parameter == 'removename':\n\t\t\tif not isUserBeingWatchedHere:\n\t\t\t\treply = \"I wasn't calling them anything anyway, since I'm not following {}\".format(accountName)\n\t\t\telif 'displayname' not in self.watchData[accountNameLowered]:\n\t\t\t\treply = \"I didn't have a nickname listed for {} anyway, so I guess I did what you asked?\".format(accountNameLowered)\n\t\t\telif not message.doesSenderHavePermission(PermissionLevel.CHANNEL):\n\t\t\t\traise CommandInputException(\"Only my channel admins are allowed to remove nicknames for the people on my watch list, sorry\")\n\t\t\telse:\n\t\t\t\tdel self.watchData[accountNameLowered]['displayname']\n\t\t\t\tself.saveWatchData()\n\t\t\t\treply = \"Ok, I will just call them by their account name, {}\".format(accountName)\n\t\telse:\n\t\t\treply = \"I don't know what to do with the parameter '{}', sorry. 
Try rereading the help text?\".format(parameter)\n\n\t\tmessage.replyWithLengthLimit(reply)\n\n\tdef updateTwitterToken(self):\n\t\tapiKey = GlobalStore.commandhandler.getApiKey('key', 'twitter')\n\t\tapiSecret = GlobalStore.commandhandler.getApiKey('secret', 'twitter')\n\t\tif not apiKey or not apiSecret:\n\t\t\tself.logError(\"[TwitterWatcher] No Twitter API key and/or secret found\")\n\t\t\treturn False\n\n\t\t#b64encode works on bytes, so encode the joined credentials first and decode the result back to a string for the header\n\t\tcredentials = base64.b64encode(\"{}:{}\".format(apiKey, apiSecret).encode(\"utf-8\")).decode(\"ascii\")\n\t\theaders = {\"Authorization\": \"Basic {}\".format(credentials), \"Content-Type\": \"application/x-www-form-urlencoded;charset=UTF-8\"}\n\t\tdata = \"grant_type=client_credentials\"\n\n\t\treq = requests.post(\"https://api.twitter.com/oauth2/token\", data=data, headers=headers)\n\t\treply = json.loads(req.text)\n\t\tif 'access_token' not in reply:\n\t\t\tself.logError(\"[TwitterWatcher] An error occurred while retrieving Twitter token: \" + json.dumps(reply))\n\t\t\treturn False\n\n\t\tGlobalStore.commandhandler.apikeys['twitter']['token'] = reply['access_token']\n\t\tGlobalStore.commandhandler.apikeys['twitter']['tokentype'] = reply['token_type']\n\t\tGlobalStore.commandhandler.saveApiKeys()\n\t\treturn True\n\n\tdef downloadTweets(self, username, maxTweetCount=200, downloadNewerThanId=None, downloadOlderThanId=None, includeReplies=False, includeRetweets=False):\n\t\t# First check if we can even connect to the Twitter API\n\t\tapiToken = GlobalStore.commandhandler.getApiKey('token', 'twitter')\n\t\tapiTokenType = GlobalStore.commandhandler.getApiKey('tokentype', 'twitter')\n\t\tif not apiToken or not apiTokenType:\n\t\t\tself.logInfo(\"[TwitterWatcher] No twitter token found, retrieving a new one\")\n\t\t\ttokenUpdateSuccess = self.updateTwitterToken()\n\t\t\tif not tokenUpdateSuccess:\n\t\t\t\tself.logError(\"Unable to retrieve a new Twitter token!\")\n\t\t\t\traise WebRequestException(\"Unable to retrieve Twitter authentication token!\")\n\t\t\t#Re-read the token updateTwitterToken just stored, otherwise the Authorization header below would still use the old empty values\n\t\t\tapiToken = GlobalStore.commandhandler.getApiKey('token', 'twitter')\n\t\t\tapiTokenType = GlobalStore.commandhandler.getApiKey('tokentype', 'twitter')\n\n\t\t# Now download tweets!\n\t\theaders = {'Authorization': \"{} {}\".format(apiTokenType, apiToken)}\n\t\tparams = {'screen_name': username, 'count': min(200, maxTweetCount), 'trim_user': 'true', 'tweet_mode': 'extended',\n\t\t\t\t 'exclude_replies': 'false' if includeReplies else 'true',\n\t\t\t\t 'include_rts': True} # Always get retweets, remove them later if necessary. 
Needed because 'count' always includes retweets, even if you don't want them\n\t\tif downloadOlderThanId:\n\t\t\tparams['max_id'] = downloadOlderThanId\n\n\t\ttweets = []\n\t\tif downloadNewerThanId:\n\t\t\tparams['since_id'] = downloadNewerThanId\n\t\treq = None\n\t\twhile len(tweets) < maxTweetCount:\n\t\t\tparams['count'] = maxTweetCount - len(tweets) # Get as many tweets as we still need\n\t\t\ttry:\n\t\t\t\treq = requests.get(\"https://api.twitter.com/1.1/statuses/user_timeline.json\", headers=headers, params=params, timeout=20.0)\n\t\t\t\tapireply = json.loads(req.text)\n\t\t\texcept requests.exceptions.Timeout:\n\t\t\t\tself.logError(\"[TwitterWatcher] Twitter API reply took too long to arrive\")\n\t\t\t\traise WebRequestException(\"Twitter took too long to respond\")\n\t\t\texcept ValueError:\n\t\t\t\tself.logError(\"[TwitterWatcher] Didn't get parsable JSON return from Twitter API: {}\".format(StringUtil.removeNewlines(req.text, '|') if req else \"[no response retrieved]\"))\n\t\t\t\traise WebRequestException(\"Twitter API returned unexpected data\")\n\t\t\texcept Exception as e:\n\t\t\t\tself.logError(\"[TwitterWatcher] Tweet download threw an unexpected error of type '{}': {}\".format(type(e), str(e)))\n\t\t\t\traise WebRequestException(\"Unknown error occurred while retrieving Twitter API data\")\n\n\t\t\tif len(apireply) == 0:\n\t\t\t\t# No more tweets to parse!\n\t\t\t\tbreak\n\t\t\t# Check for errors\n\t\t\tif req.status_code == 404:\n\t\t\t\traise WebRequestException(\"The provided tweet(s) couldn't be found, sorry\")\n\t\t\tif isinstance(apireply, dict) and 'errors' in apireply:\n\t\t\t\terrorMessages = '; '.join(e['message'] for e in apireply['errors'])\n\t\t\t\tself.logError(\"[TwitterWatcher] Error occurred while retrieving tweets for {}. Parameters: {}; apireply: {}; errors: {}\".format(username, params, apireply, errorMessages))\n\t\t\t\traise WebRequestException(\"The Twitter API reply contained errors\")\n\t\t\t# Sometimes the API does not return a list of tweets for some reason. Catch that\n\t\t\tif not isinstance(apireply, list):\n\t\t\t\tself.logError(\"[TwitterWatcher] Unexpected reply from Twitter API. 
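The request parameters above treat `since_id` as an exclusive lower bound and `max_id` as an upper bound, which is what lets `getTweetDescription` further down pin a single tweet. A sketch of that ID window (values invented, semantics as the code assumes them):

```python
# Sandwich one tweet ID between since_id and max_id.
tweet_id = 1234567890
params = {"since_id": tweet_id - 1, "max_id": tweet_id + 1, "count": 1}
```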
Expected tweet list, got {}: {}\".format(type(apireply), apireply))\n\t\t\t\traise WebRequestException(\"The Twitter API reply contained unexpected data\")\n\t\t\t# Tweets are sorted reverse-chronologically, so we can get the highest ID from the first tweet\n\t\t\tparams['since_id'] = apireply[0]['id']\n\t\t\t# Remove retweets if necessary (done manually to make the 'count' variable be accurate)\n\t\t\tif not includeRetweets:\n\t\t\t\tapireply = [t for t in apireply if 'retweeted_status' not in t]\n\t\t\t# There are tweets, store those\n\t\t\ttweets.extend(apireply)\n\t\treturn tweets\n\n\tdef checkForNewTweets(self, usernamesToCheck=None, reportNewTweets=True):\n\t\tif not usernamesToCheck:\n\t\t\tusernamesToCheck = self.watchData #Don't copy the username list\n\t\tif not usernamesToCheck:\n\t\t\treturn\n\n\t\tnow = datetime.datetime.utcnow()\n\t\twatchDataChanged = False\n\t\ttweetAgeCutoff = self.scheduledFunctionTime * 1.1 #Give tweet age a little grace period, so tweets can't fall between checks\n\t\t#Retrieve the latest tweets for every account.\n\t\tfor username in usernamesToCheck:\n\t\t\tif username not in self.watchData:\n\t\t\t\tself.logWarning(\"[TwitterWatcher] Asked to check account '{}' for new tweets, but it is not in the watchlist\".format(username))\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\ttweets = self.downloadTweets(username, maxTweetCount=10, downloadNewerThanId=self.watchData[username].get('highestId', None), includeRetweets=False)\n\t\t\texcept WebRequestException as wre:\n\t\t\t\tself.logError(\"[TwitterWatcher] Couldn't retrieve tweets for '{}': {}\".format(username, wre))\n\t\t\t\tcontinue\n\t\t\t#If there aren't any new tweets, move on\n\t\t\tif len(tweets) == 0:\n\t\t\t\tcontinue\n\t\t\t#Always store the highest ID, so we don't encounter the same tweet twice\n\t\t\twatchDataChanged = True\n\t\t\tself.watchData[username]['highestId'] = tweets[0]['id']\n\t\t\t#If we don't have to actually report the tweets, then we have nothing left to do\n\t\t\tif not reportNewTweets:\n\t\t\t\tcontinue\n\n\t\t\t#Go through the tweets to check if they're not too old to report\n\t\t\tfirstOldTweetIndex = -1\n\t\t\tfor index, tweet in enumerate(tweets):\n\t\t\t\tif (now - self.getTweetPostTime(tweet['created_at'])).total_seconds() > tweetAgeCutoff:\n\t\t\t\t\tfirstOldTweetIndex = index\n\t\t\t\t\tbreak\n\t\t\t#If all tweets are old, stop here\n\t\t\tif firstOldTweetIndex == 0:\n\t\t\t\tcontinue\n\t\t\t#Otherwise remove the old tweet and every older tweet\n\t\t\telif firstOldTweetIndex > -1:\n\t\t\t\ttweets = tweets[:firstOldTweetIndex]\n\n\t\t\t#To prevent spam, only mention the latest few tweets, in case of somebody posting a LOT in a short timespan\n\t\t\tif len(tweets) > self.MAX_TWEETS_TO_MENTION:\n\t\t\t\ttweetsSkipped = len(tweets) - self.MAX_TWEETS_TO_MENTION\n\t\t\t\ttweets = tweets[-self.MAX_TWEETS_TO_MENTION:]\n\t\t\telse:\n\t\t\t\ttweetsSkipped = 0\n\n\t\t\t#Reverse the tweets so we get them old to new, instead of new to old\n\t\t\ttweets.reverse()\n\t\t\t#New recent tweets! 
Shout about it (if we're in the place where we should shout)\n\t\t\tfor target in self.watchData[username]['targets']:\n\t\t\t\t#'target' is a tuple with the server name at [0] and the channel name at [1]\n\t\t\t\t#Just ignore it if we're either not on the server or not in the channel\n\t\t\t\tif target[0] not in GlobalStore.bothandler.bots:\n\t\t\t\t\tcontinue\n\t\t\t\ttargetbot = GlobalStore.bothandler.bots[target[0]]\n\t\t\t\tif target[1] not in targetbot.channelsUserList:\n\t\t\t\t\tcontinue\n\t\t\t\ttargetchannel = target[1]\n\t\t\t\t#Now go tell that channel all about the tweets\n\t\t\t\tfor tweet in tweets:\n\t\t\t\t\tformattedTweet = self.formatNewTweetText(username, tweet)\n\t\t\t\t\ttargetbot.sendLengthLimitedMessage(targetchannel, formattedTweet.mainString, suffix=formattedTweet.suffix)\n\t\t\t\t#If we skipped a few tweets, make a mention of that too\n\t\t\t\tif tweetsSkipped > 0:\n\t\t\t\t\ttargetbot.sendMessage(targetchannel, \"(skipped {:,} of {}'s tweets)\".format(tweetsSkipped, self.getDisplayName(username)))\n\t\tif watchDataChanged:\n\t\t\tself.saveWatchData()\n\n\tdef formatNewTweetText(self, username, tweetData, addTweetAge=False, addTweetUrl=True):\n\t\t# Fix special characters (convert '&' to '&' for instance), and remove newlines\n\t\tformattedTweetText = StringUtil.removeNewlines(html.unescape(tweetData['full_text']), Constants.GREY_SEPARATOR)\n\t\tsuffixes = []\n\t\t#Remove the link to the photo at the end, but mention that there is one\n\t\tif 'media' in tweetData['entities']:\n\t\t\tmediaTypes = []\n\t\t\tfor mediaItem in tweetData['entities']['media']:\n\t\t\t\tformattedTweetText = formattedTweetText.replace(mediaItem['url'], '')\n\t\t\t\tif mediaItem['type'] not in mediaTypes:\n\t\t\t\t\tmediaTypes.append(mediaItem['type'])\n\t\t\tsuffixes.append(\"(has {})\".format(\", \".join(mediaTypes)))\n\t\t# Finalize the return text\n\t\tformattedTweetText = \"{}: {}\".format(IrcFormattingUtil.makeTextBold(self.getDisplayName(username)), formattedTweetText)\n\t\tif addTweetAge:\n\t\t\tpostDateTime = self.getTweetPostTime(tweetData['created_at'])\n\t\t\ttweetAge = datetime.datetime.utcnow() - postDateTime\n\t\t\t# For older tweets, list the post date, otherwise list how old it is\n\t\t\ttweetAgeString = \" | \" # Use a normal pipe here instead of GREY_SEPARATOR, because we're going to color the whole age string grey\n\t\t\tif tweetAge.total_seconds() > self.SECONDS_AGE_FOR_FULL_DATE:\n\t\t\t\ttweetAgeString += postDateTime.strftime('%Y-%m-%d')\n\t\t\telif tweetAge.total_seconds() <= 60:\n\t\t\t\ttweetAgeString += \"posted just now\"\n\t\t\telse:\n\t\t\t\ttweetAgeString += f\"{DateTimeUtil.durationSecondsToText(tweetAge.total_seconds(), precision=DateTimeUtil.MINUTES)} ago\"\n\t\t\tsuffixes.append(IrcFormattingUtil.makeTextColoured(tweetAgeString, IrcFormattingUtil.Colours.GREY))\n\t\tif addTweetUrl:\n\t\t\tsuffixes.append(\" | https://twitter.com/_/status/\") #Use _ instead of username to save some characters\n\t\t\tsuffixes.append(tweetData['id_str'])\n\t\treturn StringWithSuffix(formattedTweetText, suffixes)\n\n\tdef getTweetDescription(self, twitterUsername, tweetId, addTweetUrl=True):\n\t\t\"\"\"\n\t\tGet a display string describing the tweet from the provided ID\n\t\t:param twitterUsername: The username of the person that made the tweet\n\t\t:param tweetId: The tweet ID to get a description of\n\t\t:param addTweetUrl: If True (the default), the URL to the tweet will be added to the end of the display string\n\t\t:return: A StringWithSuffix describing the tweet, or 
None if the tweet couldn't be retrieved\n\t\t\"\"\"\n\t\tif not isinstance(tweetId, int):\n\t\t\ttweetId = int(tweetId, 10)\n\t\ttweetList = self.downloadTweets(username=twitterUsername, downloadNewerThanId=tweetId-1, downloadOlderThanId=tweetId+1, maxTweetCount=1)\n\t\tif not tweetList:\n\t\t\treturn None\n\t\treturn self.formatNewTweetText(twitterUsername, tweetList[0], addTweetAge=True, addTweetUrl=addTweetUrl)\n\n\t@staticmethod\n\tdef getTweetPostTime(createdAt):\n\t\treturn datetime.datetime.strptime(createdAt, \"%a %b %d %H:%M:%S +0000 %Y\")\n\n\tdef getDisplayName(self, username, alternativeName=None):\n\t\tif username not in self.watchData:\n\t\t\treturn username\n\t\treturn self.watchData[username].get('displayname', alternativeName if alternativeName else username)\n\n\tdef saveWatchData(self):\n\t\twatchDataFilePath = os.path.join(GlobalStore.scriptfolder, 'data', 'WatchedTwitterAccounts.json')\n\t\twith open(watchDataFilePath, 'w', encoding='utf-8') as watchDataFile:\n\t\t\twatchDataFile.write(json.dumps(self.watchData))\n","repo_name":"Didero/DideRobot","sub_path":"commands/TwitterWatcher.py","file_name":"TwitterWatcher.py","file_ext":"py","file_size_in_byte":19778,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"4763575162","text":"\r\ndef sentenceMaker(phrase):\r\n capitalized = phrase.capitalize()\r\n interrogatives = \"how\", \"what\", \"why\"\r\n if phrase.startswith(interrogatives):\r\n return (capitalized + '?')\r\n else:\r\n return (capitalized + '.')\r\n\r\ninputs = []\r\nwhile True:\r\n user_input = input(\"Say something: \")\r\n if user_input == '/end':\r\n break\r\n else:\r\n inputs.append(sentenceMaker(user_input))\r\n\r\nprint(\" \".join(inputs))","repo_name":"xraycodes/python_practice","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22540251734","text":"\"\"\"Policy for MAGE using PyTorch.\"\"\"\nfrom typing import List, Tuple\n\nfrom nnrl.nn.critic import HardValue\nfrom nnrl.optim import build_optimizer\n\nfrom raylab.agents.sop import SOPTorchPolicy\nfrom raylab.options import configure, option\nfrom raylab.policy import EnvFnMixin\nfrom raylab.policy.action_dist import WrapDeterministicPolicy\nfrom raylab.policy.losses import MAGE, MaximumLikelihood\nfrom raylab.policy.model_based.lightning import LightningModelTrainer, TrainingSpec\nfrom raylab.policy.model_based.policy import MBPolicyMixin\nfrom raylab.utils.types import StatDict\n\n\ndef default_model_training() -> dict:\n \"\"\"Model training routine used by MAGE paper.\"\"\"\n spec = TrainingSpec()\n spec.datamodule.holdout_ratio = 0.0\n spec.datamodule.max_holdout = 0\n spec.datamodule.batch_size = 256\n spec.datamodule.shuffle = True\n spec.datamodule.num_workers = 0\n spec.training.max_epochs = None\n spec.training.max_steps = 120\n spec.training.patience = None\n spec.warmup = spec.training\n return spec.to_dict()\n\n\n@configure\n@option(\"model_training\", default=default_model_training())\n@option(\"model_update_interval\", default=25)\n@option(\"improvement_steps\", default=10, override=True)\n@option(\"policy_delay\", 2, override=True)\n@option(\"batch_size\", default=1024, override=True)\n@option(\"lambda\", default=0.05, help=\"TD error regularization for MAGE loss\")\n@option(\"module/type\", \"MAGE\", override=True)\n@option(\"optimizer/models\", default={\"type\": \"Adam\", \"lr\": 
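A quick interactive check of the `sentenceMaker` helper above (expected outputs shown as comments):

```python
print(sentenceMaker("how are you"))  # -> How are you?
print(sentenceMaker("it works"))     # -> It works.
```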
1e-4, \"weight_decay\": 1e-4})\n@option(\"optimizer/actor\", default={\"type\": \"Adam\", \"lr\": 1e-4}, override=True)\n@option(\"optimizer/critics\", default={\"type\": \"Adam\", \"lr\": 1e-4}, override=True)\n@option(\"exploration_config/pure_exploration_steps\", 1000, override=True)\nclass MAGETorchPolicy(MBPolicyMixin, EnvFnMixin, SOPTorchPolicy):\n \"\"\"MAGE policy in PyTorch to use with RLlib.\n\n Attributes:\n loss_model: maximum likelihood loss for model ensemble\n loss_actor: deterministic policy gradient loss\n loss_critic: model-based action-value-gradient estimator loss\n \"\"\"\n\n # pylint:disable=too-many-ancestors\n dist_class = WrapDeterministicPolicy\n model_trainer: LightningModelTrainer\n\n def __init__(self, observation_space, action_space, config):\n super().__init__(observation_space, action_space, config)\n self._set_model_loss()\n self._set_critic_loss()\n self.build_timers()\n self.model_trainer = LightningModelTrainer(\n models=self.module.models,\n loss_fn=self.loss_model,\n optimizer=self.optimizers[\"models\"],\n replay=self.replay,\n config=self.config,\n )\n\n def _set_model_loss(self):\n self.loss_model = MaximumLikelihood(self.module.models)\n\n def _set_critic_loss(self):\n module = self.module\n target_critic = HardValue(\n policy=module.target_actor, q_value=module.target_critics\n )\n self.loss_critic = MAGE(\n critics=module.critics,\n policy=module.actor,\n target_critic=target_critic,\n models=module.models,\n )\n self.loss_critic.gamma = self.config[\"gamma\"]\n self.loss_critic.lambd = self.config[\"lambda\"]\n\n def train_dynamics_model(\n self, warmup: bool = False\n ) -> Tuple[List[float], StatDict]:\n return self.model_trainer.optimize(warmup=warmup)\n\n def compile(self):\n super().compile()\n for loss in (self.loss_model, self.loss_actor, self.loss_critic):\n loss.compile()\n\n def _set_reward_hook(self):\n self.loss_critic.set_reward_fn(self.reward_fn)\n\n def _set_termination_hook(self):\n self.loss_critic.set_termination_fn(self.termination_fn)\n\n def _make_optimizers(self):\n optimizers = super()._make_optimizers()\n config = self.config[\"optimizer\"]\n optimizers[\"models\"] = build_optimizer(self.module.models, config[\"models\"])\n return optimizers\n","repo_name":"0xangelo/raylab","sub_path":"raylab/agents/mage/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"78"} +{"seq_id":"41609747806","text":"import machine\nfrom machine import Pin, ADC, Timer\nimport utime\n\nsensor_temp = ADC(4)\nconversion_factor = 3.3 / (65535)\n\n\"\"\"\ntim = Timer()\ndef tick(timer):\n reading = sensor_temp.read_u16() * conversion_factor \n temperature = 27 - (reading - 0.706)/0.001721\n# print(temperature)\n print(\"Temperature : \" + str(temperature) + \"°C\")\n \n freqValue = int(machine.freq() / 1000000)\n print(\"MCU Clock Speed : \" + str(freqValue), \" MHz\")\n\ntim.init(freq=5, mode=Timer.PERIODIC, callback=tick)\n\"\"\"\n\nfreqValue = int(machine.freq() / 1000000)\nprint(\"MCU Clock Speed : \" + str(freqValue), \" MHz\")\n\nwhile True:\n reading = sensor_temp.read_u16() * conversion_factor \n temperature = 27 - (reading - 0.706)/0.001721\n print(\"MCU Temperature : \" + str(temperature) + \"°C\")\n \n 
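A worked example of the RP2040 conversion used in this loop: the on-die sensor reads about 0.706 V at 27 °C and drops roughly 1.721 mV per degree, so a 0.716 V reading maps to about 21.2 °C:

```python
voltage = 0.716
temperature = 27 - (voltage - 0.706) / 0.001721
print(round(temperature, 2))  # -> 21.19
```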
utime.sleep(2)","repo_name":"AxiomaticEmbedded/MLS-Hat","sub_path":"BaseA1/MicroPython/CpuTemperature.py","file_name":"CpuTemperature.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14219877363","text":"'''\nKeep two extra counters: one for how many nodes on the current level still have to be printed, and one for how many nodes the next level will print\n'''\nfrom collections import deque\nclass Solution:\n    def levelOrder(self, root: TreeNode) -> List[List[int]]:\n        if not root:\n            return []\n        result,queue=[],deque()\n        queue.append(root)\n        toBeprint = 1\n        nextlevel = 0\n        while queue:\n            mid = []\n            for _ in range(toBeprint):\n                node = queue.popleft()\n                mid.append(node.val)\n                if node.left:\n                    queue.append(node.left)\n                    nextlevel+=1\n                if node.right:\n                    queue.append(node.right)\n                    nextlevel+=1\n            toBeprint = nextlevel\n            nextlevel = 0\n            result.append(mid)\n        return result","repo_name":"KyleC14/SwordToOfferPractice","sub_path":"code/Question32-2/Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8638530926","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\n\ndef prime_under_man():  # primes below 10,000 ('man' is Korean for ten thousand)\n    arr = [2]\n    for i in range(3, 10000, 2):\n        j = 3\n        find = True\n        while j*j <= i:\n            if i % j == 0:\n                find = False\n                break\n            j += 2\n        if find:\n            arr.append(i)\n    return arr\n\n\nprimes = prime_under_man()\nfor _ in range(int(input())):\n    n = int(input())\n    min = 10000\n    arr = []\n    filtered_primes = list(filter(lambda x: x < n, primes))\n    for prime in filtered_primes:\n        if prime > n - prime:\n            continue\n        if n - prime in filtered_primes and min > abs(prime - n + prime):\n            arr = sorted([prime, n-prime])\n            min = abs(prime - n + prime)\n    print(' '.join(list(map(str, arr))))\n","repo_name":"yangwooseong/algorithm","sub_path":"boj/9020.py","file_name":"9020.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24478805745","text":"from tests.base import TestBase\n\nfrom aim.sdk.run import Run\nfrom aim.storage.context import Context\n\n\nclass TestRunSequenceHomogeneousValues(TestBase):\n    def test_different_types_on_different_contexts_and_runs(self):\n        run = Run(system_tracking_interval=None)\n        # same sequence name, different contexts\n        run.track(1., name='numbers', context={'type': 'float'})\n        run.track(1, name='numbers', context={'type': 'integer'})\n\n        run2 = Run(system_tracking_interval=None)\n        # same sequence name, different runs\n        run2.track(1, name='numbers', context={'type': 'float'})\n\n    def test_incompatible_type_during_tracking(self):\n        run = Run(system_tracking_interval=None)\n        run.track(1., name='numbers', context={})\n        with self.assertRaises(ValueError) as cm:\n            run.track(1, name='numbers', context={})\n        exception = cm.exception\n        self.assertEqual('Cannot log value \\'1\\' on sequence \\'numbers\\'. Incompatible data types.', exception.args[0])\n\n    def test_incompatible_type_after_tracking_restart(self):\n        run = Run(system_tracking_interval=None)\n        run_hash = run.hash\n        run.track(1., name='numbers', context={})\n        run.finalize()\n        del run\n\n        new_run = Run(run_hash=run_hash, system_tracking_interval=None)\n        with self.assertRaises(ValueError) as cm:\n            new_run.track(1, name='numbers', context={})\n        exception = cm.exception\n        self.assertEqual('Cannot log value \\'1\\' on sequence \\'numbers\\'. 
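The two-counter bookkeeping in the level-order traversal above is often written by snapshotting the queue length once per level instead; an equivalent sketch:

```python
# len(queue) at the top of each pass is exactly the current level's node count.
from collections import deque

def level_order(root):
    if not root:
        return []
    result, queue = [], deque([root])
    while queue:
        level = []
        for _ in range(len(queue)):
            node = queue.popleft()
            level.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        result.append(level)
    return result
```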
Incompatible data types.', exception.args[0])\n\n def test_type_compatibility_for_empty_list(self):\n run = Run(system_tracking_interval=None)\n context = {}\n ctx = Context(context)\n seq_name = 'obj_list'\n\n sequence_info = run.meta_run_tree.subtree(('traces', ctx.idx, seq_name))\n typed_sequences_info = run.meta_tree.subtree('traces_types')\n\n run.track([], name=seq_name, context=context)\n self.assertEqual('list', sequence_info['dtype'])\n self.assertEqual(1, typed_sequences_info['list', ctx.idx, seq_name])\n self.assertIsNone(typed_sequences_info.get(('list(float)', ctx.idx, seq_name), None))\n\n run.track([], name=seq_name, context=context)\n self.assertEqual('list', sequence_info['dtype'])\n self.assertIsNone(typed_sequences_info.get(('list(float)', ctx.idx, seq_name), None))\n\n run.track([1.], name=seq_name, context=context)\n self.assertEqual('list(float)', sequence_info['dtype'])\n self.assertEqual(1, typed_sequences_info['list(float)', ctx.idx, seq_name])\n\n run.track([], name=seq_name, context=context)\n self.assertEqual('list(float)', sequence_info['dtype'])\n\n with self.assertRaises(ValueError) as cm:\n run.track([5], name=seq_name, context=context)\n exception = cm.exception\n self.assertEqual(\n f'Cannot log value \\'{[5]}\\' on sequence \\'{seq_name}\\'. Incompatible data types.',\n exception.args[0])\n","repo_name":"Dong-Ki-Lee/aim-test","sub_path":"tests/sdk/test_run_track_type_checking.py","file_name":"test_run_track_type_checking.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71916894973","text":"def longestConsecutive(nums: list[int]) -> int:\n nSet = set(nums)\n output = 0\n longest = 0 \n for i in range(len(nums)):\n # print(\"numsi: \", str(nums[i]))\n if (nums[i] - 1) not in nSet:\n longest = 0\n while nums[i] + longest in nSet:\n # print(\"numsi + longest: \" + str(nums[i] + longest))\n longest += 1\n output = max(output, longest)\n return output\n\n\nprint(\n longestConsecutive(nums = [9,1,4,7,3,-1,0,5,8,-1,6])\n)","repo_name":"IonHaryono/LeetCode","sub_path":"128. Longest Consecutive Sequence.py","file_name":"128. 
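Tracing the sample call above: the set is {-1, 0, 1, 3, 4, 5, 6, 7, 8, 9}, only -1 and 3 start runs (their predecessors are absent from the set), and the longer run 3..9 has length 7:

```python
assert longestConsecutive([9, 1, 4, 7, 3, -1, 0, 5, 8, -1, 6]) == 7
```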
Longest Consecutive Sequence.py","file_name":"128. Longest Consecutive Sequence.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39226012448","text":"\r\nfrom collections import deque\r\nimport sys\r\n\r\ninput = sys.stdin.readline\r\nn, m = map(int, input().split())\r\n\r\n# graph\r\ngraph = [list(input().rstrip()) for _ in range(n)]\r\n\r\n# visited: a 4-dimensional array over the joint (red, blue) positions\r\nvisited = [[[[False] * m for _ in range(n)] for _ in range(m)] for _ in range(n)]\r\n\r\ndx = [-1, 1, 0, 0]\r\ndy = [0, 0, -1, 1]\r\n\r\nq = deque()\r\n\r\n# init() to find R, B index\r\ndef init():\r\n    rx, ry, bx, by = 0, 0, 0, 0\r\n    for i in range(n):\r\n        for j in range(m):\r\n            if graph[i][j] == \"R\":\r\n                rx, ry = i, j\r\n\r\n            elif graph[i][j] == \"B\":\r\n                bx, by = i, j\r\n\r\n    q.append((rx, ry, bx, by, 0))\r\n    visited[rx][ry][bx][by] = True\r\n\r\n\r\n# move() loop while not # not O\r\ndef move(x, y, dx, dy, cnt):\r\n    # while next idx isNotWall and current idx isNotHole\r\n    while graph[x + dx][y + dy] != \"#\" and graph[x][y] != \"O\":\r\n        x += dx\r\n        y += dy\r\n        cnt += 1\r\n    return x, y, cnt\r\n\r\n\r\n# bfs() 4 dir search while depth < 10 / R != O\r\ndef bfs():\r\n\r\n    while q:\r\n        rx, ry, bx, by, depth = q.popleft()\r\n\r\n        if depth >= 10:\r\n            break\r\n\r\n        for i in range(4):\r\n            nrx, nry, rcnt = move(rx, ry, dx[i], dy[i], 0)\r\n            nbx, nby, bcnt = move(bx, by, dx[i], dy[i], 0)\r\n\r\n            # if B == \"O\": continue\r\n            if graph[nbx][nby] == \"O\":\r\n                continue\r\n\r\n            if graph[nrx][nry] == \"O\":\r\n                print(1)\r\n                return\r\n\r\n            if nrx == nbx and nry == nby:\r\n                if rcnt > bcnt:\r\n                    nrx, nry = nrx - dx[i], nry - dy[i]\r\n                else:  # rcnt < bcnt:\r\n                    nbx, nby = nbx - dx[i], nby - dy[i]\r\n\r\n            # visited check:\r\n            if not visited[nrx][nry][nbx][nby]:\r\n                visited[nrx][nry][nbx][nby] = True\r\n                q.append((nrx, nry, nbx, nby, depth + 1))\r\n\r\n    print(0)\r\n\r\n\r\ninit()\r\nbfs()","repo_name":"unboxing96/ALGO","sub_path":"백준/Gold/13459. 
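The 4-D `visited` list above indexes the joint (red, blue) position. An equivalent and often simpler encoding keeps that joint state as a tuple in a set (a sketch, not the submitted solution):

```python
visited = set()
state = (0, 1, 2, 3)  # (rx, ry, bx, by)
if state not in visited:
    visited.add(state)
```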
구슬 탈출/구슬 탈출.py","file_name":"구슬 탈출.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13430206336","text":"\n# Fine-tune the multilingual T5 (mT5) on a Seq2Seq task\n# Write-up: https://kexue.fm/archives/7867\n# Dataset: the CSL dataset from https://github.com/CLUEbenchmark/CLGE\n# Adds the evaluation metrics bleu, rouge-1, rouge-2 and rouge-l\n# mt5 highlights: gated-gelu, an independent final dense layer in the decoder, rmsnorm\n# valid_data: {'rouge-1': 0.43454686332522263, 'rouge-2': 0.3217250949304608, 'rouge-l': 0.42204007502153934, 'bleu': 0.16675070297852404, 'best_bleu': 0.16675070297852404}\n\nimport json\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom nlpcol.callback import Callback\nfrom nlpcol.config import TrainConfig, WordDir, device\nfrom nlpcol.model import build_transformer_model\nfrom nlpcol.tokenizers import SpTokenizer\nfrom nlpcol.trainer import Trainer\nfrom nlpcol.utils.snippets import (ListDataset, get_pool_emb,\n                                   save_model_parameter, seed_everything,\n                                   sequence_padding, torch_gc)\nfrom nlpcol.utils.snippets4examples import model_name_gene, trans_entity2tuple\nfrom nltk.translate.bleu_score import SmoothingFunction, sentence_bleu\nfrom rouge import Rouge  # pip install rouge\nfrom tqdm import tqdm\n\n# Basic parameters\nmax_c_len = 256\nmax_t_len = 32\nbatch_size = 16\nepochs = 50\nsteps_per_epoch = None\npad_token_id = -100\n\ntrain_config = TrainConfig(batch_size=batch_size, epochs=epochs, max_seq_length=128)\n\n\nmodel_path = \"/home/dataset/pretrain_ckpt/t5/mt5-base\"\nconfig_path = model_path + \"/config.json\"\ncheckpoint_path = model_path + '/pytorch_model.bin'\n# spm_path = model_path + '/spiece.model'\n# The two files below are taken from bert4keras; project link: https://github.com/bojone/t5_in_bert4keras\nspm_path = model_path + '/sentencepiece_cn.model'\nkeep_tokens_path = model_path + '/sentencepiece_cn_keep_tokens.json'\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nseed_everything(42)\n\n\ntokenizer = SpTokenizer(spm_path, token_start=None, token_end='</s>')\nkeep_tokens = json.load(open(keep_tokens_path))\n\n\nmodel = build_transformer_model(\n    checkpoint_path,\n    config_path,\n    model='t5',\n    extra_config={\"max_seq_length\": 512},\n    keep_tokens=keep_tokens,  # keep only the tokens listed in keep_tokens, trimming the original vocab\n    skip_init=True\n).to(device)\n\n\ndef generate(text):\n    input_ids, _ = tokenizer.encode(text)\n    input_ids = torch.tensor([input_ids], device=device)\n\n    # logits = model.generate(input_ids, mode='do_sample', top_k=20, top_p=0.9, temperature=0.9)\n    logits = model.generate(input_ids, mode='greedy_search')\n    # logits = model.generate(input_ids, mode='beam_search', num_beams=4)\n\n    logits=logits[:,1:] # strip the bos token\n    predict_label = [tokenizer.decode(i) for i in logits]\n    return predict_label[0]\n\n\n# Prepare the data\nclass MyDataset(ListDataset):\n    @staticmethod\n    def load_data(filename):\n        \"\"\"Load the data\n        Per-item format: (title, body)\n        \"\"\"\n        D = []\n        with open(filename, encoding='utf-8') as f:\n            for l in f:\n                l = json.loads(l)\n                title, content = l['title'], l['abst']\n                D.append((title, content))\n        return D\n\ndef collate_fn(batch):\n    \"\"\"TODO: move padding into the tokenizer\n    \"\"\"\n    batch_content_ids, batch_titile_ids = [], []\n    for title, content in batch:\n        content_ids, _ = tokenizer.encode(content, maxlen=max_c_len)\n        batch_content_ids.append(content_ids)\n\n        titile_ids, _ = tokenizer.encode(title, maxlen=max_t_len)\n        batch_titile_ids.append(titile_ids)\n\n    batch_content_ids = torch.tensor(sequence_padding(batch_content_ids), dtype=torch.long, device=device)\n    batch_titile_ids = torch.tensor(sequence_padding(batch_titile_ids, 
value=-100), dtype=torch.long, device=device)\n    \n    return {\n        \"input_ids\": batch_content_ids,\n        \"labels\": batch_titile_ids,\n    }\n    \ntrain_dataset = MyDataset('/home/dataset/corpus/seq2seq/summary/csl_title_public/csl_title_train.json')\nvalid_dataset = MyDataset('/home/dataset/corpus/seq2seq/summary/csl_title_public/csl_title_dev.json')\n\n\n# Define the training procedure\noptimizer = optim.Adam(model.parameters(), 1e-4)\nsave_path = model_name_gene(train_config, 'mt5', 'csl_title_public', prefix='test')\n\n\n# loss_fn = CrossEntropyLoss()\ntrainer = Trainer(model, train_config, optimizer=optimizer, collate_fn=collate_fn)\n\n\nclass Evaluator(Callback):\n    \"\"\"Evaluate and save\n    \"\"\"\n    def __init__(self, trainer:Trainer, save_path:str):\n        self.rouge = Rouge()\n        self.smooth = SmoothingFunction().method1\n        self.best_bleu = 0.\n        self.trainer = trainer\n        self.save_path = save_path\n\n    def on_epoch_end(self, steps, epoch, logs=None):\n        just_show()\n        metrics = self.evaluate()  # evaluate the model\n        if metrics['bleu'] > self.best_bleu:\n            self.best_bleu = metrics['bleu']\n            self.trainer.save_weights(self.save_path)\n\n        metrics['best_bleu'] = self.best_bleu\n        print('valid_data:', metrics)\n    \n    def evaluate(self):\n        total = 0\n        rouge_1, rouge_2, rouge_l, bleu = 0, 0, 0, 0\n        for title, content in tqdm(valid_dataset):\n            total += 1\n            title = ' '.join(title).lower()\n            pred_title = ' '.join(generate(content)).lower()\n            if pred_title.strip():\n                scores = self.rouge.get_scores(hyps=pred_title, refs=title)\n                rouge_1 += scores[0]['rouge-1']['f']\n                rouge_2 += scores[0]['rouge-2']['f']\n                rouge_l += scores[0]['rouge-l']['f']\n                bleu += sentence_bleu(references=[title.split(' ')], hypothesis=pred_title.split(' '),\n                                      smoothing_function=self.smooth)\n        rouge_1, rouge_2, rouge_l, bleu = rouge_1/total, rouge_2/total, rouge_l/total, bleu/total\n        return {'rouge-1': rouge_1, 'rouge-2': rouge_2, 'rouge-l': rouge_l, 'bleu': bleu}\n\n\ndef just_show():\n    s1 = u'抽象了一种基于中心的战术应用场景与业务,并将网络编码技术应用于此类场景的实时数据多播业务中。在分析基于中心网络与Many-to-all业务模式特性的基础上,提出了仅在中心节点进行编码操作的传输策略以及相应的贪心算法。分析了网络编码多播策略的理论增益上界,仿真试验表明该贪心算法能够获得与理论相近的性能增益。最后的分析与仿真试验表明,在这种有中心网络的实时数据多播应用中,所提出的多播策略的实时性能要明显优于传统传输策略。'\n    s2 = u'普适计算环境中未知移动节点的位置信息是定位服务要解决的关键技术。在普适计算二维空间定位过程中,通过对三角形定位单元区域的误差分析,提出了定位单元布局(LUD)定理。在此基础上,对多个定位单元布局进行了研究,定义了一个新的描述定位单元中定位参考点覆盖效能的物理量——覆盖基,提出了在误差最小情况下定位单元布局的覆盖基定理。仿真实验表明定位单元布局定理能更好地满足对普适终端实时定位的需求,且具有较高的精度和最大覆盖效能。'\n    for s in [s1, s2]:\n        print(u'Generated title:', generate(s))\n\n\nif __name__ == '__main__':\n    evaluator = Evaluator(trainer, save_path)\n    print(u'Generated title:', generate(u'中国的首都是extra0京'))  # matches the huggingface result\n    # '北京 北京 北京 首都'\n\n    trainer.train(train_dataset, callbacks=[evaluator])\n\n# else:\n#     trainer.load_weights(save_path)\n","repo_name":"zouweidong91/nlpcol","sub_path":"examples/seq2seq/autotitle_csl_mt5.py","file_name":"autotitle_csl_mt5.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15199239170","text":"# -*- encoding: UTF-8 -*-\n\nimport logging\nfrom strategy import turtle_trade\n\n\n# The \"parking apron\" strategy\ndef check(code_name, data, end_date=None, threshold=15):\n    origin_data = data\n\n    if end_date is not None:\n        mask = (data['日期'] <= end_date)\n        data = data.loc[mask]\n\n    if len(data) < threshold:\n        logging.debug(\"{0}: sample is smaller than {1} days...\\n\".format(code_name, threshold))\n        return\n\n    data = data.tail(n=threshold)\n\n    flag = False\n\n    # Find the limit-up days\n    for index, row in data.iterrows():\n        try:\n            if float(row['p_change']) > 9.5:\n                if turtle_trade.check_enter(code_name, origin_data, row['日期'], threshold):\n                    
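In the mT5 collate function above, label sequences are padded with -100 so padded positions contribute nothing to the loss: PyTorch's cross-entropy skips targets equal to its default `ignore_index` of -100. A minimal demonstration:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(3, 5)              # 3 positions, 5-token vocabulary
labels = torch.tensor([2, 4, -100])     # the last position is padding
loss = F.cross_entropy(logits, labels)  # averaged over the first two targets only
print(loss.item())
```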
if check_internal(code_name, data, row):\n flag = True\n except KeyError as error:\n logging.debug(\"{}处理异常:{}\".format(code_name, error))\n\n return flag\n\n\ndef check_internal(code_name, data, limitup_row):\n limitup_price = limitup_row['收盘']\n limitup_end = data.loc[(data['日期'] > limitup_row['日期'])]\n limitup_end = limitup_end.head(n=3)\n if len(limitup_end.index) < 3:\n return False\n\n consolidation_day1 = limitup_end.iloc[0]\n consolidation_day23 = limitup_end = limitup_end.tail(n=2)\n\n if not(consolidation_day1['收盘'] > limitup_price and consolidation_day1['开盘'] > limitup_price and\n 0.97 < consolidation_day1['收盘'] / consolidation_day1['开盘'] < 1.03):\n return False\n\n threshold_price = limitup_end.iloc[-1]['收盘']\n\n for index, row in consolidation_day23.iterrows():\n try:\n if not (0.97 < (row['收盘'] / row['开盘']) < 1.03 and -5 < row['p_change'] < 5\n and row['收盘'] > limitup_price and row['开盘'] > limitup_price):\n return False\n except KeyError as error:\n logging.debug(\"{}处理异常:{}\".format(code_name, error))\n\n logging.debug(\"股票{0} 涨停日期:{1}\".format(code_name, limitup_row['日期']))\n\n return True\n\n","repo_name":"sngyai/Sequoia","sub_path":"strategy/parking_apron.py","file_name":"parking_apron.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":1505,"dataset":"github-code","pt":"78"} +{"seq_id":"42362547000","text":"try:\n from pip import main as pipmain\nexcept:\n from pip._internal import main as pipmain\nimport os\n\n\nfor dirpath, dirnames, filenames in os.walk(\".\"):\n for filename in filenames:\n if filename==\"numpy-1.14.5+mkl-cp27-cp27m-win32.whl\":\n fullFileName = (os.path.realpath(os.path.join(dirpath, filename)))\npipmain(['install','wheel'])\n# pipmain(['uninstall','-y','numpy'])\n# pipmain(['uninstall','-y','pandas'])\npipmain(['install',\"pandas\"])\n# pipmain(['uninstall','-y','pygeoprocessing'])\npipmain(['install','pygeoprocessing==0.3.3'])\npipmain(['install','Image'])\n\npipmain(['install',fullFileName])\n","repo_name":"priyanshukumar0309/DHM-WM","sub_path":"libraries.py","file_name":"libraries.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35449747471","text":"import json\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom aiogram.types.web_app_info import WebAppInfo\nimport config\n\nbot = Bot(config.BOT_TOKEN)\ndp = Dispatcher(bot)\n\n\n@dp.message_handler(commands=[\"start\"])\nasync def start(message: types.Message):\n markup = types.ReplyKeyboardMarkup()\n markup.add(types.KeyboardButton(\"Open web\", web_app=WebAppInfo(url=\"https://itproger.com/telegram.html\")))\n await message.answer(\"Hello!\", reply_markup=markup)\n\n\n@dp.message_handler(content_types=[\"web_app_data\"])\nasync def web_app(message: types.Message):\n res = json.loads(message.web_app_data.data)\n await message.answer(f\"Name: {res['name']}. Email: {res['email']}. 
Phone: {res['phone']}\")\n\n\n@dp.message_handler(commands=[\"payment\"])\nasync def payment(message: types.Message):\n await bot.send_invoice(message.chat.id, \"Buy product\", \"Product\", \"invoice\", config.PAYMENT_TOKEN, \"USD\",\n [types.LabeledPrice(\"Buy product\", 5 * 100)])\n\n\n@dp.message_handler(content_types=types.ContentType.SUCCESSFUL_PAYMENT)\nasync def success(message: types.Message):\n await message.answer(f\"Success: {message.successful_payment.order_info}\")\n\nexecutor.start_polling(dp)\n","repo_name":"AlisonSav/Aiogram-web-app-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73888001852","text":"class Errors:\n\n @staticmethod\n def getMessage(code):\n for attribute in Errors.__dict__:\n if attribute.islower():\n continue\n if Errors.__dict__[attribute] == code:\n return attribute.replace('_', ' ').capitalize()\n return ''\n\n SUCCESS = 0\n SEVER_ERROR = 1000\n","repo_name":"itang85/main","sub_path":"app/common/base/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1053338736","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os\n\nimport torch\nfrom src import logger\nfrom src.baseline.modeling import ModelingCtcBert\nfrom src.baseline.tokenizer import CtcTokenizer\n\n\nclass PredictorCtc:\n def __init__(\n self,\n in_model_dir,\n ctc_label_vocab_dir='src/baseline/ctc_vocab',\n use_cuda=True,\n cuda_id=None,\n ):\n\n self.in_model_dir = in_model_dir\n self.model = ModelingCtcBert.from_pretrained(\n in_model_dir)\n self._id2dtag, self._dtag2id, self._id2ctag, self._ctag2id = self.load_label_dict(\n ctc_label_vocab_dir)\n logger.info('model loaded from dir {}'.format(\n self.in_model_dir))\n self.use_cuda = use_cuda\n if self.use_cuda and torch.cuda.is_available():\n if cuda_id is not None:\n torch.cuda.set_device(cuda_id)\n self.model.cuda()\n self.model.half()\n self.model.eval()\n self.tokenizer = CtcTokenizer.from_pretrained(in_model_dir)\n\n try:\n self._start_vocab_id = self.tokenizer.vocab['[START]']\n except KeyError:\n self._start_vocab_id = self.tokenizer.vocab['[unused1]']\n\n def load_label_dict(self, ctc_label_vocab_dir):\n dtag_fp = os.path.join(ctc_label_vocab_dir, 'ctc_detect_tags.txt')\n ctag_fp = os.path.join(ctc_label_vocab_dir, 'ctc_correct_tags.txt')\n\n id2dtag = [line.strip() for line in open(dtag_fp, encoding='utf8')]\n d_tag2id = {v: i for i, v in enumerate(id2dtag)}\n\n id2ctag = [line.strip() for line in open(ctag_fp, encoding='utf8')]\n c_tag2id = {v: i for i, v in enumerate(id2ctag)}\n logger.info('d_tag num: {}, d_tags:{}'.format(len(id2dtag), d_tag2id))\n return id2dtag, d_tag2id, id2ctag, c_tag2id\n\n def id_list2ctag_list(self, id_list) -> list:\n\n return [self._id2ctag[i] for i in id_list]\n\n @torch.no_grad()\n def predict(self, texts, return_topk=1, batch_size=32):\n if isinstance(texts, str):\n texts = [texts]\n else:\n texts = texts\n outputs = []\n for start_idx in range(0, len(texts), batch_size):\n batch_texts = texts[start_idx:start_idx+batch_size]\n\n batch_texts = [' ' + t for t in batch_texts] # 开头加一个占位符\n inputs = self.tokenizer(batch_texts,\n return_tensors='pt')\n # 把 ' ' 换成 _start_vocab_id\n inputs['input_ids'][..., 1] = self._start_vocab_id\n if self.use_cuda and torch.cuda.is_available():\n inputs['input_ids'] = 
inputs['input_ids'].cuda()\n inputs['attention_mask'] = inputs['attention_mask'].cuda()\n inputs['token_type_ids'] = inputs['token_type_ids'].cuda()\n\n d_preds, preds, loss = self.model(\n input_ids=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n token_type_ids=inputs['token_type_ids'],\n )\n\n preds = torch.softmax(preds[:, 1:, :], dim=-1) # 从cls后面开始\n recall_top_k_probs, recall_top_k_ids = preds.topk(\n k=return_topk, dim=-1, largest=True, sorted=True)\n recall_top_k_probs = recall_top_k_probs.tolist()\n recall_top_k_ids = recall_top_k_ids.tolist()\n recall_top_k_chars = [[self.id_list2ctag_list(\n char_level) for char_level in sent_level] for sent_level in recall_top_k_ids]\n batch_texts = [['']+list(t)[1:] for t in batch_texts] # 占位符\n batch_outputs = [list(zip(text, top_k_char, top_k_prob)) for text, top_k_char, top_k_prob in zip(\n batch_texts, recall_top_k_chars, recall_top_k_probs)]\n outputs.extend(batch_outputs)\n return outputs\n\n @staticmethod\n def output2text(output):\n\n pred_text = ''\n for src_token, pred_token_list, pred_prob_list in output:\n pred_token = pred_token_list[0]\n if '$KEEP' in pred_token:\n pred_text += src_token\n elif '$DELETE' in pred_token:\n continue\n elif '$REPLACE' in pred_token:\n pred_text += pred_token.split('_')[-1]\n elif '$APPEND' in pred_token:\n pred_text += src_token+pred_token.split('_')[-1]\n\n return pred_text","repo_name":"bitallin/MiduCTC-competition","sub_path":"src/baseline/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"78"} +{"seq_id":"27708390021","text":"import twisted.internet.asyncioreactor\r\ntwisted.internet.asyncioreactor.install()\r\nfrom twisted.internet import reactor, task\r\nimport ipywidgets, datetime, subprocess, functools, os\r\n\r\nclass DoneError(Exception):\r\n pass\r\n\r\ndef time_out_counter(reactor):\r\n label = ipywidgets.Label(\"Time left: 5:00\")\r\n current_seconds = datetime.timedelta(minutes=5).total_seconds()\r\n def decrement(count):\r\n nonlocal current_seconds\r\n current_seconds -= count\r\n time_left = datetime.timedelta(seconds=max(current_seconds, 0))\r\n minutes, left = divmod(time_left, minute)\r\n seconds = int(left.total_seconds())\r\n label.value = f\"Time left: {minutes}:{seconds:02}\"\r\n if current_seconds < 0:\r\n raise DoneError(\"finished\")\r\n minute = datetime.timedelta(minutes=1)\r\n call = task.LoopingCall.withCount(decrement)\r\n call.reactor = reactor\r\n d = call.start(1)\r\n d.addErrback(lambda f: f.trap(DoneError))\r\n return d, label\r\n\r\ndef editor(fname):\r\n textarea = ipywidgets.Textarea(continuous_update=False)\r\n textarea.rows = 20\r\n output = ipywidgets.Output()\r\n runner = functools.partial(subprocess.run, capture_output=True, text=True, check=True)\r\n def save(_ignored):\r\n with output:\r\n with open(fname, \"w\") as fpout:\r\n fpout.write(textarea.value)\r\n print(\"Sending...\", end='')\r\n try:\r\n runner([\"git\", \"add\", fname])\r\n runner([\"git\", \"commit\", \"-m\", f\"updated {fname}\"])\r\n runner([\"git\", \"push\"])\r\n except subprocess.CalledProcessError as exc:\r\n print(\"Could not send\")\r\n print(exc.stdout)\r\n print(exc.stderr)\r\n else:\r\n print(\"Done\")\r\n textarea.observe(save, names=\"value\")\r\n return textarea, output, save\r\n\r\ndef journal():\r\n date = str(datetime.date.today())\r\n title = f\"Log: Startdate {date}\"\r\n filename = os.path.join(f\"{date}.txt\")\r\n d, clock = 
time_out_counter(reactor)\r\n    textarea, output, save = editor(filename)\r\n    box = ipywidgets.VBox([\r\n        ipywidgets.Label(title),\r\n        textarea,\r\n        clock,\r\n        output\r\n    ])\r\n    d.addCallback(save)\r\n    return box\r\n\r\njournal()","repo_name":"nsanka/scripts","sub_path":"python/journal.py","file_name":"journal.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72715120893","text":"# Tried this problem again after a long time, couldn't figure it out at all, so I looked up the approach\n# Seems like you need to know some math for this one\nimport math\n\nt = int(input())\ns = []\na = []\ngcd = 0\nfor i in range(t):\n    s.append(int(input()))\n    if i == 1:\n        gcd = abs(s[1] - s[0])\n    gcd = math.gcd(abs(s[i] - s[i - 1]), gcd)\ngcd_a = int(gcd ** 0.5)\nfor i in range(2, gcd_a + 1):\n    if gcd % i == 0:\n        a.append(i)\n        a.append(gcd // i)\na.append(gcd)\na = list(set(a))\na.sort()\nfor i in a:\n    print(i, end = ' ')","repo_name":"hhh57463/algorithm","sub_path":"code/2981.py","file_name":"2981.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39197877734","text":"from invoke import Program, Collection\nimport sys\nimport core\nimport plugins\nimport pkgutil\n\n\ndef unhandled_exception(type, value, traceback):\n    \"\"\"Override for `sys.excepthook` without stack trace\"\"\"\n    print(f\"{type.__name__}: {str(value)}\")\n\n\nclass VastCloudProgram(Program):\n    \"\"\"A custom Program that doesn't print useless core options\"\"\"\n\n    def print_help(self) -> None:\n        print(\n            f\"\"\"Usage: {self.binary} [--core-opts] <subcommand> [--subcommand-opts] ...\n\nCore options:\n\n  -e, --echo                     Echo executed commands before running.\n  -h [STRING], --help[=STRING]   Show core or per-task help and exit.\n  -V, --version                  Show version and exit. 
\n\"\"\"\n )\n self.list_tasks()\n\n\nif __name__ == \"__main__\":\n sys.excepthook = unhandled_exception\n\n namespace = Collection.from_module(core)\n\n # import all the modules in the plugins folder as collections\n for importer, modname, ispkg in pkgutil.iter_modules(plugins.__path__):\n mod = importer.find_module(modname).load_module(modname)\n namespace.add_collection(Collection.from_module(mod))\n\n program = VastCloudProgram(\n binary=\"./vast-cloud\",\n namespace=namespace,\n version=\"0.1.0\",\n )\n program.run()\n","repo_name":"25280841/vast","sub_path":"cloud/aws/cli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"3716032670","text":"import random\n\nTRAITS = [\n \"shy\",\n \"hesitant\",\n \"embarrassed\",\n \"confused\",\n \"sad\",\n \"scared\",\n \"nervous\",\n \"happy\",\n \"starstruck\",\n \"excited\",\n \"crazy\",\n]\n\n\ndef random_trait() -> str:\n \"\"\"Randomly sample a character trait for the guest of The Expert Zone.\"\"\"\n return random.choice(TRAITS)\n","repo_name":"camille-vanhoffelen/wet-toast-talk-radio","sub_path":"wet_toast_talk_radio/scriptwriter/prolove/traits.py","file_name":"traits.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"74317014973","text":"import collections\nimport json\nimport pandas as pd\nimport sys\n\nimport valentine as va\nimport valentine.algorithms \nimport valentine.metrics as valentine_metrics\nfrom collections import defaultdict\nfrom scipy.optimize import linprog\nfrom gurobipy import *\n\n\ndef json_to_dataframe(json_file_path):\n \"\"\"\n Purpose: Normalize the JSON file\n @param: json_file_path: Path of JSON file\n @returns: dataframe: Flat table \n \"\"\"\n df_list = []\n with open(json_file_path) as json_file:\n for line in json_file:\n json_data = json.loads(line)\n df = pd.json_normalize(json_data)\n df_list.append(df)\n return pd.concat(df_list).reset_index(drop = True)\n\n\ndef find_valentine(df1, df2, ground_truth):\n \"\"\"\n Purpose: Match source keys to target keys\n @param: df1: Target dataframe, df2: Source dataframe\n @returns: match_dict: dictionary of matching keys\n \"\"\"\n matcher = valentine.algorithms.SimilarityFlooding()\n matches = va.valentine_match(df1, df2, matcher, \"target\", \"source\")\n metrics = valentine_metrics.all_metrics(matches, ground_truth)\n match_dict = defaultdict(list)\n for match in ground_truth:\n match_dict[match[0]].append(match[1])\n return match_dict\n\n\ndef linear_programming(match_dict):\n problem = LpProblem(name = \"Match_columns\", sense = LpMaximize)\n nested_pairs = {} \n all_variables = []\n\n for t_key, s_keys in match_dict.items():\n target_prefix = \"\"\n # Get the prefix of the target key if it has one\n if '.' not in t_key:\n target_prefix = '_tROOT_'\n else:\n target_prefix, _ = t_key.rsplit('.', maxsplit = 1)\n target_prefix = '_tROOT_' + target_prefix\n \n variables = []\n nested_vars = {}\n \n # Loop over source keys that match with target key\n for s_key in s_keys:\n # Create binary variable for matching source key\n var_name = 'T_' + t_key + '-----' + 'S_' + s_key\n s_x = LpVariable(name = var_name, lowBound = 0, upBound = 1, cat = \"Binary\")\n all_variables.append(s_x)\n variables.append(s_x)\n \n # Get the prefix of the source key if it has one. Create a.b_O.d, ...\n if '.' 
not in s_key:\n source_prefix = '_sROOT_'\n else:\n source_prefix, _ = s_key.rsplit('.', maxsplit = 1)\n source_prefix = '_sROOT_' + source_prefix\n \n # Get the nested key\n nested_key = 'T_' + t_key + '_O_S' + source_prefix\n # Create a variable for each nested key\n if nested_key not in nested_vars:\n nested_vars[nested_key] = LpVariable(name = nested_key, lowBound = 0, upBound = 1, cat = \"Binary\")\n nested_var = nested_vars[nested_key]\n #print(\"nested\", nested_var)\n \n # Constraint a.O_d >= a.b_d.e (and a.O_d >= a.b_d.f, ...)\n problem += (nested_var >= s_x)\n\n # Create a variable for pair key. P.a_O.d (and P.a_O, ...)\n pair_key = 'T' + target_prefix + 'S' + source_prefix\n if pair_key not in nested_pairs:\n nested_pairs[pair_key] = LpVariable(name = pair_key, lowBound = 0, upBound = 1, cat = \"Binary\")\n pair_var = nested_pairs[pair_key]\n #print(\"pair\", pair_var)\n # Constraint P.a_O.d <= a.b_O.d (and P.a_O.d >= a.c_O.d, ...)\n problem += pair_var >= nested_var\n\n # At most one matching for each path in the source schema\n problem += lpSum(variables) <= 1\n\n # Objective function: P.a_O.d + P.a_O + ...\n # problem += lpSum(list(nested_pairs.values()))\n problem += lpSum(all_variables)\n LpSolverDefault.msg = 1 \n # Solve the objective function\n status = problem.solve(PULP_CBC_CMD(msg = 1))\n print(\"Status:\", LpStatus[status])\n\n # Loop over problem variables and print their optimum values\n for variable in problem.variables():\n print(variable.name, \"=\", variable.varValue)\n \n\ndef get_key_prefix(key):\n \"\"\"\n Purpose: Get the prefix of a key\n @param: key: target or source key\n @returns: prefix\n \"\"\"\n if '.' not in key:\n prefix = 'ROOT_'\n else:\n prefix, _ = key.rsplit('.', maxsplit = 1)\n prefix = 'ROOT_' + prefix\n return prefix\n\n\ndef show_matches(source_vars, match_dict):\n '''\n Purpose: Store final matches in dictionary\n @param: source_vars: variables of source keys, match_dict: pairs of keys that possibly match\n @returns: dictionary of keys that definitely match\n '''\n final_matching = {}\n for s_keys in match_dict.values():\n for s_key in s_keys:\n for (s_var, t_key) in source_vars[s_key]:\n if bool(round(s_var.X)):\n final_matching[s_key] = t_key\n break\n return final_matching\n\n\ndef quadratic_programming(match_dict):\n \"\"\"\n Purpose: Create a quadratic model to choose the best match\n @param: match_dict: Dictionary of keys that can possibly match\n @returns: final_match: Dictionary of keys that match\n \"\"\"\n #try:\n\n # Initialize the quadratic model\n quadratic_model = Model(\"quadratic\")\n\n # Dictionary to store the binary variables for nested target keys\n nested_t_vars = {}\n\n # Dictionary to store the binary variables for root target and source keys\n root_vars = {} \n\n # Dictionary to store source variables\n source_vars = collections.defaultdict(list)\n\n # List to store all the quadratic variables\n all_vars = []\n\n # Loop over matching target and source keys\n for t_key, s_keys in match_dict.items():\n target_prefix = 't' + get_key_prefix(t_key)\n\n # List to store the binary variables for nested source keys\n nested_s_vars = {}\n \n # Loop over source keys\n for s_key in s_keys:\n # Create a binary variable for the match between target and source keys\n nested_var_name = 'T_' + t_key + '-----' + 'S_' + s_key\n s_var = quadratic_model.addVar(name=nested_var_name, vtype=GRB.BINARY) \n source_vars[s_key].append((s_var, t_key))\n all_vars.append(s_var)\n \n # Create a binary variable for \"nested\" target keys\n 
nested_t_key = 'T_' + t_key + '-----' + 'S_' + 's' + get_key_prefix(s_key)\n if nested_t_key not in nested_t_vars:\n nested_t_vars[nested_t_key] = quadratic_model.addVar(name=nested_t_key, vtype=GRB.BINARY)\n nested_t_var = nested_t_vars[nested_t_key]\n \n # Create a binary variable for \"root\" target keys\n root_var_name = 'T_' + target_prefix + '-----' + 'S_' + 's' + get_key_prefix(s_key)\n if root_var_name not in root_vars:\n root_vars[root_var_name] = quadratic_model.addVar(name=root_var_name, vtype=GRB.BINARY)\n root_var = root_vars[root_var_name]\n \n # Create a binary variable for \"nested\" source keys\n nested_s_key = 'T_' + target_prefix + '-----' + 'S_' + s_key\n if nested_s_key not in nested_s_vars:\n nested_s_vars[nested_s_key] = quadratic_model.addVar(name=nested_s_key, vtype=GRB.BINARY)\n nested_s_var = nested_s_vars[nested_s_key]\n\n # Add constraints\n quadratic_model.addConstr(nested_t_var - root_var <= 0, name=\"Matching nested target implies matching root target.\")\n quadratic_model.addConstr(nested_s_var - root_var <= 0, name=\"Matching nested source implies matching root source.\")\n \n # Call update if you need to examine the model because optimization\n quadratic_model.update()\n \n # Loop over the source keys\n for s_key in s_keys:\n # Sum source variables constraints, It must be <= 1\n total = sum(s[0] for s in source_vars[s_key])\n #print('TOTAL:', total)\n # Add constraint: \n quadratic_model.addConstr(total <= 1, name=\"Each source key can be matched to at most one target key.\")\n\n\n # Objective function\n quadratic_model.setObjective(sum(all_vars), GRB.MAXIMIZE)\n quadratic_model.setParam(\"OutputFlag\", False)\n quadratic_model.optimize()\n if quadratic_model.getAttr(GRB.Attr.Status) == GRB.INFEASIBLE:\n quadratic_model.computeIIS()\n quadratic_model.write('iis.ilp')\n quadratic_model.write('model.lp')\n\n return show_matches(source_vars, match_dict)\n \n #print(f\"Optimal objective value: {quadratic_model.objVal}\")\n # Loop over model variables and print their rounded optimum values\n ''' \n for variable in quadratic_model.getVars():\n print(variable.varName, \"=\", bool(round(variable.x)))\n print()\n '''\n\n\n\n\n","repo_name":"jrn1325/Schema_Matching","sub_path":"csm.py","file_name":"csm.py","file_ext":"py","file_size_in_byte":8785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8260488951","text":"'''for i in range(10):\r\n s=input('请输入你的用户名(只能是字母)\\n')\r\n if s.isalpha():\r\n print('格式正确!')\r\n else:\r\n print('格式错误!')'''\r\n\r\ndef coco(n): #函数\r\n if n==1:\r\n return 1\r\n else:\r\n return n*coco(n-1)\r\n\r\nprint(coco(6))\r\nprint('----------------------------------')\r\n\r\nfor i in range(1,7):\r\n print(coco(i))","repo_name":"Anghosts/lovely","sub_path":"学习代码/Test_01/demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72190094332","text":"import matplotlib.pyplot as plt\n\nname = input(\"Type the name of the loan here: \")\nloan_size = float(input(\"Type the size of the loan here: \"))\nmonthly_payment = float(input(\"Type the amount you would like to pay every month: \"))\nyearly_interrest = float(input(\"Type in the yearly interrest here: \"))\n\ntotal = loan_size\ntotal_paid = 0\n\nloan_arr = []\nyear_arr = []\n\nyear = 0\nprint(name + \": \" + str(round(loan_size)))\nyear_arr.append(year)\nloan_arr.append(loan_size)\nwhile loan_size > 0:\n # 
Payment\n payment = monthly_payment * 12\n loan_size = loan_size - payment\n\n # Interrest rate\n interrest = loan_size * (yearly_interrest / 100)\n loan_size = loan_size + interrest\n\n # Total\n total_paid += payment\n if loan_size < 0:\n total_paid += loan_size\n\n # Year\n year += 1\n year_arr.append(year)\n loan_arr.append(loan_size)\n print(\"Year: \" + str(year))\n print(\"Remaining loan: \" + str(round(loan_size)))\n print(\"Total paid: \" + str(round(total_paid)))\n\n# Generate graph\nx=year_arr\ny=loan_arr\nplt.plot(x,y)\nplt.xlabel('Year')\nplt.ylabel('Remaning loan')\nplt.title(name)\nplt.show()\n","repo_name":"toko17/loan_calculator","sub_path":"loan.py","file_name":"loan.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30242924228","text":"\nfrom robot import Robot\nimport time\n\n__author__ = 'stefan'\n\nrobot = Robot()\n\ntry:\n\n while True:\n\n print(\"driving forward\")\n robot.drive_forward(80)\n\n while not robot.sees_obstacle():\n time.sleep(0.1)\n\n print(\"obstacle - turning left\")\n robot.turn_left(100)\n\n # if obstacle is close (2)\n while robot.sees_obstacle() == 2:\n # let it turn a while\n time.sleep(0.25)\n\nexcept KeyboardInterrupt:\n\n robot.stop()\n","repo_name":"wendlers/edubot-nodemcu-fw","sub_path":"remote/python/examples/obstacle_avoider.py","file_name":"obstacle_avoider.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8434255012","text":"import math\nfrom fpdf import FPDF\nimport pandas as pd\n\n# CONFIG\n\n# A4 = 297mm x 210mm\na4_width = 297\na4_height = 210\n\n# size in mm\nbadge_height = 100\nbadge_width = 75\n\nlogo_1_path = \"data/l1.png\"\nlogo_2_path = \"data/l2.png\"\nlogo_3_path = \"data/l3.png\"\n\nfont_path = \"data/Roboto-Regular.ttf\"\n\nevent_name = \"MY SUPER COOL EVENT NAME - 2023\"\n\ndef main():\n global badge_height, badge_width, event_name\n\n # badge_height = input_number(\"Enter badge height in mm (Default 100):\", 100)\n # badge_width = input_number(\"Enter badge width in mm (Default 75):\", 75)\n check_valid_size()\n\n # event_name = input(\"Enter event name:\")\n\n customers = pd.read_excel(r'data/names.xlsx')\n\n badges_to_print = len(customers[\"forename\"])\n\n pages_to_print = math.ceil(badges_to_print / int((a4_width-10) / badge_width))\n\n print(\"Pages to print:\", pages_to_print)\n\n badge_number = 0\n pdf_file = FPDF(orientation='L', unit='mm', format='A4')\n pdf_file.add_font('Roboto', '', font_path,\n uni=True) # - family, style, fname, uni(Unicode flag)\n pdf_file.set_display_mode(zoom=\"real\")\n pdf_file.set_font(\"Roboto\")\n\n for page in range(pages_to_print):\n print(\"Page: \", page + 1)\n\n pdf_file.add_page()\n draw_cutting_lines(pdf_file)\n boarder_lr = int(287 % badge_width / 2)\n\n for x in range(5 + boarder_lr, a4_width - boarder_lr - 5, badge_width):\n if badge_number >= badges_to_print:\n break\n pdf_file.rotate(0, a4_width / 2, a4_height / 2)\n draw_badge(pdf_file, x, customers, badge_number)\n pdf_file.rotate(180, a4_width / 2, a4_height / 2)\n draw_badge(pdf_file, a4_width - badge_width - x, customers, badge_number)\n badge_number += 1\n\n pdf_file = pdf_file.output(name=\"generated_badge.pdf\")\n\n\ndef input_number(message, default_value):\n while True:\n try:\n user_input = int(input(message))\n except ValueError:\n print(\"Used default.\", default_value)\n return default_value\n 
else:\n return user_input\n\n\ndef check_valid_size():\n if a4_height - 10 < badge_height * 2:\n raise ValueError\n if (a4_width - 10) < badge_width:\n raise ValueError\n\n\ndef draw_cutting_lines(pdf_file, draw_full=False):\n boarder_lr = int(287 % badge_width / 2)\n if draw_full:\n # draw horizontal lines\n for y in range(5, 210, badge_height):\n pdf_file.line(5, y, a4_width-5, y)\n # draw vertical lines\n for x in range(5 + boarder_lr, a4_width, badge_width):\n pdf_file.line(x, 5, x, a4_height-5)\n else:\n for y in range(5, 210, badge_height):\n pdf_file.line(0, y, boarder_lr, y)\n pdf_file.line(297 - boarder_lr - 5, y, 297, y)\n for x in range(5 + boarder_lr, 298 - boarder_lr, badge_width):\n pdf_file.line(x, 0, x, 5)\n pdf_file.line(x, 210 - 5, x, 210)\n\n\ndef draw_badge(pdf_file, x, customers, badge_number):\n # Read Data\n first_name = customers[\"forename\"][badge_number]\n last_name = customers[\"surname\"][badge_number]\n profession = str(customers[\"profession\"][badge_number])\n company = str(customers[\"company\"][badge_number])\n\n print(\"Badge: \", badge_number + 1, \" - \", first_name, last_name, \" - \", profession, \" - \", company)\n\n # Logo 1\n if logo_1_path is not None:\n pdf_file.image(logo_1_path, x=x + 20, y=5 + 2, w=37)\n\n # Event Name\n pdf_file.set_font_size(size=10)\n pdf_file.set_xy(x=x + 4, y=5 + 25)\n pdf_file.multi_cell(w=badge_width - 6, h=5, txt=event_name, align=\"L\")\n\n # First Name + Last Name\n pdf_file.set_font_size(size=22)\n pdf_file.text(x=x + 5, y=5 + 47, txt=first_name)\n pdf_file.text(x=x + 5, y=5 + 55, txt=last_name)\n\n # Profession + Company\n pdf_file.set_font_size(size=14)\n pdf_file.set_xy(x=x + 4, y=5 + 57)\n pdf_file.multi_cell(w=badge_width - 8, h=5, txt=company, align=\"L\")\n\n # Logo 2\n if logo_2_path is not None:\n pdf_file.set_font_size(size=10)\n pdf_file.text(x=x + 5, y=5 + 72, txt=\"Hosted by\")\n pdf_file.image(logo_2_path, x=x + 3, y=5 + 78, w=33)\n\n # Logo 3\n if logo_3_path is not None:\n pdf_file.image(logo_3_path, x=x + 42, y=5 + 75, w=27)\n\nif __name__ == '__main__':\n main()\n","repo_name":"clFaster/Badge-Generator","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"28261257653","text":"import csv\n\n\ndef read(path):\n print(\"Headings:\")\n with open(path) as file:\n reader = csv.reader(file)\n headings = next(reader)\n print(headings)\n print(\"Values:\")\n for line in reader:\n print(line)\n file.close()\n\n\ndef run():\n read(\"bots.csv\")\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"boitzenburgeruk/com411","sub_path":"data/files/csv/read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6761871121","text":"import os\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom database.data_base import DataBase\n\n\nclass Cleaner(object):\n\n def __init__(self, db_name=\"db.sqlite3\", window=100, log_dir=\"./cleaner_log\"):\n self.db = DataBase(db_name=db_name)\n self.window = window\n self.log_dir = log_dir\n try:\n os.stat(log_dir)\n except:\n os.mkdir(log_dir)\n\n def remove_duplicates_from_dir(self, dir_name):\n \"\"\"\n Remove image duplicates from dir_name folder\n\n :param dir_name:\n :return:\n \"\"\"\n\n print(\"Removing duplicates from {0}\".format(dir_name))\n log_file_path = 
os.path.join(self.log_dir, \"{0}.log\".format(dir_name))\n\n        img_dict = {}\n        for f in tqdm(os.listdir(dir_name)):\n            img_dict[f] = np.array(Image.open(os.path.join(dir_name, f)))\n\n        for f in tqdm(os.listdir(dir_name)):\n            n = int(f.split(\"_\")[0])\n\n            for d in os.listdir(dir_name):\n                m = int(d.split(\"_\")[0])\n                diff = m - n\n                if d != f and diff in range(self.window):\n                    if np.array_equal(img_dict[f], img_dict[d]):\n                        os.remove(os.path.join(dir_name, d))\n                        print(\"{0} removed\".format(os.path.join(dir_name, d)))\n\n                        with open(log_file_path, \"w+\") as log_file:\n                            log_file.write(d)\n\n    def remove_duplicates_from_db(self, dir_name):\n        \"\"\"\n        Remove image duplicates from database\n        (from all the tables)\n        image names file constructed on dir_name folder\n\n        :param dir_name:\n        :return:\n        \"\"\"\n\n        log_file_path = os.path.join(self.log_dir, \"{0}.log\".format(dir_name))\n        with open(log_file_path, \"r\") as log_file:\n            # strip trailing newlines so the stored names match the database entries\n            img_names = [line.strip() for line in log_file.readlines()]\n\n        table_names = self.db.get_table_names()\n\n        for img_name in img_names:\n            for table_name in table_names:\n                self.db.delete_by_name(table_name=table_name, img_name=img_name)\n                print(\"{0} removed from {1}\".format(img_name, table_name))\n\n","repo_name":"nataliaklochko/smartproject","sub_path":"database/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13652316949","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom keras import datasets,layers,Sequential,metrics\n\n(x,y),(x_test,y_test) = datasets.mnist.load_data()\nprint(x.shape,y.shape)\nbatch_size = 128\ndb = tf.data.Dataset.from_tensor_slices((x,y))\ndb_test = tf.data.Dataset.from_tensor_slices((x_test,y_test))\ndef pro(x,y):\n    x = tf.cast(x,tf.float32)/255.\n    x = tf.reshape(x,[-1,28*28])\n    x = tf.squeeze(x)\n    y = tf.cast(y,tf.int32)\n    y = tf.one_hot(y,depth=10)\n    return x,y\ndb = db.map(pro).shuffle(1000).batch(batch_size)\ndb_test = db_test.map(pro).batch(batch_size)\ndb_iter = iter(db)\nsample = next(db_iter)\nsample = next(db_iter)\nsample = next(db_iter)\nsample = next(db_iter)\nsample = next(db_iter)\nsample = next(db_iter)\nsample = next(db_iter)\nprint(sample[0].shape,sample[1].shape)\nprint(sample[0])\n\nmodel = Sequential([\nlayers.Dense(256,activation=tf.nn.relu),\nlayers.Dense(128,activation=tf.nn.relu),\nlayers.Dense(64,activation=tf.nn.relu),\nlayers.Dense(10)\n])\nmodel.build(input_shape=[None,784])\n# Initialize the model parameters\nmodel.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),loss=tf.losses.CategoricalCrossentropy(from_logits=True),metrics=['accuracy'])\n# Train the model\nmodel.fit(db,epochs=20,validation_data=db_test,validation_steps=1)\n# Evaluate the model\nmodel.evaluate(db_test)\n# Save the model weights\nmodel.save_weights('./checkpaoint/myw')\ndel model\nmodel = Sequential([\nlayers.Dense(256,activation=tf.nn.relu),\nlayers.Dense(128,activation=tf.nn.relu),\nlayers.Dense(64,activation=tf.nn.relu),\nlayers.Dense(10)\n])\nmodel.build(input_shape=[None,784])\nmodel.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),loss=tf.losses.CategoricalCrossentropy(from_logits=True),metrics=['accuracy'])\n# Load the saved weights\nmodel.load_weights('./checkpaoint/myw')\nacc = model.evaluate(db_test)\nmodel.save(\"./cc/yy\")","repo_name":"wangofcong/TensorFlow","sub_path":"cfep.py","file_name":"cfep.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17528975476","text":"import sys, 
time, requests\n\ndef loading_dots():\n\t\"\"\"\n\tPrints three loading dots.\n\t\"\"\"\n\n\tfor i in range(3):\n\t\tprint(\".\", end=\"\")\n\t\tsys.stdout.flush()\n\t\ttime.sleep(0.75)\n\n\tfor j in range(3):\n\t\tprint(\"\\b \", end = \"\\b\")\n\t\tsys.stdout.flush()\n\t\ttime.sleep(0.75)\n\n\ndef main():\n\t\"\"\"\n\tWaits for flask webserver to start, and prints loading dots in the meantime.\n\t\"\"\"\n\n\ti = 0\n\twhile True:\n\t\ttry:\n\t\t\trequests.get(\"http://127.0.0.1:5000/\")\n\t\texcept requests.exceptions.ConnectionError as e:\n\t\t\tif i == 0:\n\t\t\t\tprint(\"Waiting for flask webserver to start\", end = \"\")\n\t\t\t\ti = 1\n\t\t\tloading_dots()\n\t\telse:\n\t\t\tbreak\n\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tsys.exit(1)","repo_name":"IBM/multicloud-incident-response-navigator","sub_path":"run/wait.py","file_name":"wait.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"18873152338","text":"x=5\nwhile(x<5):\n print(x)\n x+=1\n print(x)\ntotal=0\nnum=20\nwhile (num<=25):\n total=total+num\n num=num+1\nprint(\"Value of the total from the while loop\",total)","repo_name":"Vipulhere/Python-practice-Code","sub_path":"Module 3/8.1 while loop.py","file_name":"8.1 while loop.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"37012715416","text":"from cobra.io import write_sbml_model, read_sbml_model\n\nmodel_name = 'iAF1260_mutant_delta_f6p_c'\nmodel_path_sbml = f'./genome/Mutants/{model_name}.xml'\nmodel_path_yaml= f'./genome/{model_name}/'\n\ndef generate_mutant(path=model_path_sbml):\n\n model = read_sbml_model(path)\n\n for i in [i.id for i in model.metabolites.f6p_c.reactions]:\n reaction = model.reactions.get_by_id(i)\n model.remove_reactions([reaction])\n return model\n\ndef export_model(model ,path='./genome/Mutants/', filename=model_name):\n write_sbml_model(model, f'{path}{filename}.xml')\n\nif __name__ == '__main__':\n mutant = generate_mutant()\n export_model(mutant)","repo_name":"SergioPachonDotor/OptKnock-Threonine_Prod","sub_path":"Control_model_for_gapfind_gapfill.py","file_name":"Control_model_for_gapfind_gapfill.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37224603960","text":"import sys\nimport PConstant\nimport PConfig\nimport ElasticSearchClient\nimport ESDatabaseMetaStore\nimport logging\nimport json\n\nclass ElasticSearchDriver(object):\n\n def __init__(self, indexname, typename):\n self.es = ElasticSearchClient.ElasticSearchClient()\n self.esdb = ESDatabaseMetaStore.ESDatabaseMetaStore()\n self.indexname = indexname\n self.typename = typename\n self._flogger()\n\n def createtable(self, schemanamecount):\n request_body = self.esdb.get_request_body(schemanamecount)\n self._logger.info(\"Create Table ' %s '\", self.indexname)\n self.es.create_index(self.indexname, request_body) \n\n def pushbulkdata(self, bulk_data):\n self._logger.info(\"Collecting Data (%d)\",len(bulk_data))\n self.es.add_bulk_document(self.indexname, bulk_data)\n\n def pushdata(self, data_dict):\n self._logger.info(\"Pushing data ...\")\n self.es.add_document(self.indexname,self.typename, data_dict, data_dict.keys()) \n\n def searchresult(self):\n self._logger.info(\"Searching results ... 
index=%s type=%s\", self.indexname, self.typename)\n queryhandle = {}\n res = self.es.search_result(self.indexname, self.typename, queryhandle)\n resultlist = []\n for doc in res['hits']['hits']:\n resultmap = {}\n resultmap.setdefault('_id', doc['_id'])\n resultmap.setdefault('_source', doc['_source'])\n resultlist.append(resultmap)\n return resultlist\n\n\n def searchresult(self, query):\n self._logger.info(\"Searching results ... index=%s type=%s\", self.indexname, self.typename)\n self._logger.info(\"Query '%s'\", query)\n #queryhandle = { 'query' : json.loads(query) }\n #jquery = json.loads(query)\n #queryhandle = {\"query\": {\"match\": {\"body\": \"McGrath\"}}}\n queryhandle = {\"query\": {\"match\": query}}\n self._logger.info(\"Queryi Handle '%s'\", json.dumps(queryhandle))\n res = self.es.search_result(self.indexname, self.typename, queryhandle)\n resultlist = []\n for doc in res['hits']['hits']:\n resultmap = {}\n resultmap.setdefault('_id', doc['_id'])\n resultmap.setdefault('_source', doc['_source']) \n resultlist.append(resultmap)\n return resultlist\n\n def scanresult(self, query):\n\n #queryhandle = {\"query\": {\"match\": query}}\n queryhandle = query\n pagelist = self.es.scan_result_sync(self.indexname, self.typename, query)\n resultlist = []\n\n self._logger.info(\"Scan Handle '%s' '%s' '%s'\", self.indexname, self.typename, queryhandle)\n for page in pagelist:\n for doc in page:\n resultmap = {}\n resultmap.setdefault('_id', doc['_id'])\n resultmap.setdefault('_source', doc['_source'])\n resultlist.append(resultmap)\n return resultlist\n\n\n def _flogger(self):\n\n self._logger = logging.getLogger('ElasticSearchDriver')\n self._logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n self._logger.addHandler(ch)\n\n","repo_name":"somilasthana/esearch","sub_path":"ElasticSearchDriver.py","file_name":"ElasticSearchDriver.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18596103789","text":"from numbers import Integral\nfrom typing import List, Union, Dict, Any\n\n\nclass Parser:\n \"\"\" Parser base class.\n\n The attributes listed below make up a public interface common to all parsers. 
They can be accessed directly\n once the dataset is constructed and annotations are populated.\n\n Attributes:\n\n cat_names (list[str]):\n list of category (class) names, with background class at position 0.\n cat_ids (list[union[str, int]):\n list of dataset specific, unique integer or string category ids, does not include background\n cat_id_to_label (dict):\n map from category id to integer 1-indexed class label\n\n img_ids (list):\n list of dataset specific, unique image ids corresponding to valid samples in dataset\n img_ids_invalid (list):\n list of image ids corresponding to invalid images, not used as samples\n img_infos (list[dict]):\n image info, list of info dicts with filename, width, height for each image sample\n \"\"\"\n def __init__(\n self,\n bbox_yxyx: bool = False,\n has_labels: bool = True,\n include_masks: bool = False,\n include_bboxes_ignore: bool = False,\n ignore_empty_gt: bool = False,\n min_img_size: int = 32,\n ):\n \"\"\"\n Args:\n yxyx (bool): output coords in yxyx format, otherwise xyxy\n has_labels (bool): dataset has labels (for training validation, False usually for test sets)\n include_masks (bool): include segmentation masks in target output (not supported yet for any dataset)\n include_bboxes_ignore (bool): include ignored bbox in target output\n ignore_empty_gt (bool): ignore images with no ground truth (no negative images)\n min_img_size (bool): ignore images with width or height smaller than this number\n sub_sample (int): sample every N images from the dataset\n \"\"\"\n # parser config, determines how dataset parsed and validated\n self.yxyx = bbox_yxyx\n self.has_labels = has_labels\n self.include_masks = include_masks\n self.include_bboxes_ignore = include_bboxes_ignore\n self.ignore_empty_gt = ignore_empty_gt\n self.min_img_size = min_img_size\n self.label_offset = 1\n\n # Category (class) metadata. Populated by _load_annotations()\n self.cat_names: List[str] = []\n self.cat_ids: List[Union[str, Integral]] = []\n self.cat_id_to_label: Dict[Union[str, Integral], Integral] = dict()\n\n # Image metadata. 
Populated by _load_annotations()\n self.img_ids: List[Union[str, Integral]] = []\n self.img_ids_invalid: List[Union[str, Integral]] = []\n self.img_infos: List[Dict[str, Any]] = []\n\n @property\n def cat_dicts(self):\n \"\"\"return category names and labels in format compatible with TF Models Evaluator\n list[dict(name=, id=)]\n \"\"\"\n return [\n dict(\n name=name,\n id=cat_id if not self.cat_id_to_label else self.cat_id_to_label[cat_id]\n ) for name, cat_id in zip(self.cat_names, self.cat_ids)]\n\n @property\n def max_label(self):\n if self.cat_id_to_label:\n return max(self.cat_id_to_label.values())\n else:\n assert len(self.cat_ids) and isinstance(self.cat_ids[0], Integral)\n return max(self.cat_ids)\n","repo_name":"rwightman/efficientdet-pytorch","sub_path":"effdet/data/parsers/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":1526,"dataset":"github-code","pt":"78"} +{"seq_id":"6210934982","text":"from __future__ import print_function\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\r\nfrom sklearn.metrics import accuracy_score, balanced_accuracy_score\r\nfrom pointnet2_ops_lib.pointnet2_ops import pointnet2_utils\r\n# custom module\r\nfrom model import DGCNN_cls\r\nfrom util import cal_loss, IOStream\r\nfrom data import ScanObject_coseg, TrainingBatchSampler\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 2, 3, 4\"\r\n\r\n\r\ndef _init_():\r\n if not os.path.exists('checkpoints'):\r\n os.makedirs('checkpoints')\r\n if not os.path.exists('checkpoints/'+args.path):\r\n os.makedirs('checkpoints/'+args.path)\r\n if not os.path.exists('checkpoints/'+args.path+'/'+'models'):\r\n os.makedirs('checkpoints/'+args.path+'/'+'models')\r\n os.system('cp exp_classification.py checkpoints'+'/' +\r\n args.path+'/'+'exp_classification.py.backup')\r\n os.system('cp model.py checkpoints' + '/' +\r\n args.path + '/' + 'model.py.backup')\r\n os.system('cp util.py checkpoints' + '/' +\r\n args.path + '/' + 'util.py.backup')\r\n os.system('cp data.py checkpoints' + '/' +\r\n args.path + '/' + 'data.py.backup')\r\n\r\n\r\ndef train(args, io, model, train_loader, opt):\r\n # I will NEVER train the model on a CPU so I simply set \"cuda\"\r\n device = torch.device(\"cuda\")\r\n\r\n criterion = cal_loss\r\n train_loss = 0.0\r\n count = 0.0\r\n model.train()\r\n train_pred = []\r\n train_true = []\r\n for data, label, mask in train_loader:\r\n # load data\r\n data, label = data.to(device), label.to(device).squeeze()\r\n data = data.permute(0, 2, 1)\r\n batch_size = data.size()[0]\r\n\r\n # forward propagation and back propagation\r\n opt.zero_grad()\r\n logits = model(data)\r\n loss = criterion(logits, label)\r\n loss.backward()\r\n opt.step()\r\n\r\n # record results\r\n preds = logits.max(dim=1)[1]\r\n count += batch_size\r\n train_loss += loss.item() * batch_size\r\n train_true.append(label.cpu().numpy())\r\n train_pred.append(preds.detach().cpu().numpy())\r\n\r\n # record results\r\n train_true = np.concatenate(train_true)\r\n train_pred = np.concatenate(train_pred)\r\n io.cprint('Train '\r\n 'loss: %.6f, '\r\n 'train acc: %.6f, '\r\n 'train avg acc: %.6f'\r\n % (train_loss / count,\r\n accuracy_score(train_true, train_pred),\r\n balanced_accuracy_score(train_true, train_pred)))\r\n\r\n return\r\n\r\n\r\ndef test(args, io, model, 
test_loader, test_name,n_point):\r\n with torch.no_grad():\r\n device = torch.device(\"cuda\")\r\n model = model.eval()\r\n\r\n # initialize the parameters\r\n criterion = cal_loss\r\n test_loss = 0.0\r\n count = 0.0\r\n model.eval()\r\n test_pred = []\r\n test_true = []\r\n for data, label, mask in test_loader:\r\n # load data\r\n data, label = data.to(device), label.to(device).squeeze()\r\n #Farest point sample\r\n #data: (B*N*3) => (B*n_point*3)\r\n data_flipped = data.transpose(1, 2).contiguous()\r\n\r\n data = (\r\n pointnet2_utils.gather_operation(\r\n data_flipped, pointnet2_utils.furthest_point_sample(data, n_point)\r\n )\r\n .transpose(1, 2)\r\n .contiguous()\r\n if n_point is not None\r\n else None\r\n )\r\n\r\n data = data.permute(0, 2, 1)\r\n batch_size = data.size()[0]\r\n\r\n # predict\r\n logits = model(data)\r\n loss = criterion(logits, label)\r\n preds = logits.max(dim=1)[1]\r\n\r\n # record results\r\n count += batch_size\r\n test_loss += loss.item() * batch_size\r\n test_true.append(label.cpu().numpy())\r\n test_pred.append(preds.detach().cpu().numpy())\r\n\r\n # record results\r\n test_true = np.concatenate(test_true)\r\n test_pred = np.concatenate(test_pred)\r\n test_acc = accuracy_score(test_true, test_pred)\r\n avg_per_class_acc = balanced_accuracy_score(test_true, test_pred)\r\n\r\n io.cprint(' * %s '\r\n 'loss: %.6f, '\r\n 'test acc: %.6f, '\r\n 'test avg acc: %.6f'\r\n % (test_name,\r\n test_loss / count,\r\n test_acc,\r\n avg_per_class_acc))\r\n\r\n return test_acc\r\n\r\n\r\ndef experiment(n_points_choices, path):\r\n args.n_points_choices = n_points_choices\r\n args.path = path\r\n\r\n # make path\r\n _init_()\r\n\r\n # record args\r\n io = IOStream('checkpoints/' + args.path + '/run.log')\r\n io.cprint(str(args))\r\n\r\n io.cprint('Using GPU : ' + str(torch.cuda.current_device()) +\r\n ' from ' + str(torch.cuda.device_count()) + ' devices')\r\n\r\n # set seeds\r\n torch.manual_seed(1)\r\n torch.cuda.manual_seed(1)\r\n\r\n # initialize train_loader\r\n train_dataset = ScanObject_coseg(partition='training')\r\n train_sampler = TrainingBatchSampler(train_dataset,\r\n args.n_points_choices,\r\n args.batch_size)\r\n train_loader = DataLoader(train_dataset,\r\n batch_sampler=train_sampler,\r\n num_workers=32)\r\n\r\n # initialize test data loaders\r\n test_dataset256 = ScanObject_coseg(partition='test', n_points=256)\r\n test_loader256 = DataLoader(test_dataset256, num_workers=16,\r\n batch_size=args.test_batch_size)\r\n test_dataset512 = ScanObject_coseg(partition='test', n_points=512)\r\n test_loader512 = DataLoader(test_dataset512, num_workers=16,\r\n batch_size=args.test_batch_size)\r\n test_dataset1024 = ScanObject_coseg(partition='test', n_points=1024)\r\n test_loader1024 = DataLoader(test_dataset1024, num_workers=16,\r\n batch_size=args.test_batch_size)\r\n test_dataset2048 = ScanObject_coseg(partition='test', n_points=2048)\r\n test_loader2048 = DataLoader(test_dataset2048, num_workers=16,\r\n batch_size=args.test_batch_size)\r\n\r\n # Load models\r\n device = torch.device(\"cuda\")\r\n model = DGCNN_cls(args).to(device)\r\n model = nn.DataParallel(model)\r\n\r\n # Use SGD and CosineAnnealingLR to train\r\n print(\"Use SGD\")\r\n opt = optim.SGD(model.parameters(), lr=args.lr,\r\n momentum=args.momentum, weight_decay=1e-4)\r\n scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)\r\n\r\n # start training\r\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\r\n for i in range(args.epochs):\r\n io.cprint('Epoch [%d]' % (i + 1))\r\n # train 
model\r\n        train(args, io, model, train_loader, opt)\r\n\r\n        # adjust learning rate\r\n        scheduler.step()\r\n\r\n        # test\r\n        test(args, io, model, test_loader256, 'Test 256 ',256)\r\n        test(args, io, model, test_loader512, 'Test 512 ',512)\r\n        test(args, io, model, test_loader1024, 'Test 1024',1024)\r\n        test(args, io, model, test_loader2048, 'Test 2048',2048)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    # Training settings\r\n    parser = argparse.ArgumentParser(description='Point Cloud Recognition')\r\n    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',\r\n                        help='Name of the experiment')\r\n    parser.add_argument('--dataset', type=str, default='scanobjectnn', metavar='N',\r\n                        choices=['scanobjectnn'])\r\n    parser.add_argument('--batch_size', type=int, default=128, metavar='batch_size',\r\n                        help='Size of batch')\r\n    parser.add_argument('--test_batch_size', type=int, default=32, metavar='batch_size',\r\n                        help='Size of batch')\r\n    parser.add_argument('--epochs', type=int, default=250, metavar='N',\r\n                        help='number of episode to train ')\r\n    parser.add_argument('--lr', type=float, default=0.1, metavar='LR',\r\n                        help='learning rate')\r\n    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\r\n                        help='SGD momentum (default: 0.9)')\r\n    parser.add_argument('--dropout', type=float, default=0.5,\r\n                        help='initial dropout rate')\r\n    parser.add_argument('--emb_dims', type=int, default=1024, metavar='N',\r\n                        help='Dimension of embeddings')\r\n    parser.add_argument('--k', type=int, default=20, metavar='N',\r\n                        help='Num of nearest neighbors to use')\r\n    args = parser.parse_args()\r\n\r\n    experiment([256], 'train256')\r\n    experiment([512], 'train512')\r\n    experiment([1024], 'train1024')\r\n    experiment([2048], 'train2048')\r\n    experiment([256, 512, 1024, 2048], 'train_mix')\r\n","repo_name":"KaivinC/dgcnn-experiment","sub_path":"exp_classification.py","file_name":"exp_classification.py","file_ext":"py","file_size_in_byte":9051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"971635433","text":"\"\"\"File to grab entries from gradebook.db and put them in a readable csv format\"\"\"\nimport csv\nimport sqlite3\n\n\ndef main():\n    db_fname = \"gradebook.db\"\n    conn = sqlite3.connect(db_fname)\n    cur = conn.cursor()\n    data = cur.execute(\"SELECT * FROM GRADES;\")\n    with open('grades.csv', 'w') as f:\n        writer = csv.writer(f)\n        writer.writerow(['userid', 'grade', 'section', 'lab', 'timestamp'])\n        writer.writerows(data)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"data-8/gofer_service","sub_path":"dump_grades.py","file_name":"dump_grades.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"70425421051","text":"import os\n\nfrom module import file_util, container_util\nfrom document import Document\n\n'''\n    Class that computes the TF and IDF of documents\n'''\n\nclass TFIDFModel() :\n    # Field & Member Variable\n\n    # Constructor\n\n    def __init__(self):\n        self.encoding = 'UTF-8'\n        \n        self.documents = []\n        self.df_dict = {}\n        # encoding: Unicode encoding setting\n        # documents = stored as a list of file objects\n        # check whether each syllable appears in the loaded files and count how often\n        \n    '''\n    Method\n    1. Load files\n    '''\n\n    '''\n    1. 
Load files\n    '''\n    def file_load(self, input_path, output_path, encoding):\n        file_paths = file_util.get_file_paths(input_path, False)\n        \n        for file_path in file_paths:\n            # load the Document class and build a tf_dict for each document\n            document = Document()\n            document.load(file_path, encoding, self.df_dict, output_path)\n            \n            # append to the list, because the document object is discarded once the for loop ends\n            self.documents.append(document)\n        \n        self.save_dict(output_path) \n        self.print_dict()\n        \n    def save_dict(self, output_path):\n        output_path = os.path.join(output_path, 'df_dict.dict')\n        self.df_dict = container_util.sorted_dict(self.df_dict)\n        file_util.open_file(output_path, encoding, 'w')\n        file_util.write_dict(output_path, encoding, self.df_dict, ' : ')\n        \n        \n    def print_dict(self):\n        print('df_dict len :', len(self.df_dict))\n        for term in self.df_dict:\n            print(f'\\t{term} : {self.df_dict[term]}')\n        print()\n        \n        \n        \n# main\nif __name__ == \"__main__\" :\n    input_path = 'input_file'\n    output_path = 'output_file'\n    encoding = 'UTF-8'\n    \n    tfidf_model = TFIDFModel()\n    tfidf_model.file_load(input_path, output_path, encoding)","repo_name":"Wildturkeyy/Russells","sub_path":"Python/NLP_Study/02.TFIDF_Model/02.tfidf_second/source/TFIDFModel.py","file_name":"TFIDFModel.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"167201619","text":"import torch\nfrom torch.nn.functional import grid_sample\n\n\ndef back_project(coords, origin, voxel_size, feats, KRcam):\n    '''\n    Unproject the image features to form a 3D (sparse) feature volume\n\n    :param coords: coordinates of voxels,\n    dim: (num of voxels, 4) (4 : batch ind, x, y, z)\n    :param origin: origin of the partial voxel volume (xyz position of voxel (0, 0, 0))\n    dim: (batch size, 3) (3: x, y, z)\n    :param voxel_size: floats specifying the size of a voxel\n    :param feats: image features\n    dim: (num of views, batch size, C, H, W)\n    :param KRcam: projection matrix\n    dim: (num of views, batch size, 4, 4)\n    :return: feature_volume_all: 3D feature volumes\n    dim: (num of voxels, c + 1)\n    :return: count: number of times each voxel can be seen\n    dim: (num of voxels,)\n    '''\n    n_views, bs, c, h, w = feats.shape\n\n    feature_volume_all = torch.zeros(coords.shape[0], c + 1, device=feats.device)\n    count = torch.zeros(coords.shape[0], device=feats.device)\n\n    for batch in range(bs):\n        batch_ind = torch.nonzero(coords[:, 0] == batch, as_tuple=False).squeeze(1)\n        coords_batch = coords[batch_ind][:, 1:]\n\n        coords_batch = coords_batch.view(-1, 3)\n        origin_batch = origin[batch].unsqueeze(0)\n        feats_batch = feats[:, batch]\n        proj_batch = KRcam[:, batch]\n\n        grid_batch = coords_batch * voxel_size + origin_batch.float()\n        rs_grid = grid_batch.unsqueeze(0).expand(n_views, -1, -1)\n        rs_grid = rs_grid.permute(0, 2, 1).contiguous()\n        nV = rs_grid.shape[-1]\n        rs_grid = torch.cat([rs_grid, torch.ones([n_views, 1, nV], device=feats.device)], dim=1)\n\n        # Project grid\n        im_p = proj_batch @ rs_grid\n        im_x, im_y, im_z = im_p[:, 0], im_p[:, 1], im_p[:, 2]\n        im_x = im_x / im_z\n        im_y = im_y / im_z\n\n        im_grid = torch.stack([2 * im_x / (w - 1) - 1, 2 * im_y / (h - 1) - 1], dim=-1)\n        mask = im_grid.abs() <= 1\n        mask = (mask.sum(dim=-1) == 2) & (im_z > 0)\n\n        feats_batch = feats_batch.view(n_views, c, h, w)\n        im_grid = im_grid.view(n_views, 1, -1, 2)\n        features = grid_sample(feats_batch, im_grid, padding_mode='zeros', align_corners=True)\n\n        features = features.view(n_views, c, -1)\n        mask = mask.view(n_views, -1)\n        im_z = 
im_z.view(n_views, -1)\n # remove nan\n features[mask.unsqueeze(1).expand(-1, c, -1) == False] = 0\n # features = features * mask.unsqueeze(1).float()\n\n im_z[mask == False] = 0\n\n count[batch_ind] = mask.sum(dim=0).float()\n\n # aggregate multi view\n features = features.sum(dim=0)\n mask = mask.sum(dim=0)\n invalid_mask = mask == 0\n mask[invalid_mask] = 1\n in_scope_mask = mask.unsqueeze(0)\n features /= in_scope_mask\n features = features.permute(1, 0).contiguous()\n\n # concat normalized depth value\n im_z = im_z.sum(dim=0).unsqueeze(1) / in_scope_mask.permute(1, 0).contiguous()\n im_z_mean = im_z[im_z > 0].mean()\n im_z_std = torch.norm(im_z[im_z > 0] - im_z_mean) + 1e-5\n im_z_norm = (im_z - im_z_mean) / im_z_std\n im_z_norm[im_z <= 0] = 0\n features = torch.cat([features, im_z_norm], dim=1)\n\n feature_volume_all[batch_ind] = features\n\n return feature_volume_all, count\n","repo_name":"neu-vi/PlanarRecon","sub_path":"ops/back_project.py","file_name":"back_project.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"78"} +{"seq_id":"7586377643","text":"import astropy\nimport pyspeckit\ntry:\n from astropy.io import fits as pyfits\nexcept ImportError:\n import pyfits\nimport numpy as np\n\n# Load the spectrum\nsp = pyspeckit.Cube('region5_hcn_crop.fits')\nerrmap = pyfits.getdata('region5.hcn.errmap.fits')\n\n# Register the fitter\n# The N2H+ fitter is 'built-in' but is not registered by default; this example\n# shows how to register a fitting procedure\n# 'multi' indicates that it is possible to fit multiple components and a background will not automatically be fit\n# 4 is the number of parameters in the model (excitation temperature, optical depth, line center, and line width)\nsp.Registry.add_fitter('hcn_amp',pyspeckit.models.hcn.hcn_amp,3)\n\n# Run the fitter\nsp.mapplot()\n\n# use an individual spectrum selected semi-arbitrarily from the map to get an estimate of the error\n# this method has been rendered obsolete - use the error map instead\n#s = sp.get_spectrum(20,20)\n#s.plotter()\n#s.Registry = sp.Registry\n#s.specfit.Registry = sp.Registry\n#s.specfit(fittype='hcn_amp',guesses=[2.5,-5.6,1.5],show_components=True,debug=True,quiet=False)\n#s.specfit(fittype='hcn_amp',guesses=[2.5,-5.6,1.5],show_components=True,debug=True,quiet=False)\n#sp.error = s.specfit.errspec\n\n\n# Compute the moments at each position to come up with reasonable guesses.\n# This speeds up the process enormously, but can easily mess up the fits if\n# there are bad pixels\nsp.momenteach(vheight=False, verbose=False)\nsp.momentcube[2,:,:] /= 2.5 # the HCN line profile makes the fitter assume a 2.5x too large line\nsp.fiteach(fittype='hcn_amp', errmap=errmap,\n guesses=[1.0,-5.6,1.5], verbose_level=2, signal_cut=4,\n usemomentcube=True, blank_value=np.nan, verbose=False,\n direct=True, multicore=4)\n\n# steal the header from the error map\nf = pyfits.open('region5.hcn.errmap.fits')\n# start replacing components of the pyfits object\nf[0].data = np.concatenate([sp.parcube,sp.errcube,sp.integralmap])\nf[0].header['PLANE1'] = 'amplitude'\nf[0].header['PLANE2'] = 'velocity'\nf[0].header['PLANE3'] = 'sigma'\nf[0].header['PLANE4'] = 'err_amplitude'\nf[0].header['PLANE5'] = 'err_velocity'\nf[0].header['PLANE6'] = 'err_sigma'\nf[0].header['PLANE7'] = 'integral'\nf[0].header['PLANE8'] = 'integral_error'\nf[0].header['CDELT3'] = 1\nf[0].header['CTYPE3'] = 'FITPAR'\nf[0].header['CRVAL3'] = 0\nf[0].header['CRPIX3'] = 1\n# save your 
+{"seq_id":"7586377643","text":"import astropy\nimport pyspeckit\ntry:\n    from astropy.io import fits as pyfits\nexcept ImportError:\n    import pyfits\nimport numpy as np\n\n# Load the spectrum\nsp = pyspeckit.Cube('region5_hcn_crop.fits')\nerrmap = pyfits.getdata('region5.hcn.errmap.fits')\n\n# Register the fitter\n# The N2H+ fitter is 'built-in' but is not registered by default; this example\n# shows how to register a fitting procedure\n# 'multi' indicates that it is possible to fit multiple components and a background will not automatically be fit\n# 4 is the number of parameters in the model (excitation temperature, optical depth, line center, and line width)\nsp.Registry.add_fitter('hcn_amp',pyspeckit.models.hcn.hcn_amp,3)\n\n# Run the fitter\nsp.mapplot()\n\n# use an individual spectrum selected semi-arbitrarily from the map to get an estimate of the error\n# this method has been rendered obsolete - use the error map instead\n#s = sp.get_spectrum(20,20)\n#s.plotter()\n#s.Registry = sp.Registry\n#s.specfit.Registry = sp.Registry\n#s.specfit(fittype='hcn_amp',guesses=[2.5,-5.6,1.5],show_components=True,debug=True,quiet=False)\n#s.specfit(fittype='hcn_amp',guesses=[2.5,-5.6,1.5],show_components=True,debug=True,quiet=False)\n#sp.error = s.specfit.errspec\n\n\n# Compute the moments at each position to come up with reasonable guesses.\n# This speeds up the process enormously, but can easily mess up the fits if\n# there are bad pixels\nsp.momenteach(vheight=False, verbose=False)\nsp.momentcube[2,:,:] /= 2.5 # the HCN line profile makes the fitter assume a 2.5x too large line\nsp.fiteach(fittype='hcn_amp', errmap=errmap,\n           guesses=[1.0,-5.6,1.5], verbose_level=2, signal_cut=4,\n           usemomentcube=True, blank_value=np.nan, verbose=False,\n           direct=True, multicore=4)\n\n# steal the header from the error map\nf = pyfits.open('region5.hcn.errmap.fits')\n# start replacing components of the pyfits object\nf[0].data = np.concatenate([sp.parcube,sp.errcube,sp.integralmap])\nf[0].header['PLANE1'] = 'amplitude'\nf[0].header['PLANE2'] = 'velocity'\nf[0].header['PLANE3'] = 'sigma'\nf[0].header['PLANE4'] = 'err_amplitude'\nf[0].header['PLANE5'] = 'err_velocity'\nf[0].header['PLANE6'] = 'err_sigma'\nf[0].header['PLANE7'] = 'integral'\nf[0].header['PLANE8'] = 'integral_error'\nf[0].header['CDELT3'] = 1\nf[0].header['CTYPE3'] = 'FITPAR'\nf[0].header['CRVAL3'] = 0\nf[0].header['CRPIX3'] = 1\n# save your work\nif astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):\n    f.writeto('region5.hcn.nosmooth.fit.fits', overwrite=True)\nelse:\n    f.writeto('region5.hcn.nosmooth.fit.fits', clobber=True)\n","repo_name":"pyspeckit/pyspeckit","sub_path":"examples/hcn_cube_test.py","file_name":"hcn_cube_test.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"78"}
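Annotation: the version check at the end of the pyspeckit example above can be wrapped once so callers don't repeat it. `safe_writeto` is a hypothetical helper name, not pyspeckit or astropy API.

import astropy

def safe_writeto(hdulist, filename):
    # astropy 1.3 renamed the `clobber` keyword of writeto() to `overwrite`
    new_api = astropy.version.major >= 2 or (
        astropy.version.major == 1 and astropy.version.minor >= 3)
    if new_api:
        hdulist.writeto(filename, overwrite=True)
    else:
        hdulist.writeto(filename, clobber=True)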
+{"seq_id":"12377178874","text":"import random\nfrom prettytable import PrettyTable\nfrom matplotlib import pyplot as plt\n\n# set principal\nprinciple = float(input('Set the principal: '))\n\n# starting wager\nsw = float(input('Set the initial wager (must be an integer multiple of the smallest chip): '))\n\n# multiplier when loss\nmp = float(input('Set the multiplier applied after a loss: '))\n\n# take profit (fraction of the principal; 1 indicates doubling the principal)\npl = float(input('Set the take-profit line (e.g. enter 0.5 to stop once the principal has grown by 50%): '))\n\ndef play(principle):\n    def bet(_wager):\n        res = random.randrange(2)\n        if res == 0:\n            txn.append(txn[-1] + _wager)\n            return [sw, 'success']\n        elif res == 1:\n            txn.append(txn[-1] - _wager)\n            return [_wager * mp, 'failure']\n\n    txn = [principle]\n    endGame = False\n    wager = sw\n    while (endGame == False):\n        if (txn[-1] < wager):\n            wager = 1\n        data = bet(wager)\n        # print(data[1])\n        wager = data[0]\n        if (txn[-1] == 0):\n            return [txn, 'bankrupt']\n        elif (txn[-1] >= principle * (1 + pl)):\n            return [txn, 'take profit']\n\n# trial\nprofitTimes = 0\nbankruptTimes = 0\ntrials = int(input('Set the number of simulated sessions (1000 recommended): '))\ni = 0\nwhile (i < trials):\n    result = play(principle)\n    # print(len(result[0]))\n    # print(result[1])\n    figStat = None\n    if result[1] == 'take profit':\n        profitTimes += 1\n        figStat = 'success'\n    elif result[1] == 'bankrupt':\n        bankruptTimes += 1\n        figStat = 'failure'\n    plt.plot(result[0])\n    plt.suptitle(f'Change of Balance - Trial {i}',fontsize= 15, fontweight='bold')\n    plt.title(f'{principle}-{sw}-{mp}-{pl}-{i}-{figStat}',fontsize= 10)\n    plt.xlabel('No. of bets')\n    plt.ylabel('Balance')\n    plt.savefig(f'./fig/{principle}-{sw}-{mp}-{pl}-{i}.jpg')\n    plt.clf()\n    i += 1\n\nx = PrettyTable()\nx.field_names = ['Earning Counts', 'Bankruptcy Counts', 'Success Rate', 'Bankruptcy Rate']\nx.add_row([profitTimes,trials-profitTimes,profitTimes/trials,1-profitTimes/trials]) \nprint(x) \n","repo_name":"songyuew/baccarat_double_up_sim","sub_path":"bjl_rev.py","file_name":"bjl_rev.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33336279032","text":"from funkcje import *\n\n# To run this script, open the 'shell' in the bottom-right corner and type 'python stworz_gify.py'.\n# Also have a look at 'funkcje.py'.\n\nkonfiguracja = [\n    [\"wejscie/barabasi_40.csv\", \"siec bezskalowa - 40 wezlow\"],\n    [\"wejscie/erdos_40.csv\", \"siec losowa - 40 wezlow\"]\n]\n\nfor symulacja in konfiguracja: \n    print(symulacja)\n    plik_wejsciowy=symulacja[0]\n    nazwa=symulacja[1]\n    graf = wczytaj_graf(plik_wejsciowy)\n    gamma = 0.1\n    beta = 0.03\n    wynik = symuluj(50, graf, gamma=gamma, beta=beta, poczatkowa_liczba_chorych=4)\n    wynik[\"gamma\"] = gamma\n    wynik[\"beta\"] = beta\n    wynik[\"nazwa\"] = nazwa\n    animuj(graf, wynik, plik_wyjsciowy=nazwa + \".gif\")\n    \n    # rysuj_wykresy_chorych(symulacja[2], \"100 wezlow, sredni stopien: 10\")","repo_name":"kubajal/MDCS-wyklad-replit","sub_path":"stworz_gify.py","file_name":"stworz_gify.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"5983856006","text":"import streamlit as st\nfrom user import session_prompt,ask, append_interaction_to_chat_log\nimport speech_recognition as sr\n\n\nst.set_page_config(page_title=\"Munshi: AI Chatbot\", page_icon=\":guardsman:\", layout=\"wide\")\n\ndef recognize_speech():\n    # Create an instance of the Recognizer class\n    r = sr.Recognizer()\n    \n    # Use the microphone as the audio source\n    with sr.Microphone() as source:\n        # Adjust for ambient noise\n        r.adjust_for_ambient_noise(source)\n        \n        # Ask the user for input\n        print(\"Say something!\")\n        \n        # Listen for the user's input\n        audio = r.listen(source)\n        \n        try:\n            # Recognize speech using Google Speech Recognition\n            query = r.recognize_google(audio)\n            return query\n        except sr.UnknownValueError:\n            print(\"Sorry, I didn't understand that.\")\n        except sr.RequestError as e:\n            print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n        \n        return \"\"\n\n\ndef chat_bot(question):\n    global chat_log\n    #print(\"question=\",question)\n    answer = ask(question, chat_log)\n    chat_log = append_interaction_to_chat_log(question, answer, chat_log)\n    #print(\"answer=\",answer)\n    return answer\n\nchat_log = session_prompt\n\nst.title(\"Munshi: AI Chatbot\")\nst.markdown(\"Chat with Munshi, an AI chatbot powered by OpenAI.\")\n\n# Create a button to allow users to input text using speech\nif st.button(\"Speak\"):\n    query = recognize_speech()\n    st.text_input(\"Query\", value=query)\nelse:\n    # Otherwise, show a regular text input field\n    query = st.text_input(\"Query\")\n\nif query:\n    response = chat_bot(query)\n    st.write(response)\n\n    \n    ","repo_name":"shrey2003/M-GPT","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
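Annotation: Streamlit re-executes the whole script on every widget interaction, so the module-level chat_log in app.py above is rebuilt on each rerun. A hedged sketch (reusing the same session_prompt/ask/append_interaction_to_chat_log helpers from the file) that persists the history in st.session_state instead:

import streamlit as st
from user import session_prompt, ask, append_interaction_to_chat_log

if "chat_log" not in st.session_state:
    st.session_state.chat_log = session_prompt   # initialised once per browser session

def chat_bot(question):
    answer = ask(question, st.session_state.chat_log)
    st.session_state.chat_log = append_interaction_to_chat_log(
        question, answer, st.session_state.chat_log)
    return answer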
+{"seq_id":"3677479595","text":"\"\"\"\n    746. Min Cost Climbing Stairs:\n\n    Each index of the array is a stair, and the i-th stair has a non-negative cost cost[i] (0-indexed).\n    Once you pay the cost of a stair you may climb either one or two stairs further.\n    Find the minimum cost to reach the top of the floor; at the start you may pick either index 0 or index 1 as the first stair.\n\n    Example 1:\n\n\n    Input: [10, 15, 20]\n    Output: 15\n    Explanation: the cheapest start is cost[1]; from there two steps reach the top, for a total cost of 15.\n    Take one step to cost[1]=15, then jump straight over the 20 with a two-step move to the top.\n\n    Example 2:\n\n    Input: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]\n    Output: 6\n    Explanation: the cheapest route starts at cost[0], walks over the 1s one by one and skips cost[3], for a total cost of 6.\n\n    Note:\n\n    The length of cost will be in [2, 1000].\n    Every cost[i] will be an integer in the range [0, 999].\n\n\"\"\"\n\n\n# On reaching each stair, check whether it is cheaper to come from the previous stair\n# or from the one before that, accumulating all the way to the last stair; the final\n# value is the least tiring route. p1 and p2 hold the costs of reaching the stair\n# before last and the last stair, so a single pass is enough.\n\nclass Solution(object):\n    def minCostClimbingStairs(self, cost):\n        p1, p2 = 0, 0\n        for i in range(2, len(cost) + 1):\n            p1, p2 = p2, min(p2 + cost[i - 1], p1 + cost[i - 2])\n            print(p1, p2)\n        return p2\n\n\nif __name__ == \"__main__\":\n    S = Solution()\n    cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]\n    print(S.minCostClimbingStairs(cost))","repo_name":"zzkk007/LeetCode","sub_path":"Easy/array/746_minCostClimbingStairs.py","file_name":"746_minCostClimbingStairs.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
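Annotation: quick sanity checks for the O(1)-space DP above; the recurrence is f(i) = min(f(i-1) + cost[i-1], f(i-2) + cost[i-2]), with p1 and p2 as the rolling pair of previous values.

assert Solution().minCostClimbingStairs([10, 15, 20]) == 15
assert Solution().minCostClimbingStairs([1, 100, 1, 1, 1, 100, 1, 1, 100, 1]) == 6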
+{"seq_id":"7407828959","text":"# One dimensional diffusion system, with four species, A\n# B, C and D, which interact as in Gillespie_AB_system.py\n#\n#     k1           k2\n# A + A -> C   A + B -> D\n#\n# A is created only in 0 < x < 9L/10, and\n# B is created only in 2L/5 < x < L\n#\n# This follows section 4.1 of:\n#\n# \"A Practical Guide to Stochastic Simulations of\n# Reaction-Diffusion Processes\" by Erban et al.\n#\n# The last two panels in the Figure can be compared to\n# Figure 4.1 of Erban et al.\n#\n\nfrom openrxn.systems.ODESystem import ODESystem\nfrom openrxn.systems.GillespieSystem import GillespieSystem\nfrom openrxn.reactions import Reaction, Species\nfrom openrxn.model import Model\nfrom openrxn.compartments.arrays import CompartmentArray1D\nfrom openrxn import unit\nfrom openrxn.systems.reporters import AllReporter, SumReporter, SelectionReporter\nfrom openrxn.connections import IsotropicConnection\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nd = 0.16/unit.sec\nK = 40 # number of compartments\nL = 1*unit.mm\nh = L/K\n\n# define species and reactions\nA = Species('A')\nB = Species('B')\nC = Species('C')\nD = Species('D')\n\n# h is the \"volume\" of the compartment here\n# second order rate coefficients should have units of\n# M^-1 s^-1, or L^d / (s * mol), where L is units of length and\n# d is the dimensionality of the system (here, 1)\n\nconc = 1.0*unit.mol/h\nk1 = 1e-3/unit.sec/conc\nk2 = 1e-2/unit.sec/conc\nk3 = 1.2/unit.sec\nk4 = 1.0/unit.sec\n\nrxns = []\nrxns.append(Reaction('AAC',[A],[C],[2],[1],kf=k1))\nrxns.append(Reaction('ABD',[A,B],[D],[1,1],[1],kf=k2))\nrxns.append(Reaction('birth_A',[],[A],[],[1],kf=k3))\nrxns.append(Reaction('birth_B',[],[B],[],[1],kf=k4))\n\n# create a Model\nboundaries = np.linspace(0,L.magnitude,K+1)*L.units\nconn = IsotropicConnection({'A' : d, 'B' : d},dim=1)\ncomp_array = CompartmentArray1D('main',boundaries,conn)\ncomp_array.add_rxns_to_array([rxns[0],rxns[1]])\nfor c in comp_array.compartments.values():\n    if c.pos[0][1] <= 9*L/10:\n        c.add_rxn_to_compartment(rxns[2])\n    if c.pos[0][1] > 2*L/5:\n        c.add_rxn_to_compartment(rxns[3])\n\nmodel = Model(arrays=[comp_array])\nflat_model = model.flatten()\n\n# create a system\nsys = ODESystem(flat_model)\n\n# set initial concentrations\nsys.set_q(np.arange(sys.state.size),0)\n\node_results = sys.run(1800)\n\nfig, ax = plt.subplots(nrows=1,ncols=2)\nax[0].set_ylabel('Number of A molecules')\nax[1].set_ylabel('Number of B molecules')\nax[0].set_xlabel('x (mm)')\nax[1].set_xlabel('x (mm)')\nplt.show()\n\nIDs_A = []\nIDs_B = []\npos_x = []\nfor i in range(K):\n    c_name = 'main-{0}'.format(i)\n    IDs_A.append(sys.state.index[c_name]['A'])\n    IDs_B.append(sys.state.index[c_name]['B'])\npos_x = sys.state.x_pos[IDs_A]/1000000 # in mm\n\nt4 = ode_results.y.shape[1]-1\nax[0].plot(pos_x,ode_results.y[IDs_A,t4],label='ODE')\nax[1].plot(pos_x,ode_results.y[IDs_B,t4],label='ODE')\n\n#----\n# Now create a GillespieSystem for the same model\n#---\n\nfor i in range(1):\n    Gillespie_sys = GillespieSystem(flat_model)\n    Gillespie_sys.add_reporter(AllReporter(freq=100))\n    Gillespie_sys.set_q(np.arange(sys.state.size),0)\n    tmp = Gillespie_sys.run(1800)\n    IDs_A = []\n    IDs_B = []\n    for j in range(K):\n        c_name = 'main-{0}'.format(j)\n        IDs_A.append(sys.state.index[c_name]['A'])\n        IDs_B.append(sys.state.index[c_name]['B'])\n    reports = Gillespie_sys.reporters[0].reports()\n    ax[0].plot(pos_x,reports[-1]['report'][IDs_A],label='run {0}'.format(i))\n    ax[1].plot(pos_x,reports[-1]['report'][IDs_B],label='run {0}'.format(i))\n\nax[0].legend()\nax[1].legend()\nplt.show()\n","repo_name":"ADicksonLab/OpenRXN","sub_path":"examples/1D_reac_diff_AB.py","file_name":"1D_reac_diff_AB.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"}
+{"seq_id":"11059848275","text":"\n\narr = [1,2,4]\n\nfor i in arr:\n    print(i)\n\n'''\nTwo concepts to keep apart:\niterable: an iterable object; it can be turned into an iterator via the iter() function and can also be traversed with a for loop. It carries no state.\n\niterator: an iterator; the next() function fetches its next element, and it holds the current traversal state of the container it is bound to.\n'''\n\n\n","repo_name":"Robinguo2012/Python001","sub_path":"design patten/iterator.py","file_name":"iterator.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41682808338","text":"import cv2\nimport os\nfrom solutions import Difficult,Medium,Easy\nfrom utils import evaluate\nimport time\n\ndef main():\n    reco_res = {}\n    ground_truth = {'1-1.jpg': '沪EWM957', '1-2.jpg': '沪AF02976', '1-3.jpg': '鲁NBK268',\n                    '2-1.jpg': '沪EWM957', '2-2.jpg': '豫B20E68', '2-3.jpg': '沪A93S20',\n                    '3-1.jpg': '沪EWM957', '3-2.jpg': '沪ADE6598', '3-3.jpg': '皖SJ6M07'}\n\n    levels = ['easy','medium','difficult']\n    proc_time = {}\n    for level in levels:\n        time_start = time.time()\n        path = './images/'+level\n        if level == 'difficult':\n            for filename in os.listdir(path):\n                img = cv2.imread(path+'/'+filename)\n                IMG_h = 800\n                IMG_w = 1000\n                rec = Difficult(img,IMG_w,IMG_h,filename)\n                res = rec.process()\n                print(level,filename,res)\n                reco_res[filename]=res\n        if level == 'medium':\n            for filename in os.listdir(path):\n                img = cv2.imread(path+'/'+filename)\n                height = len(img)\n                width = len(img[0])\n                rec = Medium(img,width,height,filename)\n                res = rec.process()\n                print(level,filename,res)\n                reco_res[filename]=res\n        if level == 'easy':\n            for filename in os.listdir(path):\n                img = cv2.imread(path+'/'+filename)\n                height = len(img)\n                width = len(img[0])\n                rec = Easy(img,width,height,filename)\n                res = rec.process()\n                print(level,filename,res)\n                reco_res[filename]=res\n        time_end = time.time()\n        proc_time[level]=(time_end-time_start)/3\n\n\n    precision = evaluate(reco_res, ground_truth)\n    print('Recognition results:', reco_res)\n    print('Ground truth:', ground_truth)\n    print('Accuracy:', precision)\n    print('Recognition time:',proc_time)\n\nif __name__ == '__main__':\n    
main()","repo_name":"chesiy/lisence_recognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74094355131","text":"\r\n## Library and Settings\r\nimport pandas as pd\r\nimport numpy as np\r\nimport requests\r\nimport warnings\r\nimport time\r\nimport os\r\nimport boto3\r\nimport shutil\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom tqdm import tqdm, trange\r\nfrom datetime import datetime, timedelta\r\nfrom sqlalchemy import create_engine\r\n\r\nwarnings.simplefilter(action='ignore', category=FutureWarning)\r\npd.set_option('display.max_columns', None)\r\n\r\n\r\n\r\n\r\ndef db_connection():\r\n\r\n id = \"###########\"\r\n pw = \"###########\"\r\n host = \"##############\"\r\n port = \"############\"\r\n db = \"##########\"\r\n\r\n engine = create_engine(f\"mysql+pymysql://{id}:{pw}@{host}:{port}/{db}\")\r\n \r\n return engine\r\n\r\n\r\n\r\n\r\ndef get_index_df(engine, date):\r\n \r\n index_query = f\"\"\"\r\n SELECT *\r\n FROM ftc.FTC_MAIN_INDEX_HIST_TB AS A\r\n WHERE 1=1\r\n AND A.search_dt = \"{date}\"\r\n \"\"\"\r\n index_df = pd.read_sql(index_query, engine)\r\n\r\n return index_df\r\n\r\n\r\n\r\n\r\n\r\ndef get_main_df(engine):\r\n \r\n main_query = f\"\"\"\r\n SELECT *\r\n FROM ftc.FTC_HEAD_INFO_HIST AS A\r\n \"\"\"\r\n main_df = pd.read_sql(main_query, engine)\r\n\r\n return main_df\r\n\r\n\r\n\r\n\r\n\r\ndef get_new_df(index_df, main_df):\r\n \r\n new_df = index_df[~index_df[\"page_id\"].isin(main_df[\"page_id\"])]\r\n new_df = new_df.reset_index(drop = True)\r\n\r\n return new_df\r\n\r\n\r\n\r\n\r\n\r\ndef to_ncp(s3, date, file_name):\r\n\r\n s3.put_object(Bucket = \"winkstone-data-lake\", Key = f\"crawler/01_FTC/\")\r\n\r\n local_file_path = f\"./{date}/{file_name}.html\"\r\n\r\n try:\r\n s3.upload_file(local_file_path, \"winkstone-data-lake\", f\"crawler/01_FTC/{date}/{file_name}.html\")\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\n\r\n\r\n\r\ndef crawler(page_id):\r\n\r\n url = f\"https://franchise.ftc.go.kr/mnu/00013/program/userRqst/view.do?firMstSn={page_id}\"\r\n\r\n response = requests.get(url)\r\n \r\n if response.status_code == 200:\r\n soup = BeautifulSoup(response.content, \"html.parser\")\r\n html_list = soup.select(\"table\")\r\n table_list = []\r\n for i in range(len(html_list)):\r\n table_html = str(html_list[i])\r\n table_df = pd.read_html(table_html)[0]\r\n table_list.append(table_df)\r\n head = table_list[0]['상호'][0]\r\n head = head.replace(\"상호 \", \"\")\r\n brand = table_list[0]['영업표지'][0]\r\n brand = brand.replace(\"영업표지 \", \"\")\r\n\r\n if head != '상호':\r\n # print(f\"{timestemp} Saved - {page_id}, {head} - {brand}\")\r\n return soup\r\n \r\n else:\r\n # print(f\"{timestemp} Pass - {page_id}, {head} - {brand}\")\r\n pass\r\n else:\r\n # print(f\"{timestemp} Retry - {page_id}, {head} - {brand}\")\r\n time.sleep(1)\r\n crawler(page_id)\r\n\r\n\r\n\r\n\r\n\r\ndef save_html_file(folder_path, file_name, soup):\r\n\r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n \r\n with open(f\"{folder_path}/{file_name}.html\", \"w\") as file:\r\n file.write(str(soup))\r\n\r\n\r\n\r\n\r\ndef get_s3_client():\r\n\r\n service_name = '##'\r\n endpoint_url = 'https://kr.object.fin-ncloudstorage.com'\r\n region_name = 'kr-standard'\r\n access_key = '#################'\r\n secret_key = '#####################'\r\n\r\n s3 = boto3.client(service_name, endpoint_url = endpoint_url, aws_access_key_id = 
access_key, aws_secret_access_key = secret_key)\r\n \r\n return s3\r\n\r\n\r\n\r\n\r\ndef main_html_download():\r\n\r\n engine = db_connection()\r\n\r\n date = datetime.today().strftime(\"%Y-%m-%d\")\r\n\r\n print(\"Getting new data from URL ... \")\r\n index_df = get_index_df(engine, date)\r\n main_df = get_main_df(engine)\r\n new_df = get_new_df(index_df, main_df)\r\n\r\n\r\n print(\"Downloading HTML files in local and cloud ... \")\r\n s3 = get_s3_client()\r\n\r\n for page_id in tqdm(new_df[\"page_id\"]):\r\n\r\n soup = crawler(page_id)\r\n\r\n folder_path = f\"./{date}\"\r\n file_name = str(page_id)\r\n\r\n save_html_file(folder_path, file_name, soup)\r\n to_ncp(s3, date, page_id)\r\n\r\n\r\n print(\"All done ! \")\r\n\r\n\r\nif __name__ == '__main__':\r\n main_html_download()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"JihyeonKim14/WK_partners","sub_path":"ETL/s3_html_load.py","file_name":"s3_html_load.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15420693360","text":"from collections import deque\n# n, m = map(int, input().split())\n# g = [list(map(int, input().split())) for _ in range(n)]\nn, m, g = 6, 5, [[1, 1, 0, 1, 1], [0, 1, 1, 0, 0], [0, 0, 0, 0, 0], [1, 0, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 1]] # -> 4, 9\n# print(n,m,g)\n\ndef bfs(i,j):\n q = deque()\n q.append([i,j])\n ck[i][j] = 1\n cnt = 1\n\n while q:\n a, b = q.popleft()\n \n for i, j in zip(dx, dy):\n nx, ny = a + i, b + j\n\n if 0 <= nx < n and 0 <= ny < m and ck[nx][ny] == 0 and g[nx][ny] == 1:\n q.append([nx, ny])\n g[nx][ny] = g[a][b] + 1\n cnt += 1\n ck[nx][ny] = 1\n res.append(cnt)\n\ndx, dy = [1,0,-1,0], [0,1,0,-1]\nres = []\nck = [[0] * m for _ in range(n)]\n\nfor i in range(n):\n for j in range(m):\n if g[i][j] == 1:\n bfs(i, j)\nif res:\n print(len(res))\n print(max(res))\nelse:\n print(0)\n print(0) \n","repo_name":"rkdalsdn94/algoalgo","sub_path":"baekjoon/그림_1926.py","file_name":"그림_1926.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18417608840","text":"import numpy as np\nfrom jsonextended import units, mockpath, edict\nfrom ejplugins.utils import split_numbers, codata, validate_against_schema\n\ntry:\n import pathlib\nexcept ImportError:\n import pathlib2 as pathlib\n\n\nclass GaussianCube(object):\n \"\"\" parser plugin for jsonextended\n\n as specified at http://h5cube-spec.readthedocs.io/en/latest/cubeformat.html\n\n \"\"\"\n plugin_name = 'gaussian_cube'\n plugin_descript = 'read gaussian cube charge/spin density file'\n file_regex = '*.cube'\n\n def read_file(self, f, **kwargs):\n comments1 = f.readline().strip()\n comments2 = f.readline().strip()\n line = f.readline().strip()\n inputs = split_numbers(line)\n if len(inputs) > 4 and inputs[4] != 1:\n # TODO implement NVAL != 1\n raise NotImplementedError(\"not yet implemented NVAL != 1: {0}\".format(line))\n natoms = inputs[0]\n centre = -1 * np.array(inputs[1:4]) * codata[(\"Bohr\", \"Angstrom\")]\n if natoms < 0:\n # TODO implement DSET_IDS\n raise NotImplementedError(\"not yet implemented DSET_IDS\")\n an, ax, ay, az = split_numbers(f.readline().strip())\n bn, bx, by, bz = split_numbers(f.readline().strip())\n cn, cx, cy, cz = split_numbers(f.readline().strip())\n\n if an <= 0 or bn <= 0 or cn <= 0:\n raise ValueError(\"an, bn and cn must be positive integers\")\n\n avec = [a * an * codata[(\"Bohr\", \"Angstrom\")] for a in [ax, 
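Annotation: a hedged, globals-free rewrite of the flood fill in 그림_1926.py above (same 4-neighbour BFS). For the hard-coded 6x5 sample grid it finds 4 regions with a maximum size of 9, matching the expected "4, 9" output noted in that file.

from collections import deque

def region_sizes(grid):
    n, m = len(grid), len(grid[0])
    seen = [[False] * m for _ in range(n)]
    sizes = []
    for si in range(n):
        for sj in range(m):
            if grid[si][sj] == 1 and not seen[si][sj]:
                seen[si][sj] = True
                q, size = deque([(si, sj)]), 0
                while q:
                    a, b = q.popleft()
                    size += 1
                    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                        x, y = a + dx, b + dy
                        if 0 <= x < n and 0 <= y < m and grid[x][y] == 1 and not seen[x][y]:
                            seen[x][y] = True
                            q.append((x, y))
                sizes.append(size)
    return sizes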
ay, az]]\n bvec = [b * bn * codata[(\"Bohr\", \"Angstrom\")] for b in [bx, by, bz]]\n cvec = [c * cn * codata[(\"Bohr\", \"Angstrom\")] for c in [cx, cy, cz]]\n #centre = 0.5 * (np.array(avec) + np.array(bvec) + np.array(cvec))\n\n atomic_numbers = []\n nuclear_charges = []\n ccoords = []\n for _ in range(int(natoms)):\n anum, ncharge, x, y, z = split_numbers(f.readline().strip())\n atomic_numbers.append(int(anum))\n nuclear_charges.append(ncharge)\n ccoord = (np.asarray([x, y, z]) * codata[(\"Bohr\", \"Angstrom\")]) + centre\n ccoords.append(ccoord.tolist())\n\n values = []\n line = f.readline().strip()\n while line:\n values += line.split()\n line = f.readline().strip()\n\n return {\n \"title\": comments1,\n #\"na\": int(an), \"nb\": int(bn), \"nc\": int(cn),\n \"cell_vectors\": {\n \"a\": {\"units\": \"angstrom\", \"magnitude\": avec},\n \"b\": {\"units\": \"angstrom\", \"magnitude\": bvec},\n \"c\": {\"units\": \"angstrom\", \"magnitude\": cvec}\n },\n #\"centre\": [0, 0, 0],\n \"densities\": [{\n \"type\": comments2,\n \"magnitude\": np.array(values, dtype=float).reshape((int(an), int(bn), int(cn)))\n }],\n \"atoms\": {\"ccoords\": {\"units\": \"angstrom\",\n \"magnitude\": ccoords},\n \"nuclear_charge\": nuclear_charges,\n \"atomic_number\": atomic_numbers}\n }\n\n\ndef ejdict_to_gcube(data, fpath=None, density=0,\n include_atoms=True, adata=None, cell_tol=1E-3):\n \"\"\"\n\n Parameters\n ----------\n data: dict\n fpath: str or None\n output file path or, if None, write to MockPath\n density: int\n take density from data[\"densities\"][density]\n include_atoms: bool\n include atoms in gaussian cube\n adata: dict or None\n separate atom data (for instance for Crystal output)\n cell_tol: float or None\n if not None, raise and error if the data and adata cell vectors are not within this tolerance\n\n Returns\n -------\n fpath: pathlib.Path or jsonextended.mockpath.MockPath\n\n \"\"\"\n if include_atoms and adata is not None:\n if cell_tol:\n cdiff = edict.diff(data[\"cell_vectors\"], adata[\"cell_vectors\"],\n np_allclose=True, rtol=cell_tol, atol=cell_tol)\n if cdiff:\n raise ValueError(\"data and adata have different cell vectors: {}\".format(cdiff))\n data[\"atoms\"] = adata[\"atoms\"]\n\n validate_against_schema(data, \"edensity\")\n\n if fpath is None:\n fpath = mockpath.MockPath(\"test.cube\", is_file=True)\n else:\n fpath = pathlib.Path(fpath)\n\n with fpath.open(\"w\") as f:\n data = units.combine_quantities(data)\n data = units.apply_unitschema(data, {\"a\": \"angstrom\", \"b\": \"angstrom\", \"c\": \"angstrom\", \"ccoords\": \"angstrom\"},\n as_quantity=False)\n natoms = 0 if \"atoms\" not in data or not include_atoms else len(data[\"atoms\"][\"ccoords\"])\n\n avec = np.asarray(data[\"cell_vectors\"][\"a\"]) / codata[(\"Bohr\", \"Angstrom\")]\n bvec = np.asarray(data[\"cell_vectors\"][\"b\"]) / codata[(\"Bohr\", \"Angstrom\")]\n cvec = np.asarray(data[\"cell_vectors\"][\"c\"]) / codata[(\"Bohr\", \"Angstrom\")]\n centre = 0.5 * (avec + bvec + cvec)\n centre_offset = -1*centre\n\n f.write(data[\"title\"] + \"\\n\")\n f.write(data[\"densities\"][density][\"type\"] + \"\\n\")\n dense = np.asarray(data[\"densities\"][density][\"magnitude\"])\n na, nb, nc = dense.shape\n f.write(\"{0:6d} {1:10.6f} {2:10.6f} {3:10.6f}\\n\".format(natoms, *centre_offset.tolist()))\n f.write(\"{0:6d} {1:10.6f} {2:10.6f} {3:10.6f}\\n\".format(na, *(avec/na).tolist()))\n f.write(\"{0:6d} {1:10.6f} {2:10.6f} {3:10.6f}\\n\".format(nb, *(bvec/nb).tolist()))\n f.write(\"{0:6d} {1:10.6f} {2:10.6f} 
{3:10.6f}\\n\".format(nc, *(cvec/nc).tolist()))\n\n if data.get(\"atoms\", False):\n for i, c in enumerate(data[\"atoms\"][\"ccoords\"]):\n atomic_number = data[\"atoms\"][\"atomic_number\"][i]\n nuclear_charge = data[\"atoms\"][\"nuclear_charge\"][i]\n ccoord = (np.array(c) / codata[(\"Bohr\", \"Angstrom\")]) + centre_offset\n\n f.write(\"{0:6d} {1:10.6f} {2:10.6f} {3:10.6f} {4:10.6f}\\n\".format(atomic_number, nuclear_charge,\n *ccoord.tolist()))\n dense = dense.flatten().tolist()\n dlength = len(dense)\n output = []\n for i in range(int(dlength/6.)+1):\n if dlength > i*6 + 6:\n output.append(\"{0:12.5E} {1:12.5E} {2:12.5E} {3:12.5E} {4:12.5E} {5:12.5E}\".format(*dense[i*6: i*6+6]))\n else:\n output.append(\" \".join([\"{0:12.5E}\".format(v) for v in dense[i*6: dlength]]))\n\n f.write(\"\\n\".join(output))\n\n return fpath\n","repo_name":"chrisjsewell/ejplugins","sub_path":"ejplugins/gcube.py","file_name":"gcube.py","file_ext":"py","file_size_in_byte":6540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"36770922009","text":"def chunking_by(items: list, size: int):\r\n listy=[]\r\n count=len(items)//size\r\n\r\n for i in range(0,count):\r\n listy1=[] \r\n for y in range(0,size):\r\n listy1.append(items[0])\r\n items.remove(items[0]) \r\n listy.append(listy1)\r\n if items!=[]:\r\n listy.append(items)\r\n return listy","repo_name":"MiracleX77/CheckIO","sub_path":"O'Reilly/Chunk.py","file_name":"Chunk.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32395107085","text":"'''\nInserindo e removendo elementos da lista\nappend(elemento) insert(index, elemento) pop(index) remove(elemento)\n'''\n\nlanches = ['doritos', 'hamburguer', 'batata-frita', 'pudim']\n\n# Verificando o tipo Lista\nprint(type(lanches))\n\ndef exibir():\n print(lanches)\n\nexibir()\n\n# Inserindo elemento na lista\nlanches.append('salgadinho')\nlanches.insert(0, 'piraque')\n\nexibir()\n\n# Removendo último elemento\nlanches.pop() \n# Removendo elemento pelo índice\nlanches.pop(2)\n# Removendo elemento pelo elemento\nlanches.remove('piraque')\n\nexibir()\n\nif 'refrigerante' in lanches:\n lanches.remove('refrigerante')\n\n'''\nUma outra maneira de remover um elemento\ndel lanche[2]\n'''\n","repo_name":"udanielnogueira/Python.Treinamento1","sub_path":"mundo-3/2-1-lists.py","file_name":"2-1-lists.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10641948708","text":"import sqlite3\n\nfrom config.config import db_path\n\n\nclass DBConnect(object):\n\n def __new__(cls):\n \"\"\"\n метод добавляет классу свойства синглетона и инициализирует его\n \"\"\"\n if not hasattr(cls, 'instance'):\n try:\n cls.conn = sqlite3.connect(db_path, check_same_thread=False)\n except Exception as e:\n raise e\n\n cls.instance = super(DBConnect, cls).__new__(cls)\n return cls.instance\n\n def getConnection(self):\n return self.conn\n\n def getCursor(self):\n return self.conn.cursor()\n\n def do_select_query(self, q):\n cur = self.getCursor()\n try:\n cur.execute(q)\n o = cur.fetchone()\n if o is not None:\n o = o[0]\n return o\n except sqlite3.Error as e:\n raise e\n finally:\n if cur:\n cur.close()\n\n def do_selects_query(self, q):\n cur = self.getCursor()\n try:\n cur.execute(q)\n o = cur.fetchall()\n return o\n except sqlite3.Error as e:\n raise e\n finally:\n if cur:\n 
cur.close()\n\n @staticmethod\n def get_custom_user_field(tg_id, table, field):\n \"\"\"\n метод получает сложное поле из db\n :param tg_id:\n :param table:\n :param field:\n :return:\n \"\"\"\n cur = DBConnect().getCursor()\n try:\n q = f'select {field} from {table} where tg_id = {tg_id}'\n cur.execute(q)\n step = cur.fetchone()[0]\n return step\n except sqlite3.Error as e:\n raise e\n finally:\n if cur:\n cur.close()\n\n @staticmethod\n def update_custom_user_filed(tg_id, table, filed, val):\n \"\"\"\n метод обновляет сложное поле из db\n :param tg_id:\n :param table:\n :param filed:\n :param val:\n \"\"\"\n conn = DBConnect().getConnection()\n cur = conn.cursor()\n try:\n if val is None:\n q = f'update {table} set {filed} = NULL where tg_id = {tg_id}'\n else:\n q = f'update {table} set {filed} = \\'{val}\\' where tg_id = {tg_id}'\n cur.execute(q)\n conn.commit()\n except sqlite3.Error as e:\n raise e\n finally:\n if cur:\n cur.close()\n\n @staticmethod\n def update_custom_user_photo_filed(tg_id, table, filed, photo):\n \"\"\"\n метод обновляет фото произвольного поля из db\n :param tg_id:\n :param table:\n :param filed:\n :param photo:\n \"\"\"\n conn = DBConnect().getConnection()\n cur = conn.cursor()\n try:\n q = f'update {table} set {filed} = ? where tg_id = {tg_id}'\n cur.execute(q, [photo])\n conn.commit()\n except sqlite3.Error as e:\n raise e\n finally:\n if cur:\n cur.close()\n","repo_name":"akatsnelson/hsetravel_mem60","sub_path":"dao/db_connection.py","file_name":"db_connection.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20668859149","text":"import hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom base64 import b64encode, b64decode\n\nimport os\nfrom stat import S_IREAD, S_IRWXU\n\nclass AESCipher(object):\n def __init__(self, key):\n self.block_size = AES.block_size\n self.key = hashlib.sha256(key.encode()).digest()\n\n def encrypt(self, plain_text):\n plain_text = self.__pad(plain_text)\n iv = Random.new().read(self.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n encrypted_text = cipher.encrypt(plain_text.encode())\n return b64encode(iv + encrypted_text).decode(\"utf-8\")\n\n def decrypt(self, encrypted_text):\n encrypted_text = b64decode(encrypted_text)\n iv = encrypted_text[:self.block_size]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n plain_text = cipher.decrypt(encrypted_text[self.block_size:]).decode(\"utf-8\")\n return self.__unpad(plain_text)\n\n def __pad(self, plain_text):\n number_of_bytes_to_pad = self.block_size - len(plain_text) % self.block_size\n ascii_string = chr(number_of_bytes_to_pad)\n padding_str = number_of_bytes_to_pad * ascii_string\n padded_plain_text = plain_text + padding_str\n return padded_plain_text\n\n @staticmethod\n def __unpad(plain_text):\n last_character = plain_text[len(plain_text) - 1:]\n return plain_text[:-ord(last_character)]\n\npath = os.getcwd()\ndir_list = os.listdir(path)\nprint(dir_list)\nos.chmod('state', S_IREAD)\ng = open('state', 'r')\nstatus = g.readlines()\nif(status[0] == 'encrypted'):\n\n password = input(\"Enter password for decryption: \")\n a = AESCipher(password)\n # print(a.decrypt('4aiHxRFeKAKTz9zGs2KZF0SmGKaWS/9wb6kXzQkKyyQ='))\n # print(a.decrypt('5USKlrd8JobNwNmVKB3kTw+of+0jiG8IlKCiHt810Do='))\n\n for i in dir_list:\n\n if(i != \"encryption.py\" and i != \"keys\" and i != 'state'):\n os.chmod(i, S_IREAD)\n print(i)\n f = open(i, 'r')\n list = f.readlines()\n elist 
= \"\"\n m = []\n for j in list:\n elist = a.decrypt(j)\n m.append(elist)\n print(m)\n os.chmod(i, S_IRWXU)\n f = open(i, 'w')\n for k in m:\n f.write(k)\n\n os.chmod('state', S_IRWXU)\n g = open('state', 'w')\n g.write('decrypted')\n os.chmod('state', S_IREAD)\n\nelif(status[0] == 'decrypted'):\n password = input(\"Enter any password for encryption, make sure to remember it: \")\n a = AESCipher(password)\n for i in dir_list:\n if(i != \"encryption.py\" and i != \"keys\" and i!= 'state'):\n f = open(i, 'r')\n list = f.readlines()\n elist = \"\"\n m = []\n for j in list:\n elist = a.encrypt(j)\n m.append(elist)\n print(m)\n f = open(i, 'w')\n for k in m:\n f.write(k)\n f.write('\\n')\n os.chmod(i, S_IREAD)\n os.chmod('state', S_IRWXU)\n g = open('state', 'w')\n g.write('encrypted')\n os.chmod('state', S_IREAD)","repo_name":"JDeepak45/Os","sub_path":"encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17610845559","text":"import torch\nfrom torch import nn\nfrom tqdm import tqdm\nimport numpy as np\n\n\ndef train(model, train_generator, validation_generator, epochs, device, optimizer):\n \"\"\"\n trains a model for a specified number of epochs\n @param model: model currently trained\n @param train_generator: iterator which generates batch of training samples\n @param validation_generator: iterator which generates batch of evaluation samples\n @param epochs: number of epochs to train the model on\n @param device: either 'cpu' or 'cuda' depending on hardware availability\n @param optimizer: optimizer used to updated the model's parameters\n @return:\n \"\"\"\n # Set the objective function\n loss_function = nn.CrossEntropyLoss()\n\n # Move model's parameters to device\n model = model.to(device)\n\n # Set model in train mode for dropout and batch-normalization layers\n model.train(True)\n for e in range(epochs):\n nb_correct_train = 0\n for batch_tweets, batch_labels in tqdm(train_generator):\n # Move batch to device\n batch_labels = torch.LongTensor(batch_labels).to(device)\n\n # Reset the gradient for each batch\n optimizer.zero_grad()\n\n # Forward pass)\n outputs = model.forward(batch_tweets)\n\n # Compute the predictions\n predictions = torch.argmax(outputs.detach(), dim=1)\n nb_correct_train += torch.sum(predictions == batch_labels)\n\n loss = loss_function(outputs, batch_labels.squeeze())\n\n # Backward pass\n loss.backward()\n optimizer.step()\n\n # Compute training accuracy\n train_acc = nb_correct_train.item() / train_generator.dataset.__len__()\n train_message = 'Train accuracy at epoch {} is {}'\n print(train_message.format(e, train_acc))\n\n # Set model in eval mode for dropout and batch-normalization layers\n model.train(False)\n with torch.no_grad():\n nb_correct_val = 0\n for batch_tweets, batch_labels in validation_generator:\n batch_labels = torch.LongTensor(batch_labels).to(device)\n outputs = model.forward(batch_tweets)\n predictions = torch.argmax(outputs, dim=1)\n nb_correct_val += torch.sum(predictions == batch_labels)\n val_acc = nb_correct_val.item() / validation_generator.dataset.__len__()\n val_message = 'Test accuracy is {}'\n print(val_message.format(val_acc))\n\n\ndef build_embedding_matrix(embedding_dim, glove, vocabulary):\n \"\"\"\n completes a pre-trained glove embedding with random initialization for missing entries.\n This implementation is strongly inspired by:\n 
https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76\n @param embedding_dim: dimension of the the embedding space\n @param glove: pretrained glove embedding dictionary\n @param vocabulary: list of words used in the tweets\n @return: completed embedding matrix and word to index dictionary\n \"\"\"\n embedding_matrix = np.zeros((len(vocabulary), embedding_dim))\n text_indexer_dict = dict()\n for i, word in enumerate(vocabulary):\n try:\n embedding_matrix[i] = glove[word]\n except KeyError:\n embedding_matrix[i] = np.random.normal(loc=0, scale=1., size=embedding_dim)\n text_indexer_dict[word] = i\n return embedding_matrix, text_indexer_dict\n\n\ndef accuracy(model, inputs, labels, batch_size, device):\n \"\"\"\n computes the accuracy of the model on the given inputs\n @param model: model currently trained\n @param inputs: array of inputs on which to compute the accuracy\n @param labels: array of true labels\n @param batch_size: size of batches\n @param device: cpu or cuda depending on hardware availability\n @return: accuracy of the predictions\n \"\"\"\n nb_correct = 0\n for b in range(0, inputs.shape[0], batch_size):\n outputs = model.forward(inputs.narrow(0, b, batch_size).to(device))\n predictions = torch.argmax(outputs, dim=1)\n nb_correct += torch.sum(predictions == labels.narrow(0, b, batch_size).squeeze().to(device))\n return nb_correct.item() / inputs.shape[0]\n\n\ndef load_dict_contractions():\n \"\"\"\n this list of contractions was taken from a previous versions of :\n https://en.wikipedia.org/wiki/Wikipedia%3AList_of_English_contractions\n @return: dictionary from contraction to corresponding correct spelling\n \"\"\"\n return {\n \"ain't\": \"is not\",\n \"amn't\": \"am not\",\n \"aren't\": \"are not\",\n \"can't\": \"cannot\",\n \"'cause\": \"because\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"could've\": \"could have\",\n \"daren't\": \"dare not\",\n \"daresn't\": \"dare not\",\n \"dasn't\": \"dare not\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"e'er\": \"ever\",\n \"em\": \"them\",\n \"everyone's\": \"everyone is\",\n \"finna\": \"fixing to\",\n \"gimme\": \"give me\",\n \"gonna\": \"going to\",\n \"gon't\": \"go not\",\n \"gotta\": \"got to\",\n \"hadn't\": \"had not\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'll\": \"he will\",\n \"he's\": \"he is\",\n \"he've\": \"he have\",\n \"how'd\": \"how would\",\n \"how'll\": \"how will\",\n \"how're\": \"how are\",\n \"how's\": \"how is\",\n \"I'd\": \"I would\",\n \"I'll\": \"I will\",\n \"I'm\": \"I am\",\n \"I'm'a\": \"I am about to\",\n \"I'm'o\": \"I am going to\",\n \"isn't\": \"is not\",\n \"it'd\": \"it would\",\n \"it'll\": \"it will\",\n \"it's\": \"it is\",\n \"I've\": \"I have\",\n \"kinda\": \"kind of\",\n \"let's\": \"let us\",\n \"mayn't\": \"may not\",\n \"may've\": \"may have\",\n \"mightn't\": \"might not\",\n \"might've\": \"might have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"must've\": \"must have\",\n \"needn't\": \"need not\",\n \"ne'er\": \"never\",\n \"o'\": \"of\",\n \"o'er\": \"over\",\n \"ol'\": \"old\",\n \"oughtn't\": \"ought not\",\n \"shalln't\": \"shall not\",\n \"shan't\": \"shall not\",\n \"she'd\": \"she would\",\n \"she'll\": \"she will\",\n \"she's\": \"she is\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"should've\": \"should have\",\n \"somebody's\": \"somebody 
is\",\n \"someone's\": \"someone is\",\n \"something's\": \"something is\",\n \"that'd\": \"that would\",\n \"that'll\": \"that will\",\n \"that're\": \"that are\",\n \"that's\": \"that is\",\n \"there'd\": \"there would\",\n \"there'll\": \"there will\",\n \"there're\": \"there are\",\n \"there's\": \"there is\",\n \"these're\": \"these are\",\n \"they'd\": \"they would\",\n \"they'll\": \"they will\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"this's\": \"this is\",\n \"those're\": \"those are\",\n \"'tis\": \"it is\",\n \"'twas\": \"it was\",\n \"wanna\": \"want to\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we would\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we're\": \"we are\",\n \"weren't\": \"were not\",\n \"we've\": \"we have\",\n \"what'd\": \"what did\",\n \"what'll\": \"what will\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"where'd\": \"where did\",\n \"where're\": \"where are\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"which's\": \"which is\",\n \"who'd\": \"who would\",\n \"who'd've\": \"who would have\",\n \"who'll\": \"who will\",\n \"who're\": \"who are\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why'd\": \"why did\",\n \"why're\": \"why are\",\n \"why's\": \"why is\",\n \"won't\": \"will not\",\n \"wouldn't\": \"would not\",\n \"would've\": \"would have\",\n \"y'all\": \"you all\",\n \"you'd\": \"you would\",\n \"you'll\": \"you will\",\n \"you're\": \"you are\",\n \"you've\": \"you have\",\n \"Whatcha\": \"What are you\",\n \"luv\": \"love\",\n \"sux\": \"sucks\"\n }\n","repo_name":"AleksandarHr/Twitter_Sentiment_Analysis","sub_path":"source/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":8248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42864441889","text":"# Kingdom's night\ninput_data = input()\nrow = int(input_data[1])\ncolumn = ord(input_data[0]) - ord('a') + 1\n\nsteps = [(-2, -1), (-1, -2), (1, -2), (2, -1), (2, 1), (1, 2), (-1, 2), (-2, 1)]\nanswer = 0\n\nfor step in steps:\n next_row = row + step[0]\n next_column = column + step[1]\n if (next_row >= 1 and next_row <= 8 and next_column >= 1 and next_column <= 8):\n answer = answer + 1\n\nprint(answer)","repo_name":"sigridjineth/algorithm_log","sub_path":"leetcode/Kingdoms_night.py","file_name":"Kingdoms_night.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"32915108286","text":"import pandas as pd\nfrom sklearn.cluster import MiniBatchKMeans\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef main():\n dataset = pd.read_csv('../datasets/candy.csv')\n print(dataset.head())\n # al ser aprendizaje no supervisado, no separamos nuestro datasets en partes\n # eliminaremos los nombres de los caramelos, ya que eso no nos sirve para el algoritmo\n X = dataset.drop('competitorname', axis=1)\n kmeans = MiniBatchKMeans(n_clusters=4, batch_size=8)\n kmeans.fit(X)\n print('Total de centros: ', len(kmeans.cluster_centers_))\n print('=' * 64)\n col_name = \"clusters\"\n dataset[col_name] = kmeans.predict(X)\n print(dataset)\n sns.pairplot(dataset[['sugarpercent', 'pricepercent', 'winpercent', col_name]], hue=col_name, palette='colorblind')\n plt.savefig(\"pares.png\")\n plt.show()\n plt.close()\n\n\nif __name__ == 
\"__main__\":\n main()\n","repo_name":"ichcanziho/cursos_platzi","sub_path":"machine_learning_scikit_learn/4_clustering/k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"32943226995","text":"# Overall I found help from a guide via the RealPython [website](https://realpython.com/python-rock-paper-scissors/) which further aided in \n# my understanding of the exercise and functions that I need to use for the exercise.\n\n# this is the \"game.py\" file\nimport random \n\n# I'm introducing the game to the player and getting them excited to play.\n\"Hello. Welcome Player One. Let's play Rock, Paper, Scissors, Shoot!\"\nprint(\"Hello. Welcome Player One. Let's play Rock, Paper, Scissors, Shoot!\")\n\n\n# processed user inputs, validated user inputs, and set up the randomized computer selections for the game to run properly.\n# I need to make a list of all the options so that the user action returns the accurate result\n# Found the \"while True\" via the Real Python link above which helped me to understand and format the below code\nwhile True:\n user_action = input(\"Enter a choice (Rock, Paper, Scissors): \")\n user_action = user_action.lower()\n \n computer_choices = [\"rock\", \"paper\", \"scissors\"]\n if user_action not in computer_choices:\n print(\"Sorry I don't understand that entry. Please try again\")\n exit() # quit()\n computer_action = random.choice(computer_choices)\n print(f\"You chose {user_action}, \\ncomputer chose {computer_action}\")\n \n# Setting up all the various outcomes of the game between player and computer using if, elif, and else statements\n# Then determining the winner based on the player's and computer's actions\n if user_action == computer_action:\n print(f\"Whoa, you both selected {user_action}. Tie game!\")\n elif user_action == \"rock\":\n if computer_action == \"paper\":\n print(\"paper beats rock! You lost to the computer, uh oh!\")\n else:\n print(\"rock beats scissors! You win!! WOOHOO!\")\n elif user_action == \"paper\":\n if computer_action == \"scissors\":\n print(\"scissors beats paper! You lost to the computer, uh oh!\")\n else:\n print(\"paper beats rock! You win!! WOOHOO!\")\n elif user_action == \"scissors\":\n if computer_action == \"rock\":\n print(\"rock beats scissors! You lost to the computer, uh oh!\")\n else:\n print(\"scissors beats paper! You win! WOOHOO!\")\n\n# Asking the player if they want to play again or end the game.\n play_again = input(\"Would you like to try again? (y/n): \")\n if play_again != \"y\":\n print(\"Goodbye. 
Thanks for playing!\")\n break\n","repo_name":"Lrich14/Rock-Paper-Scissors-exercise","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26832762850","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# In[3]:\r\n\r\n\r\n# !pip install pyjwt\r\nimport jwt\r\nimport hashlib\r\nimport requests\r\nimport uuid\r\nfrom urllib.parse import urlencode, unquote\r\nimport importlib\r\nimport upbit_keys as keys\r\n\r\n\r\n# In[4]:\r\n\r\n\r\nserver_url = 'https://api.upbit.com'\r\n\r\n############################################################################\r\n# make your upbit key file(.py) on same directory\r\n#\r\n# ex) upbit_keys.py\r\n# access_key = ' -* your access key here *-'\r\n# secret_key = ' -* your secret key here *-'\r\n#\r\n############################################################################\r\n\r\n############################################################################\r\n# 업비트 자산조회\r\n# 입력 : 업비트 access key, secret key\r\n# 출력 : 나의 자산내용에 해당하는 json\r\n############################################################################\r\n\r\ndef asset_check(server_url = server_url):\r\n payload = {\r\n 'access_key': keys.access_key,\r\n 'nonce': str(uuid.uuid4()),\r\n }\r\n\r\n jwt_token = jwt.encode(payload, keys.secret_key)\r\n authorization = 'Bearer {}'.format(jwt_token)\r\n headers = {\r\n 'Authorization': authorization,\r\n }\r\n\r\n res = requests.get(server_url + '/v1/accounts', headers=headers)\r\n return res.json()\r\n\r\n\r\n############################################################################\r\n# 현금계좌 잔액 조회\r\n# 입력 : .\r\n# 출력 : (float) balance\r\n############################################################################\r\n\r\ndef get_balance():\r\n assets = asset_check()\r\n for i, asset in enumerate(assets):\r\n if(asset['currency'] == 'KRW'):\r\n return float(asset['balance'])\r\n\r\n \r\n \r\n############################################################################\r\n# 비트코인 조회\r\n# 입력 : .\r\n# 출력 : (float) volume\r\n# (int) -1 [코인이 없다면]\r\n############################################################################\r\n\r\ndef get_coin_balance():\r\n assets = asset_check()\r\n for i, asset in enumerate(assets):\r\n if(asset['currency'] == 'BTC'):\r\n return float(asset['balance'])\r\n elif (i == len(assets)-1):\r\n return -1\r\n\r\n\r\n \r\n############################################################################\r\n# 비트코인 구매가격 조회\r\n# 입력 : .\r\n# 출력 : (float) avg_buy_price\r\n############################################################################\r\n\r\ndef get_buy_price():\r\n assets = asset_check()\r\n for i, asset in enumerate(assets):\r\n if(asset['currency'] == 'BTC'):\r\n return float(asset['avg_buy_price'])\r\n elif (i == len(assets)-1):\r\n print('COIN_BALANCE_EMPTY')\r\n\r\n \r\n############################################################################\r\n# 주문 가능정보 조회\r\n# 입력 : (str) 코인 티커\r\n# ex) 'BTC'\r\n# 출력 : 입력한 코인 시장 정보 json\r\n############################################################################\r\n\r\ndef market_price(market, server_url = server_url):\r\n params = {\r\n 'market': 'KRW-' + market\r\n }\r\n query_string = unquote(urlencode(params, doseq=True)).encode(\"utf-8\")\r\n\r\n m = hashlib.sha512()\r\n m.update(query_string)\r\n query_hash = m.hexdigest()\r\n\r\n payload = {\r\n 'access_key': keys.access_key,\r\n 'nonce': str(uuid.uuid4()),\r\n 'query_hash': 
query_hash,\r\n 'query_hash_alg': 'SHA512',\r\n }\r\n\r\n jwt_token = jwt.encode(payload, keys.secret_key)\r\n authorization = 'Bearer {}'.format(jwt_token)\r\n headers = {\r\n 'Authorization': authorization,\r\n }\r\n\r\n res = requests.get(server_url + '/v1/orders/chance', params=params, headers=headers)\r\n return res.json()\r\n\r\n\r\n\r\n############################################################################\r\n# 개별 주문 조회\r\n# 입력 : 주문 uuid\r\n# ex) '00000000-0000-0000-0000-000000000000'\r\n# 출력 : 입력한 주문에 대한 내용 json\r\n############################################################################\r\n\r\ndef order_check(order_id, server_url = server_url):\r\n params = {\r\n 'uuid': order_id\r\n }\r\n query_string = unquote(urlencode(params, doseq=True)).encode(\"utf-8\")\r\n\r\n m = hashlib.sha512()\r\n m.update(query_string)\r\n query_hash = m.hexdigest()\r\n\r\n payload = {\r\n 'access_key': keys.access_key,\r\n 'nonce': str(uuid.uuid4()),\r\n 'query_hash': query_hash,\r\n 'query_hash_alg': 'SHA512',\r\n }\r\n\r\n jwt_token = jwt.encode(payload, keys.secret_key)\r\n authorization = 'Bearer {}'.format(jwt_token)\r\n headers = {\r\n 'Authorization': authorization,\r\n }\r\n\r\n res = requests.get(server_url + '/v1/order', params=params, headers=headers)\r\n return res.json()\r\n\r\n\r\n\r\n############################################################################\r\n# 주문 취소 접수\r\n# 미완\r\n#\r\n############################################################################\r\n\r\ndef cancel_order(server_url = server_url):\r\n params = {\r\n 'uuid': '00000000-0000-0000-0000-000000000000'\r\n }\r\n query_string = unquote(urlencode(params, doseq=True)).encode(\"utf-8\")\r\n\r\n m = hashlib.sha512()\r\n m.update(query_string)\r\n query_hash = m.hexdigest()\r\n\r\n payload = {\r\n 'access_key': keys.access_key,\r\n 'nonce': str(uuid.uuid4()),\r\n 'query_hash': query_hash,\r\n 'query_hash_alg': 'SHA512',\r\n }\r\n\r\n jwt_token = jwt.encode(payload, keys.secret_key)\r\n authorization = 'Bearer {}'.format(jwt_token)\r\n headers = {\r\n 'Authorization': authorization,\r\n }\r\n\r\n res = requests.delete(server_url + '/v1/order', params=params, headers=headers)\r\n return res.json()\r\n\r\n\r\n\r\n############################################################################\r\n# 업비트 주문하기\r\n# 입력 : \r\n# 출력 : 매도, 매수 주문내용에 해당하는 json\r\n############################################################################\r\n\r\ndef coin_order(side_option, price = 'NULL', volume = 'NULL', server_url = server_url):\r\n params = {\r\n 'market': 'KRW-BTC',\r\n }\r\n \r\n # [side_option] ( 0 : 매수 , 1 : 매도 )\r\n if side_option == 0: # 매수\r\n params['side'] = 'bid'\r\n params['ord_type'] = 'price'\r\n params['price'] = price\r\n elif side_option == 1: # 매도\r\n params['side'] = 'ask'\r\n params['ord_type'] = 'market'\r\n params['volume'] = volume\r\n else:\r\n print('error form function \"coin_order\" wrong input in parameter \"side_option\"')\r\n return -1;\r\n \r\n query_string = unquote(urlencode(params, doseq=True)).encode(\"utf-8\")\r\n\r\n m = hashlib.sha512()\r\n m.update(query_string)\r\n query_hash = m.hexdigest()\r\n\r\n payload = {\r\n 'access_key': keys.access_key,\r\n 'nonce': str(uuid.uuid4()),\r\n 'query_hash': query_hash,\r\n 'query_hash_alg': 'SHA512',\r\n }\r\n\r\n jwt_token = jwt.encode(payload, keys.secret_key)\r\n authorization = 'Bearer {}'.format(jwt_token)\r\n headers = {\r\n 'Authorization': authorization,\r\n }\r\n\r\n res = requests.post(server_url + '/v1/orders', json=params, 
headers=headers)\r\n return res.json()\r\n\r\n\r\n\r\n############################################################################\r\n# 시장가 매수\r\n# 입력 : int 매수금액\r\n# 출력 : 실패시 -1 반환\r\n# 성공시 주문내역에 해당하는 json 반환\r\n############################################################################\r\n\r\ndef ord_bid_price(price):\r\n if price < (int)(market_price('BTC')['market']['bid']['min_total']):\r\n print('최소 주문금액은 {} 입니다.'.format(market_price('BTC')['market']['bid']['min_total']))\r\n return -1;\r\n else:\r\n return coin_order(side_option = 0, price = price)\r\n\r\n \r\n \r\n############################################################################\r\n# 시장가 매도\r\n# 입력 : 매도 물량\r\n# default : 0 ( 전체 물량 매도 )\r\n# 출력 : 잘못된 입력이 들어왔을떄 -1 반환\r\n# 주문내역에 해당하는 json 반환\r\n############################################################################\r\n\r\ndef ord_ask_market(volume = 0):\r\n if(volume == 0): \r\n return coin_order(side_option = 1, volume = market_price('BTC')['ask_account']['balance'])\r\n elif(volume > 0):\r\n return coin_order(side_option = 1, volume = volume)\r\n else:\r\n return -1\r\n\r\n \r\n \r\n############################################################################\r\n# 전체 주문 조회\r\n# 입력 : .\r\n# 출력 : 전채 주문내역에 해당하는 json list \r\n# ex) [0]( 최근 ) ~ [...]( 먼 과거 )\r\n############################################################################\r\n\r\ndef orderList_check(server_url = server_url):\r\n params = {\r\n # [states option] ( 조회할 주문 상태 ) \r\n 'states[]': ['done', 'cancel']\r\n }\r\n query_string = unquote(urlencode(params, doseq=True)).encode(\"utf-8\")\r\n\r\n m = hashlib.sha512()\r\n m.update(query_string)\r\n query_hash = m.hexdigest()\r\n\r\n payload = {\r\n 'access_key': keys.access_key,\r\n 'nonce': str(uuid.uuid4()),\r\n 'query_hash': query_hash,\r\n 'query_hash_alg': 'SHA512',\r\n }\r\n\r\n jwt_token = jwt.encode(payload, keys.secret_key)\r\n authorization = 'Bearer {}'.format(jwt_token)\r\n headers = {\r\n 'Authorization': authorization,\r\n }\r\n\r\n res = requests.get(server_url + '/v1/orders', params=params, headers=headers)\r\n return res.json()\r\n\r\n\r\n\r\n############################################################################\r\n# 마지막 매수 주문 가져오기\r\n# 입력 : .\r\n# 출력 : 마지막 매수주문에 해당하는 json\r\n############################################################################\r\n\r\ndef get_last_bid():\r\n result = ''\r\n for i, order in enumerate(orderList_check()):\r\n if (order['side'] == 'bid'):\r\n result = orderList_check()[i]\r\n break;\r\n return result\r\n\r\n\r\n\r\n############################################################################\r\n# 마지막 매도 주문 가져오기\r\n# 입력 : .\r\n# 출력 : 마지막 매도주문에 해당하는 json\r\n############################################################################\r\n\r\ndef get_last_ask():\r\n result = ''\r\n for i, order in enumerate(orderList_check()):\r\n if (order['side'] == 'ask'):\r\n result = orderList_check()[i]\r\n break;\r\n return result\r\n\r\n\r\n# In[7]:\r\n\r\n\r\n# !jupyter nbconvert --to script request_order.ipynb\r\n\r\n","repo_name":"doongeon/Bitcoin-Trader-with-Python-Korean","sub_path":"request_order.py","file_name":"request_order.py","file_ext":"py","file_size_in_byte":10626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39377053476","text":"import numpy as np\n\n\ndef binary_search(search_algorithm):\n \"\"\"\n Binary search meant to be used as a decorator for algorithms OPT1 and OPT2\n\n :param search_algorithm: OPT1 or OPT2 algorithms\n 
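Annotation: every Upbit endpoint in request_order.py above rebuilds the same JWT payload and query hash; a hedged consolidation sketch using only the calls already present in that file. `auth_headers` is a hypothetical helper name, not Upbit API.

import hashlib
import uuid
from urllib.parse import urlencode, unquote
import jwt

def auth_headers(access_key, secret_key, params=None):
    payload = {'access_key': access_key, 'nonce': str(uuid.uuid4())}
    if params:
        # hash the query string exactly as the individual endpoints above do
        query_string = unquote(urlencode(params, doseq=True)).encode("utf-8")
        payload['query_hash'] = hashlib.sha512(query_string).hexdigest()
        payload['query_hash_alg'] = 'SHA512'
    jwt_token = jwt.encode(payload, secret_key)
    return {'Authorization': 'Bearer {}'.format(jwt_token)}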
:return:\n \"\"\"\n\n def bin_search(*args):\n low = 0\n high = len(args[1]) - 1\n\n tuple_ = (np.inf, None)\n\n while low <= high:\n mid = (high + low) // 2\n results = search_algorithm(args[0], args[1][mid], *args[2:])\n\n if results['valid']:\n tuple_ = (args[1][mid], results['r'])\n high = mid - 1\n else:\n low = mid + 1\n\n return tuple_[0], tuple_[1]\n\n return bin_search","repo_name":"mattrighetti/leiserson-retiming","sub_path":"algorithms/binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"18209077553","text":"import sys\nimport socket\nimport scrapy\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\nimport base64\n\"\"\"\nA network socket is an endpoint of an interprocess communication across a computer network. \nThe Python Standard Library has a module called socket which provides a low-level internet networking interface.\n\"\"\"\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"Socket successfully created\")\nexcept socket.error as err:\n print(\"socket creation failed with error %s\" % err)\n\n# default port for socket\nport = 80\n\ntry:\n host_ip = socket.gethostbyname('www.google.com')\nexcept socket.gaierror:\n\n # this means could not resolve the host\n print(\"there was an error resolving the host\")\n sys.exit()\n\n# connecting to the server\ns.connect((host_ip, port))\n\nprint(\"Connected to google\")\n\n\ndef rsa(message):\n \"\"\"\n Generating new keys\n Generating a keypair may take a long time, depending on the number of bits required. \n The number of bits determines the cryptographic strength of the key, as well as the size of the message you can encrypt.\n \"\"\"\n key = RSA.generate(2048)\n private_key = key.exportKey('PEM')\n public_key = key.publickey().exportKey('PEM')\n \"\"\" Encrypting message using public key \"\"\"\n rsa_public_key = RSA.importKey(public_key)\n rsa_public_key = PKCS1_OAEP.new(rsa_public_key)\n encrypted_text = rsa_public_key.encrypt(message)\n encrypted_text_b64 = base64.b64encode(encrypted_text)\n print('encrypted message: {}'.format(encrypted_text_b64))\n \"\"\" Decrypting message using private key \"\"\"\n rsa_private_key = RSA.importKey(private_key)\n rsa_private_key = PKCS1_OAEP.new(rsa_private_key)\n decrypted_text = rsa_private_key.decrypt(encrypted_text)\n print('decrypted message: {}'.format(decrypted_text))\n\n\nmessage = input('Enter the message: ')\nmessage = str.encode(message)\nrsa(message)\n\ns.send(message)\ndata = s.recv(1024)\n\ns.close()\n","repo_name":"HollisLynch/bsi_crawler","sub_path":"sockets_connection.py","file_name":"sockets_connection.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33799042573","text":"import tkinter as tk\nfrom PIL import ImageTk, Image\nfrom db.sqlite import insert_into_table, update_row\nfrom widgets.dialog import DialogBox\nimport datetime\nimport os\nfrom tkinter.tix import ScrolledGrid\n\n\nclass BaseFrame(tk.Frame):\n def __init__(self, root, connection, *args, **kwargs):\n super().__init__(root, bg=\"#ffffff\")\n self.tasks = None\n self.connection = connection\n self.cursor = self.connection.cursor()\n self.all_tasks = {}\n self.img = self._parse_background_image()\n self.tasks_frame = tk.Frame(self, bg=\"#ffffff\")\n self.no_tasks_frame = tk.Frame(self, bg=\"#ffffff\")\n 
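Annotation: a usage sketch for the @binary_search decorator defined earlier above. The wrapped function must return a dict of the form {'valid': bool, 'r': ...}; here a dummy threshold test stands in for the OPT1/OPT2 algorithms it was written for.

@binary_search
def feasible(graph, candidate):
    # pretend "candidate >= 7" is the feasibility check OPT1/OPT2 would perform
    return {'valid': candidate >= 7, 'r': candidate * 2}

best, payload = feasible('dummy-graph', [1, 3, 5, 7, 9, 11])
assert (best, payload) == (7, 14)   # smallest candidate that passes the test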
+{"seq_id":"33799042573","text":"import tkinter as tk\nfrom PIL import ImageTk, Image\nfrom db.sqlite import insert_into_table, update_row\nfrom widgets.dialog import DialogBox\nimport datetime\nimport os\n\n\nclass BaseFrame(tk.Frame):\n    def __init__(self, root, connection, *args, **kwargs):\n        super().__init__(root, bg=\"#ffffff\")\n        self.tasks = None\n        self.connection = connection\n        self.cursor = self.connection.cursor()\n        self.all_tasks = {}\n        self.img = self._parse_background_image()\n        self.tasks_frame = tk.Frame(self, bg=\"#ffffff\")\n        self.no_tasks_frame = tk.Frame(self, bg=\"#ffffff\")\n        self._configure_entry_field()\n        self.root = root\n        self.root.bind('<Return>', self.on_submit)  # reconstructed event string (lost in extraction): Enter submits\n        self._dialog_box = None\n\n    def _parse_background_image(self):\n        path = os.path.dirname(os.path.abspath(__file__))\n        image_path = os.path.join(path, \"images/check.png\")\n        im = Image.open(image_path)\n        img = im.resize((156, 128), Image.LANCZOS)  # LANCZOS is the current name for the old ANTIALIAS filter\n        img = ImageTk.PhotoImage(img)\n\n        return img\n\n    def _configure_entry_field(self):\n        self.entry = tk.Text(self, bd=0.5, width=74, height=1, spacing1=7, spacing3=7, fg=\"#585858\")\n        self.entry.insert('1.0', 'Task Title')\n        self.entry.grid(row=0, column=1, pady=(30, 30))\n        self.entry.bind(\"<Button-1>\", self.on_entry_clicked)  # reconstructed event string: a left-click clears the placeholder\n\n    def on_entry_clicked(self, event):\n        self.entry.config(highlightcolor=\"#585858\")\n        if self.entry.get('1.0', '10.0') == 'Task Title\n':\n            self.entry.delete(\"1.0\", \"10.0\")\n\n    def set_default_background(self):\n        panel = tk.Label(self.no_tasks_frame, image=self.img, bd=0)\n        no_tasks = tk.Label(self.no_tasks_frame, text='No Tasks Found', bg=\"#ffffff\", bd=0, fg=\"#585858\")\n        add_tasks = tk.Label(self.no_tasks_frame, text='You can add tasks using the + above', bg=\"#ffffff\", bd=0, fg=\"#585858\")\n        panel.grid(columnspan=2, row=1, rowspan=2, pady=(60, 30))\n        no_tasks.grid(columnspan=2, row=4)\n        add_tasks.grid(columnspan=2, row=5)\n\n    def on_submit(self, e):\n        self.focus()\n        data = self.entry.get('0.0', tk.END).strip('\n')\n\n        if data and data != 'Task Title':\n            insert_into_table(self.cursor, data, self.connection)\n            self.no_tasks_frame.grid_remove()\n            self.add_task_to_list()\n            self.entry.delete('1.0', tk.END)\n            self.entry.insert('1.0', 'Task Title')\n\n    def add_task_to_list(self):\n        row_id = self.cursor.lastrowid\n        task = self.cursor.execute(\"SELECT * FROM todo WHERE id = ? LIMIT 1\", (row_id,)).fetchone()  # parameterised, like the DELETE below\n        if not self.tasks:\n            self.tasks_frame.grid(row=1, column=1, sticky=tk.NSEW)\n            self.tasks_frame.columnconfigure(1, weight=1)\n        self.render_task(task, len(self.all_tasks), self.tasks_frame)\n\n    def handle_edit_action(self, task_id):\n        DialogBox.root = self.root\n        self._dialog_box = DialogBox(save=self.on_edit_save, delete=self.on_edit_delete, task_id=task_id)\n\n    def render_task(self, task, index, target_frame):\n        # check_is_done = tk.Button(self, text='✎', bg=\"#ffffff\",\n        #                           bd=0, relief=\"raised\", command=lambda: self.handle_edit_action(task[\"id\"]))\n        # check_is_done.grid(row=index + 1, column=0, sticky=tk.W)\n        task_fields = tk.Text(target_frame, bd=2, width=80, height=1, spacing1=7, spacing3=7, fg=\"#585858\")\n        task_fields.task_id = task['id']\n        task_fields.insert('1.0', task['task'])\n        task_fields.config(state=tk.DISABLED)\n        task_fields.grid(row=index + 1, column=1, pady=(10, 10))\n        extra_options = tk.Button(task_fields, text='✎', bg=\"#ffffff\",\n                                  bd=0, relief=\"raised\", command=lambda: self.handle_edit_action(task[\"id\"]))\n        extra_options.config(highlightbackground=\"#ffffff\", cursor='hand1')\n        extra_options.pack(padx=(553, 0))\n        self.all_tasks.update({task['id']: task_fields})\n\n    def _calculate_scheduled_date(self, strict_opt, custom_opt):\n        date = None\n        current_date = datetime.date.today()\n        if custom_opt:\n            date = custom_opt.replace(\"/\", \"-\")\n        else:\n            if strict_opt == 0:\n                date = current_date.strftime('%Y-%m-%d')\n            elif strict_opt == 1:\n                date = (current_date + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n            elif strict_opt == 2:\n                date = (current_date + datetime.timedelta(7-current_date.weekday())).strftime('%Y-%m-%d')\n\n        return date\n\n    def on_edit_save(self):\n        data = self._dialog_box.get_normalized_values()\n        self._dialog_box.window_close()\n        rowid = data.pop(\"task_id\")\n        scheduled_date = self._calculate_scheduled_date(data[\"button_option\"], data[\"custom_date\"])\n        priority = data.pop(\"priority\")\n        data = dict(note=data.pop(\"note\"),\n                    scheduled_date=scheduled_date,\n                    todo_date=scheduled_date,\n                    priority=priority\n                    )\n        update_row(cursor=self.cursor, connection=self.connection, data=data, rowid=rowid)\n        self.respawn_slaves()\n\n    def on_edit_delete(self):\n        data = self._dialog_box.get_normalized_values()\n        self._dialog_box.window_close()\n        rowid = data.pop(\"task_id\")\n        self.cursor.execute(\"DELETE FROM todo WHERE id=?\", (rowid, ))\n        self.connection.commit()\n        self.all_tasks[rowid].grid_remove()\n\n    def respawn_slaves(self):\n        for slave in self.tasks_frame.grid_slaves():\n            slave.destroy()\n        self.tasks_frame.grid_remove()\n        self.fetch_todos()\n        self.fill_tasks_frame()\n\n    def list_all_tasks(self):\n        for i, task in enumerate(self.tasks):\n            self.render_task(task, i, self.tasks_frame)\n\n    def fill_tasks_frame(self):\n        if self.tasks:\n            self.tasks_frame.grid(row=1, column=1, sticky=tk.NSEW)\n            self.tasks_frame.columnconfigure(1, weight=1)\n            self.list_all_tasks()\n        else:\n            self.no_tasks_frame.grid(row=1, column=1, sticky=tk.NSEW)\n            self.no_tasks_frame.columnconfigure(1, weight=1)\n            self.set_default_background()\n\n    def fetch_todos(self):\n        pass\n\n","repo_name":"Toshe1991/TodoApp","sub_path":"widgets/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
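The weekday arithmetic in `_calculate_scheduled_date` is easy to misread; a standalone check of the three strict options (the concrete date is only an example):

```python
import datetime

# Suppose today is Wednesday 2023-06-14 (weekday() == 2; Monday is 0).
today = datetime.date(2023, 6, 14)

print(today.strftime('%Y-%m-%d'))                                 # option 0 -> '2023-06-14' (today)
print((today + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))  # option 1 -> '2023-06-15' (tomorrow)
# option 2: 7 - weekday() days ahead always lands on the next Monday
print((today + datetime.timedelta(7 - today.weekday())).strftime('%Y-%m-%d'))  # -> '2023-06-19'
```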
+{"seq_id":"28192732880","text":"# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef solution(X):\n    string_X = str(X)\n    if string_X[0] == '-':\n        new_X = int('-' + string_X[1:][::-1])  # reverse only the digits, keep the sign\n    else:\n        new_X = int(string_X[::-1])\n    return new_X","repo_name":"danrasband/coding-experiment-reviews","sub_path":"responses/A5QMTE-D2P/2_reverse_digits.py","file_name":"2_reverse_digits.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20764771146","text":"class Solution(object):\n    def isValid(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n        map = { \")\":\"(\" , \"]\":\"[\" , \"}\":\"{\"} #maps close to open parenthesis\n        stack = [] #stack of open parenthesis\n        \n        for bracket in s:\n            if bracket in map: # If it's a CLOSING bracket (the keys of map are the closing brackets)\n                \n                if stack and (stack[-1] == map[bracket]): # If the top of the stack is the matching opening bracket\n                    stack.pop() # Pop the opening bracket from the stack\n                \n                else: # If they do not match correctly\n                    return False\n            \n            else: # If it's an OPENING bracket\n                stack.append(bracket)\n        \n        # if the stack is empty, every bracket matched correctly\n        return not stack\n","repo_name":"elenazavala/leetcode","sub_path":"python/valid-parentheses.py","file_name":"valid-parentheses.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"10208471480","text":"# \n# 4 3 1\n\ndef vlozh(elem, lvl=1):\n    if elem.tag == 'cube' and elem.attrib['color'] in dd.keys():\n        dd[elem.attrib['color']] += lvl\n        # print(lvl, elem.tag, elem.attrib['color'])\n    for 
child in elem:  # getchildren() was removed in Python 3.9; iterating the element is equivalent\n        vlozh(child, lvl+1)\n\n\nfrom xml.etree import ElementTree as ET\n\ncd = ['red', 'green', 'blue']\ndd = {'red': 0, 'green': 0, 'blue': 0}\nroot = ET.fromstring(input())\n\n# print(root)\n# print(root.tag, root.attrib)\n# for child in root:\n#     print(child.tag, child.attrib)\n# for element in root.iter(\"cube\"):\n#     print(element.attrib)\n\nvlozh(root)\nprint(dd['red'], dd['green'], dd['blue'])","repo_name":"Kaermor/stepik-course512","sub_path":"3-7.py","file_name":"3-7.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12553866343","text":"from .. import bp\nfrom flask import render_template, request, redirect, url_for\nfrom jeec_brain.finders.companies_finder import CompaniesFinder\nfrom jeec_brain.finders.auctions_finder import AuctionsFinder\nfrom jeec_brain.handlers.auctions_handler import AuctionsHandler\nfrom jeec_brain.apps.auth.wrappers import allowed_roles\nfrom jeec_brain.values.api_error_value import APIErrorValue\n\n\n# Auction routes\n@bp.route('/auctions', methods=['GET'])\n@allowed_roles(['admin'])\ndef auctions_dashboard():\n    auctions_list = AuctionsFinder.get_all()\n\n    for auction in auctions_list:\n        auction.highest_bid = AuctionsFinder.get_auction_highest_bid(auction)\n    \n    if auctions_list is None:\n        error = 'No results found'\n        return render_template('admin/auctions/auctions_dashboard.html', auctions=None, error=error)\n\n    return render_template('admin/auctions/auctions_dashboard.html', auctions=auctions_list, error=None)\n\n\n@bp.route('/new-auction', methods=['GET'])\n@allowed_roles(['admin'])\ndef add_auction_dashboard():\n    return render_template('admin/auctions/add_auction.html', error=None)\n\n\n@bp.route('/new-auction', methods=['POST'])\n@allowed_roles(['admin'])\ndef create_auction():\n    name = request.form.get('name')\n    description = request.form.get('description')\n    is_open = request.form.get('is_open')\n    closing_date = request.form.get('closing_date')\n\n    try:\n        minimum_value = float(request.form.get('minimum_value'))\n    except:\n        return 'Invalid minimum value inserted', 400  # a malformed value is a client error, not a 404\n\n    if is_open == 'True':\n        is_open = True\n    else:\n        is_open = False\n\n    # create new auction\n    auction = AuctionsHandler.create_auction(\n        name=name,\n        description=description,\n        is_open=is_open,\n        minimum_value=minimum_value,\n        closing_date=closing_date\n    )\n\n    if auction is None:\n        return render_template('admin/auctions/add_auction.html', error=\"Failed to create auction!\")\n\n    return redirect(url_for('admin_api.auctions_dashboard'))\n\n\n# NOTE: the <auction_external_id> URL converters below were lost in extraction\n# and have been restored from the view-function signatures\n@bp.route('/auctions/<auction_external_id>', methods=['GET'])\n@allowed_roles(['admin'])\ndef get_auction(auction_external_id):\n    auction = AuctionsFinder.get_auction_by_external_id(auction_external_id)\n\n    if auction is None:\n        error = 'Non existent auction'\n        return render_template('admin/auctions/auctions_dashboard.html', auctions=None, error=error)\n\n    return render_template('admin/auctions/update_auction.html', auction=auction, error=None)\n\n\n@bp.route('/auctions/<auction_external_id>', methods=['POST'])\n@allowed_roles(['admin'])\ndef update_auction(auction_external_id):\n    auction = AuctionsFinder.get_auction_by_external_id(auction_external_id)\n\n    if auction is None:\n        return APIErrorValue('Couldnt find auction').json(500)\n\n    name = request.form.get('name')\n    description = request.form.get('description')\n    closing_date = request.form.get('closing_date')\n\n    try:\n        minimum_value = float(request.form.get('minimum_value'))\n    except:\n        return APIErrorValue('Wrong value format input').json(400)\n\n    is_open = request.form.get('is_open')\n\n    if is_open == 'True':\n        is_open = True\n    else:\n        is_open = False\n\n    updated_auction = AuctionsHandler.update_auction(\n        auction=auction,\n        name=name,\n        description=description,\n        minimum_value=minimum_value,\n        is_open=is_open,\n        closing_date=closing_date\n    )\n    \n    if updated_auction is None:\n        return render_template('admin/auctions/update_auction.html', auction=auction, error=\"Failed to update auction!\")\n\n    return redirect(url_for('admin_api.auctions_dashboard'))\n\n\n@bp.route('/auctions/<auction_external_id>/delete', methods=['GET'])\n@allowed_roles(['admin'])\ndef delete_auction(auction_external_id):\n    auction = AuctionsFinder.get_auction_by_external_id(auction_external_id)\n\n    if auction is None:\n        return APIErrorValue('Couldnt find auction').json(500)\n    \n    if AuctionsHandler.delete_auction(auction):\n        return redirect(url_for('admin_api.auctions_dashboard'))\n\n    else:\n        return render_template('admin/auctions/update_auction.html', auction=auction, error=\"Failed to delete auction!\")  # the template name had a typo (update_aution)\n\n\n# Members management\n@bp.route('/auctions/<auction_external_id>/participants', methods=['GET'])\n@allowed_roles(['admin'])\ndef auction_participants_dashboard(auction_external_id):\n    auction = AuctionsFinder.get_auction_by_external_id(auction_external_id)\n\n    if auction is None:\n        return APIErrorValue('Couldnt find auction').json(400)\n\n    not_participants = AuctionsFinder.get_not_participants(auction)\n\n    if len(auction.participants) == 0:\n        error = 'No results found'\n        return render_template('admin/auctions/auction_participants_dashboard.html', auction=auction, not_participants=not_participants, error=error)\n\n    return render_template('admin/auctions/auction_participants_dashboard.html', auction=auction, not_participants=not_participants, error=None)\n\n\n@bp.route('/auctions/<auction_external_id>/add-participant', methods=['POST'])\n@allowed_roles(['admin'])\ndef add_auction_participant(auction_external_id):\n    auction = AuctionsFinder.get_auction_by_external_id(auction_external_id)\n\n    if auction is None:\n        return APIErrorValue('Couldnt find auction').json(400)\n\n    company_external_id = request.form.get('company_external_id')\n\n    if company_external_id is None:\n        return redirect(url_for('admin_api.auction_participants_dashboard', auction_external_id=auction_external_id))\n\n    company = CompaniesFinder.get_from_external_id(company_external_id)\n\n    if company is None:\n        return APIErrorValue('Couldnt find company').json(400)\n\n    AuctionsHandler.add_auction_participant(auction, company)\n    \n    return redirect(url_for('admin_api.auction_participants_dashboard', auction_external_id=auction_external_id))\n","repo_name":"jose-correia/brain","sub_path":"jeec_brain/apps/admin_api/auctions/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
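The URL placeholders restored above use Flask's angle-bracket converter syntax; a self-contained sketch of the mechanism (a hypothetical app, not part of the jeec_brain codebase):

```python
from flask import Flask

app = Flask(__name__)

@app.route('/auctions/<auction_external_id>')
def get_auction(auction_external_id):
    # Flask captures the path segment and passes it as a keyword argument.
    return f'auction {auction_external_id}'
```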
+{"seq_id":"28609879155","text":"import math\r\nfrom math import inf\r\n\r\n\r\ndef distance(p1, p2):\r\n    return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\r\n\r\n\r\n\r\ndef shortest_d(input_x, input_y):\r\n\r\n    if len(input_x) <= 3:\r\n        # brute force over ALL pairs; comparing only consecutive points\r\n        # can miss the closest pair (e.g. points 0 and 2)\r\n        s_dis = inf\r\n        for i in range(len(input_x)-1):\r\n            for j in range(i+1, len(input_x)):\r\n                dis_tmp = distance(input_x[i], input_x[j])\r\n                if dis_tmp < s_dis:\r\n                    s_dis = dis_tmp\r\n        return round(s_dis,6)\r\n    else:\r\n        #Divide\r\n        half_n = len(input_x)//2\r\n        left_points = input_x[:half_n]\r\n        right_points = input_x[half_n:]\r\n\r\n        mid_x = input_x[half_n][0] #Line\r\n        \r\n        Qy,Ry = [], 
[]\r\n for point in input_y:\r\n if point[0] < mid_x:\r\n Qy.append(point)\r\n else:\r\n Ry.append(point)\r\n \r\n #Conquer\r\n l_s_t = shortest_d(left_points, Qy)\r\n r_s_t = shortest_d(right_points, Ry)\r\n\r\n #Combine\r\n delta = min(l_s_t, r_s_t) \r\n # x_bar = left_points[-1][0]\r\n\r\n #Remove the data >delta from Line\r\n #candidates in delta region\r\n candidates = []\r\n for point in input_y:\r\n if abs(point[0] - mid_x) < delta:\r\n candidates.append(point)\r\n\r\n\r\n\r\n #1 + 7\r\n for i in range(len(candidates)-1):\r\n j = i + 1\r\n while j < min(len(candidates), i + 7) and abs(candidates[i][1] - candidates[j][1]) < delta:\r\n dis_tmp = distance(candidates[i], candidates[j])\r\n if dis_tmp < delta:\r\n delta = dis_tmp\r\n j += 1\r\n\r\n return round(delta,6)\r\n\r\n\r\ndef main():\r\n n_sets = int(input())\r\n if n_sets>= 0:\r\n for n in range(n_sets):\r\n n_points = int(input())\r\n points = []\r\n for i in range(n_points):\r\n point = [float(v) for v in input().split()]\r\n\r\n points.append(point)\r\n\r\n #Sort\r\n input_x = sorted(points , key=lambda k: k[0]) \r\n input_y = sorted(points , key=lambda k: k[1]) \r\n # input_x, input_y = first_sort(points)\r\n output = shortest_d(input_x, input_y)\r\n print('%6f' % (output))\r\n\r\n\r\nmain()\r\n\r\n\r\n\r\n","repo_name":"xup6YJ/Algorithm","sub_path":"Closest Pair.py","file_name":"Closest Pair.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13065802924","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule with logic for currency value object\n\n\"\"\"\n\n__author__ = 'Samir Adrik'\n__email__ = 'samir.adrik@gmail.com'\n\nfrom source.util import InvalidCurrencyError, Assertor, Tracking\n\nfrom .value import Value\n\n\nclass Currency(Value):\n \"\"\"\n Implementation of currency value object\n\n \"\"\"\n\n @Tracking\n def validate_currency(self, currency: str):\n \"\"\"\n method for validating currency string\n\n Parameters\n ----------\n currency : str\n string to be validated\n\n \"\"\"\n if currency not in [\"kr\"]:\n raise InvalidCurrencyError(\n \"'{}' is an unsupported currency\".format(currency))\n\n def __init__(self, currency: str = \"kr\"):\n \"\"\"\n constructor / instantiating of class\n\n Parameters\n ----------\n currency : str\n currency string\n\n \"\"\"\n try:\n super().__init__()\n Assertor.assert_data_types([currency], [str])\n self.validate_currency(currency)\n self._currency = currency.lower()\n except Exception as currency_exception:\n raise currency_exception\n\n @property\n def currency(self):\n \"\"\"\n currency getter\n\n Returns\n -------\n out : str\n active currency\n\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, new_currency: str):\n \"\"\"\n currency setter\n\n Parameters\n ----------\n new_currency : str\n new currency to be set\n\n \"\"\"\n Assertor.assert_data_types([new_currency], [str])\n self.validate_currency(new_currency)\n self._currency = new_currency.lower()\n","repo_name":"seemir/stressa","sub_path":"source/domain/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4751768816","text":"import configparser\r\nimport getopt\r\nimport re\r\nimport sys\r\n\r\nfrom urllib.request import Request, urlopen\r\n\r\nNONLETTERS = \"[^a-zA-ZąćęłńóśźżĄĆĘŁŃÓŚŹŻ]\"\r\n\r\n\r\ndef nctm_to_xml(nctm_path):\r\n with open(nctm_path, 'r', 
encoding=\"utf-8\") as f:\r\n        # NOTE: the XML tag names below were lost in extraction; <document>/<paragraph>\r\n        # are placeholders consistent with the JSON response parsed further down,\r\n        # not the verified Gram schema.\r\n        xml = '<document>\n'\r\n        timestamps = []\r\n        for line in f:\r\n            line_timestamps = []\r\n            for timestamp in re.findall(r'\|.*?\|', line):\r\n                line_timestamps.append(timestamp[1:-1])\r\n            timestamps.append(line_timestamps)\r\n            simplified_line = re.sub(r'\|.*?\|', '', line)\r\n            xml += '<paragraph>' + re.sub(\"&\", \"&amp;\", simplified_line)[:-1] + '</paragraph>\n'  # escape ampersands for XML; the original replacement had collapsed to a no-op\r\n        xml += '</document>'\r\n    return xml.encode(), timestamps\r\n\r\n\r\ndef ask_gram_textphrases(xml):\r\n    config = configparser.ConfigParser()\r\n    config.read(\"params.ini\")\r\n    req = Request(\r\n        url=config[\"Gram\"][\"url\"] + \"textphrases\",\r\n        data=xml,\r\n        headers={'Content-Type': 'application/xml', \"Accept\": 'application/json'}\r\n    )\r\n    return urlopen(req).read()\r\n\r\n\r\ndef get_joined_phrase(phrases):\r\n    if type(phrases) != list:\r\n        phrases = [phrases]\r\n    joined_phrase = \"\"\r\n    for inner_phrase in phrases:\r\n        if inner_phrase[\"@type\"] == \"whitePhrase\":\r\n            joined_phrase += inner_phrase[\"whites\"]\r\n        elif inner_phrase[\"@type\"] == \"atomPhrase\":\r\n            joined_phrase += inner_phrase[\"atom\"][\"text\"]\r\n        elif inner_phrase[\"@type\"] == \"annotatedPhrase\":\r\n            joined_phrase += get_joined_phrase(inner_phrase[\"phrases\"])\r\n        else:\r\n            raise Exception(\"Unknown phrase type\")\r\n    return joined_phrase\r\n\r\n\r\ndef get_annotated_phrases(gram_response, timestamps):\r\n    import json  # json.loads instead of eval(): safer, and it handles true/false/null tokens\r\n    response_dict = json.loads(gram_response) if gram_response != b'null' else {\"paragraphs\": []}\r\n    result = {}\r\n    if type(response_dict[\"paragraphs\"]) != list:\r\n        response_dict[\"paragraphs\"] = [response_dict[\"paragraphs\"]]\r\n    for paragraph, line_timestamps in zip(response_dict[\"paragraphs\"], timestamps):\r\n        remove_next = True\r\n        if type(paragraph[\"phrases\"]) == list:\r\n            for phrase in paragraph[\"phrases\"]:\r\n                if phrase[\"@type\"] == \"atomPhrase\":\r\n                    if re.sub(NONLETTERS, \"\", phrase[\"atom\"][\"text\"]):\r\n                        if remove_next:\r\n                            del line_timestamps[:2]\r\n                        else:\r\n                            if phrase[\"atom\"][\"rightWhite\"] == \"true\" or phrase[\"atom\"][\"leftWhite\"] == \"true\":\r\n                                remove_next = True\r\n                    else:\r\n                        if phrase[\"atom\"][\"rightWhite\"] == \"false\" and phrase[\"atom\"][\"leftWhite\"] == \"false\":\r\n                            remove_next = False\r\n                        if phrase[\"atom\"][\"rightWhite\"] == \"true\" and phrase[\"atom\"][\"leftWhite\"] == \"true\":\r\n                            del line_timestamps[:2]\r\n                if phrase[\"@type\"] == \"annotatedPhrase\":\r\n                    joined_phrase = get_joined_phrase(phrase[\"phrases\"])\r\n                    count_phrases = len(joined_phrase.split())\r\n                    if phrase[\"annotation\"][\"phraseType\"][\"$\"] == \"Person\":\r\n                        phrase[\"start\"], phrase[\"end\"] = line_timestamps[0], line_timestamps[2*count_phrases - 1]\r\n                        result[str((line_timestamps[0], line_timestamps[2*count_phrases - 1]))] = joined_phrase\r\n                    del line_timestamps[:2*count_phrases]\r\n    return result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    input_nctm = \"\"\r\n    try:\r\n        opts, args = getopt.getopt(sys.argv[1:], \"i:\", [\"input_nctm=\"])\r\n    except getopt.GetoptError:\r\n        print(\"Usage: ask_gram_nctm.py -i test.txt\")\r\n        sys.exit(2)\r\n    for opt, arg in opts:\r\n        if opt == \"-i\":\r\n            input_nctm = arg\r\n\r\n    request, timestamps = nctm_to_xml(input_nctm)\r\n    response = ask_gram_textphrases(request)\r\n    print(get_annotated_phrases(response, timestamps))\r\n","repo_name":"smalec/ner-for-asr","sub_path":"ask_gram_nctm.py","file_name":"ask_gram_nctm.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4263114182","text":"from unittest 
import mock\n\nimport factory\nimport pytest\nfrom dateutil.parser import parse as dateutil_parse\nfrom django.conf import settings\n\nfrom datahub.company.test.factories import AdviserFactory\nfrom datahub.core.exceptions import APIConflictException\nfrom datahub.omis.order.constants import OrderStatus\nfrom datahub.omis.order.test.factories import (\n OrderFactory,\n OrderPaidFactory,\n OrderWithAcceptedQuoteFactory,\n)\nfrom datahub.omis.payment.constants import PaymentGatewaySessionStatus, PaymentMethod\nfrom datahub.omis.payment.govukpay import govuk_url, GOVUKPayAPIException\nfrom datahub.omis.payment.models import Payment, PaymentGatewaySession\nfrom datahub.omis.payment.test.factories import PaymentGatewaySessionFactory\n\n\n# mark the whole module for db use\npytestmark = pytest.mark.django_db\n\n\nclass TestPaymentManager:\n \"\"\"Tests for the Payment Manager.\"\"\"\n\n @mock.patch('datahub.omis.payment.managers.generate_datetime_based_reference')\n def test_create_from_order(\n self,\n mocked_generate_datetime_based_reference,\n ):\n \"\"\"Test that Payment.objects.create_from_order creates a payment.\"\"\"\n mocked_generate_datetime_based_reference.return_value = '201702010004'\n\n order = OrderPaidFactory()\n by = AdviserFactory()\n attrs = {\n 'transaction_reference': 'lorem ipsum',\n 'amount': 1001,\n 'received_on': dateutil_parse('2017-01-01').date(),\n }\n payment = Payment.objects.create_from_order(\n order=order, by=by, attrs=attrs,\n )\n\n payment.refresh_from_db()\n assert payment.reference == '201702010004'\n assert payment.created_by == by\n assert payment.order == order\n assert payment.transaction_reference == attrs['transaction_reference']\n assert payment.additional_reference == ''\n assert payment.amount == attrs['amount']\n assert payment.received_on == attrs['received_on']\n\n\nclass TestPaymentGatewaySessionManager:\n \"\"\"Tests for the Payment Gateway Session Manager.\"\"\"\n\n def test_create_first_session_from_order(self, requests_mock, monkeypatch):\n \"\"\"\n Test the successful creation of the first payment gateway session for an order.\n \"\"\"\n monkeypatch.setattr(\n 'uuid.uuid4',\n mock.Mock(return_value='0123abcd-0000-0000-0000-000000000000'),\n )\n\n # mock request\n govuk_payment_id = '123abc123abc123abc123abc12'\n govuk_payments_url = govuk_url('payments')\n requests_mock.post(\n govuk_payments_url,\n status_code=201,\n json={\n 'state': {'status': 'created', 'finished': False},\n 'payment_id': govuk_payment_id,\n '_links': {\n 'next_url': {\n 'href': 'https://payment.example.com/123abc',\n 'method': 'GET',\n },\n },\n },\n )\n\n assert PaymentGatewaySession.objects.count() == 0\n\n # call method\n adviser = AdviserFactory()\n order = OrderWithAcceptedQuoteFactory()\n session = PaymentGatewaySession.objects.create_from_order(\n order=order,\n attrs={'created_by': adviser},\n )\n\n # check session\n assert session.order == order\n assert session.status == PaymentGatewaySessionStatus.CREATED\n assert session.govuk_payment_id == govuk_payment_id\n assert session.created_by == adviser\n\n assert PaymentGatewaySession.objects.count() == 1\n\n # check mocked request\n assert requests_mock.call_count == 1\n assert requests_mock.request_history[-1].url == govuk_payments_url\n assert requests_mock.request_history[-1].json() == {\n 'amount': order.total_cost,\n 'reference': f'{order.reference}-0123ABCD',\n 'description': settings.GOVUK_PAY_PAYMENT_DESCRIPTION.format(\n reference=order.reference,\n ),\n 'return_url': 
settings.GOVUK_PAY_RETURN_URL.format(\n public_token=order.public_token,\n session_id=session.pk,\n ),\n }\n\n def test_create_cancels_other_sessions(self, requests_mock):\n \"\"\"\n Test that creating a new payment gateway session cancels\n the other ongoing sessions and GOV.UK payments.\n\n Given:\n - ongoing session 1\n - ongoing session 2\n - failed session 3\n\n Calling .create_from_order should:\n - cancel the GOV.UK payment related to session 1\n - update the payment gateway session 1 status to 'cancelled'\n\n - cancel the GOV.UK payment related to session 2\n - update the payment gateway session 2 status to 'cancelled'\n\n - start a new GOV.UK payment\n - create a payment gateway session related to it\n \"\"\"\n order = OrderWithAcceptedQuoteFactory()\n existing_data = PaymentGatewaySessionFactory.create_batch(\n 3,\n order=order,\n status=factory.Iterator([\n PaymentGatewaySessionStatus.CREATED,\n PaymentGatewaySessionStatus.STARTED,\n PaymentGatewaySessionStatus.FAILED,\n ]),\n )\n\n # mock GOV.UK requests used to:\n # - refresh the payment gateway sessions\n # - cancel the GOV.UK payments\n # - refresh the payment gateway sessions again after the cancellation\n for session in existing_data:\n requests_mock.get(\n govuk_url(f'payments/{session.govuk_payment_id}'),\n [\n # this is for the initial refresh\n {\n 'status_code': 200,\n 'json': {'state': {'status': session.status}},\n },\n # this is for the second refresh after cancelling\n {\n 'status_code': 200,\n 'json': {'state': {'status': 'cancelled'}},\n },\n ],\n )\n requests_mock.post(\n govuk_url(f'payments/{session.govuk_payment_id}/cancel'),\n status_code=204,\n )\n\n # mock GOV.UK request used to create a new payment session\n govuk_payment_id = '123abc123abc123abc123abc12'\n requests_mock.post(\n govuk_url('payments'),\n status_code=201,\n json={\n 'state': {'status': 'created', 'finished': False},\n 'payment_id': govuk_payment_id,\n '_links': {\n 'next_url': {\n 'href': 'https://payment.example.com/123abc',\n 'method': 'GET',\n },\n },\n },\n )\n\n assert PaymentGatewaySession.objects.count() == 3\n\n session = PaymentGatewaySession.objects.create_from_order(order=order)\n\n # check sessions cancelled\n for existing_session in existing_data[:-1]:\n existing_session.refresh_from_db()\n assert existing_session.status == PaymentGatewaySessionStatus.CANCELLED\n\n assert PaymentGatewaySession.objects.count() == 4\n\n # check session record created\n session.refresh_from_db()\n assert session.govuk_payment_id == govuk_payment_id\n\n # check mocked requests:\n # 2 refresh / 2 cancel - 2 refresh / 1 create\n assert requests_mock.call_count == (2 + 2 + 2 + 1)\n assert requests_mock.request_history[-1].json() == {\n 'amount': order.total_cost,\n 'reference': f'{order.reference}-{str(session.id)[:8].upper()}',\n 'description': settings.GOVUK_PAY_PAYMENT_DESCRIPTION.format(\n reference=order.reference,\n ),\n 'return_url': settings.GOVUK_PAY_RETURN_URL.format(\n public_token=order.public_token,\n session_id=session.id,\n ),\n }\n\n @pytest.mark.parametrize(\n 'disallowed_status', (\n OrderStatus.DRAFT,\n OrderStatus.QUOTE_AWAITING_ACCEPTANCE,\n OrderStatus.PAID,\n OrderStatus.COMPLETE,\n OrderStatus.CANCELLED,\n ),\n )\n def test_exception_if_order_in_disallowed_status(self, disallowed_status):\n \"\"\"\n Test that if the order is not in one of the allowed statuses, the method raises\n APIConflictException.\n \"\"\"\n assert PaymentGatewaySession.objects.count() == 0\n\n order = OrderFactory(status=disallowed_status)\n\n with 
pytest.raises(APIConflictException):\n PaymentGatewaySession.objects.create_from_order(order)\n\n # test no session created\n assert PaymentGatewaySession.objects.count() == 0\n\n def test_exception_if_refresh_updates_order_status_to_paid(self, requests_mock):\n \"\"\"\n Test that if the system is not up-to-date, the order is in quote_accepted\n but the GOV.UK payment happens, the method triggers a check on existing\n sessions, realises that one finished successfully and records the payment\n marking the order as 'paid'.\n For this reason, the method raises APIConflictException as no other payment can be started.\n \"\"\"\n # set up db\n order = OrderWithAcceptedQuoteFactory()\n existing_session = PaymentGatewaySessionFactory(\n order=order,\n status=PaymentGatewaySessionStatus.STARTED,\n )\n\n # mock GOV.UK requests used to refresh the payment session,\n # GOV.UK Pay says that the payment completed successfully\n requests_mock.get(\n govuk_url(f'payments/{existing_session.govuk_payment_id}'),\n status_code=200,\n json={\n 'amount': order.total_cost,\n 'state': {'status': 'success'},\n 'email': 'email@example.com',\n 'created_date': '2018-02-13T14:56:56.734Z',\n 'reference': '12345',\n 'card_details': {\n 'last_digits_card_number': '1111',\n 'cardholder_name': 'John Doe',\n 'expiry_date': '01/20',\n 'billing_address': {\n 'line1': 'line 1 address',\n 'line2': 'line 2 address',\n 'postcode': 'SW1A 1AA',\n 'city': 'London',\n 'country': 'GB',\n },\n 'card_brand': 'Visa',\n },\n },\n )\n\n with pytest.raises(APIConflictException):\n PaymentGatewaySession.objects.create_from_order(order)\n\n # check session record\n existing_session.refresh_from_db()\n assert existing_session.status == PaymentGatewaySessionStatus.SUCCESS\n\n # check order and payment\n order.refresh_from_db()\n assert order.status == OrderStatus.PAID\n\n assert Payment.objects.count() == 1\n payment = Payment.objects.first()\n\n assert payment.amount == order.total_cost\n assert payment.method == PaymentMethod.CARD\n assert payment.received_on == dateutil_parse('2018-02-13').date()\n assert payment.transaction_reference == '12345'\n assert payment.cardholder_name == 'John Doe'\n assert payment.billing_address_1 == 'line 1 address'\n assert payment.billing_address_2 == 'line 2 address'\n assert payment.billing_address_town == 'London'\n assert payment.billing_address_postcode == 'SW1A 1AA'\n assert payment.billing_address_country == 'GB'\n assert payment.billing_email == 'email@example.com'\n assert payment.card_brand == 'Visa'\n\n @pytest.mark.parametrize('govuk_status_code', (400, 401, 422, 500))\n def test_exception_if_govuk_pay_errors_when_creating(\n self, govuk_status_code, requests_mock,\n ):\n \"\"\"\n Test that if GOV.UK Pay errors whilst creating a new payment, the method raises\n GOVUKPayAPIException.\n\n Possible GOV.UK Pay errors:\n - 400 - BAD REQUEST\n - 401 - UNAUTHORIZED\n - 422 - UNPROCESSABLE ENTITY\n - 500 - INTERNAL SERVER ERROR\n \"\"\"\n requests_mock.post(\n govuk_url('payments'),\n status_code=govuk_status_code,\n )\n\n assert PaymentGatewaySession.objects.count() == 0\n\n order = OrderWithAcceptedQuoteFactory()\n\n with pytest.raises(GOVUKPayAPIException):\n PaymentGatewaySession.objects.create_from_order(order)\n\n assert PaymentGatewaySession.objects.count() == 0\n\n @pytest.mark.parametrize('govuk_status_code', (400, 401, 404, 409, 500))\n def test_exception_if_govuk_pay_errors_when_cancelling(\n self, govuk_status_code, requests_mock,\n ):\n \"\"\"\n Test that if GOV.UK Pay errors whilst cancelling 
some other ongoing\n        sessions/payments, the method raises GOVUKPayAPIException to keep the system consistent.\n\n        Possible GOV.UK Pay errors when cancelling:\n            - 400 - BAD REQUEST\n            - 401 - UNAUTHORIZED\n            - 404 - NOT FOUND\n            - 409 - CONFLICT\n            - 500 - INTERNAL SERVER ERROR\n        \"\"\"\n        order = OrderWithAcceptedQuoteFactory()\n        existing_session = PaymentGatewaySessionFactory(\n            order=order,\n            status=PaymentGatewaySessionStatus.CREATED,\n        )\n\n        # mock GOV.UK requests used to\n        # - refresh the existing payment gateway session\n        # - cancel the GOV.UK payment\n        requests_mock.get(\n            govuk_url(f'payments/{existing_session.govuk_payment_id}'),\n            status_code=200,\n            json={\n                'state': {'status': existing_session.status},\n            },\n        )\n        requests_mock.post(\n            govuk_url(f'payments/{existing_session.govuk_payment_id}/cancel'),\n            status_code=govuk_status_code,\n        )\n\n        assert PaymentGatewaySession.objects.count() == 1\n\n        with pytest.raises(GOVUKPayAPIException):\n            PaymentGatewaySession.objects.create_from_order(order)\n\n        assert PaymentGatewaySession.objects.count() == 1\n\n    def test_ongoing(self):\n        \"\"\"\n        Test that given:\n            session 1 - order 1 - status created\n            session 2 - order 1 - status submitted\n            session 3 - order 1 - status failed\n            session 4 - order 2 - status started\n            session 5 - order 2 - status success\n            session 6 - order 2 - status cancelled\n\n        the method .ongoing() on the queryset only returns the sessions\n        which are considered not finished.\n        \"\"\"\n        order1, order2 = OrderWithAcceptedQuoteFactory.create_batch(2)\n\n        order1_sessions = PaymentGatewaySessionFactory.create_batch(\n            3,\n            order=order1,\n            status=factory.Iterator([\n                PaymentGatewaySessionStatus.CREATED,\n                PaymentGatewaySessionStatus.SUBMITTED,\n                PaymentGatewaySessionStatus.FAILED,\n            ]),\n        )\n        order2_sessions = PaymentGatewaySessionFactory.create_batch(\n            3,\n            order=order2,\n            status=factory.Iterator([\n                PaymentGatewaySessionStatus.STARTED,\n                PaymentGatewaySessionStatus.SUCCESS,\n                PaymentGatewaySessionStatus.CANCELLED,\n            ]),\n        )\n\n        # test qs without filters\n        qs = PaymentGatewaySession.objects.ongoing()\n        assert set(qs.values_list('id', flat=True)) == {\n            order1_sessions[0].id,\n            order1_sessions[1].id,\n            order2_sessions[0].id,\n        }\n\n        # test qs with order filter\n        qs = PaymentGatewaySession.objects.filter(order=order1).ongoing()\n        assert set(qs.values_list('id', flat=True)) == {\n            order1_sessions[0].id,\n            order1_sessions[1].id,\n        }\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/omis/payment/test/test_managers.py","file_name":"test_managers.py","file_ext":"py","file_size_in_byte":16049,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"}
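The pattern used throughout those tests — passing a list of response dicts so that successive calls to the same URL return different payloads — is worth isolating; a minimal sketch (assumes the `requests` and `requests-mock` packages; the URL is made up):

```python
import requests
import requests_mock

with requests_mock.Mocker() as m:
    # Each dict is consumed in order: first call -> 'started', second -> 'cancelled'.
    m.get(
        'https://pay.example.com/v1/payments/abc',
        [
            {'status_code': 200, 'json': {'state': {'status': 'started'}}},
            {'status_code': 200, 'json': {'state': {'status': 'cancelled'}}},
        ],
    )
    assert requests.get('https://pay.example.com/v1/payments/abc').json()['state']['status'] == 'started'
    assert requests.get('https://pay.example.com/v1/payments/abc').json()['state']['status'] == 'cancelled'
```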
+{"seq_id":"30622144507","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 30 13:22:59 2017\r\n\r\n@author: Charles\r\n\"\"\"\r\nimport glob, re, os\r\n\r\nfor filename in glob.glob(r'F:\DS-main\Kaggle-main\Carvana Image Masking Challenge\input\New folder'):\r\n    new_name = re.sub(\"\", r'\1_\2\3', filename)\r\n    os.rename(filename, new_name)\r\n    \r\n","repo_name":"charlesjansen/Kaggle-Carvana-Image-Masking-Challenge","sub_path":"old/untitled1.py","file_name":"untitled1.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23863992857","text":"\"\"\"\nQuick and simple description\n\nThis module is just a test to see how help() behaves with docstrings. I am studying Python\nto one day become a professional programmer, which is my biggest dream right now; in the\nfuture I want to buy an apartment for my parents and one of my own to raise a family, and\nalso to help my siblings. Today is 01/08/2022 and I am chasing that dream, even though\ntoday is a hard day: anxiety and depression are making it difficult to keep going after\nmy goals.\n\n\"\"\"\n\nvariavel_1 = 'valor 1'\n\n# the first line of a function must be its documentation\ndef foma(x, y):\n    \"\"\"adds x and y \"\"\"\n    return x + y\n\ndef multiplica(x, y, z=None):\n    \"\"\"Multiplies x, y, z\n\n    Multiplies x, y and z. The caller may omit z when it is not needed.\n    \"\"\"\n    if z is None:  # the original branch was inverted, multiplying by None when z was omitted\n        return x * y\n    return x * y * z\n\nvariavel_2 = 'valor 2'\nvariavel_3 = 'valor 3'\nvariavel_4 = 'valor 4'\n","repo_name":"diogolimalucasdev/PythonNovaJornada","sub_path":"aula122_DocStrings/funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"38742269793","text":"import argparse\nimport os\nfrom PIL import Image\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\nimport YodaModel as yoda\nimport train\nfrom torch.utils.data import DataLoader, Dataset\nimport time as t\n# import custom_dataset as custData\n\nimport cv2\nfrom KittiDataset import KittiDataset\nfrom KittiAnchors import Anchors\n\nmax_ROIs = -1\n\n\nif __name__ == '__main__':\n\n    device = 'cpu'\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('-model_file', type=str, help='model weight file')\n    parser.add_argument('-cuda', type=str, help='[y/N]')\n    parser.add_argument('-i', metavar='input_dir', type=str, help='input dir (./)')\n    parser.add_argument('-o', metavar='output_dir', type=str, help='output dir (./)')\n    parser.add_argument('-IoU', metavar='IoU_threshold', type=float, help='[0.02]')\n    parser.add_argument('-d', metavar='display', type=str, help='[y/N]')\n    parser.add_argument('-m', metavar='mode', type=str, help='[train/test]')\n    parser.add_argument('-v', action='store_true', help='verbose output')  # type=bool would treat any non-empty string as True\n\n    opt = parser.parse_args()\n\n    if torch.cuda.is_available() and opt.cuda == 'Y':\n        if torch.cuda.device_count() > 1:\n            device = torch.device(\"cuda:1\")\n        else:\n            device = torch.device(\"cuda:0\")\n    else:\n        device = torch.device(\"cpu\")\n    print(\"Using device: {}\".format(device))\n\n    input_dir = None\n    if opt.i != None:\n        input_dir = opt.i\n\n    output_dir = None\n    if opt.o != None:\n        output_dir = opt.o\n\n    IoU_threshold = 0.02\n    if opt.IoU != None:\n        IoU_threshold = float(opt.IoU)\n\n    show_images = False\n    if opt.d != None:\n        if opt.d == 'y' or opt.d == 'Y':\n            show_images = True\n\n    training = True\n    if opt.m == 'test':\n        training = False\n\n    train_transform = transforms.Compose([transforms.ToTensor(), transforms.Resize((150, 150))])\n\n    batch_size = 48 # 48 ROIs generated per image\n\n    # Load kitti data\n    dataset = KittiDataset(input_dir, training=training)\n    anchors = Anchors()\n\n    # Load saved yodamodel\n    model_file = opt.model_file\n    model = yoda.model()\n    model.load_state_dict(torch.load(opt.model_file))\n    model.to(device=device)\n    model.eval()\n\n    print('model loaded OK!')\n    print(\"Using device: {}\".format(device))\n    all_mean_IoU = []\n    out_tensor = None\n    print(\"Starting Test\")\n    for item in enumerate(dataset):\n        idx = item[0]\n        if opt.v:\n            print(\" 
{}/{}\".format(idx, len(dataset)))\n # Original Kitti img\n image = item[1][0]\n label = item[1][1]\n # Get car label indx\n idx = dataset.class_label['Car']\n # Get all of the CAR ROIS for this img Truth ones\n car_ROIs = dataset.strip_ROIs(class_ID=idx, label_list=label)\n anchor_centers = anchors.calc_anchor_centers(image.shape, anchors.grid)\n # Generate ROIs for each image\n ROIs, boxes = anchors.get_anchor_ROIs(image, anchor_centers, anchors.shapes)\n if opt.v:\n print(\"Created all ROIs\".format())\n # Convert ROIS to a tensor\n transformed_ROI = []\n for idx, item in enumerate(ROIs):\n temp_ROI = train_transform(item)\n transformed_ROI.append(temp_ROI)\n tensor_ROI = torch.stack(transformed_ROI)\n\n with torch.no_grad():\n if opt.v:\n print(\"Putting ROI's through model\".format())\n output = model(tensor_ROI)\n\n # Rounding results, putting answers to 1 or 0\n output_rounded = torch.round(output)\n ROI_IoUs = []\n bound_boxes = []\n # Iterate through all outputs\n if opt.v:\n print(\"ROIS classified as 'Car': \".format())\n for idx, item in enumerate(output_rounded):\n # Find the ones that predicted 'Car'\n\n if torch.argmax(item) == 1:\n if opt.v:\n print(\"Index: {}, Box: {}\".format(idx, boxes[idx]))\n # Get the IoU of this box from the CarROIs\n curr_roi_max_IoU = anchors.calc_max_IoU(boxes[idx], car_ROIs)\n ROI_IoUs += [curr_roi_max_IoU]\n if curr_roi_max_IoU >= IoU_threshold:\n bound_boxes.append(boxes[idx])\n print(\"Printing all IoUs for each ROI of this image\")\n for idx, iou in enumerate(ROI_IoUs):\n print(\"ROI {}, IoU calculations: {}\".format(idx, ROI_IoUs[idx]))\n\n curr_image_mean_IoU = 0\n if len(ROI_IoUs) != 0:\n curr_image_mean_IoU = sum(ROI_IoUs)/len(ROI_IoUs)\n # sum_IoU = 0\n all_mean_IoU.append(curr_image_mean_IoU)\n print(\"Mean IoU {}\".format(curr_image_mean_IoU))\n\n if show_images:\n image2 = image.copy()\n\n for box in bound_boxes:\n pt1 = (box[0][1], box[0][0])\n pt2 = (box[1][1], box[1][0])\n cv2.rectangle(image2, pt1, pt2, color=(0, 255, 255))\n\n if show_images:\n cv2.imshow('boxes', image2)\n key = cv2.waitKey(0)\n if key == ord('x'):\n break\n\n global_mean_IoU = sum(all_mean_IoU)/len(dataset)\n print(\"Final global mean IoU is: {}\".format(global_mean_IoU))\n","repo_name":"Krsikapa00/ELEC475-Labs","sub_path":"Lab4/Submission_files/YodaTest.py","file_name":"YodaTest.py","file_ext":"py","file_size_in_byte":5269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13554686216","text":"\"\"\"\nModule with the class for Syslog message transform\n\"\"\"\n\n# More info in RFC 3164: https://tools.ietf.org/html/rfc3164\nFACILITY_KERN = 0 # kernel messages\nFACILITY_USER = 1 # user-level messages\nFACILITY_MAIL = 2 # mail system\nFACILITY_DAEMON = 3 # system daemons\nFACILITY_AUTH = 4 # security/authorization messages\nFACILITY_SYSLOG = 5 # messages generated internally by syslog\nFACILITY_LPR = 6 # line printer subsystem\nFACILITY_NEWS = 7 # network news subsystem\nFACILITY_UUCP = 8 # UUCP subsystem\nFACILITY_CLOCK = 9 # clock daemon\nFACILITY_AUTHPRIV = 10 # security/authorization messages\nFACILITY_FTP = 11 # FTP daemon\nFACILITY_NTP = 12 # NTP subsystem\nFACILITY_LOG_AUDIT = 13 # log audit\nFACILITY_LOG_ALERT = 14 # log alert\nFACILITY_CRON = 15 # scheduling daemon\nFACILITY_LOCAL0 = 16 # local use 0 (local0)\nFACILITY_LOCAL1 = 17 # local use 1 (local1)\nFACILITY_LOCAL2 = 18 # local use 2 (local2)\nFACILITY_LOCAL3 = 19 # local use 3 (local3)\nFACILITY_LOCAL4 = 20 # local use 4 
(local4)\nFACILITY_LOCAL5 = 21 # local use 5 (local5)\nFACILITY_LOCAL6 = 22 # local use 6 (local6)\nFACILITY_LOCAL7 = 23 # local use 7 (local7)\n# More info in RFC 5424: https://tools.ietf.org/html/rfc5424\nSEVERITY_EMERG = 0 # System is unusable\nSEVERITY_ALERT = 1 # Should be corrected immediately\nSEVERITY_CRIT = 2 # Critical conditions\nSEVERITY_ERROR = 3 # Error conditions\nSEVERITY_WARN = 4 # May indicate that an error will occur if action is\n# not taken.\nSEVERITY_NOTICE = 5 # Events that are unusual, but not error conditions.\nSEVERITY_INFO = 6 # Normal operational messages that require no action.\nSEVERITY_DEBUG = 7 # Information useful to developers for debugging\n# the application.\nCOMPOSE = \"%s%s\"\nCOMPOSE_BYTES = b\"%s%s\"\nFORMAT_MY = \"<%d>%s %s %s: \" # Not \\000\nFORMAT_MY_BYTES = b\"<%d>%s %s %s: \" # Not \\000\n\n# logging.handler translator to Sender codes\nfacility_names = {\n \"auth\": FACILITY_AUTH,\n \"authpriv\": FACILITY_AUTHPRIV,\n \"cron\": FACILITY_CRON,\n \"daemon\": FACILITY_DAEMON,\n \"ftp\": FACILITY_FTP,\n \"kern\": FACILITY_KERN,\n \"lpr\": FACILITY_LPR,\n \"mail\": FACILITY_MAIL,\n \"news\": FACILITY_NEWS,\n \"security\": FACILITY_AUTH, # DEPRECATED\n \"syslog\": FACILITY_SYSLOG,\n \"user\": FACILITY_USER,\n \"uucp\": FACILITY_UUCP,\n \"local0\": FACILITY_LOCAL0,\n \"local1\": FACILITY_LOCAL1,\n \"local2\": FACILITY_LOCAL2,\n \"local3\": FACILITY_LOCAL3,\n \"local4\": FACILITY_LOCAL4,\n \"local5\": FACILITY_LOCAL5,\n \"local6\": FACILITY_LOCAL6,\n \"local7\": FACILITY_LOCAL7,\n}\n\npriority_map = {\n \"EMERG\": SEVERITY_EMERG,\n \"ALERT\": SEVERITY_ALERT,\n \"CRITICAL\": SEVERITY_CRIT,\n \"CRIT\": SEVERITY_CRIT,\n \"ERROR\": SEVERITY_ERROR,\n \"ERR\": SEVERITY_ERROR,\n \"WARNING\": SEVERITY_WARN,\n \"WARN\": SEVERITY_WARN, # DEPRECATED\n \"NOTICE\": SEVERITY_NOTICE,\n \"INFO\": SEVERITY_INFO,\n \"DEBUG\": SEVERITY_DEBUG,\n}\n","repo_name":"DevoInc/python-sdk","sub_path":"devo/sender/transformsyslog.py","file_name":"transformsyslog.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"81"} +{"seq_id":"20874754565","text":"from django import forms\nfrom .models import UserInfo\n\n\nclass NewPostForm(forms.Form):\n post = forms.CharField(label=\"\",\n max_length=1000,\n min_length=1,\n widget=forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'placeholder': 'What is on your mind?', }))\n\n\nclass CommentForm(forms.Form):\n comment = forms.CharField(label=\"\",\n max_length=1000,\n min_length=1,\n widget=forms.Textarea(attrs={'class': 'form-control',\n 'placeholder': 'Comment',\n 'rows': 1, }))\n\n\nclass ProfileInfoForm(forms.ModelForm):\n date_of_birth = forms.CharField(required=False, label='Date of birth')\n live_in = forms.CharField(required=False, label='I live in')\n phone = forms.CharField(required=False, label='Phone number')\n sex = forms.ChoiceField(choices=[('male', 'Male'), ('female', 'Female')], required=False, label=\"I'm\")\n email = forms.BooleanField(required=False, label=\"Share email?\")\n\n class Meta:\n model = UserInfo\n fields = ['date_of_birth', 'live_in', 'phone', 'sex', 'email']\n widgets = {\n 'email': forms.CheckboxInput(),\n }\n\n\nclass ChatForm(forms.Form):\n message = forms.CharField(label=\"\",\n max_length=1000,\n min_length=1,\n widget=forms.Textarea(attrs={'rows': 2,\n 'placeholder': 'Aa', 
}))\n","repo_name":"nvast/Social_media","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26026481081","text":"from typing import Optional\n\nfrom dstack._internal.api import workflow_api\nfrom dstack._internal.api.artifacts import (\n download_artifact_files_backend,\n upload_artifact_files_backend,\n upload_artifact_files_from_tag_backend,\n)\nfrom dstack._internal.api.runs import get_tagged_run_name_backend\n\n\ndef upload(local_path: str, artifact_path: Optional[str] = None, tag: Optional[str] = None):\n \"\"\"Uploads files located at `local_path` as the artifacts of the current run.\n If `tag` is specified, uploads the files as the artifacts of the tag instead.\n By default, artifact files saved under the same path as `local_path`.\n The `artifact_path` parameter can be used to specify a different artifact path.\n\n Examples:\n ```python\n from dstack import artifacts\n\n # Uploads local_path as an artifact of the current run\n artifacts.upload(local_path=\"datasets/dataset1\")\n\n # Uploads local_path as an artifact of a new run tagged as my_tag and saves it as artifact_path\n artifacts.upload(local_path=\"datasets/dataset1\", artifact_path=\"data\", tag=\"my_tag\")\n ```\n\n :param local_path: The local path to upload the files from\n :type local_path: str\n :param artifact_path: The path under which the files will be stored\n :type artifact_path: Optional[str]\n :param tag: The tag to assign the artifacts, defaults to None\n :type tag: Optional[str]\n :raises ArtifactsUploadError: Raises if cannot upload the artifacts\n :raises DstackError: The base exception for all dstack errors\n \"\"\"\n if artifact_path is None:\n artifact_path = local_path\n backend = workflow_api.get_current_backend()\n repo_id = workflow_api.get_current_repo_id()\n job_id = workflow_api.get_current_job_id()\n if tag is None:\n upload_artifact_files_backend(\n backend=backend,\n repo_id=repo_id,\n job_id=job_id,\n local_path=local_path,\n artifact_path=artifact_path,\n )\n return\n job = workflow_api.get_current_job()\n upload_artifact_files_from_tag_backend(\n backend=backend,\n repo=job.repo,\n hub_user_name=job.hub_user_name,\n local_path=local_path,\n artifact_path=artifact_path,\n tag_name=tag,\n )\n\n\ndef download(\n run: Optional[str] = None,\n tag: Optional[str] = None,\n artifact_path: Optional[str] = None,\n local_path: Optional[str] = None,\n):\n \"\"\"Downloads artifact files of a run or a tag.\n The files are downloaded from `artifact_path` to `local_path`.\n By default, downloads all the files and saves them to the current directory.\n\n Examples:\n ```python\n from dstack import artifacts\n\n # Downloads all artifact files of a run\n artifacts.download(run=\"sharp-shrimp-1\")\n\n # Downloads artifact files from artifact_path and saves them to local_path\n artifacts.download(tag=\"my_tag\", artifact_path=\"output/my_model\", local_path=\"./my_model\")\n ```\n\n :param run: The run to download the artifacts from\n :type run: Optional[str]\n :param tag: The tag to download the artifacts from\n :type tag: Optional[str]\n :param artifact_path: The path to artifact files to download, defaults to \"\"\n :type artifact_path: Optional[str]\n :param local_path: The local path to save the files to, defaults to \".\"\n :type local_path: Optional[str]\n :raises ArtifactsDownloadError: Raises if cannot download the artifacts\n :raises DstackError: The base 
exception for all dstack errors\n    \"\"\"\n    if artifact_path is None:\n        artifact_path = \"\"\n        if local_path is None:  # only default local_path when the caller did not set it\n            local_path = \".\"\n    if local_path is None:\n        local_path = artifact_path\n    backend = workflow_api.get_current_backend()\n    repo_id = workflow_api.get_current_repo_id()\n    run_name, _ = get_tagged_run_name_backend(\n        backend=backend,\n        repo_id=repo_id,\n        run_name=run,\n        tag_name=tag,\n    )\n    download_artifact_files_backend(\n        backend=backend,\n        repo_id=repo_id,\n        run_name=run_name,\n        source=artifact_path,\n        target=local_path,\n    )\n","repo_name":"silvacarl2/dstack","sub_path":"cli/dstack/artifacts/_artifacts.py","file_name":"_artifacts.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"36685641053","text":"import sys\nfrom PySide6.QtGui import *\nfrom PySide6.QtCore import *\nfrom PySide6.QtWidgets import *\n\ndata = (['1', '张三', '男'], ['2', '张四', '男'], ['3', '王五', '男'], ['4', '虎妞', '女'], ['5', '刘明', '男'], ['1', '张三', '男'],\n        ['2', '张四', '男'], ['3', '王五', '男'], ['4', '虎妞1', '女'], ['5', '刘1明', '男'])\n\n\nclass TableDome(QWidget):\n    def __init__(self):\n        super(TableDome, self).__init__()\n        self.initUI()\n\n    def initUI(self):\n        self.setWindowTitle('QTableWidget example')\n        self.resize(400, 300)\n        conLayout = QHBoxLayout()\n        tableWidget = QTableWidget()  # create the table widget\n        tableWidget.setRowCount(15)  # 15 rows (the original comment said 5)\n        tableWidget.setColumnCount(3)  # 3 columns\n        tableWidget.setShowGrid(True)  # show grid lines\n        tableWidget.setHorizontalHeaderLabels(['Student ID', 'Name', 'Gender'])  # set the header labels\n        # tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)  # stretch columns to fit\n        # tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)  # disable editing\n        # tableWidget.resizeColumnsToContents()  # fit column widths to contents\n        # tableWidget.resizeRowsToContents()  # fit row heights to contents\n        # tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)  # select whole rows\n        # tableWidget.verticalHeader().setVisible(False)  # hide the vertical header\n        # tableWidget.horizontalHeader().setVisible(False)  # hide the horizontal header\n\n        # populate the table\n        for num, i in enumerate(data):\n            for col_num, col in enumerate(i):\n                tableWidget.setItem(num, col_num, QTableWidgetItem(col))  # insert one cell\n        tableWidget.setAlternatingRowColors(True)  # alternate row colours\n        conLayout.addWidget(tableWidget)\n        self.setLayout(conLayout)\n        item = tableWidget.findItems('刘1明', Qt.MatchExactly)\n        item = item[0]\n        row = item.row()\n        item.setForeground(QBrush(QColor(255, 0, 0)))\n        print(row, 'item:', item)\n        tableWidget.verticalScrollBar().setSliderPosition(row)\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    example = TableDome()\n    example.show()\n    sys.exit(app.exec())\n","repo_name":"tainyuhong/pyside_study1","sub_path":"stu/stu_Qtablewidget1.py","file_name":"stu_Qtablewidget1.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
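`findItems` above does an exact-text lookup; Qt also offers partial-match flags, which the record does not use. A small self-contained sketch (hypothetical two-row table, just to show the flag difference):

```python
import sys
from PySide6.QtWidgets import QApplication, QTableWidget, QTableWidgetItem
from PySide6.QtCore import Qt

app = QApplication(sys.argv)
table = QTableWidget(2, 1)
table.setItem(0, 0, QTableWidgetItem('刘明'))
table.setItem(1, 0, QTableWidgetItem('刘1明'))

# MatchContains finds every cell whose text contains the query,
# unlike the MatchExactly lookup used in the record above.
matches = table.findItems('刘', Qt.MatchContains)
print([(m.row(), m.text()) for m in matches])  # [(0, '刘明'), (1, '刘1明')]
```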
+{"seq_id":"72890906186","text":"from collections import deque\n\n\ndef dfs(g, v, visited):\n    visited[v] = True  # mark the node once we visit it\n    print(v, end=' ')\n\n    for i in g[v]:  # among the nodes connected to the current one\n        if not visited[i]:  # if one has not been visited yet\n            dfs(g, i, visited)  # keep searching from that node\n\n\n\ndef bfs(g, v, visited):\n    q = deque([v])  # put the start node on the queue\n\n    visited[v] = True  # visit the start node\n\n    while q:\n        v = q.popleft()  # pop the oldest queued node\n        print(v, end=' ')\n        for i in g[v]:  # among all nodes connected to it\n            if not visited[i]:  # any node not yet visited\n                q.append(i)  # goes on the queue\n                visited[i] = True  # and is marked visited\n        # once every neighbour of the current node is queued, pop the earliest queued node\n\n\n\n\n\nqty, n, start = map(int, input().split())\n\n# initialise the graph and the visited list\ngraph = [[] for _ in range(qty+1)]\nvisi = [False for _ in range(qty+1)]\n\n# read the edges into the graph\nfor _ in range(n):\n    i, node = map(int, input().split())\n    graph[i].append(node)\n    graph[node].append(i)\n\n# sort each adjacency list in ascending order\nfor i in range(qty+1):\n    graph[i].sort()\n\n\ndfs(graph, start, visi)\nvisi = [False for _ in range(qty+1)]\nprint()\nbfs(graph, start, visi)\n","repo_name":"Dam0305/Algorithm","sub_path":"BOJ/1260_DFS와 BFS.py","file_name":"1260_DFS와 BFS.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
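The script above reads its graph from stdin; to make the two traversal orders concrete, here is a self-contained run on a hypothetical 4-node graph laid out the same way:

```python
from collections import deque

# 1-indexed adjacency lists, as the script builds them: 1-2, 1-3, 2-4, 3-4.
graph = [[], [2, 3], [1, 4], [1, 4], [2, 3]]

order, visited = [], [False] * 5
def dfs(v):
    visited[v] = True
    order.append(v)
    for w in graph[v]:
        if not visited[w]:
            dfs(w)

dfs(1)
print(order)  # [1, 2, 4, 3] - depth-first goes as deep as it can before backtracking

order, visited = [], [False] * 5
q = deque([1])
visited[1] = True
while q:
    v = q.popleft()
    order.append(v)
    for w in graph[v]:
        if not visited[w]:
            q.append(w)
            visited[w] = True
print(order)  # [1, 2, 3, 4] - breadth-first visits neighbours level by level
```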
+{"seq_id":"33392946726","text":"import pygame\n\n# defining variables to easily call \nWIDTH, HEIGHT = 900, 500\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Hunt The Wumpus\")\n\nWHITE = (255, 255, 255)\n\nFPS = 60\n\n# function to set the colour of the window\ndef draw_window():\n\tWIN.fill(WHITE)\n\tpygame.display.update()\n\n# function that controls most gameplay\ndef main():\n\tclock = pygame.time.Clock()\n\trun = True\n\twhile run:\n\t\t# caps FPS at defined value\n\t\tclock.tick(FPS)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trun = False\n\n\t\tdraw_window()\n\n\tpygame.quit()\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"andrewheld616/huntTheWumpus","sub_path":"htw.py","file_name":"htw.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"25306507968","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 4 17:39:53 2019\r\n\r\n@author: cheerag.verma\r\n\"\"\"\r\n\r\nn = int(input())\r\nif n%6 == 0 or n%6 == 1 or n%6 == 3:\r\n    print(\"YES\", end=\"\")\r\nelse:\r\n    print(\"NO\", end=\"\")\r\n\r\n","repo_name":"iamcheerag/NPTEL-JOY-OF-COMPUTING","sub_path":"week10/Programming Assignment-2 Jumps.py","file_name":"Programming Assignment-2 Jumps.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"30323204634","text":"#Chase Williams\n#Lab 4\nimport pygame\nimport math\nimport time\nimport math3d\n\nclass Curve(object):\n    def __init__(self):\n        self.CPList = [] #Add main points\n        self.TList = [] #Tangent list\n        self.DrawList = []\n        self.Resolution = 10 #number of points between control points\n\n    def addCP(self, p):\n        self.CPList.append(p)\n#        self.TList.append()#some random value for this number)\n#        self.genDraw()\n\n    def genDraw(self): #Generates the Hermite points to draw between control points\n        # assumes TList holds one tangent per control point (see the commented addCP line)\n        for i in range(len(self.CPList) - 1):\n            p0 = self.CPList[i]\n            p1 = self.CPList[i + 1]\n            t0 = self.TList[i]\n            t1 = self.TList[i + 1]\n            u = 0.0\n            for j in range(self.Resolution):\n                # Hermite basis functions; the original lines were missing the '*'\n                # operators, so expressions like 2(u ** 3) tried to call the integer 2\n                a = 2 * (u ** 3) - 3 * (u ** 2) + 1\n                b = u ** 3 - 2 * (u ** 2) + u\n                c = -2 * (u ** 3) + 3 * (u ** 2)\n                d = u ** 3 - u ** 2\n                p = a * p0 + b * t0 + c * p1 + d * t1\n                self.DrawList.append(p)\n                u += 1 / (self.Resolution + 1)\n\n    def Render(self): #renders objects to the screen\n        pygame.draw.circle(window, (255, 255, 255), self.CPList[-1], 10)\n        if len(self.CPList) > 1:\n            pygame.draw.lines(window, (255, 0, 0), False, self.CPList)  # one call is enough; the old loop redrew the same lines Resolution times\n\npygame.init()\n\nwindow = pygame.display.set_mode((800,600))\nCP = Curve()\ndone = False\nwhile not done:\n    #mBut = pygame.mouse.get_pressed()\n    mPos = math3d.VectorN((pygame.mouse.get_pos()))\n    evtList = pygame.event.get()\n    for evt in evtList:\n        if evt.type == pygame.QUIT:\n            done = True\n        if evt.type == pygame.MOUSEBUTTONDOWN: #when mouse is clicked, add mouse pos to CPList\n            CP.CPList.append(mPos.iTuple())\n            CP.Render()\n    #if mBut[0]:\n    #    CP.addCP(mPos)\n    #    CP.Render()\n    #window.fill((0, 0, 0))\n    pygame.display.flip()\n\npygame.display.quit()\n\n\n","repo_name":"fooy787/CLASS_LABS","sub_path":"Freshman/1803/Lab 4/Lab 4.py","file_name":"Lab 4.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
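As a sanity check on those Hermite basis functions: at u = 0 only the p0 weight survives (the segment starts at the first control point) and at u = 1 only the p1 weight survives (it ends at the second). A quick standalone verification:

```python
def hermite_basis(u):
    a = 2 * (u ** 3) - 3 * (u ** 2) + 1  # h00, weights p0
    b = u ** 3 - 2 * (u ** 2) + u        # h10, weights t0
    c = -2 * (u ** 3) + 3 * (u ** 2)     # h01, weights p1
    d = u ** 3 - u ** 2                  # h11, weights t1
    return a, b, c, d

print(hermite_basis(0.0))  # ~(1, 0, 0, 0) -> curve starts at p0
print(hermite_basis(1.0))  # ~(0, 0, 1, 0) -> curve ends at p1
```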
lst2.find(2) == 2\nlst2.remove(1)\nassert lst2.display() == [1, 2]\n","repo_name":"confar/python_challenges","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2307747886","text":"\"\"\"\nDefinitions of siamese models. Each model uses a siamese network to convert the survival problem into a\nclassification problem.\n\n - The :class:`ImageSiamese` class creates a model that uses only the images as input and uses some blocks\n - The :class:`SimpleImageSiamese` class creates a basic model that only uses images as input\n - The :class:`ImageScalarSiamese` class creates a model that combines the image input with the scalar input\n of the radiomic features, extracted with PyRadiomics.\n - The :class:`ResidualImageScalarSiamese` class creates a model that combines the image input with the scalar input\n but it also uses a residual network to fit the images. It also uses multiple blocks, similar to the\n inception idea\n - The :class:`ScalarOnlySiamese` class creates a siamese model that only uses the radiomic features as an input.\n - The :class:`ScalarOnlyDropoutSiamese` class creates a siamese model that only uses the radiomic features as an\n input but adds multiple dropout layers to improve the results.\n - The :class:`VolumeOnlySiamese` class creates a siamese model that only uses the volume feature to fit the model.\n\n\n.. inheritance-diagram:: models.siameses\n :parts: 1\n\n\"\"\"\n\nfrom typing import Dict, Any, Union, Tuple\n\nimport tensorflow as tf\nimport numpy as np\n\nimport data\nimport settings\nfrom .basics import BasicImageSiamese, BasicSiamese\n\n\nclass ImageSiamese(BasicImageSiamese):\n \"\"\"\n Class representing the initial and simple siamese structure used for the first steps of the project. It\n inherits :any:`BasicSiamese` so it has the same tensors to be fed.\n\n **Convolutional Model**:\n\n It contains a parallel inception block and 3 FC layers\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Construct a new ImageSiamese class\n\n :param gpu_level: Amount of GPU to be used with the model\n\n 0. No GPU usage\n 1. Only second conv layers\n 2. All conv layers\n 3. 
All layers and parameters are on the GPU\n \"\"\"\n super().__init__(**kwargs)\n\n def _inception_block(self, x: tf.Tensor, stage: str, block: str):\n \"\"\"\n Inception-style block: three parallel branches whose outputs are concatenated and reduced.\n\n :param x: Network's input images with shape ``[batch, 64, 64, 64, 1]``\n :param stage: integer, Used to name the layers, depending on their position in the network\n :param block: string, Used to name the layers, depending on their position in the network\n :return: Tensor of shape ``[n_X, n_Y, n_Z, n_C]``\n \"\"\"\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n device = '/gpu:0' if self._gpu_level >= 1 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for first conv branch\")\n with tf.device(device):\n a1 = tf.layers.conv3d(\n x,\n filters=8,\n kernel_size=[1, 1, 1],\n strides=1,\n activation=tf.nn.relu,\n padding='SAME',\n name=conv_name_base + 'a1'\n )\n a1 = tf.layers.conv3d(\n a1,\n filters=8,\n kernel_size=[4, 4, 4],\n strides=1,\n activation=tf.nn.relu,\n padding='SAME',\n name=conv_name_base + 'a2'\n )\n device = '/gpu:0' if self._gpu_level >= 2 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for second conv branch\")\n with tf.device(device):\n b1 = tf.layers.conv3d(\n x,\n filters=8,\n kernel_size=[1, 1, 1],\n strides=1,\n activation=tf.nn.relu,\n padding='SAME',\n name=conv_name_base + 'b1'\n )\n b1 = tf.layers.conv3d(\n b1,\n filters=8,\n kernel_size=[2, 2, 2],\n strides=1,\n activation=tf.nn.relu,\n padding='SAME',\n name=conv_name_base + 'b2'\n )\n device = '/gpu:0' if self._gpu_level >= 3 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for pooling branch\")\n with tf.device(device):\n c1 = tf.nn.max_pool3d(\n x,\n ksize=[1, 4, 4, 4, 1],\n strides=[1, 1, 1, 1, 1],\n padding='SAME',\n name=conv_name_base + 'c1'\n )\n c1 = tf.layers.conv3d(\n c1,\n filters=8,\n kernel_size=[1, 1, 1],\n strides=1,\n activation=tf.nn.relu,\n padding='SAME',\n name=conv_name_base + 'c2'\n )\n\n # Concatenate the three branches along the channel axis and reduce back to one channel\n d1 = tf.concat([a1, b1], 4)\n d1 = tf.concat([d1, c1], 4)\n\n d1 = tf.layers.conv3d(\n d1,\n filters=1,\n kernel_size=[1, 1, 1],\n strides=1,\n activation=tf.nn.relu,\n padding='SAME',\n name=conv_name_base + 'd'\n )\n d1 = tf.contrib.layers.batch_norm(d1,\n center=True, scale=True,\n scope='bn')\n return d1\n\n def _conv_layers(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Implementation of abstract method :func:`~BasicSiamese._conv_layers`\n\n :param x: Network's input images with shape ``[batch, 64, 64, 64, 1]``\n :return: Filtered image with the convolutions applied\n \"\"\"\n # In: [batch, 64, 64, 64, 1]\n x1 = self._inception_block(x, \"1s\", \"b1\")\n\n return x1\n\n def _fc_layers(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Implementation of abstract method ``BasicSiamese._fc_layers``\n\n :param x: Image, usually previously filtered with the convolutional layers.\n :return: Tensor with shape ``[batch, 1]``\n \"\"\"\n device = '/gpu:0' if self._gpu_level >= 3 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for FC layers\")\n with tf.device(device):\n # Out: [batch, 64*64*64*1]\n x = tf.layers.flatten(\n x,\n name=\"flat\"\n )\n\n # Out: [batch, 100]\n x = tf.layers.dense(\n x,\n 100,\n activation=tf.nn.relu,\n name=\"fc1\"\n )\n\n # Out: [batch, 50]\n x = 
tf.layers.dense(\n x,\n 50,\n activation=tf.nn.relu,\n name=\"fc2\"\n )\n\n # Out: [batch, 1]\n x = tf.layers.dense(\n x,\n 1,\n activation=tf.nn.relu,\n name=\"fc3\"\n )\n return x\n\n def uses_images(self) -> bool:\n \"\"\"\n Implementation of :func:`BasicModel.uses_images`.\n\n :return: :any:`True`, the model uses images as input to work\n \"\"\"\n return True\n\n\nclass SimpleImageSiamese(BasicImageSiamese):\n r\"\"\"\n Simple siamese network implementation that uses images as input.\n\n Has the same parameters as :class:`BasicImageSiamese`\n\n **Convolutional Model**:\n\n It contains 4 convolutional layers and 3 FC layers\n\n - :math:`3^3` kernel with 30 filters and stride = 2\n - :math:`3^3` kernel with 40 filters and stride = 1\n - :math:`3^3` kernel with 40 filters and stride = 1\n - :math:`3^3` kernel with 50 filters and stride = 1\n - 100 units, activation ReLU\n - 50 units, activation ReLu\n - 1 unit, activation ReLu\n\n **Attributes**:\n\n Includes the same attributes as :class:`BasicImageSiamese`\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def _conv_layers(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Implementation of abstract method :func:`~BasicSiamese._conv_layers`\n\n :param x: Network's input images with shape ``[batch, 64, 64, 64, 1]``\n :return: Filtered image with the convolutions applied\n \"\"\"\n # In: [batch, 64, 64, 64, 1]\n\n device = '/gpu:0' if self._gpu_level >= 2 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for first conv layers\")\n with tf.device(device):\n # Out: [batch, 31, 31, 31, 30]\n x = tf.layers.conv3d(\n x,\n filters=30,\n kernel_size=3,\n strides=2,\n activation=tf.nn.relu,\n name=\"conv1\"\n )\n\n # Out: [batch, 29, 29, 29, 40]\n x = tf.layers.conv3d(\n x,\n filters=40,\n kernel_size=3,\n activation=tf.nn.relu,\n name=\"conv2\"\n )\n\n device = '/gpu:0' if self._gpu_level >= 1 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for second conv layers\")\n with tf.device(device):\n # Out: [batch, 27, 27, 27, 40]\n x = tf.layers.conv3d(\n x,\n filters=40,\n kernel_size=3,\n activation=tf.nn.relu,\n name=\"conv3\"\n )\n\n # Out: [batch, 25, 25, 25, 50]\n x = tf.layers.conv3d(\n x,\n filters=50,\n kernel_size=3,\n activation=tf.nn.relu,\n name=\"conv4\"\n )\n return x\n\n def _fc_layers(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Implementation of abstract method ``BasicSiamese._fc_layers``\n\n :param x: Image, usually previously filtered with the convolutional layers.\n :return: Tensor with shape ``[batch, 1]``\n \"\"\"\n device = '/gpu:0' if self._gpu_level >= 3 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for FC layers\")\n with tf.device(device):\n # Out: [batch, 25*25*25*50]\n x = tf.layers.flatten(\n x,\n name=\"flat\"\n )\n\n # Out: [batch, 100]\n x = tf.layers.dense(\n x,\n 100,\n activation=tf.nn.relu,\n name=\"fc1\"\n )\n\n # Out: [batch, 50]\n x = tf.layers.dense(\n x,\n 50,\n activation=tf.nn.relu,\n name=\"fc2\"\n )\n\n # Out: [batch, 1]\n x = tf.layers.dense(\n x,\n 1,\n activation=tf.nn.relu,\n name=\"fc3\"\n )\n return x\n\n\nclass ImageScalarSiamese(BasicImageSiamese):\n r\"\"\"\n Siamese model that uses both images and scalar values as input.\n\n It uses scalar features extracted with PyRadiomics and provided through a Tensorflow ``placeholder``,\n the model assumes that there are :any:`settings.NUMBER_FEATURES` for each input.\n\n :param learning_rate:\n :param gpu_level: Amount of GPU that should be used with the model\n :param regularization: Regularization 
factor for the weights\n :param dropout: Dropout probability\n\n This class creates a Siamese model that uses both images and scalar features extracted using\n `PyRadiomics `_.\n The features are not extracted by the model, so they have to be provided in one of the placeholders\n\n **Network structure**:\n\n - :math:`3^3` kernel with 30 filters and stride = 2 with ReLu\n - :math:`3^3` kernel with 30 filters and stride = 2 with ReLu\n - :math:`3^3` kernel with 40 filters and stride = 1 with ReLu\n - :math:`3^3` kernel with 40 filters and stride = 1 with ReLu\n - :math:`3^3` kernel with 50 filters and stride = 1 with ReLu\n - :math:`3^3` kernel with 50 filters and stride = 1 with ReLu\n - Flattening layer\n - 8000 units, activation tanh\n - 100 units, activation tanh\n - 1 unit, activation ReLu\n\n **Attributes**:\n\n Includes the same attributes as :class:`BasicImageSiamese` and adds the following ones:\n\n :var ImageScalarSiamese.x_scalar: Scalar features obtained with\n `PyRadiomics `_\n :vartype ImageScalarSiamese.x_scalar: tf.Tensor\n \"\"\"\n\n def __init__(self, **kwargs):\n #: **Attribute**: Scalar features obtained with `PyRadiomics `_\n self.x_scalar = tf.placeholder(tf.float32, [None, settings.NUMBER_FEATURES], name=\"radiomic_features\")\n\n super().__init__(**kwargs)\n\n def _conv_layers(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Implementation of abstract method :func:`~BasicSiamese._conv_layers`\n\n :param x: Network's input images with shape ``[batch, 64, 64, 64, 1]``\n :return: Filtered image with the convolutions applied\n \"\"\"\n # In: [batch, 64, 64, 64, 1]\n\n device = '/gpu:0' if self._gpu_level >= 2 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for first conv layers\")\n with tf.device(device):\n # Out: [batch, 31, 31, 31, 30]\n x = self._conv3d(\n x,\n filters=30,\n kernel_size=3,\n strides=2,\n name=\"conv1\"\n )\n\n # Out: [batch, 15, 15, 15, 30]\n x = self._conv3d(\n x,\n filters=30,\n kernel_size=3,\n strides=2,\n name=\"conv2\"\n )\n\n device = '/gpu:0' if self._gpu_level >= 1 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for second conv layers\")\n with tf.device(device):\n # Out: [batch, 13, 13, 13, 40]\n x = self._conv3d(\n x,\n filters=40,\n kernel_size=3,\n name=\"conv3\"\n )\n\n # Out: [batch, 11, 11, 11, 40]\n x = self._conv3d(\n x,\n filters=40,\n kernel_size=3,\n name=\"conv4\"\n )\n\n # Out: [batch, 9, 9, 9, 50]\n x = self._conv3d(\n x=x,\n filters=50,\n kernel_size=3,\n name=\"conv5\",\n )\n\n return x\n\n def _fc_layers(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Implementation of abstract method :func:`~BasicSiamese._fc_layers`\n\n :param x: Image, usually previously filtered with the convolutional layers.\n :return: Tensor with shape ``[batch, 1]``\n \"\"\"\n\n # In this case we will be using the same idea seen in SimpleSiamese but we will be adding the scalar\n # features instead\n device = '/gpu:0' if self._gpu_level >= 3 else '/cpu:0'\n self.logger.debug(f\"Using device: {device} for FC layers\")\n with tf.device(device):\n # Out: [batch, 9*9*9*50]\n x = tf.layers.flatten(\n x,\n name=\"flat\"\n )\n\n # This is where the magic happens\n # Out: [batch, 37 175]\n x = tf.concat([x, self.x_scalar], axis=1)\n\n # Out: [batch, 8000]\n x = self._dense(\n x=x,\n units=8000,\n name=\"fc1\"\n )\n\n x = self._dense(\n x=x,\n units=1000,\n name=\"fc3\"\n )\n\n # x = tf.layers.dropout(\n # x,\n # rate=self._dropout,\n # training=self.training\n # )\n\n # Out: [batch, 10]\n x = self._dense(\n x=x,\n units=100,\n 
activation=tf.nn.relu,\n name=\"fc4\"\n )\n return x\n\n def _conv3d(self, x: tf.Tensor,\n filters: int,\n kernel_size: Union[int, Tuple],\n name: str,\n strides: Union[int, Tuple] = 1,\n activation: Any = tf.nn.relu,\n padding=\"valid\") -> tf.Tensor:\n return tf.layers.conv3d(\n name=name,\n inputs=x,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n activation=activation,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self._regularization),\n padding=padding\n )\n\n def _dense(self, x: tf.Tensor, units: int, name: str, activation=tf.nn.relu) -> tf.Tensor:\n return tf.layers.dense(\n x,\n units=units,\n activation=activation,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self._regularization),\n name=name\n )\n\n def feed_dict(self, batch: data.PairBatch, training: bool = True) -> Dict:\n \"\"\"\n Re-implementation of :func:`~BasicSiamese.feed_dict` to create a custom dict including the scalar values\n\n :param batch: Data containing for the current batch, usually this would be generated by\n :func:`~BatchData.batches`\n :param training: Whether we are training or not. Useful for training layers like dropout where we do not\n want to apply dropout if we are not training\n :return: Return the ``feed_dict`` as a dictionary\n \"\"\"\n return {\n **super().feed_dict(batch, training=training),\n self.x_scalar: np.stack(batch.patients[\"features\"].values)\n }\n\n\nclass ResidualImageScalarSiamese(ImageScalarSiamese):\n\n def __init__(self, **kwargs):\n self.residual_count_a = 0\n self.residual_count_b = 0\n\n super().__init__(**kwargs)\n\n def _conv_layers(self, x: tf.Tensor) -> tf.Tensor:\n device = '/gpu:0' if self._gpu_level >= 1 else '/cpu:0'\n with tf.device(device):\n x = self._stem_block(x)\n\n for i in range(2):\n x = self._res_block_a(x)\n\n device = '/gpu:1' if self._gpu_level >= 1 else '/cpu:0'\n with tf.device(device):\n x = self._reduction_a(x)\n\n for i in range(2):\n x = self._res_block_b(x)\n\n # Out: [batch, 7, 7, 7, 350]\n x = self._reduction_b(x)\n\n # Out: [batch, 2, 2, 2, 350]\n x = tf.layers.average_pooling3d(\n inputs=x,\n pool_size=6,\n strides=1\n )\n\n return x\n\n def _fc_layers(self, x: tf.Tensor) -> tf.Tensor:\n\n # Out: [batch, 2*2*2*350] = [batch, 2800]\n x = tf.layers.flatten(x, name=\"flat\")\n\n # Out: [batch, 2800 + 725]\n x = tf.concat([x, self.x_scalar], axis=1)\n\n x = self._dense(\n x=x,\n units=800,\n name=\"fc_0\",\n )\n\n x = self._dense(\n x=x,\n units=100,\n name=\"fc_1\",\n )\n\n x = self._dense(\n x=x,\n units=10,\n name=\"fc_2\",\n activation=tf.nn.relu\n )\n\n return x\n\n def _stem_block(self, x: tf.Tensor) -> tf.Tensor:\n with tf.variable_scope(\"stem_reduce\"):\n\n # Out: [batch, 31, 31, 31, 25]\n x_a = self._conv3d(\n x=x,\n name=\"a_0_conv_3x3x3\",\n filters=25,\n kernel_size=3,\n strides=2,\n )\n\n # Out: [batch, 31, 31, 31, 1]\n x_b = tf.layers.max_pooling3d(\n inputs=x,\n name=\"b_0_pool_3x3x3\",\n pool_size=3,\n strides=2,\n )\n\n # Out: [batch, 31, 31, 31, 25]\n x_b = self._conv3d(\n x=x_b,\n name=\"b_1_conv_1x1x1\",\n filters=25,\n kernel_size=1,\n padding=\"same\"\n )\n\n # Out: [batch, 31, 31, 31, 50]\n x_concat: tf.Tensor = tf.concat([x_a, x_b], axis=4)\n assert x_concat.get_shape()[-1] == 50\n\n self.logger.debug(x_concat.get_shape())\n\n return x_concat\n\n def _res_block_a(self, x: tf.Tensor, activation_fn=tf.nn.relu) -> tf.Tensor:\n \"\"\"\n Residual block with size 
``[batch, 31, 31, 31, 50]``\n :param x:\n :return:\n \"\"\"\n\n with tf.variable_scope(f\"block_31_{self.residual_count_a}\"):\n self.residual_count_a += 1\n\n x_a = self._conv3d(\n x=x,\n name=\"a_0_conv_1x1x1\",\n filters=32,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_b = self._conv3d(\n x=x,\n name=\"b_0_conv_1x1x1\",\n filters=32,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_b = self._conv3d(\n x=x_b,\n name=\"b_1_conv_3x3x3\",\n filters=32,\n kernel_size=3,\n padding=\"same\"\n )\n\n x_c = self._conv3d(\n x=x,\n name=\"c_0_conv_1x1x1\",\n filters=32,\n kernel_size=1,\n padding=\"same\"\n )\n\n for i in range(1, 3):\n x_c = self._conv3d(\n x=x_c,\n name=f\"c_{i}_conv_3x3x3\",\n filters=32,\n kernel_size=3,\n padding=\"same\"\n )\n\n x_conv = tf.concat([x_a, x_b, x_c], axis=4)\n\n x_conv = self._conv3d(\n x=x_conv,\n name=\"conv_1x1\",\n filters=x.get_shape()[-1],\n kernel_size=1,\n padding=\"same\",\n activation=None\n )\n\n x += x_conv\n return activation_fn(x)\n\n def _reduction_a(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n :param x: Tensor with shape ``[batch, 31, 31, 31, 50]``\n :return: Tensor with shape ``[batch, 15, 15, 15, 130]``\n \"\"\"\n with tf.variable_scope(\"reduction_a\"):\n x_a = tf.layers.max_pooling3d(\n inputs=x,\n name=\"a_0_pooling_3x3x3\",\n pool_size=3,\n strides=2\n )\n\n x_b = self._conv3d(\n x=x,\n name=\"b_0_conv_3x3x3\",\n filters=50,\n kernel_size=3,\n strides=2\n )\n\n x_c = self._conv3d(\n x=x,\n name=\"c_0_conv_1x1x1\",\n filters=30,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_c = self._conv3d(\n x=x_c,\n name=\"c_1_conv_3x3x3\",\n filters=30,\n kernel_size=3,\n padding=\"same\"\n )\n\n x_c = self._conv3d(\n x=x_c,\n name=\"c_2_conv_3x3x3\",\n filters=30,\n kernel_size=3,\n strides=2\n )\n\n return tf.concat([x_a, x_b, x_c], axis=4)\n\n def _res_block_b(self, x: tf.Tensor, activation_fn=tf.nn.relu) -> tf.Tensor:\n \"\"\"\n :param x: Tensor with shape ``[batch, 15, 15, 15, 130]``\n :return: Tensor with shape ``[batch, 15, 15, 15, 130]``\n \"\"\"\n\n with tf.variable_scope(f\"block_15_{self.residual_count_b}\"):\n self.residual_count_b += 1\n\n x_a = self._conv3d(\n x=x,\n name=\"a_0_conv_1x1x1\",\n filters=50,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_b = self._conv3d(\n x=x,\n name=\"b_0_conv_1x1x1\",\n filters=50,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_b = self._conv3d(\n x=x_b,\n name=\"b_1_conv_1x1x7\",\n filters=50,\n kernel_size=(1, 1, 7),\n padding=\"same\"\n )\n\n x_b = self._conv3d(\n x=x_b,\n name=\"b_1_conv_1x7x1\",\n filters=50,\n kernel_size=(1, 7, 1),\n padding=\"same\"\n )\n\n x_b = self._conv3d(\n x=x_b,\n name=\"b_1_conv_7x1x1\",\n filters=50,\n kernel_size=(7, 1, 1),\n padding=\"same\"\n )\n\n x_concat = tf.concat([x_a, x_b], axis=4)\n x_conv = self._conv3d(\n x=x_concat,\n name=\"conv_1x1\",\n filters=x.get_shape()[-1],\n kernel_size=1,\n padding=\"same\",\n activation=None\n )\n\n x += x_conv\n return activation_fn(x)\n\n def _reduction_b(self, x: tf.Tensor) -> tf.Tensor:\n \"\"\"\n :param x: Tensor with shape ``[batch, 15, 15, 15, 130]``\n :return: Tensor with shape ``[batch, 7, 7, 7, 350]``\n \"\"\"\n\n with tf.variable_scope(\"reduction_b\"):\n x_a = self._conv3d(\n x=x,\n name=\"a_0_conv_3x3\",\n filters=100,\n kernel_size=3,\n strides=2\n )\n\n x_b = self._conv3d(\n x=x,\n name=\"b_0_conv_1x1\",\n filters=100,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_b = self._conv3d(\n x=x_b,\n name=\"b_1_conv_3x3\",\n filters=100,\n kernel_size=3,\n strides=2\n )\n\n x_c = self._conv3d(\n x=x,\n name=\"c_0_conv_1x1\",\n 
filters=100,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_c = self._conv3d(\n x=x_c,\n name=\"c_1_conv_3x3\",\n filters=100,\n kernel_size=3,\n strides=2\n )\n\n x_d = self._conv3d(\n x=x,\n name=\"d_0_conv_1x1\",\n filters=50,\n kernel_size=1,\n padding=\"same\"\n )\n\n x_d = self._conv3d(\n x=x_d,\n name=\"d_1_conv_3x3\",\n filters=50,\n kernel_size=3,\n padding=\"same\"\n )\n\n x_d = self._conv3d(\n x=x_d,\n name=\"d_2_conv_3x3\",\n filters=50,\n kernel_size=3,\n strides=2\n )\n\n # Out [batch, 7, 7, 7, 100 + 100 + 100 + 50]\n return tf.concat([x_a, x_b, x_c, x_d], axis=4)\n\n\nclass ScalarOnlySiamese(BasicSiamese):\n r\"\"\"\n Model that uses only radiomic features as input to train\n\n It has the same parameters as :class:`BasicSiamese`\n\n It only uses the radiomic features obtained with `PyRadiomics `_\n\n **Attributes**:\n\n Includes the same attributes as :class:`BasicSiamese` and adds the following ones:\n\n :var ScalarOnlySiamese.x_scalar: Radiomic features obtained with\n `PyRadiomics `_\n :vartype ScalarOnlySiamese.x_scalar: tf.Tensor\n \"\"\"\n\n def __init__(self, **kwargs):\n #: Radiomic features obtained with `PyRadiomics `_\n self.x_scalar = tf.placeholder(tf.float32, [None, settings.NUMBER_FEATURES])\n\n super().__init__(**kwargs)\n\n def _sister(self):\n # Out: [batch, 500]\n x = self.x_scalar\n x = self._dense(\n x,\n 500,\n \"fc1\"\n )\n\n # Out: [batch, 200]\n x = self._dense(\n x,\n 200,\n \"fc2\"\n )\n\n x = tf.layers.dropout(\n x,\n rate=self._dropout,\n training=self.training\n )\n\n # Out: [batch, 50]\n x = self._dense(\n x,\n 50,\n \"fc3\"\n )\n\n # Out: [batch, 1]\n x = self._dense(\n x,\n 10,\n \"fc4\",\n activation=tf.nn.relu\n )\n\n return x\n\n def _dense(self, x: tf.Tensor, units: int, name: str, activation=tf.nn.tanh) -> tf.Tensor:\n return tf.layers.dense(\n x,\n units=units,\n activation=activation,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self._regularization),\n name=name\n )\n\n def feed_dict(self, batch: data.PairBatch, training: bool = True):\n\n return {\n **super().feed_dict(batch, training),\n self.x_scalar: np.stack(batch.patients[\"features\"]),\n }\n\n def uses_images(self) -> bool:\n \"\"\"\n Implementation of :func:`BasicModel.uses_images`. 
This model does not use images to work.\n\n :return: :any:`False` since this model does not use images to work\n \"\"\"\n return False\n\n\nclass ScalarOnlyDropoutSiamese(ScalarOnlySiamese):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def _sister(self):\n # Out: [batch, 500]\n x = self.x_scalar\n x = self._dense(\n x,\n 500,\n \"fc1\"\n )\n\n x = tf.layers.dropout(\n x,\n rate=self._dropout,\n training=self.training\n )\n\n # Out: [batch, 200]\n x = self._dense(\n x,\n 200,\n \"fc2\"\n )\n\n x = tf.layers.dropout(\n x,\n rate=self._dropout,\n training=self.training\n )\n\n # Out: [batch, 50]\n x = self._dense(\n x,\n 50,\n \"fc3\"\n )\n\n x = tf.layers.dropout(\n x,\n rate=self._dropout,\n training=self.training\n )\n\n # Out: [batch, 10]\n x = self._dense(\n x,\n 10,\n \"fc4\",\n activation=tf.nn.relu\n )\n\n return x\n\n\nclass VolumeOnlySiamese(BasicSiamese):\n r\"\"\"\n Model that only uses the volume radiomic feature\n\n The features are provided by the package from `PyRadiomics `_\n\n It trains a model in the form :math:`y = w \\cdot V + b`\n\n **Attributes**:\n\n Includes the same attributes as :class:`BasicSiamese` and adds the following ones:\n\n :ivar VolumeOnlySiamese.x_volume: Placeholder for the volume feature\n :vartype VolumeOnlySiamese.x_volume: tf.Tensor\n \"\"\"\n\n def __init__(self, **kwargs):\n #: **Attribute**: Radiomic volume feature obtained with\n #: `PyRadiomics `_\n self.x_volume = tf.placeholder(tf.float32, [None, 1])\n\n super().__init__(**kwargs)\n\n def _sister(self) -> tf.Tensor:\n \"\"\"\n Super greedy predictor: more volume means less survival time, so we only have to invert the volume size to\n create an inverse relation. Its only trainable variables are the scalar weight and the bias\n\n :return: Greedy siamese applied\n \"\"\"\n\n total = tf.Variable(0., name=\"bias\")\n for i in range(1):\n w = tf.Variable(-1., name=f\"weight_{i}\")\n total = total + w*(self.x_volume**(i + 1))\n\n return total\n\n def feed_dict(self, batch: data.PairBatch, training: bool = True):\n\n features = np.stack(batch.patients[\"features\"].values)\n volumes = features[:, settings.VOLUME_FEATURE_INDEX].reshape((-1, 1))\n\n return {\n **super().feed_dict(batch, training),\n self.x_volume: volumes\n }\n\n def uses_images(self):\n return False\n","repo_name":"bhklab/DeepCINET","sub_path":"Sources/models/siameses.py","file_name":"siameses.py","file_ext":"py","file_size_in_byte":32310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23225047838","text":"import cv2\nimport numpy as np\nimport os\nfrom matplotlib import pyplot as plt\n\n# infile = os.path.join('data', 'musician.jpg')\ninfile = os.path.join('data', 'calling.png')\noutfile = os.path.join('data', 'calling-edges.png')\n\nimg = cv2.imread(infile, 0)\nheight, width = img.shape\n# edges\t= cv2.Canny(image, threshold1, threshold2)\nedges = cv2.Canny(img, 100, 200)\n\n# Save the edge image in the original shape\n# https://www.infobyip.com/detectmonitordpi.php\nmy_dpi = 192\n\n# With margin\n# plt.figure(figsize=(width/my_dpi, height/my_dpi), dpi=my_dpi)\n# plt.imshow(edges, cmap='gray')\n# plt.axis('off')\n# plt.savefig(outfile, dpi=my_dpi)\n\n# Without margin\nplt.gca().set_axis_off()\nplt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)\nplt.margins(0,0)\nplt.gca().xaxis.set_major_locator(plt.NullLocator())\nplt.gca().yaxis.set_major_locator(plt.NullLocator())\n\nplt.figure(figsize=(width/my_dpi, height/my_dpi), 
dpi=my_dpi)\nplt.imshow(edges, cmap='gray')\nplt.axis('off')\nplt.savefig(outfile, dpi=my_dpi, bbox_inches = 'tight', pad_inches = 0)\n\n# Show the two images side by side\n# plt.subplot(121),plt.imshow(img,cmap = 'gray')\n# plt.title('Original Image'), plt.xticks([]), plt.yticks([])\n# plt.subplot(122),plt.imshow(edges,cmap = 'gray')\n# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])\n# plt.show()","repo_name":"tintinrevient/period-recreation","sub_path":"edge_detection/canny_edge_detector.py","file_name":"canny_edge_detector.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31631015468","text":"\"\"\"Topology example (:mod:`fluidimage.topologies.example`)\n==========================================================\n\nThis topology has two Pythran CPU-bound tasks. It helps to see executor behavior\nwith C functions.\n\n.. autoclass:: TopologyExample\n :members:\n :private-members:\n\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport scipy.io\n\nfrom transonic import boost\n\nfrom . import TopologyBase\nfrom ..util import imread\n\nA = \"uint8[:,:]\"\n\n\n@boost\ndef cpu1(array1: A, array2: A, nloops: int = 10):\n\n a = np.arange(10000000 // nloops)\n result = a\n for i in range(nloops):\n result += a**3 + a**2 + 2\n\n for i in range(nloops):\n array1 = array1 * array2\n return (array1, array1)\n\n\n@boost\ndef cpu2(array1: A, array2: A, nloops: int = 10):\n\n a = np.arange(10000000 // nloops)\n result = a\n for i in range(nloops):\n result += a**3 + a**2 + 2\n\n for i in range(nloops):\n array1 = np.multiply(array1, array2)\n return array1\n\n\nclass TopologyExample(TopologyBase):\n \"\"\"Topology example for testing.\n\n Parameters\n ----------\n\n logging_level : str, {'warning', 'info', 'debug', ...}\n\n Logging level.\n\n nb_max_workers : None, int\n\n Maximum number of \"workers\". If None, a number is computed from the\n number of cores detected. 
If there are memory errors, you can try to\n decrease the number of workers.\n\n \"\"\"\n\n @classmethod\n def create_default_params(cls):\n params = dict(\n path_input=None,\n path_dir_result=None,\n nloops=1,\n multiplicator_nb_images=1,\n )\n return params\n\n def __init__(self, params, logging_level=\"info\", nb_max_workers=None):\n self.params = params\n\n path_input = params[\"path_input\"]\n path_dir_result = params[\"path_dir_result\"]\n nloops = params[\"nloops\"]\n self.multiplicator_nb_images = params[\"multiplicator_nb_images\"]\n\n def func1(arrays):\n key = arrays[0]\n if key == \"Karman_02_00\":\n raise ValueError(\"For testing\")\n arr0, arr1 = cpu1(arrays[1], arrays[2], nloops)\n return key, arr0, arr1\n\n def func2(arrays):\n key = arrays[0]\n result = cpu2(arrays[1], arrays[2], nloops)\n return key, result\n\n self.path_input = path_input\n\n super().__init__(\n path_dir_result=path_dir_result,\n logging_level=logging_level,\n nb_max_workers=nb_max_workers,\n )\n\n if not self.path_dir_result.exists():\n self.path_dir_result.mkdir()\n\n self.img_counter = 0\n\n queue_names = self.add_queue(\"names images\")\n queue_couples_names = self.add_queue(\"names couples\")\n queue_arrays = self.add_queue(\"arrays\")\n queue_couples_arrays = self.add_queue(\"couples arrays\")\n queue_cpu1 = self.add_queue(\"queue cpu1\")\n queue_cpu2 = self.add_queue(\"queue cpu2\")\n\n self.add_work(\n \"fill names\",\n func_or_cls=self.fill_names,\n output_queue=queue_names,\n kind=(\"global\", \"one shot\"),\n )\n self.add_work(\n \"fill names couples\",\n func_or_cls=self.fill_couples_names,\n input_queue=queue_names,\n output_queue=queue_couples_names,\n kind=(\"global\", \"one shot\"),\n )\n\n self.add_work(\n \"read array\",\n func_or_cls=self.read_array,\n input_queue=queue_names,\n output_queue=queue_arrays,\n kind=\"io\",\n )\n self.add_work(\n \"fill couples arrays\",\n func_or_cls=self.fill_couples_arrays,\n input_queue=(queue_couples_names, queue_arrays),\n output_queue=queue_couples_arrays,\n kind=(\"global\"),\n )\n self.add_work(\n \"cpu1\",\n func_or_cls=func1,\n input_queue=queue_couples_arrays,\n output_queue=queue_cpu1,\n kind=\"cpu\",\n )\n self.add_work(\n \"cpu2\",\n func_or_cls=func2,\n input_queue=queue_cpu1,\n output_queue=queue_cpu2,\n kind=\"cpu\",\n )\n self.add_work(\n \"save\", func_or_cls=self.save, input_queue=queue_cpu2, kind=\"io\"\n )\n\n def fill_names(self, input_queue, output_queue):\n for ind in range(self.multiplicator_nb_images):\n for name in sorted(os.listdir(self.path_input)):\n key = name.split(\".bmp\")[0] + f\"_{ind:02}\"\n output_queue[key] = name\n\n def fill_couples_names(self, input_queue, output_queue):\n for key, name in list(input_queue.items()):\n output_queue[key] = [key, (name, name)]\n\n def read_array(self, name):\n if name == \"Karman_03.bmp\":\n raise ValueError(\"For testing\")\n\n image = imread(self.path_input / name)\n return image\n\n def fill_couples_arrays(self, input_queues, output_queue):\n queue_couples_names, queue_arrays = input_queues\n queue_couples_arrays = output_queue\n\n for key, array in list(queue_arrays.items()):\n if key not in queue_couples_names:\n continue\n queue_arrays.pop(key)\n queue_couples_names.pop(key)\n # propagating possible exceptions...\n if isinstance(array, Exception):\n queue_couples_arrays[key] = array\n else:\n queue_couples_arrays[key] = [key, array, array]\n\n def save(self, inputs):\n key = inputs[0]\n arr = inputs[1]\n name = key + \".h5\"\n scipy.io.savemat(self.path_dir_result / name, 
mdict={\"array\": arr})\n","repo_name":"fluiddyn/fluidimage","sub_path":"fluidimage/topologies/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"69978405384","text":"from django.contrib import admin\nfrom .models import Page, Chapter\n\nclass PageAdmin(admin.ModelAdmin):\n readonly_fields = ('created_at', 'updated_at')\n\nclass ChapterAdmin(admin.ModelAdmin):\n readonly_fields = ('created_at',)\n\n# Register your models here.\nadmin.site.register(Page, PageAdmin)\nadmin.site.register(Chapter, ChapterAdmin)\n\ntitle = \"Panel de Administración IOSA | Aerolíneas Argentinas\"\nsubtitle = \"Panel de Gestión\"\n\nadmin.site.site_header = title\nadmin.site.site_title = title\nadmin.site.index_title = subtitle","repo_name":"BauerPablo/presentacion_IOSA","sub_path":"pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14433993422","text":"from flask import Flask, render_template, jsonify, request\nfrom pymongo import MongoClient\nimport time\n\napp = Flask(__name__)\n\nclient = MongoClient(\"mongodb://localhost:27017/\")\ndb = client.dbStock\n\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/post', methods=['POST'])\ndef save_post():\n print('clicked')\n idx = request.form['idx']\n title = request.form['title_give']\n comment = request.form['comment_give']\n nowTime = time.strftime('%Y.%m.%d %X')\n doc = {\n 'idx' : idx,\n 'title' : title,\n 'comment' : comment,\n 'reg_date' : nowTime\n }\n db.posts.insert_one(doc)\n return {\"result\": \"success\"}\n\n\n@app.route('/post', methods=['GET'])\ndef get_post():\n posts = list(db.posts.find({}, {'_id' : False}))\n return jsonify({'all_posts' : posts})\n\n\n@app.route('/post', methods=['DELETE'])\ndef delete_post():\n return {\"result\": \"success\"}\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)","repo_name":"Ohjinn/TimeAttack2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72082403465","text":"# Name - Jatin\n# Entry Number - 2020CSB1090\n\nimport numpy as np\nfrom math import log10, sqrt\nfrom numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros\nimport skimage\nfrom skimage.color import *\nfrom skimage import feature\nfrom matplotlib.image import imread\nimport matplotlib.pyplot as plt\nfrom skimage.metrics import structural_similarity\n\n\ndef peak_signal_to_noise_ratio(original, compressed):\n mse = np.mean((original - compressed) ** 2)\n if (mse == 0):\n return 100\n max_pixel = 255.0\n psnr = 20 * log10(max_pixel / sqrt(mse))\n return psnr\n\n\ndef myCannyEdgeDetector(inputImg, lowThresholdRatio, highThresholdRatio):\n\n # gaussian filter Noise Reduction\n gx, gy = np.meshgrid(np.arange(-7/2+1, 7/2+1), np.arange(-7/2+1, 7/2+1))\n\n NORMALISED = 1 / (2.0 * np.pi * 1**2)\n\n Kernal = np.exp(-(gx**2+gy**2) / (2.0*1**2)) / \\\n NORMALISED\n\n size_kernal, gaussi_filter = Kernal.shape[0], np.zeros_like(\n img, dtype=float)\n\n for i in range(img.shape[0]-(size_kernal-1)):\n for j in range(img.shape[1]-(size_kernal-1)):\n window = img[i:i+size_kernal, j:j+size_kernal] * Kernal\n gaussi_filter[i, j] = np.sum(window)\n\n # plt.imshow(gaussi_filter, 
cmap='gray')\n # plt.show()\n\n # sobel filter\n sobel_x = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]])\n sobel_y = np.array([[1.0, 2.0, 1.0], [0.0, 0.0, 0.0], [-1.0, -2.0, -1.0]])\n [rows, columns] = np.shape(gaussi_filter)\n sobel_filtered_image = np.zeros(shape=(rows, columns))\n\n theta = np.zeros(shape=(rows, columns))\n\n for i in range(rows - 2):\n for j in range(columns - 2):\n gx = np.sum(np.multiply(sobel_x, gaussi_filter[i:i + 3, j:j + 3]))\n gy = np.sum(np.multiply(sobel_y, gaussi_filter[i:i + 3, j:j + 3]))\n sobel_filtered_image[i + 1, j + 1] = np.sqrt(gx ** 2 + gy ** 2)\n if (gx == 0):\n theta[i+1, j+1] = 90\n else:\n theta[i+1, j+1] = ((np.arctan(gy/gx))/np.pi) * 180\n\n sobel_filtered_image = sobel_filtered_image/np.max(sobel_filtered_image)\n # plt.imshow(sobel_filtered_image, cmap='gray')\n # plt.show()\n\n # Non-maximum suppression\n nms = np.copy(sobel_filtered_image)\n\n for i in range(theta.shape[0]-(2)):\n for j in range(theta.shape[1]-(2)):\n if theta[i][j] < 0:\n theta[i][j] += 180\n\n if (theta[i, j] <= 22.5 or theta[i, j] > 157.5):\n if (sobel_filtered_image[i, j] <= sobel_filtered_image[i-1, j]) and (sobel_filtered_image[i, j] <= sobel_filtered_image[i+1, j]):\n nms[i, j] = 0\n if (theta[i, j] > 22.5 and theta[i, j] <= 67.5):\n if (sobel_filtered_image[i, j] <= sobel_filtered_image[i-1, j-1]) and (sobel_filtered_image[i, j] <= sobel_filtered_image[i+1, j+1]):\n nms[i, j] = 0\n if (theta[i, j] > 67.5 and theta[i, j] <= 112.5):\n if (sobel_filtered_image[i, j] <= sobel_filtered_image[i+1, j+1]) and (sobel_filtered_image[i, j] <= sobel_filtered_image[i-1, j-1]):\n nms[i, j] = 0\n if (theta[i, j] > 112.5 and theta[i, j] <= 157.5):\n if (sobel_filtered_image[i, j] <= sobel_filtered_image[i+1, j-1]) and (sobel_filtered_image[i, j] <= sobel_filtered_image[i-1, j+1]):\n nms[i, j] = 0\n\n nms = nms/np.max(nms)\n # plt.imshow(nms, cmap='gray')\n # plt.show()\n\n # Double thresholding\n Threash = np.copy(nms)\n h = int(Threash.shape[0])\n w = int(Threash.shape[1])\n highThreshold = np.max(Threash) * highThresholdRatio\n lowThreshold = highThreshold * lowThresholdRatio\n\n for i in range(1, h-1):\n for j in range(1, w-1):\n if (Threash[i, j] > highThreshold):\n Threash[i, j] = 1\n elif (Threash[i, j] < lowThreshold):\n Threash[i, j] = 0\n else:\n if ((Threash[i-1, j-1] > highThreshold) or\n (Threash[i-1, j] > highThreshold) or\n (Threash[i-1, j+1] > highThreshold) or\n (Threash[i, j-1] > highThreshold) or\n (Threash[i, j+1] > highThreshold) or\n (Threash[i+1, j-1] > highThreshold) or\n (Threash[i+1, j] > highThreshold) or\n (Threash[i+1, j+1] > highThreshold)):\n Threash[i, j] = 1\n\n return Threash\n\n\nimg = skimage.io.imread('7.jpg')\nimg = rgb2gray(img)\noutputImg = myCannyEdgeDetector(img, 0.2, 0.4)\n\nplt.figure()\nplt.imshow(outputImg, cmap='gray')\nplt.title(\"My canny Edge Detector\")\nplt.show()\ntrue_canny = feature.canny(img)\nplt.imshow(true_canny, cmap='gray')\nplt.title(\"Inbuilt_Function\")\nplt.show()\npsnr = peak_signal_to_noise_ratio(true_canny, outputImg)\nprint(\"Peak Signal To Noise Ratio: \", end=' ')\nprint(psnr)\nssim1 = structural_similarity(outputImg, true_canny, data_range=255)\nprint(\"Structural Similarity Index Metric: \", end=' ')\nprint(ssim1)\n","repo_name":"jatin1322/CannyEdgeDetector","sub_path":"MyCannyEdgeDetectorDemo.py","file_name":"MyCannyEdgeDetectorDemo.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
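An illustrative aside (not one of the dataset records): the Canny record above scores its edge map with peak signal-to-noise ratio. A minimal self-contained sketch of that metric follows; the function name, the synthetic data, and the 8-bit peak of 255 are assumptions for the demo rather than values taken from the record, and returning infinity on a zero MSE is a variant of the record's sentinel value of 100.

import numpy as np

def psnr(reference: np.ndarray, estimate: np.ndarray) -> float:
    # Cast to float so uint8 inputs do not wrap around when subtracted.
    mse = np.mean((reference.astype(float) - estimate.astype(float)) ** 2)
    if mse == 0:
        return float("inf")  # identical inputs; the record above returns 100 here instead
    peak = 255.0  # assumes 8-bit image data
    return 20 * np.log10(peak / np.sqrt(mse))

# Tiny usage example with synthetic 8-bit data.
rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
noisy = np.clip(img.astype(int) + rng.integers(-5, 6, size=img.shape), 0, 255)
print(round(psnr(img, noisy), 2))  # roughly 38 dB for this noise level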
+{"seq_id":"20335379892","text":"#We add libraries\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QVBoxLayout, QLineEdit, QHBoxLayout\r\nfrom datetime import datetime\r\nimport time\r\n\r\n\r\n\r\ndef confirmation(): \r\n def exitt():\r\n main_win.hide()\r\n\r\n edit_day_text=int(edit_day.text())\r\n edit_month_text=int(edit_month.text())\r\n edit_year_text=int(edit_year.text())\r\n year1 = year-edit_year_text\r\n if year1 > 16:\r\n text.setText('Ваша вікова категорія підходить')\r\n butt = QPushButton('продовжити')\r\n line_v.addWidget(butt, alignment = Qt.AlignCenter)\r\n butt.clicked.connect(exitt)\r\n elif year1 == 16:\r\n if month > edit_month_text:\r\n text.setText('Ваша вікова категорія підходить')\r\n butt = QPushButton('продовжити')\r\n line_v.addWidget(butt, alignment = Qt.AlignCenter)\r\n butt.clicked.connect(exitt)\r\n elif month == edit_month_text:\r\n if day >= edit_day_text:\r\n text.setText('Ваша вікова категорія підходить')\r\n butt = QPushButton('продовжити')\r\n line_v.addWidget(butt, alignment = Qt.AlignCenter)\r\n butt.clicked.connect(exitt)\r\n else:\r\n text.setText('Вибачте але ваша вікова категорія не підходить')\r\n butt2 = QPushButton('продовжити')\r\n line_v.addWidget(butt2, alignment = Qt.AlignCenter)\r\n butt2.clicked.connect(quit)\r\n else:\r\n text.setText('Вибачте але ваша вікова категорія не підходить')\r\n butt2 = QPushButton('продовжити')\r\n line_v.addWidget(butt2, alignment = Qt.AlignCenter)\r\n butt2.clicked.connect(quit) \r\n else:\r\n text.setText('Вибачте але ваша вікова категорія не підходить')\r\n butt2 = QPushButton('продовжити')\r\n line_v.addWidget(butt2, alignment = Qt.AlignCenter)\r\n butt2.clicked.connect(quit)\r\n\r\ndef questionnaire():\r\n def continuation():\r\n def exitt1():\r\n main_win1.hide()\r\n\r\n dani.setText('Ваші дані збережено')\r\n butt1 = QPushButton('Продовжити')\r\n line_v1.addWidget(butt1, alignment = Qt.AlignCenter)\r\n butt1.clicked.connect(exitt1)\r\n\r\n app1 = QApplication([])\r\n main_win1 = QWidget()\r\n\r\n main_win1.resize(400,300)\r\n main_win1.move(460, 280)\r\n\r\n main_win1.setWindowTitle('АНКЕТА АБІТУРІЄНТА')\r\n \r\n dani = QLabel(\"Ваші дані\")\r\n name = QLabel(\"Ваше прізвище, ім'я та по-батькові\")\r\n live = QLabel(\"Домашня адреса (Область, район, місто/село)\")\r\n gmail = QLabel(\"Ваш e-mail\")\r\n number = QLabel(\"Номер телефону\")\r\n surroundings = QLabel(\"Розкажіть про себе(Ваші уподобання, хобі, коло інтересів)\")\r\n\r\n but1 = QPushButton('Підтвердити')\r\n\r\n name_l = QLineEdit()\r\n live_l = QLineEdit()\r\n gmail_l = QLineEdit()\r\n number_l = QLineEdit()\r\n surroundings_l = QLineEdit()\r\n\r\n line_v1 = QVBoxLayout()\r\n\r\n line_v1.addWidget(dani, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(name, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(name_l, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(live, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(live_l, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(gmail, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(gmail_l, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(number, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(number_l, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(surroundings, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(surroundings_l, alignment = Qt.AlignCenter)\r\n line_v1.addWidget(but1, alignment = Qt.AlignCenter)\r\n\r\n main_win1.setLayout(line_v1)\r\n\r\n but1.clicked.connect(continuation)\r\n\r\n main_win1.show()\r\n 
app1.exec_()\r\n\r\nquestionnaire()\r\n\r\n#Let's take the current time\r\ndate = datetime.now()\r\n\r\nyear = date.year\r\nmonth = date.month\r\nday = date.day\r\n\r\n#We create \"application\" and \"window\" objects\r\napp = QApplication([])\r\nmain_win = QWidget()\r\n\r\n#Window dimensions and location coordinates\r\nmain_win.resize(400,300)\r\nmain_win.move(460, 280)\r\n\r\n#Interface name\r\nmain_win.setWindowTitle('АНКЕТА АБІТУРІЄНТА')\r\n\r\n\r\n\r\n#Text widgets\r\ntext = QLabel('Ведіть свою дату народження')\r\nday_1 = QLabel('Чісло')\r\nmonth_1 = QLabel('Місяць')\r\nyear_1 = QLabel('Рік')\r\n\r\n#input strings\r\nedit_day = QLineEdit('')\r\nedit_month = QLineEdit('')\r\nedit_year = QLineEdit('')\r\n\r\n#\r\nbut = QPushButton('Підтвердити')\r\nbutt = QPushButton('продовжити')\r\n\r\n#We create vertical and horizontal lines\r\nline_v = QVBoxLayout()\r\nline_h = QHBoxLayout()\r\nline_h1 = QHBoxLayout()\r\n\r\n\r\nline_v.addWidget(text, alignment = Qt.AlignCenter)\r\nline_h1.addWidget(day_1, alignment = Qt.AlignLeft)\r\nline_h1.addWidget(month_1, alignment = Qt.AlignLeft)\r\nline_h1.addWidget(year_1, alignment = Qt.AlignLeft)\r\nline_v.addLayout(line_h1)\r\nline_h.addWidget(edit_day, alignment = Qt.AlignLeft)\r\nline_h.addWidget(edit_month, alignment = Qt.AlignCenter)\r\nline_h.addWidget(edit_year, alignment = Qt.AlignRight)\r\nline_v.addLayout(line_h)\r\nline_v.addWidget(but, alignment = Qt.AlignCenter)\r\n\r\nmain_win.setLayout(line_v)\r\n#We make the window visible and leave the program open until the exit button is pressed\r\n\r\nbut.clicked.connect(confirmation)\r\n\r\nmain_win.show()\r\napp.exec_()","repo_name":"Yarykl/hak.","sub_path":"one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31898732430","text":"from datetime import datetime\n\nfrom aiogram import types\n\nfrom tgbot.models.models import User\n\n\nclass UserCommands:\n\n async def get_user(self, user_id):\n user = await User.query.where(User.user_id == user_id).gino.first()\n return user\n\n async def add_new_user(self):\n user = types.User.get_current()\n old_user = await self.get_user(user.id)\n if old_user:\n return old_user\n new_user = User()\n new_user.user_id = user.id\n new_user.username = user.username\n new_user.full_name = user.full_name\n new_user.reg_date = datetime.now()\n await new_user.create()\n return new_user\n","repo_name":"codebegemot/okoloBot","sub_path":"tgbot/models/user_commands.py","file_name":"user_commands.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17525131600","text":"import os\nimport argparse\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom datasets import Dataset, DatasetDict\n\nos.makedirs('data/', exist_ok=True)\n\nSEED = 42\n\n# limit_tweets\ndef limit_tweets(tweets, num_tweets):\n tweets = tweets.split(sep='|||') # Each tweet are separated by \"|||\"\n tweets = tweets[:num_tweets]\n tweets = '|||'.join(tweets)\n return tweets\n\n# Indexing tweets, assuming each tweet is separated by \"|||\"\ndef index_tweets(tweets):\n sep_tweets = tweets.split(\"|||\")\n \n indexed_tweets = [] \n for i, tweet in enumerate(sep_tweets):\n indexed_tweets.append(f\"{i+1}. 
{tweet}\")\n \n indexed_tweets = \"\\n\".join(indexed_tweets)\n \n return indexed_tweets\n \n# Save preprocessed data\ndef save_preprocessed_dataset(path: str, dataset):\n dataset.save_to_disk(path)\n\ndef parse_args():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-num_tweets\", type=int, default=25)\n ap.add_argument(\"-original_data\", type=str, default=\"data/kaggle.csv\")\n ap.add_argument(\"-save_dir\", type=str, default=\"data/processed_data\")\n args = ap.parse_args()\n return (\n args.num_tweets,\n args.original_data,\n args.save_dir\n )\n\n\nif __name__==\"__main__\":\n (\n num_tweets,\n original_data,\n save_dir,\n ) = parse_args()\n\n\n # Load Kaggle dataset for MBTI\n df = pd.read_csv(original_data)\n\n # Preprocess text data\n df['text'] = df['text'].apply(lambda x: limit_tweets(x, num_tweets)) # Decrease the number of tweets for each sample by 25\n df['text'] = df['text'].apply(lambda x: index_tweets(x))\n\n # Split into train/test dataset\n text_train, text_test, label_train, label_test = train_test_split(df['text'], df['type'], test_size=0.1, random_state=SEED)\n\n # Store data in DatasetDict\n dataset = DatasetDict()\n dataset[\"train\"] = Dataset.from_dict(\n {\"text\": text_train, \"label\": label_train}\n )\n dataset[\"test\"] = Dataset.from_dict(\n {\"text\": text_test, \"label\": label_test}\n )\n\n save_preprocessed_dataset(save_dir, dataset)","repo_name":"Yuta555/LLM-personality-evaluation","sub_path":"personality_detection_model/scripts/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33812874465","text":"\"\"\"requests session object\"\"\"\nfrom __future__ import annotations\n\nimport io\nimport json\n\nfrom urllib.parse import urljoin\nfrom typing import Literal\n\nfrom pyetm.exceptions import UnprossesableEntityError, format_error_messages\n\nimport requests\nimport pandas as pd\n\nDecoder = Literal['bytes', 'BytesIO', 'json', 'text']\nMethod = Literal['delete', 'get', 'post', 'put']\n\n\nclass RequestsSession:\n \"\"\"requests-based session object\"\"\"\n\n @property\n def base_url(self) -> str | None:\n \"\"\"base url used in make_url\"\"\"\n return self.__base_url\n\n @base_url.setter\n def base_url(self, base_url: str | None) -> None:\n\n if base_url is None:\n self.__base_url = base_url\n\n if base_url is not None:\n self.__base_url = str(base_url)\n\n @property\n def headers(self) -> dict:\n \"\"\"headers that are passed in each request\"\"\"\n return self.__headers\n\n @headers.setter\n def headers(self, headers: dict | None) -> None:\n\n if headers is None:\n headers = {}\n\n self.__headers = dict(headers)\n\n def __init__(self, base_url: str | None = None,\n headers: dict | None = None, proxies: dict | None = None,\n stream: bool = False, verify: bool | str = True,\n cert: str | tuple | None = None):\n \"\"\"session object for pyETM clients\n\n Parameters\n ----------\n base_url: str, default None\n Base url to which the session connects, all request urls\n will be merged with the base url to create a destination.\n headers : dict, default None\n Headers that are always passed during requests, e.g. 
an\n authorization token.\n proxies: dict, default None\n Dictionary mapping protocol or protocol and\n hostname to the URL of the proxy.\n stream: boolean, default False\n Whether to immediately download the response content.\n verify: boolean or string, default True\n Either a boolean, in which case it controls whether we verify\n the server's TLS certificate, or a string, in which case it must\n be a path to a CA bundle to use. When set to False, requests will\n accept any TLS certificate presented by the server, and will ignore\n hostname mismatches and/or expired certificates, which will make\n your application vulnerable to man-in-the-middle (MitM) attacks.\n Setting verify to False may be useful during local development or\n testing.\n cert: string or tuple, default None\n If string; path to ssl client cert file (.pem).\n If tuple; ('cert', 'key') pair.\"\"\"\n\n # set parameters\n self.base_url = base_url\n self.headers = headers\n\n # set environment kwargs for method requests\n self._request_env = {\n \"proxies\": proxies, \"stream\": stream,\n \"verify\": verify, \"cert\": cert}\n\n # set session\n self._session = requests.Session()\n\n def __repr__(self):\n \"\"\"reproduction string\"\"\"\n\n # object environment\n env = \", \".join(f\"{k}={str(v)}\" for k, v in\n self._request_env.items())\n\n return f\"RequestsSession({env})\"\n\n def __str__(self):\n \"\"\"stringname\"\"\"\n return 'RequestsSession'\n\n def __enter__(self) -> RequestsSession:\n \"\"\"enter context manager\"\"\"\n\n # connect session\n self.connect()\n\n return self\n\n def __exit__(self, *args, **kwargs) -> None:\n \"\"\"exit context manager\"\"\"\n\n # close session\n self.close()\n\n def make_url(self, url: str | None = None):\n \"\"\"join url with base url\"\"\"\n return urljoin(self.base_url, url)\n\n def connect(self):\n \"\"\"connect session\"\"\"\n\n def close(self):\n \"\"\"close session\"\"\"\n\n def request(self, method: Method, url: str,\n decoder: Decoder = 'bytes', **kwargs):\n \"\"\"make request to api session\"\"\"\n\n retries = 5\n while retries:\n\n try:\n\n # merge kwargs with session envioronment kwargs\n kwargs = {**self._request_env, **kwargs}\n\n # add persistent headers\n headers = kwargs.get('headers', {})\n kwargs['headers'] = {**headers, **self.headers}\n\n # make method request\n request = getattr(self._session, method)\n with request(url, **kwargs) as resp:\n\n # check response\n if not resp.ok:\n\n # get debug message\n if resp.status_code == 422:\n self._error_report(resp)\n\n # raise for status\n resp.raise_for_status()\n\n # bytes decoding\n if decoder == \"bytes\":\n resp = resp.content\n\n # bytes as BytesIO\n elif decoder == \"BytesIO\":\n byts = resp.content\n resp = io.BytesIO(byts)\n\n # json decoding\n elif decoder == \"json\":\n resp = resp.json()\n\n # text decoding\n elif decoder == \"text\":\n resp = resp.text\n\n else:\n msg = f\"decoding method '{decoder}' not implemented\"\n raise NotImplementedError(msg)\n\n return resp\n\n # except connectionerrors and retry\n except requests.exceptions.ConnectionError as error:\n retries -= 1\n\n # raise after retries\n if not retries:\n raise error\n\n def _error_report(self, resp: requests.Response) -> None:\n \"\"\"create error report when api returns error messages.\"\"\"\n\n try:\n\n # attempt decode error message(s)\n msg = resp.json()\n errors = msg.get(\"errors\")\n\n except json.decoder.JSONDecodeError:\n\n # no message returned\n errors = None\n\n if errors:\n\n # format error message(s)\n msg = 
format_error_messages(errors)\n raise UnprossesableEntityError(msg)\n\n def delete(self, url: str | None = None,\n decoder: Decoder = 'text', **kwargs):\n \"\"\"delete request\"\"\"\n return self.request(\"delete\", self.make_url(url), decoder, **kwargs)\n\n def get(self, url: str | None = None,\n decoder: Decoder = 'json', **kwargs):\n \"\"\"get request\"\"\"\n return self.request(\"get\", self.make_url(url), decoder, **kwargs)\n\n def post(self, url: str | None = None,\n decoder: Decoder = 'json', **kwargs):\n \"\"\"post request\"\"\"\n return self.request(\"post\", self.make_url(url), decoder, **kwargs)\n\n def put(self, url: str | None = None,\n decoder: Decoder = 'json', **kwargs):\n \"\"\"put request\"\"\"\n return self.request(\"put\", self.make_url(url), decoder, **kwargs)\n\n def upload_series(self, url: str | None = None,\n series: pd.Series | None = None, name: str | None = None, **kwargs):\n \"\"\"upload series object\"\"\"\n\n # default to empty series\n if series is None:\n series = pd.Series()\n\n # set key as name\n if name is None:\n name = \"not specified\"\n\n # convert series to string\n data = series.to_string(index=False)\n form = {\"file\": (name, data)}\n\n return self.put(url=url, files=form, **kwargs)\n","repo_name":"robcalon/pyetm","sub_path":"src/pyetm/sessions/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21716746180","text":"from copy import deepcopy\n\nfrom common import sign\n\ninputs = [x.strip() for x in open(\"2022/inputs/14.txt\").readlines()]\n\n\nrocks = set()\nabyss = 0\nfor input in inputs:\n points = [complex(*[int(y) for y in x.split(\",\")]) for x in input.split(\" -> \")]\n\n for i in range(len(points) - 1):\n a, b = points[i : i + 2]\n rocks.add(a)\n rocks.add(b)\n\n while a != b:\n c = b - a\n if c.real:\n a += sign(c.real)\n if c.imag:\n a += sign(c.imag) * 1j\n rocks.add(a)\n abyss = max(abyss, a.imag + 1)\n\nsand = 0\nsource = 500\nrocks_1 = deepcopy(rocks)\nwhile source.imag <= abyss:\n down, left, right = source + 1j, source + 1j - 1, source + 1j + 1\n if down not in rocks_1:\n source = down\n elif left not in rocks_1:\n source = left\n elif right not in rocks_1:\n source = right\n else:\n rocks_1.add(source)\n sand += 1\n source = 500\n\nprint(sand)\n\nsand = 0\nsource = 500\nrocks_2 = deepcopy(rocks)\nwhile 500 not in rocks_2:\n down, left, right = source + 1j, source + 1j - 1, source + 1j + 1\n if source.imag >= abyss:\n rocks_2.add(source)\n sand += 1\n source = 500\n elif down not in rocks_2:\n source = down\n elif left not in rocks_2:\n source = left\n elif right not in rocks_2:\n source = right\n else:\n rocks_2.add(source)\n sand += 1\n source = 500\n\nprint(sand)\n","repo_name":"jaredculp/aoc","sub_path":"2022/src/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1864616026","text":"from __future__ import absolute_import\n\nimport te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\nfrom functools import reduce as reduceIns\nfrom te import platform as tbe_platform\n\n# shape limit for aicore equals 2**31\nSHAPE_SIZE_LIMIT = 2147483648\n\n\n# pylint: disable=locally-disabled,too-many-arguments,unused-argument\n@fusion_manager.register(\"sqrt\")\ndef sqrt_compute(input_data, 
output_data, kernel_name=\"sqrt\"):\n \"\"\"\n calculating data sqrt, y = x**0.5; mini does not support vsqrt, so exp(0.5*log(x)) is used\n\n Parameters\n ----------\n input_data: TVM tensor\n the placeholder of input data\n output_data: dict\n shape and dtype of output, should be same shape and type as input\n kernel_name: str\n cce kernel name, default value is sqrt\n\n Returns\n -------\n result: TVM tensor\n the result of sqrt\n \"\"\"\n dtype = input_data.dtype\n has_improve_precision = False\n if dtype == \"float16\" and\\\n tbe_platform.cce_conf.api_check_support(\"te.lang.cce.vsqrt\",\n \"float32\"):\n input_data = te.lang.cce.cast_to(input_data, \"float32\")\n has_improve_precision = True\n result = te.lang.cce.vsqrt(input_data)\n\n if has_improve_precision:\n result = te.lang.cce.cast_to(result, \"float16\")\n\n return result\n\n\n@util.check_input_type(dict, dict, str)\ndef sqrt(input_x, output_y, kernel_name=\"sqrt\"):\n \"\"\"\n algorithm: sqrt\n calculating data sqrt, y = x**0.5; mini does not support vsqrt, so exp(0.5*log(x)) is used\n\n Parameters\n ----------\n input_x : dict\n shape and dtype of input, only support float16, float32\n output_y: dict\n shape and dtype of output, should be same shape and type as input\n kernel_name : str\n cce kernel name, default value is sqrt\n\n Returns\n -------\n None\n \"\"\"\n input_shape = input_x.get(\"shape\")\n input_dtype = input_x.get(\"dtype\").lower()\n\n util.check_kernel_name(kernel_name)\n util.check_shape_rule(input_shape)\n util.check_shape_size(input_shape, SHAPE_SIZE_LIMIT)\n util.check_dtype_rule(input_dtype, (\"float16\", \"float32\"))\n\n fuseshape = [1]\n fuseshape[0] = reduceIns(lambda x, y: x*y, input_shape)\n input_data = tvm.placeholder(fuseshape, name=\"input_data\",\n dtype=input_dtype)\n result = sqrt_compute(input_data, output_y, kernel_name)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(result)\n\n config = {\"print_ir\": False,\n \"name\": kernel_name,\n \"tensor_list\": [input_data, result]}\n\n te.lang.cce.cce_build_code(sch, config)\n","repo_name":"jizhuoran/caffe-huawei-atlas-convertor","sub_path":"convertor/huawei/impl/sqrt.py","file_name":"sqrt.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"13733614171","text":"import os\nimport errno\n\ndotfiles_dir = os.path.join(os.environ['HOME'], 'dotfiles')\nEXCLUDE_FILES = [\n __file__,\n 'README.md'\n]\n\nfor filename in [f for f in os.listdir(dotfiles_dir) if f not in EXCLUDE_FILES]:\n dotfile_versioning = os.path.join(dotfiles_dir, filename)\n dotfile_inuse = os.path.join(os.environ['HOME'], filename)\n try:\n os.symlink(\n dotfile_versioning,\n dotfile_inuse\n )\n except Exception as e:\n if e.errno == errno.EEXIST:\n os.unlink(dotfile_inuse)\n os.symlink(\n dotfile_versioning,\n dotfile_inuse\n )\n else:\n raise e\n\n","repo_name":"nkimoto/dotfiles","sub_path":"export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4126806502","text":"import os, sys, tqdm, math, pickle\nimport numpy as np\nimport tensorflow as tf\n\nCM_PATH = './HOME/DATASETs/charmap.pickle'\n\ndef prepareXi(X, CM, MAX_LEN):\n CMm = max(CM.values()) + 1\n\n f = lambda x: CM[x] if x in CM else CMm\n Xi = np.zeros((len(X), MAX_LEN), np.uint8) \n for i, x in enumerate(X):\n xi = list(map(f, x))\n Xi[i, :len(xi)] = xi \n return Xi\n\ndef loadModel(path):\n gpus = 
tf.config.experimental.list_physical_devices('GPU') \n tf.config.experimental.set_memory_growth(gpus[0], True)\n return tf.keras.models.load_model(path, compile=False)\n\ndef inference(model, X, batch_size, MAX_LEN=16, CM_path=CM_PATH):\n \n with open(CM_path, 'rb') as f:\n CM = pickle.load(f)\n vocab_size = max(CM.values()) + 1\n \n Xi = prepareXi(X, CM, MAX_LEN)\n \n nbatch = math.ceil(len(Xi) / batch_size)\n \n for i in tqdm.trange(nbatch):\n xi = Xi[batch_size*i:batch_size*(i+1)]\n out = model(xi, training=False)[1].numpy()\n\n yield out\n","repo_name":"TheAdamProject/adams","sub_path":"NeuralNet/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"30119213945","text":"class direccion:\n def __init__(self):\n self.__calle = ''\n self.__piso = ''\n self.__ciudad = ''\n self.__cp = ''\n def getCalle(self):\n return self.__calle\n def getPiso(self):\n return self.__piso\n def getCiudad(self):\n return self.__ciudad\n def getCp(self):\n return self.__cp\n def setCalle(self,calle):\n self.__calle = calle\n def setPiso(self,piso):\n self.__piso = piso\n def setCiudad(self,ciudad):\n self.__ciudad = ciudad\n def setCp(self,cp):\n self.__cp = cp\n\nclass persona:\n def __init__(self):\n self.__nombre = ''\n self.__apellidos = ''\n self.__fechaNac = ''\n def getNombre(self):\n return self.__nombre\n def getApellidos(self):\n return self.__apellidos\n def getFechaNac(self):\n return self.__fechaNac\n def setNombre(self,nombre):\n self.__nombre = nombre\n def setApellidos(self,apellidos):\n self.__apellidos = apellidos\n def setFechaNac(self,fecha):\n self.__fechaNac = fecha\n\nclass telefono:\n def __init__(self):\n self.__movil = ''\n self.__fijo = ''\n self.__trabajo = ''\n def getMovil(self):\n return self.__movil\n def getFijo(self):\n return self.__fijo\n def getTrabajo(self):\n return self.__trabajo\n def setMovil(self,movil):\n self.__movil = movil\n def setFijo(self,fijo):\n self.__fijo = fijo\n def setTrabajo(self,trabajo):\n self.__trabajo = trabajo\n\nclass Contacto(direccion,persona,telefono):\n def __init__(self):\n self.__email = ''\n def getEmail(self):\n return self.__email\n def setEmail(self,mail):\n self.__email = mail\n def showContacto(self):\n print('---CONTACTO---')\n print('Nombre:',self.getNombre())\n print('Apellidos:',self.getApellidos())\n print('Fecha de nacimiento:',self.getFechaNac())\n print('Teléfono móvil:',self.getMovil())\n print('Teléfono fijo:',self.getFijo())\n print('Teléfono trabajo:',self.getTrabajo())\n print('Calle:',self.getCalle())\n print('Piso:',self.getPiso())\n print('Ciudad:',self.getCiudad())\n print('C.P.:',self.getCp())\n print('Email:',self.getEmail())\n\nclass agenda:\n \n def __init__(self,path):\n self.__listaContactos = []\n self.__path = path\n \n def cargarContactos(self):\n try:\n fichero = open(self.__path,'r')\n except:\n print('ERROR: El fichero no existe')\n else:\n contactos = fichero.readlines()\n fichero.close()\n if(len(contactos)>0):\n for contacto in contactos:\n datos = contacto.split('#')\n if(len(datos)==11):\n nuevoContacto = Contacto()\n nuevoContacto.setNombre(datos[0])\n nuevoContacto.setApellidos(datos[1])\n nuevoContacto.setFechaNac(datos[2])\n nuevoContacto.setMovil(datos[3])\n nuevoContacto.setFijo(datos[4])\n nuevoContacto.setTrabajo(datos[5])\n nuevoContacto.setCalle(datos[6])\n nuevoContacto.setPiso(datos[7])\n nuevoContacto.setCiudad(datos[8])\n 
nuevoContacto.setCp(datos[9])\n                        nuevoContacto.setEmail(datos[10])\n                        self.__listaContactos = self.__listaContactos + [nuevoContacto]\n                print('INFO: Cargados',len(self.__listaContactos),'contactos')\n    \n    def crearContacto(self,contacto):\n        self.__listaContactos = self.__listaContactos + [contacto]\n\n    def guardarContacto(self):\n        try:\n            fichero = open(self.__path,'w')\n        except:\n            print('ERROR: El fichero no se puede guardar')\n        else:\n            for contacto in self.__listaContactos:\n                texto = contacto.getNombre() + '#'\n                texto = texto + contacto.getApellidos() + '#'\n                texto = texto + contacto.getFechaNac() + '#'\n                texto = texto + contacto.getMovil() + '#'\n                texto = texto + contacto.getFijo() + '#'\n                texto = texto + contacto.getTrabajo() + '#'\n                texto = texto + contacto.getCalle() + '#'\n                texto = texto + contacto.getPiso() + '#'\n                texto = texto + contacto.getCiudad() + '#'\n                texto = texto + contacto.getCp() + '#'\n                texto = texto + contacto.getEmail() + '\\n'\n                fichero.write(texto)\n            fichero.close()\n\n    def mostrarAgenda(self):\n        print('### Agenda ###')\n        print('Numero de contactos:',len(self.__listaContactos),'\\n')\n        for contacto in self.__listaContactos:\n            contacto.showContacto()\n            print('######')\n\n    def buscarContacto(self,tipo,dato):\n        listaEncontrados = []\n        for contacto in self.__listaContactos:\n            if tipo == 1:\n                if contacto.getNombre() == dato:\n                    listaEncontrados = listaEncontrados + [contacto] \n            elif tipo == 2:\n                if contacto.getMovil() == dato:\n                    listaEncontrados = listaEncontrados + [contacto]\n            elif tipo == 3:\n                if contacto.getFijo() == dato:\n                    listaEncontrados = listaEncontrados + [contacto]\n            elif tipo == 4:\n                if contacto.getTrabajo() == dato:\n                    listaEncontrados = listaEncontrados + [contacto]\n        return listaEncontrados\n\n    def borrarContacto(self,tipo,dato):\n        listaFinal = []\n        for contacto in self.__listaContactos:\n            if tipo == 1:\n                if contacto.getNombre() != dato:\n                    listaFinal = listaFinal + [contacto] \n            elif tipo == 2:\n                if contacto.getMovil() != dato:\n                    listaFinal = listaFinal + [contacto]\n            elif tipo == 3:\n                if contacto.getFijo() != dato:\n                    listaFinal = listaFinal + [contacto]\n            elif tipo == 4:\n                if contacto.getTrabajo() != dato:\n                    listaFinal = listaFinal + [contacto]\n        print('INFO:',len(self.__listaContactos)-len(listaFinal),'contactos han sido borrados')\n        self.__listaContactos = listaFinal\n\ndef obtenerOpcion(texto):\n    leido = False\n    while not leido:\n        try:\n            numero = int(input(texto))\n        except ValueError:\n            print('El valor debe ser un numero')\n        else:\n            leido = True\n    return numero\n\ndef mostrarMenu():\n    print('########## MENÚ PRINCIPAL ####################')\n    print('1 - Mostrar contactos')\n    print('2 - Buscar contactos')\n    print('3 - Crear nuevo contacto')\n    print('4 - Borrar contactos')\n    print('5 - Guardar contactos')\n    print('6 - Salir')\n\ndef buscarContactos(agenda):\n    print('Buscar contactos')\n    print('1 - Nombre')\n    print('2 - Movil')\n    print('3 - Fijo')\n    print('4 - Trabajo')\n    print('5 - Salir')\n    finBuscar = False\n    while not finBuscar:\n        opcion = obtenerOpcion('Opción de búsqueda:')\n        if opcion == 5:\n            finBuscar = True\n        else:\n            encontrados = agenda.buscarContacto(opcion,input('Introduce el valor:'))\n            if len(encontrados) > 0:\n                print('### CONTACTOS ENCONTRADOS ###')\n                for item in encontrados:\n                    item.showContacto()\n                print('######')\n            else:\n                print('INFO: No se han encontrado contactos')\n\ndef procesoCrearContacto(agenda):\n    nuevoContacto = Contacto()\n    nuevoContacto.setNombre(input('Introduce el nombre:'))\n    nuevoContacto.setApellidos(input('Introduce los 
apellidos:'))\n nuevoContacto.setFechaNac(input('Introduce la fecha de nacimiento:'))\n nuevoContacto.setMovil(input('Introduce el movil:'))\n nuevoContacto.setFijo(input('Introduce el fijo:'))\n nuevoContacto.setTrabajo(input('Introduce el teléfono del trabajo:'))\n nuevoContacto.setCalle(input('Introduce la calle:'))\n nuevoContacto.setPiso(input('Introduce el piso:'))\n nuevoContacto.setCiudad(input('Introduce la ciudad:'))\n nuevoContacto.setCp(input('Introduce el C.P.:'))\n nuevoContacto.setEmail(input('Introduce el email:'))\n agenda.crearContacto(nuevoContacto)\n\ndef borrarContacto(agenda):\n print('Borrar contacto')\n print('1 - Nombre')\n print('2 - Movil')\n print('3 - Fijo')\n print('4 - Trabajo')\n print('5 - Salir')\n finBuscar = False\n while not finBuscar:\n opcion = obtenerOpcion('Opción de borrado:')\n if opcion == 5:\n finBuscar = True\n else:\n encontrados = agenda.borrarContacto(opcion,input('Introduce el valor:'))\n finBuscar = True\n\ndef Main():\n nuevaAgenda = agenda('./files/agenda.txt')\n nuevaAgenda.cargarContactos()\n fin = False\n while not fin:\n mostrarMenu()\n opcion = obtenerOpcion('Opción:')\n if(opcion==1):\n nuevaAgenda.mostrarAgenda()\n elif(opcion==2):\n buscarContactos(nuevaAgenda)\n elif(opcion==3):\n procesoCrearContacto(nuevaAgenda)\n elif(opcion==4):\n borrarContacto(nuevaAgenda)\n elif(opcion==5):\n nuevaAgenda.guardarContacto()\n elif(opcion==6):\n fin = True\n\nMain()\n \n","repo_name":"victoraagg/python-test","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":9496,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70202311304","text":"#Importing Libraries\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport json\n\n#Important stuff\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\ncreds = ServiceAccountCredentials.from_json_keyfile_name(\n 'dividend-investing-274019-b556916d3607.json', scope) # Your json file here\nclient = gspread.authorize(creds)\n\n#Access sheet\nspreadsheetName = 'The Dividend Chamipon'\nsheet = client.open(spreadsheetName)\nworksheet = sheet.get_worksheet(4)\n\n#Request ticker\nsymbol = input('Enter ticker symbol: ')\nsymbol = symbol.upper()\n\n#Lookup ticker \nticker_range = worksheet.range('B7:B859')\n\n#Lookup dividend\ndiv_range = worksheet.range('J7:J859')\n\n#Match the ticker with dividend\nfor cell in ticker_range:\n ticker_val = cell.value\n ticker_row = cell.row\n ticker_col = cell.col\n if ticker_val == symbol:\n dividend = worksheet.cell(ticker_row, 10).value\n print('Dividend: ' + dividend)\n\n\n\n","repo_name":"ummaromanasama/Candlestick_Chart","sub_path":"googlesheet.py","file_name":"googlesheet.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35310809040","text":"'''\nThis module implements a simple classroom example of probabilistic inference\nover the full joint distribution specified by AIMA, Figure 13.3.\nIt is based on the code from AIMA probability.py.\n\n@author: kvlinden\n@version Jan 1, 2013\n'''\n\nfrom probability import JointProbDist, enumerate_joint_ask\n'''\nExercise 4.2\ni. There are 16 entries in the table now.\nii. The probabilities sum up to 1 and they should. The probabilities in a sample space always sums to 1.\niii. No. We can use 0 and 1 to denote True or False, but essentially they can only take on these values. 
This is because\nin the real world an event either happens or doesn't happen (not considering Schrodinger's cat scenario)\niv. No, the probability of rain is not independent of the probability of toothache. Since people seem to have toothaches\nmore often when it rains, I chose the probability of rain to be 0.6 given toothache, and probability of rain = 0.3\nwhen no toothache.\n\nP(toothache | rain) = P(toothache ^ rain) / P(rain)\n                    = (0.0648 + 0.0072 + 0.0096 + 0.0384) \n                    / (0.0648 + 0.0072 + 0.0096 + 0.0384 + 0.0216 + 0.0024 + 0.0432 + 0.1728)\n                    ~= 0.333\nP(-toothache | rain) = 1 - 0.333 = 0.667\nP(Toothache | rain) = <0.333, 0.667> \n\n'''\n# The Joint Probability Distribution Fig. 13.3 with added Rain variable (from AIMA Python)\nPr = JointProbDist(['Toothache', 'Rain', 'Cavity', 'Catch'])\nT, F = True, False\nPr[T, T, T, T] = 0.0648;\nPr[T, T, F, T] = 0.0096\nPr[F, T, T, T] = 0.0216;\nPr[F, T, F, T] = 0.0432\nPr[T, F, T, T] = 0.0432;\nPr[T, F, F, T] = 0.0064\nPr[F, F, T, T] = 0.0504;\nPr[F, F, F, T] = 0.1008\nPr[T, T, T, F] = 0.0072;\nPr[T, T, F, F] = 0.0384\nPr[F, T, T, F] = 0.0024;\nPr[F, T, F, F] = 0.1728\nPr[T, F, T, F] = 0.0048;\nPr[T, F, F, F] = 0.0256\nPr[F, F, T, F] = 0.0056;\nPr[F, F, F, F] = 0.4032\n\nPtr = enumerate_joint_ask('Toothache', {'Rain': T}, Pr)\nprint(Ptr.show_approx())","repo_name":"zchen0925/Artificial-Intelligence-course-projects","sub_path":"lab04/lab_2.py","file_name":"lab_2.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31439191568","text":"\r\nquestions = ["Are you a man or woman?\\n1 - Man\\n2 - Woman\\n0 - Quit",\r\n             "What is your name?",\r\n             "How old are you?\\n1 - under 20\\n2 - 21 - 30\\n3 - 31 - 40\\n4 - Mature\\n0 - Quit",\r\n             "Are you in a relationship?\\n1 - Yes\\n2 - No\\n0 - Quit",\r\n             "What is your occupation? (Policeman, engineer, etc.)"]\r\n\r\ndiscourage_m = ["You remind me of a song: Dude looks like a lady.",\r\n                "What a stupid name.",\r\n                "Who did you have to suck off to get that job?"]\r\ndiscourage_f = ["Really? Manly jawline and those eye brows, Yikes.",\r\n                "Cheap name for a cheap girl.",\r\n                "Who did you have to suck off to get that job?"]\r\n\r\nrelationship_m_d = ["Hard to believe.", "Momma is still #1."]\r\nrelationship_f_d = ["He is cheating on you.", "Still aiming too high."]\r\n\r\ndiscourage_age_m = ["So.. still a virgin. Good.",\r\n                    "By now you must know that girls do not like you?",\r\n                    "Say goodbye to your early morning erection.",\r\n                    "Why are you still here?"]\r\ndiscourage_age_f = ["Inexperienced in life but not in bed.",\r\n                    "I want to be your 111th boyfriend.",\r\n                    "Woman's 30's is a man's 50's, getting old and useless.",\r\n                    "I met the fathers of your children, all five of them."]\r\n\r\nmotivate_m = ["Wrong. 
You are The Man.\",\r\n \"A proper name for a Hero.\",\r\n \"You are a gift for Mankind.\"]\r\nmotivate_f = [\"And a very beautiful one.\",\r\n \"Lovely.\",\r\n \"With a smile like that they should pay you extra!\"]\r\nmotivate_age_m = [\"A Stud in the making.\",\r\n \"Full of testosterone, watch out ladies!\",\r\n \"A Man in his prime.\",\r\n \"You still got the charms, handsome.\"]\r\nmotivate_age_f = [\"Young and Beautiful, will you marry me?\",\r\n \"They still don't deserve you!\",\r\n \"Nothing beats beauty with experience.\",\r\n \"Still more than anyone deserves.\"]\r\nmotivate_rel_m = [\"Of course you are, you handsome Stud-Muffin!\",\r\n \"Too handsome for your own good.\"]\r\nmotivate_rel_f = [\"I did not want to hear that. :(\",\r\n \"Really? Well, I am single.. if you are interested in an automaton.\"]\r\n\r\nisMale = True\r\n\r\n\r\ndef setMode():\r\n print(\"EMOT-O-TRON >> Select mode:\")\r\n print(\"1 - Motivate me.\")\r\n print(\"2 - Discourage me.\") \r\n print(\"0 - I do not want to play this game. (Quit)\")\r\n index = input(\"Choice: \")\r\n return index\r\n\r\ndef discourage(a, q): \r\n\r\n global isMale\r\n print(\"\\n\")\r\n if q == 0 and a == '1':\r\n isMale = True\r\n print(discourage_m[q])\r\n elif q == 0 and a == '2':\r\n isMale = False\r\n print(discourage_f[q])\r\n\r\n if isMale:\r\n if q == 1:\r\n print(discourage_m[q])\r\n elif q == 2:\r\n print(discourage_age_m[int(a) - 1])\r\n elif q == 3:\r\n print(relationship_m_d[int(a) - 1])\r\n elif q == 4:\r\n print(discourage_m[q - 2])\r\n else:\r\n if q == 1:\r\n print(discourage_f[q])\r\n elif q == 2:\r\n print(discourage_age_f[int(a) - 1])\r\n elif q == 3:\r\n print(relationship_f_d[int(a) - 1])\r\n elif q == 4:\r\n print(discourage_f[q - 2])\r\n\r\n print(\"\\n\")\r\n\r\ndef motivate(a, q):\r\n\r\n global isMale\r\n print(\"\\n\")\r\n \r\n if q == 0 and a == '1':\r\n isMale = True\r\n print(motivate_m[q])\r\n elif q == 0 and a == '2':\r\n isMale = False\r\n print(motivate_f[q])\r\n\r\n if isMale:\r\n if q == 1:\r\n print(motivate_m[q])\r\n elif q == 2:\r\n print(motivate_age_m[int(a) - 1])\r\n elif q == 3:\r\n print(motivate_rel_m[int(a) - 1])\r\n elif q == 4:\r\n print(motivate_m[q - 2])\r\n else:\r\n if q == 1:\r\n print(motivate_f[q])\r\n elif q == 2:\r\n print(motivate_age_f[int(a) - 1])\r\n elif q == 3:\r\n print(motivate_rel_f[int(a) - 1])\r\n elif q == 4:\r\n print(motivate_f[q - 2])\r\n\r\n print(\"\\n\")\r\n\r\ndef interact(message):\r\n print(\"EMOT-O-TRON >> \\n{}\".format(message)) \r\n answer = input(\"Answer: \")\r\n return answer\r\n\r\n\r\ndef getMessage(q):\r\n return questions[q]\r\n\r\n\r\ndef main(): \r\n run = True\r\n q = 0\r\n\r\n index = setMode()\r\n\r\n if index == '0':\r\n run = False\r\n print(\"Goodbye\")\r\n \r\n while run:\r\n if q <= 4:\r\n answer = interact(getMessage(q))\r\n if index == '0' or answer == '0':\r\n run = False\r\n print(\"Goodbye\")\r\n elif index == '1':\r\n motivate(answer, q)\r\n elif index == '2':\r\n discourage(answer, q)\r\n else:\r\n print(\"Goodbye and good luck.\")\r\n run = False\r\n break;\r\n \r\n q += 1\r\n\r\n if answer == '0':\r\n run = False\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"mkkjhn/emototron","sub_path":"emot.py","file_name":"emot.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29678126574","text":"#!/usr/bin/python3\r\n######################################################################\r\n# File Name: 
config_vars.py\r\n#\r\n# Description: Program configuration variables\r\n#\r\n# File History\r\n# 03/14/2021 - Andrew Yoder : Initial Release\r\n# 11/06/2021 - Andrew Yoder : Specifically call out python3\r\n# 01/03/2023 - Andrew Yoder : Updated to pull provider details from database\r\n# instead of being hardcoded in this file\r\n######################################################################\r\n\r\nimport sqlite3\r\n\r\nfrom platform_config import pt_db\r\n\r\n    # Connect to db and fetch all contents\r\ndb = sqlite3.connect(pt_db)\r\ncursor = db.cursor()\r\ncursor.execute('''SELECT * FROM provider_details''')\r\nprovider_details_data = cursor.fetchall()\r\n\r\n    # Print all clients and information\r\nfor provider in provider_details_data:\r\n    provider_name = provider[1]\r\n    provider_title = provider[2]\r\n    provider_phone = provider[3]\r\n    provider_email = provider[4] \r\n    provider_location = provider[5] + \", \" + provider[6] + \", \" + provider[7]\r\n    paypal_link = provider[8]","repo_name":"ayoder770/Project-Tracking-System","sub_path":"python/config_vars.py","file_name":"config_vars.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74008966026","text":"\"\"\"\n@author: Alfons\n@contact: alfons_xh@163.com\n@file: timer.py\n@time: 18-10-8 下午10:44\n@version: v1.0 \n\"\"\"\nimport time\n\n\ndef timer(func):\n    def wrapper(*args, **kwargs):\n        startTime = time.time()\n        ret = func(*args, **kwargs)\n        stopTime = time.time()\n\n        print(\"{func} use time -> {time}'s\".format(func=func.__name__,\n                                                   time=stopTime - startTime))\n        return ret\n\n    return wrapper\n","repo_name":"Alfonsxh/AlgorithmsAreaNotes","sub_path":"Python/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18002178805","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\n\n\n#creates network\nclass network(nn.Module):\n    def __init__(self, inputSize, outputSize):\n        super(network,self).__init__()\n        #This is a 3-layer fully-connected network\n        #input layer has inputSize nodes, the hidden layers have 100 and 50 nodes, output layer has outputSize nodes\n        self.fc1 = nn.Linear(inputSize, 100)\n        self.fc2 = nn.Linear(100,50)\n        self.fc3 = nn.Linear(50,outputSize)\n    \n    def forward(self, x):\n        x = self.fc1(x)\n        x = F.relu(x)\n        x = self.fc2(x)\n        x = F.relu(x)\n        x = self.fc3(x)\n        return x\n    \ndef run():\n    #Sets device\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n    #Parameters\n    inputSize = 784\n    numClasses = 10\n    learningRate = 0.001\n    batchSize = 64 #Batch size fed through the neural network\n    numEpoch = 3 #Number of iterations through the neural network\n\n    #Loading training data\n    trainDataset = datasets.MNIST(root='dataset/', train=True, transform=transforms.ToTensor(), download = True)\n    trainLoader = DataLoader(dataset = trainDataset, batch_size = batchSize, shuffle = True) \n\n    #Loading testing data\n    testDataset = datasets.MNIST(root='dataset/', train=False, transform=transforms.ToTensor(), download = True)\n    testLoader = DataLoader(dataset = testDataset, batch_size = batchSize, shuffle = True)\n\n    \n    \n    #Initializing network to device\n    model = network(inputSize = inputSize, outputSize = 
numClasses).to(device)\n\n #Loss and optimizer\n criteron = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(),lr = learningRate)\n\n #Training network\n for epoch in range(numEpoch):\n for batchId, (data, target) in enumerate(trainLoader):\n #Puts data into GPU\n data = data.to(device = device)\n target = target.to(device = device)\n \n #Reshaping data\n data = data.reshape(data.shape[0],-1)\n \n #Forward\n scores = model(data)\n loss = criteron(scores,target)\n \n #Backward\n optimizer.zero_grad()\n loss.backward()\n \n #Gradient descent / adam step\n optimizer.step()\n \n def accuracy(loader, model):\n numCorrect = 0\n numSamples = 0\n \n model.eval()\n \n with torch.no_grad():\n for x, y in loader:\n x = x.to(device = device)\n y = y.to(device = device)\n \n x = x.reshape(x.shape[0],-1)\n scores = model(x)\n _, predictions = scores.max(1)\n numCorrect += (predictions == y).sum()\n numSamples += predictions.size(0)\n \n print(f'Got {numCorrect} / {numSamples} with a accuracy of {((float(numCorrect)/float(numSamples))*100):.2f}')\n \n model.train()\n\n accuracy(trainLoader, model)\n accuracy(testLoader, model)\n \n torch.save(model.state_dict(), 'modelTrained.pt')\n","repo_name":"FaJacos/Number_Detection","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20036808807","text":"import csv\nimport os\nimport unicodedata\nimport re\nfrom typing import Tuple, List\nimport time\nimport math\n\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom torch import Tensor, nn\nfrom torchtext.data import get_tokenizer\n\n\nclass LanguageDictionary:\n \"\"\"Collects word metadata for building one-hot-encoding vector.\"\"\"\n SOS_token = 0\n EOS_token = 1\n\n def __init__(self, name: str):\n self.name = name\n self.word2index = {}\n self.word2count = {}\n self.index2word = {LanguageDictionary.SOS_token: 'SOS', LanguageDictionary.EOS_token: 'EOS'}\n self.n_words = 2\n\n def add_word(self, word: str):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n def add_sentence(self, sentence: str):\n # TODO: improve tokenization\n for word in sentence.split(' '):\n self.add_word(word)\n\n\ndef unicode_to_ascii(s):\n \"\"\"Turn a Unicode string to plain ASCII, thanks to\n https://stackoverflow.com/a/518232/2809427\n \"\"\"\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\n\ndef normalize_string(s):\n \"\"\"Lowercase, trim, and remove non-letter characters\"\"\"\n s = unicode_to_ascii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\n\ndef read_parallel_corpus(lang1: str, lang2: str, reverse: bool = False) -> Tuple[LanguageDictionary,\n LanguageDictionary,\n List[List[str]]]:\n \"\"\"Reads a parallel text corpus from a text file. Assumes that there are two columns separated by\n a tab character. 
Words are separated by spaces.\"\"\"\n print(\"Reading lines...\")\n\n # Read the file and split into lines\n txt_path = os.path.join('..', f'data/{lang1}-{lang2}.txt')\n lines = open(txt_path, encoding='utf-8').read().strip().split('\\n')\n\n # Split every line into pairs and normalize\n pairs = [[normalize_string(s) for s in l.split('\\t')] for l in lines]\n\n # Reverse pairs\n if reverse:\n pairs = [list(reversed(p)) for p in pairs]\n\n # Make language dictionaries\n return LanguageDictionary(lang1), LanguageDictionary(lang2), pairs\n\n\nMAX_SENTENCE_LENGTH = 10\n\neng_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s \",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n)\n\n\ndef filter_pair(language_pairs: List[str]):\n assert len(language_pairs) == 2, \"Should always be of length 2\"\n return len(language_pairs[0].split(' ')) < MAX_SENTENCE_LENGTH and \\\n len(language_pairs[1].split(' ')) < MAX_SENTENCE_LENGTH and \\\n language_pairs[1].startswith(eng_prefixes)\n\n\ndef filter_pairs(pairs: List[List[str]]):\n return [pair for pair in pairs if filter_pair(pair)]\n\n\ndef prepare_data(lang1: str, lang2: str, reverse: bool = False):\n input_dict, output_dict, pairs = read_parallel_corpus(lang1, lang2, reverse)\n print(f\"Read {len(pairs)} sentence pairs\")\n pairs = filter_pairs(pairs)\n print(f\"Trimmed to {len(pairs)} sentence pairs\")\n for text in pairs:\n input_dict.add_sentence(text[0])\n output_dict.add_sentence(text[1])\n print(f\"Counted words:\")\n print(f\"Input language {lang1} - {input_dict.n_words}\")\n print(f\"Output language {lang2} - {output_dict.n_words}\")\n return input_dict, output_dict, pairs\n\n\ndef as_minutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef time_since(since, percent):\n seconds = time.time() - since\n remaining_seconds = (seconds / percent) - seconds\n return '%s (- %s)' % (as_minutes(seconds), as_minutes(remaining_seconds))\n\n\ndef indexes_from_sentence(lang_dictionary: LanguageDictionary, sentence: str) -> List[int]:\n return [lang_dictionary.word2index[word] for word in sentence.split(' ')]\n\n\ndef tensor_from_sentence(lang_dictionary: LanguageDictionary, sentence: str) -> Tensor:\n indexes = indexes_from_sentence(lang_dictionary, sentence)\n indexes.append(LanguageDictionary.EOS_token)\n return torch.tensor(indexes, dtype=torch.long).view(-1, 1)\n\n\ndef tensors_from_pair(input_lang: LanguageDictionary, output_lang: LanguageDictionary,\n pair: List[str]) -> Tuple[Tensor, Tensor]:\n input_tensor = tensor_from_sentence(input_lang, pair[0])\n target_tensor = tensor_from_sentence(output_lang, pair[1])\n return input_tensor, target_tensor\n\n\ndef write_losses(output_path: str, training_loss: List[float], validation_loss: List[float]):\n \"\"\"Write training and validation losses to CSV file.\"\"\"\n assert len(training_loss) == len(validation_loss), f\"{len(training_loss)} \" \\\n f\"!= {len(validation_loss)}\"\n with open(output_path, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['epoch', 'training_loss', 'validation_loss'])\n for i, (train, val) in enumerate(zip(training_loss, validation_loss)):\n writer.writerow([i, train, val])\n print(f\"Train and validation losses are output to {output_path}\")\n\n\ndef cosine_similarity(t1: Tensor, t2: Tensor):\n cos = nn.CosineSimilarity(dim=0, eps=1e-6)\n return cos(t1, t2)\n\n\ndef l2_norm(t1: Tensor, t2: Tensor):\n return torch.norm(torch.dot(t1, t2))\n\n\ndef train_val_test_split(data_path: 
str, output_dir: str) -> None:\n \"\"\"Create train/val/test split from randomized input data and write to an output directory.\"\"\"\n df = pd.read_csv(data_path, sep='\\t')\n train_df, val_df, test_df = np.split(df.sample(frac=1), [int(.8 * len(df)), int(.9 * len(df))])\n\n print(f'Train samples: {len(train_df):,}')\n print(f'Validate samples: {len(val_df):,}')\n print(f'Test samples: {len(test_df):,}')\n\n train_df.to_csv(os.path.join(output_dir, 'train.tsv'), sep='\\t', index=False)\n val_df.to_csv(os.path.join(output_dir, 'val.tsv'), sep='\\t', index=False)\n test_df.to_csv(os.path.join(output_dir, 'test.tsv'), sep='\\t', index=False)\n\n\ndef tokenize_english_text(input_path: str, output_path: str, col_index: int = 0):\n \"\"\"Tokenize and lowercase text\"\"\"\n # Download en tokenizer with `python -m spacy download en`\n en_tokenizer = get_tokenizer('spacy', language='en_core_web_sm')\n tokenized_rows = []\n with open(input_path) as f:\n reader = csv.reader(f, delimiter='\\t')\n header_text = next(reader)\n for row in reader:\n tokenized_rows.append([' '.join(en_tokenizer(row[col_index].lower()))])\n with open(output_path, 'w') as f:\n writer = csv.writer(f, delimiter='\\t')\n writer.writerow(header_text)\n writer.writerows(tokenized_rows)\n\n","repo_name":"BushMinusZero/deep-learning-skunk-works","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"503440975","text":"import nltk\n\nnltk.download('stopwords')\ntext=\"\"\"welcome you to programming knowledge. Lets start with our first tutorial on NLTK. We shall learn the basics of NLTK here.\"\"\"\ndemowords= [\"playing\", \"happiness\",\"going\",\"doing\",\"yes\",\"no\",\"1\",\"having\",\"had\", \"haved\"]\nfrom nltk.corpus import stopwords\nstop_words= stopwords.words (\"english\")\n# print (stop_words)\n# print(\"\\n\\n\")\n# print (set(stop_words))\n\nfrom nltk.tokenize import word_tokenize, sent_tokenize\ntokenize_words= word_tokenize(text)\n#print(tokenize_words )\ntokenize_words_without_stop_words = []\nfor word in tokenize_words:\n if word not in stop_words:\n tokenize_words_without_stop_words.append(word)\nprint(\"stop words that got removed\", set(tokenize_words)-set(tokenize_words_without_stop_words))","repo_name":"composureR3j3c/NTLK","sub_path":"ref/2_stopwords.py","file_name":"2_stopwords.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2669340379","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n import sys\n from calculator_1 import add, sub, mul, div\n argv = sys.argv\n slen = len(argv)\n if (slen != 4):\n print(\"Usage: ./100-my_calculator.py \")\n sys.exit(1)\n ops = [\"+\", \"-\", \"*\", \"/\"]\n op = argv[2]\n if op not in ops:\n print(\"Unknown operator. 
Available operators: +, -, * and /\")\n sys.exit(1)\n a = int(argv[1])\n b = int(argv[3])\n if (op == ops[0]):\n print(\"{} + {} = {}\".format(a, b, add(a, b)))\n elif (op == ops[1]):\n print(\"{} - {} = {}\".format(a, b, sub(a, b)))\n elif (op == ops[2]):\n print(\"{} * {} = {}\".format(a, b, mul(a, b)))\n elif (op == ops[3]):\n print(\"{} / {} = {}\".format(a, b, div(a, b)))\n","repo_name":"marybngozi/alx-higher_level_programming","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4654353071","text":"class Solution:\n def distinctNames(self, ideas: List[str]) -> int:\n n = ord(\"z\") - ord(\"a\") + 1\n suffix = [set() for _ in range(n)]\n for word in ideas:\n letter = word[0]\n suffix[ord(letter) - ord(\"a\")].add(word[1:])\n counter = 0\n for i in range(n):\n for j in range(i + 1, n):\n mutual = len(suffix[i] & suffix[j])\n counter += 2 * (len(suffix[i]) - mutual) * (len(suffix[j]) - mutual)\n\n return counter\n","repo_name":"Xrenya/Algorithms","sub_path":"_2306.py","file_name":"_2306.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41694170964","text":"\"\"\"\r\nGiven a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then right\r\nto left for the next level and alternate between).\r\n\r\nFor example:\r\nGiven binary tree {3,9,20,#,#,15,7},\r\n 3\r\n / \\\r\n 9 20\r\n / \\\r\n 15 7\r\nreturn its zigzag level order traversal as:\r\n[\r\n [3],\r\n [20,9],\r\n [15,7]\r\n]\r\nconfused what \"{1,#,2,3}\" means? 
> read more on how binary tree is serialized on OJ.\r\n\"\"\"\r\n__author__ = 'Danyang'\r\n# Definition for a binary tree node\r\nclass TreeNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\nclass Solution:\r\n def zigzagLevelOrder(self, root):\r\n \"\"\"\r\n BFS, stack & queue\r\n :param root: a tree node\r\n :return: a list of lists of integers\r\n \"\"\"\r\n if not root:\r\n return []\r\n\r\n result = []\r\n lst = [root]\r\n direction = False\r\n while lst:\r\n if direction:\r\n result.append([element.val for element in lst])\r\n else:\r\n result.append([element.val for element in reversed(lst)])\r\n\r\n for i in range(len(lst)): # evaluation time\r\n element = lst.pop(0) # queue \r\n if element.left:\r\n lst.append(element.left)\r\n if element.right:\r\n lst.append(element.right)\r\n direction = not direction\r\n return result\r\n\r\n\r\n","repo_name":"algorhythms/LeetCode","sub_path":"103 Binary Tree Zigzag Level Order Traversal.py","file_name":"103 Binary Tree Zigzag Level Order Traversal.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"} +{"seq_id":"33240533927","text":"import pandas as pd\nimport shutil\n\n# 設定\nPREF_CODE = \"160008\"\nPREF_NAME = \"富山県\"\nCITY_NAME = \"\"\nURL = \"https://toyama-pref.box.com/shared/static/8wb3518lh47jx8iv81f80jsmqnnlw46m.xlsx\"\n\n\"\"\"\n患者等発生状況\n\"\"\"\ndf_patients = pd.read_excel(\n URL,\n sheet_name=\"患者等発生状況\",\n index_col=\"No\",\n dtype={\"発症日\": \"object\", \"年代\": \"object\", \"備考\": \"object\"},\n)\ndf_patients[\"発症日\"]=df_patients[\"発症日\"].apply(lambda x : str(x).replace(\"nan\",\"\").replace(\" 00:00:00\",\"\"))\ndf_patients.to_csv(\"./data/toyama_patients.csv\", encoding=\"utf_8_sig\")\n\ndf_patients.rename(\n columns={\n \"検査結果判明日\": \"公表_年月日\",\n \"発症日\": \"発症_年月日\",\n \"居住地\": \"患者_居住地\",\n \"年代\": \"患者_年代\",\n \"性別\": \"患者_性別\",\n \"職業\": \"患者_職業\",\n \"症状\": \"患者_状態\",\n \"渡航歴の有無\": \"患者_渡航歴の有無フラグ\",\n \"状態\": \"患者_退院済フラグ\",\n },\n inplace=True,\n)\n\ndf_patients[\"全国地方公共団体コード\"] = PREF_CODE\ndf_patients[\"都道府県名\"] = PREF_NAME\ndf_patients[\"市区町村名\"] = CITY_NAME\n\ndf_patients[\"患者_退院済フラグ\"] = (\n df_patients[\"患者_退院済フラグ\"].replace({\"入院中\": 0, \"入院調整中\": 0, \"入院\": 0, \"退院\": 1, \"死亡\": 1, \"調査中\": None}).astype(\"Int64\")\n)\n\ndf_patients[\"患者_渡航歴の有無フラグ\"] = (\n df_patients[\"患者_渡航歴の有無フラグ\"].replace({\"x\": 0, \"o\": 1}).astype(\"Int64\")\n)\n\ndf_patients[\"患者_症状\"] = \"\"\n\ndf_patients[\"患者_年代\"] = df_patients[\"患者_年代\"].replace({\"90代以上\": \"90歳以上\"})\n\npatients = df_patients.loc[\n :,\n [\n \"全国地方公共団体コード\",\n \"都道府県名\",\n \"市区町村名\",\n \"公表_年月日\",\n \"発症_年月日\",\n \"患者_居住地\",\n \"患者_年代\",\n \"患者_性別\",\n \"患者_職業\",\n \"患者_状態\",\n \"患者_症状\",\n \"患者_渡航歴の有無フラグ\",\n \"患者_退院済フラグ\",\n \"備考\",\n ],\n]\n\npatients.to_csv(\n \"./data/160008_toyama_covid19_patients.csv\",\n index=False,\n encoding=\"utf_8_sig\",\n)\n\n\"\"\"\n日別集計\n\"\"\"\ndf_counts = pd.read_excel(\n URL,\n sheet_name=\"日別集計\",\n engine=\"openpyxl\",\n index_col=\"年月日\",\n parse_dates=True,\n dtype={\n \"PCR検査数\": \"Int64\",\n \"抗原検査数\": \"Int64\",\n \"陰性人数\": \"Int64\",\n \"陽性人数\": \"Int64\",\n \"一般相談件数\": \"Int64\",\n \"受診・相談センター相談件数\": \"Int64\",\n \"退院者数\": \"Int64\",\n \"死亡者数\": \"Int64\",\n \"備考\": \"object\",\n }\n )\ndf_counts.to_csv(\"./data/toyama_counts.csv\", encoding=\"utf_8_sig\")\n\n# 検査実施人数\ndf_counts[\"実施_年月日\"] = df_counts.index.strftime(\"%Y-%m-%d\")\n\n# 陰性確認数\ndf_counts[\"完了_年月日\"] = 
df_counts.index.strftime(\"%Y-%m-%d\")\n\n# コールセンター相談件数\ndf_counts[\"受付_年月日\"] = df_counts.index.strftime(\"%Y-%m-%d\")\n\ndf_counts[\"全国地方公共団体コード\"] = PREF_CODE\ndf_counts[\"都道府県名\"] = PREF_NAME\ndf_counts[\"市区町村名\"] = CITY_NAME\n\n# 検査実施人数\ntest_people = df_counts.loc[\n :, [\"実施_年月日\", \"全国地方公共団体コ���ド\", \"都道府県名\", \"市区町村名\", \"PCR検査数\", \"備考\"]\n].copy()\n\ntest_people.rename(columns={\"PCR検査数\": \"検査実施_人数\"}, inplace=True)\n\ntest_people.to_csv(\n \"./data/160008_toyama_covid19_test_people.csv\",\n index=False,\n encoding=\"utf_8_sig\",\n)\n\nantigen_test_people = df_counts.loc[\n :, [\"実施_年月日\", \"全国地方公共団体コード\", \"都道府県名\", \"市区町村名\", \"抗原検査数\", \"備考\"]\n].copy()\n\nantigen_test_people.rename(columns={\"抗原検査数\": \"検査実施_人数\"}, inplace=True)\n\nantigen_test_people.to_csv(\n \"./data/160008_toyama_covid19_antigen_test_people.csv\",\n index=False,\n encoding=\"utf_8_sig\",\n)\n\n# 陰性確認数\ndf_counts.rename(columns={\"退院者数\": \"陰性確認_件数\"}, inplace=True)\n\nconfirm_negative = df_counts.loc[\n :, [\"完了_年月日\", \"全国地方公共団体コード\", \"都道府県名\", \"市区町村名\", \"陰性確認_件数\", \"備考\"]\n].copy()\n\nconfirm_negative.to_csv(\n \"./data/160008_toyama_covid19_confirm_negative.csv\",\n index=False,\n encoding=\"utf_8_sig\",\n)\n\n# 入退院確認数\ndf_counts.rename(columns={\"陽性人数\": \"陽性確認_件数\", \"死亡者数\": \"死亡確認_件数\"}, inplace=True)\n\nconfirm_patients = df_counts.loc[\n :,\n [\"完了_年月日\", \"全国地方公共団体コード\", \"都道府県名\", \"市区町村名\", \"陽性確認_件数\", \"陰性確認_件数\", \"死亡確認_件数\", \"備考\"],\n].copy()\n\nconfirm_patients.to_csv(\n \"./data/160008_toyama_covid19_confirm_patients.csv\",\n index=False,\n encoding=\"utf_8_sig\",\n)\n\n# 一般相談件数\ncall_center = df_counts.loc[\n :, [\"受付_年月日\", \"全国地方公共団体コード\", \"都道府県名\", \"市区町村名\", \"一般相談件数\"]\n].copy()\n\ncall_center.rename(columns={\"一般相談件数\": \"相談件数\"}, inplace=True)\ncall_center.to_csv(\n \"./data/160008_toyama_covid19_call_center.csv\",\n index=False,\n encoding=\"utf_8_sig\",\n)\n\n# 帰国者・接触者相談センター相談件数\nhot_line = df_counts.loc[\n :, [\"受付_年月日\", \"全国地方公共団体コード\", \"都道府県名\", \"市区町村名\", \"受診・相談センター相談件数\"]\n].copy()\n\nhot_line.rename(columns={\"受診・相談センター相談件数\": \"相談件数\"}, inplace=True)\nhot_line.to_csv(\n \"./data/160008_toyama_covid19_hot_line.csv\",\n index=False,\n encoding=\"utf_8_sig\",\n)\n\nshutil.make_archive(\"./opendata\", \"zip\", root_dir=\"./data\")\n","repo_name":"terachan0117/covid19-toyama-opendata","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7630687434","text":"\"\"\"\ndemo prompt\n\"\"\"\n\nimport argparse\nfrom tui_editor import TuiEditor\n\n\nif __name__ == '__main__':\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\"--height\", type=int, default=10)\n arg_parser.add_argument(\"--show-line-numbers\", action=\"store_true\")\n args = arg_parser.parse_args()\n\n print('Hello World! 
Ctrl+S to save.')\n e = TuiEditor()\n if args.show_line_numbers:\n e.show_line_numbers = True\n keys = []\n e.on_key = lambda key: (keys.append(key), e.set_status_lines([\"key: %r\" % key]), None)[-1]\n e.edit()\n print(\"Result:\", repr(e.get_text()))\n print(\"Keys:\", repr(keys))\n print('Good bye!')\n","repo_name":"albertz/py-tui-editor","sub_path":"demo-prompt.py","file_name":"demo-prompt.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"6427993989","text":"import string\n\nimport numpy as np\nimport pytest\nfrom torch import nn\nfrom torchvision.transforms import ToTensor, Normalize\nfrom sklearn.model_selection import train_test_split\n\nfrom pytorch_widedeep.models import Wide, TabMlp, Vision, BasicRNN, WideDeep\nfrom pytorch_widedeep.training import Trainer\n\nnp.random.seed(1)\n\n# Wide array\nX_wide = np.random.choice(50, (32, 10))\n\n# Deep Array\ncolnames = list(string.ascii_lowercase)[:10]\nembed_cols = [np.random.choice(np.arange(5), 32) for _ in range(5)]\nembed_input = [(u, i, j) for u, i, j in zip(colnames[:5], [5] * 5, [16] * 5)]\ncont_cols = [np.random.rand(32) for _ in range(5)]\nX_tab = np.vstack(embed_cols + cont_cols).transpose()\n\n# Text Array\npadded_sequences = np.random.choice(np.arange(1, 100), (32, 48))\nX_text = np.hstack((np.repeat(np.array([[0, 0]]), 32, axis=0), padded_sequences))\nvocab_size = 110\n\n# Image Array\nX_img = np.random.choice(256, (32, 224, 224, 3))\nX_img_norm = X_img / 255.0\n\n# Target\ntarget = np.random.choice(2, 32)\n\n# train/validation split\n(\n X_wide_tr,\n X_wide_val,\n X_tab_tr,\n X_tab_val,\n X_text_tr,\n X_text_val,\n X_img_tr,\n X_img_val,\n y_train,\n y_val,\n) = train_test_split(X_wide, X_tab, X_text, X_img, target)\n\n# build model components\nwide = Wide(np.unique(X_wide).shape[0], 1)\ndeeptabular = TabMlp(\n column_idx={k: v for v, k in enumerate(colnames)},\n cat_embed_input=embed_input,\n continuous_cols=colnames[-5:],\n mlp_hidden_dims=[32, 16],\n mlp_dropout=[0.5, 0.5],\n)\ndeeptext = BasicRNN(vocab_size=vocab_size, embed_dim=32, padding_idx=0)\ndeepimage = Vision(pretrained_model_setup=\"resnet18\", n_trainable=0)\n\n# transforms\nmean = [0.406, 0.456, 0.485] # BGR\nstd = [0.225, 0.224, 0.229] # BGR\ntransforms1 = [ToTensor, Normalize(mean=mean, std=std)]\ntransforms2 = [Normalize(mean=mean, std=std)]\n\ndeephead_ds = nn.Sequential(nn.Linear(16, 8), nn.Linear(8, 4))\ndeephead_ds.output_dim = 4\ndeephead_dt = nn.Sequential(nn.Linear(64, 8), nn.Linear(8, 4))\ndeephead_dt.output_dim = 4\ndeephead_di = nn.Sequential(nn.Linear(512, 8), nn.Linear(8, 4))\ndeephead_di.output_dim = 4\n\n# #############################################################################\n# Test that runs many possible scenarios of data inputs I can think off.\n# Surely users will input something unexpected\n# #############################################################################\n\n\n@pytest.mark.parametrize(\n \"X_wide, X_tab, X_text, X_img, X_train, X_val, target, val_split, transforms\",\n [\n (X_wide, X_tab, X_text, X_img, None, None, target, None, transforms1),\n (X_wide, X_tab, X_text, X_img, None, None, target, None, transforms2),\n (X_wide, X_tab, X_text, X_img, None, None, target, None, None),\n (\n X_wide,\n X_tab,\n X_text,\n X_img_norm,\n None,\n None,\n target,\n None,\n transforms2,\n ),\n (\n X_wide,\n X_tab,\n X_text,\n X_img_norm,\n None,\n None,\n target,\n None,\n transforms1,\n ),\n (X_wide, X_tab, X_text, 
X_img_norm, None, None, target, None, None),\n (X_wide, X_tab, X_text, X_img, None, None, target, 0.2, None),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_tab\": X_tab,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n None,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_tab\": X_tab,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n None,\n transforms1,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_tab\": X_tab,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n 0.2,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide,\n \"X_tab\": X_tab,\n \"X_text\": X_text,\n \"X_img\": X_img,\n \"target\": target,\n },\n None,\n None,\n 0.2,\n transforms2,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide_tr,\n \"X_tab\": X_tab_tr,\n \"X_text\": X_text_tr,\n \"X_img\": X_img_tr,\n \"target\": y_train,\n },\n {\n \"X_wide\": X_wide_val,\n \"X_tab\": X_tab_val,\n \"X_text\": X_text_val,\n \"X_img\": X_img_val,\n \"target\": y_val,\n },\n None,\n None,\n None,\n ),\n (\n None,\n None,\n None,\n None,\n {\n \"X_wide\": X_wide_tr,\n \"X_tab\": X_tab_tr,\n \"X_text\": X_text_tr,\n \"X_img\": X_img_tr,\n \"target\": y_train,\n },\n {\n \"X_wide\": X_wide_val,\n \"X_tab\": X_tab_val,\n \"X_text\": X_text_val,\n \"X_img\": X_img_val,\n \"target\": y_val,\n },\n None,\n None,\n transforms1,\n ),\n ],\n)\ndef test_widedeep_inputs(\n X_wide,\n X_tab,\n X_text,\n X_img,\n X_train,\n X_val,\n target,\n val_split,\n transforms,\n):\n model = WideDeep(\n wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage\n )\n trainer = Trainer(model, objective=\"binary\", transforms=transforms, verbose=0)\n trainer.fit(\n X_wide=X_wide,\n X_tab=X_tab,\n X_text=X_text,\n X_img=X_img,\n X_train=X_train,\n X_val=X_val,\n target=target,\n val_split=val_split,\n batch_size=16,\n )\n assert trainer.history[\"train_loss\"] is not None\n\n\n@pytest.mark.parametrize(\n \"X_wide, X_tab, X_text, X_img, X_train, X_val, target\",\n [\n (\n X_wide,\n X_tab,\n X_text,\n X_img,\n None,\n {\n \"X_wide\": X_wide_val,\n \"X_tab\": X_tab_val,\n \"X_text\": X_text_val,\n \"X_img\": X_img_val,\n \"target\": y_val,\n },\n target,\n ),\n ],\n)\ndef test_xtrain_xval_assertion(\n X_wide,\n X_tab,\n X_text,\n X_img,\n X_train,\n X_val,\n target,\n):\n model = WideDeep(\n wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage\n )\n trainer = Trainer(model, objective=\"binary\", verbose=0)\n with pytest.raises(AssertionError):\n trainer.fit(\n X_wide=X_wide,\n X_tab=X_tab,\n X_text=X_text,\n X_img=X_img,\n X_train=X_train,\n X_val=X_val,\n target=target,\n batch_size=16,\n )\n\n\n@pytest.mark.parametrize(\n \"wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target\",\n [\n (wide, None, None, None, X_wide, None, None, None, target),\n (None, deeptabular, None, None, None, X_tab, None, None, target),\n (None, None, deeptext, None, None, None, X_text, None, target),\n (None, None, None, deepimage, None, None, None, X_img, target),\n ],\n)\ndef test_individual_inputs(\n wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target\n):\n model = WideDeep(\n wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage\n )\n trainer = Trainer(model, objective=\"binary\", verbose=0)\n trainer.fit(\n X_wide=X_wide,\n X_tab=X_tab,\n X_text=X_text,\n 
X_img=X_img,\n target=target,\n batch_size=16,\n )\n # check it has run succesfully\n assert len(trainer.history) == 1\n\n\n###############################################################################\n# test deephead is not None and individual components\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"deeptabular, deeptext, deepimage, X_tab, X_text, X_img, deephead, target\",\n [\n (deeptabular, None, None, X_tab, None, None, deephead_ds, target),\n (None, deeptext, None, None, X_text, None, deephead_dt, target),\n (None, None, deepimage, None, None, X_img, deephead_di, target),\n ],\n)\ndef test_deephead_individual_components(\n deeptabular, deeptext, deepimage, X_tab, X_text, X_img, deephead, target\n):\n model = WideDeep(\n deeptabular=deeptabular,\n deeptext=deeptext,\n deepimage=deepimage,\n deephead=deephead,\n ) # noqa: F841\n trainer = Trainer(model, objective=\"binary\", verbose=0)\n trainer.fit(\n X_wide=X_wide,\n X_tab=X_tab,\n X_text=X_text,\n X_img=X_img,\n target=target,\n batch_size=16,\n )\n # check it has run succesfully\n assert len(trainer.history) == 1\n\n\n###############################################################################\n# test deephead is None and head_layers is not None and individual components\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"deeptabular, deeptext, deepimage, X_tab, X_text, X_img, target\",\n [\n (deeptabular, None, None, X_tab, None, None, target),\n (None, deeptext, None, None, X_text, None, target),\n (None, None, deepimage, None, None, X_img, target),\n ],\n)\ndef test_head_layers_individual_components(\n deeptabular, deeptext, deepimage, X_tab, X_text, X_img, target\n):\n model = WideDeep(\n deeptabular=deeptabular,\n deeptext=deeptext,\n deepimage=deepimage,\n head_hidden_dims=[8, 4],\n ) # noqa: F841\n trainer = Trainer(model, objective=\"binary\", verbose=0)\n trainer.fit(\n X_wide=X_wide,\n X_tab=X_tab,\n X_text=X_text,\n X_img=X_img,\n target=target,\n batch_size=16,\n )\n # check it has run succesfully\n assert len(trainer.history) == 1\n","repo_name":"jrzaurin/pytorch-widedeep","sub_path":"tests/test_model_functioning/test_data_inputs.py","file_name":"test_data_inputs.py","file_ext":"py","file_size_in_byte":10678,"program_lang":"python","lang":"en","doc_type":"code","stars":1164,"dataset":"github-code","pt":"81"} +{"seq_id":"33363243656","text":"import torchvision.transforms as tsf\n\nclass Config:\n def __init__(self):\n self.model_name = 'YOLOv4'\n\n # ------\n # Input size\n # (w, h)\n # YOLO accepts any input sizes as long as the size is multiples of 32\n # ------\n self.input_size = (672, 512)\n \n # ------\n # Classe names\n # ------\n self.class_names = [\n '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'\n ]\n \n # ------\n # Anchors\n # [(w, h), ...]\n # 3 (YOLO heads) x n (num of anchors for each head)\n # big <------size------> small\n # ------\n self.anchors = [ \n # yolo default anchors for (608, 608)\n # (459, 401), (192, 243), (142, 110), (72, 146), (76, 55), (36, 75), (40, 28), (91, 36), (12, 16)\n # BBTv1 for (672, 512)\n (38, 60), (50, 43), (25, 70), (29, 54), (37, 42), (47, 31), (20, 54), (24, 37), (32, 26)\n ]\n\n\n # ------\n # Grayscale for every input image\n # ------\n self.grayscale = True\n\n # ------\n # Resize mode\n # \"stretch\" or \"pad\"\n # ------\n self.resize_mode = 'stretch'\n\n # ------\n # Normalization\n # [means, stds]\n # ------\n 
self.normalization = [\n (0.5, 0.5, 0.5), (0.22, 0.22, 0.22)\n ]\n \n\n # ------\n # YOLO structure\n # ------\n self.num_heads = 3\n\n\nclass DetectionConfig(Config):\n def __init__(self):\n # get general configs\n super(DetectionConfig, self).__init__()\n # threshold\n self.confidence = 0.5\n self.iou = 0.3\n self.text_font = 'simhei.ttf'\n \n # ------\n # Cuda for detection\n # ------\n self.cuda = True\n \n \n\nclass TrainingConfig(Config):\n def __init__(self):\n # get general configs\n super(TrainingConfig, self).__init__()\n self.log_dir = 'logs/'\n # ------\n # Basic hyperparameters\n # total epochs = freeze + unfreeze\n # ------\n self.initial_epoch = 1\n self.freeze_epochs = 0\n self.unfreeze_epochs = 300\n self.batch_size = 4\n self.learning_rate = 1e-3\n\n # ------\n # Training dictionary\n # ------\n self.train_dict = 'logs/yolo_train_dict.txt'\n\n # ------\n # Pre-train weight\n # set \"None\" to get Kaiming initialization\n # ------\n self.pretrained = None\n\n # ------\n # Ratio of validation\n # ------\n self.valid_ratio = 0.1\n\n # ------\n # Center crop\n # aspect ratio (w, h)\n # set \"None\" to turn off\n # ------\n self.center_crop_ratio = (4, 3)\n\n # ------\n # Color Jitter\n # set \"None\" to turn off\n # ------\n self.color_jitter = tsf.ColorJitter(\n brightness = 0.2, \n contrast = 0.2, \n saturation = 0, \n hue = 0\n )\n\n # ------\n # Random Flip\n # mode in \"all\", \"horizontal\", \"vertical\"\n # set \"None\" to turn off\n # ------\n self.random_flip = 'all'\n\n # ------\n # Mosaic augmentation\n # image sizes from dataset better similar to each other\n # ------\n self.mosaic = True\n\n # ------\n # Label smoothing\n # ------\n self.label_smoothing = 0.\n\n # ------\n # Size augmentation\n # a float in [0, 1] for probability of chagning input size\n # the input size will +-(32, 32) randomly for training\n # set \"None\" to turn off\n # ------ \n self.size_aug = None\n\n # ------\n # Threads of dataloader\n # ------\n self.num_workers = 4\n\n # ------\n # Consine learning rate\n # ------\n self.cos_lr = True\n \n # ------\n # Cuda for training\n # ------\n self.cuda = True\n","repo_name":"panghanwu/yolov4_pytorch","sub_path":"configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33398926427","text":"import numpy as np\nimport pathlib as pl\nfrom scipy.ndimage import gaussian_filter as gfilt\n\ndef makeAlphaMask(shape=(720, 1280), margin=30, sigma=10, low=-1, high=1):\n \"\"\"\n \"\"\"\n\n mask = np.full(shape, high).astype(float)\n # mask = np.ones(shape)\n mask[:, :margin] = low\n mask[:, -margin:] = low\n mask[:margin, :] = low\n mask[-margin:, ] = low\n filtered = gfilt(mask, sigma)\n clipped = np.clip(filtered, -1, 1)\n\n return clipped\n\ndef cycleSignalPatch(display, cycle=(1, 1), nCycles=1):\n \"\"\"\n \"\"\"\n\n for iCycle in range(nCycles):\n display.state = True\n for iFrame in range(int(round(display.fps * cycle[0]))):\n display.drawBackground()\n display.flip()\n\n display.state = False\n for iFrame in range(int(round(display.fps * cycle[1]))):\n display.drawBackground()\n display.flip()\n\n return\n\ndef generateMetadataFilename(parent, tag, extension='.pkl'):\n \"\"\"\n \"\"\"\n\n existing = list(parent.glob(f'{tag}*'))\n n = len(existing) + 1\n filename = parent.joinpath(f'{tag}-{n}{extension}')\n return filename\n\ndef estimateFrameCount(t, fps=60, roundingMethod='nearest'):\n \"\"\"\n Convert time (in seconds) 
to frames\n \"\"\"\n\n if roundingMethod == 'nearest':\n nFrames = int(round(fps * t))\n elif roundingMethod == 'ceiling':\n nFrames = int(np.ceil(fps * t))\n elif roundingMethod == 'floor':\n nFrames = int(np.floor(fps * t))\n else:\n raise Exception(f'Rounding method must be one of \"nearest\", \"ceiling\", or \"floor\"')\n\n if nFrames == 0:\n raise Exception(f'Estimated frame count is 0')\n\n return nFrames","repo_name":"jbhunt/openpmad2","sub_path":"openpmad2/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24663923454","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport Store.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Store', '0004_add_orders'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='orderDateYMD',\n field=models.CharField(default=Store.models.get_default_ymd, max_length=8),\n ),\n ]\n","repo_name":"luispdm/GameStore","sub_path":"GameStore/Store/migrations/0005_auto_20160217_2344.py","file_name":"0005_auto_20160217_2344.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"8225995274","text":"# -*- coding=utf-8 -*-\nimport io\nimport re\nimport sys\nfrom collections import Counter\nimport jieba\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport wordcloud\nimport warnings\n# from snownlp import seg, SnowNLP\n\n\n# from scipy.misc import imread\n# from importlib import reload\n\n\ndef mycut(str1):\n\tstr1 = ' '.join(jieba.cut(str1)) # 曾经尝试关闭新词发现\n\treturn str1\n# mycut = lambda s:' '.join(jieba.cut(s))\n\n# 4SnowNLP模块做正负情感分析√/或者使用ROSTCM6代替生成(3.7应用Error)\nfilepath1 = r'./foundation_output/pos_data.csv'\nfilepath2 = r'./foundation_output/neg_data.csv'\ndf1 = pd.read_csv(filepath1, encoding='utf-8', header=0)\ndf2 = pd.read_csv(filepath2, encoding='utf-8', header=0)\ndf1 = df1['comment']\ndf2 = df2['comment']\nprint('pos:\\n', df1.head(), 'neg\\n', df2.head())\n\n'''如果是ROSTCM6就用以下内容去除前缀\n# read_csv(r’F:\\Demo\\ThesisProject4\\cleanoil_output\\temp_after_filter3_utf8_正面情感结果.txt')\ndf1 = pd.read_csv(filepath1, encoding='utf-8', header=0)\ndf2 = pd.read_csv(filepath2, encoding='utf-8', header=0)\nprint(df1.head())\nprint(df2.head())\ndf11 = df1['comments'].map(lambda x: re.sub(r'\\d*\\s\\s', '', x))\ndf22 = df2['comments'].map(lambda x: re.sub(r'[-]\\d*\\s\\s', '', x))\nprint('去除评分前缀后正面:\\n')\nprint(df11.head())\nprint('去除评分前缀后负面:\\n')\nprint(df22.head())\n# 输出去除前缀后的内容 用于ROSTCM6生成正负语义网络\ndf11.to_csv('正面情感评论_无前缀.txt', index=False, header=False, encoding='utf-8')\ndf22.to_csv('负面情感评论_无前缀.txt', index=False, header=False, encoding='utf-8')\n'''\n\n# 5jieba模块做中文分词处理,采用apply()广播形式加快分词速度\n# df1 = df1.iloc[:, 0].apply(mycut)\ndf11 = pd.DataFrame(df1.apply(mycut))\n# df2 = df2.iloc[:, 0].apply(mycut)\ndf22 = pd.DataFrame(df2.apply(mycut))\nprint('分词处理后正面:\\n')\nprint(df11.head())\nprint('分词处理后负面:\\n')\nprint(df22.head())\n# df1 = mycut(str(df1))这种方法也可以\n# df11 = pd.DataFrame(df1)\n\n# 6去除停用词\nstopfilepath = './stoplist_utf8.txt'\nstop = pd.read_csv(stopfilepath, encoding='utf-8', header=None, sep='dingbangchu',\n engine='python') # 这里sep分割符 非停用词符均可,这里选的是竞赛站名\n# sep设置分割词,csv默认以半角逗号分割,该词恰好在停用词表中导致读取出错,因此手动设置一个不存在的分割词,如dingbangchu\nstop = [' ', ''] + list(stop[0]) # 
Pandas自动过滤了空格符,这里手动添加,此时stop由df类型转为list类型\npos = pd.DataFrame(df11.copy())\nneg = pd.DataFrame(df22.copy())\npos = pos['comment'].apply(lambda s: s.split(' ')) # 定义一个分割函数,然后用apply广播,每行按空格分隔,每行由str转变为list\npos = pos.apply(lambda x: [i for i in x if i not in stop]) # 逐词判断是否停用词,思路同上\nneg = neg['comment'].apply(lambda s: s.split(' '))\nneg = neg.apply(lambda x: [i for i in x if i not in stop])\nprint('去除停用词后正面:\\n')\nprint(pos.head())\nprint('去除停用词后负面:\\n')\nprint(neg.head())\n# 输出去除停用词后已分词的内容 用于生成词云以及后续LDA主题分析\npos.to_csv('foundation_output/正面情感分词有效版.txt', index=False, header=False, encoding='utf-8')\nneg.to_csv('foundation_output/负面情感分词有效版.txt', index=False, header=False, encoding='utf-8')\n\n# pos = pd.DataFrame(pos.copy())\n# neg = pd.DataFrame(neg.copy())\n# pos = pos[pos.iloc[:, 0].apply(len) >= 1]\n# print(pos[:5])\n# neg = neg[neg.iloc[:, 0].apply(len) >= 1]\n# print(neg[:5])\n\n# 7词频统计\n# =============正面=============\nall_words = []\nfor n in range(0, len(pos)):\n\tfor i in pos[n]:\n\t\tall_words.append(i)\nword_count = pd.Series(all_words)\ntop_10 = word_count.value_counts(sort=True, ascending=False, dropna=True)\nprint('正面词频统计TOP10关键词:')\nprint(top_10[:10])\ncounts_result = dict(Counter(all_words))\ncounts_result = dict(sorted(counts_result.items(), key=lambda d: d[1], reverse=True))\n# counts_result.sort(reverse=True)\n# get to k most frequently occuring words\n# counts_result = Counter(all_words).most_common(10)\n# print('正面词频统计字典如下:')\n# print(counts_result)\nwith open('foundation_output/正面词频统计.txt', 'w', errors='ignore') as f:\n\t[f.write(str('{0},{1}\\n'.format(key, value))) for key, value in counts_result.items()]\n# =============负面================\nall_words = []\nfor n in range(0, len(neg)):\n\tfor i in neg[n]:\n\t\tall_words.append(i)\nword_count = pd.Series(all_words)\ntop_10 = word_count.value_counts(sort=True, ascending=False, dropna=True)\nprint('负面词频统计TOP10关键词:')\nprint(top_10[:10])\ncounts_result = dict(Counter(all_words))\ncounts_result = dict(sorted(counts_result.items(), key=lambda d: d[1], reverse=True))\n# counts_result.sort(reverse=True)\n# print('负面词频统计字典如下:')\n# print(counts_result)\nwith open('foundation_output/负面词频统计.txt', 'w', errors='ignore') as f:\n\t[f.write(str('{0},{1}\\n'.format(key, value))) for key, value in counts_result.items()]\n# temp_df = pd.DataFrame(counts_result)\n# temp_df.to_csv('词频统计.txt', index=False, header=False, encoding='utf-8')\n# print(type(pos)) \n# print(pos[0])\n# print(type(pos[0])) list\n# print(all_words)\n# print(type(all_words))\n\n\n# 8词云图绘制\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\nwarnings.filterwarnings(\"ignore\")\n# (1)读取背景图片\n# back_picture = imread(r\"\")\n# usa_mask = np.array(Image.open('flower.png'))\n# (2)设置词云参数(源文件应该是分词后并去除停用词的内容,而不是词频表)\n# =============pos_wordcloud=============\nfont = r'C:\\Windows\\Fonts\\msyh.ttc'\nwc = wordcloud.WordCloud(\n\tbackground_color=\"white\",\n\theight=800,\n\twidth=1000,\n\tfont_path=font,\n\tprefer_horizontal=0.2,\n\tmax_words=2000,\n\trelative_scaling=0.3,\n\tmax_font_size=200).generate(str(pos.tolist()))\nplt.imshow(wc, interpolation=\"nearest\")\nplt.axis(\"off\")\nplt.show()\nwc.to_file(\"ciyun_foundaiton_pos.png\")\nprint('词云图_正面已生成!')\n# 从文件导入数据:\n# f = open('cleanoil_output\\负面情感分词有效版.txt', encoding='utf-8').read()\n# =============neg_wordcloud=============\nfont = r'C:\\Windows\\Fonts\\msyh.ttc'\n# pic = imread('') +mask='pic',\nwc = wordcloud.WordCloud(\n\tbackground_color=\"white\",\n\theight=800,\n\twidth=1000,\n\tfont_path=font,\n 
prefer_horizontal=0.2,\n\tmax_words=2000,\n\trelative_scaling=0.3,\n\tmax_font_size=200).generate(str(neg.tolist()))\n# wc1 = wc.fit_words(counts_result)\n# .generate_from_text(comments)\n# Build the word cloud from the DataFrame/pos/neg\n# Or build it from the word frequency file\n# (3) Draw the word cloud\nplt.imshow(wc, interpolation=\"nearest\")\nplt.axis(\"off\")\nplt.show()\n# (4) Save it locally\nwc.to_file(\"ciyun_foundation_neg.png\")\nprint('Negative word cloud generated!')\n# plt.savefig('图6.jpg', dpi=600, bbox_inches='tight', quality=95)\n# plt.show()\n\n\n'''Below is the first half of a word cloud walkthrough from a website\n# Analyze with jieba\ndf[\"评论\"] = df[\"评论\"].apply(jieba.lcut)\ndf.head()\n# Remove stopwords\nwith open(\"stopword.txt\", \"r\", encoding=\"gbk\") as f:\n\tstop = f.read() # this returns a single string\nstop = stop.split() # this gives a list; split() cuts on spaces, \\n and \\t, so we can add those to the stopwords as well\nstop = stop + [\" \", \"\\n\", \"\\t\"]\ndf_after = df[\"评论\"].apply(lambda x: [i for i in x if i not in stop])\ndf_after.head()\n'''\n\n'''Below are all the attempts made to strip the prefixes\nThe textbook approach \"data1 = pd.DataFrame(data1[0].str.replace(r'.*?\\d+?\\\\t ', ''))\" does not work; it reports that the df has no str() method\ndf1 = df1.apply(lambda s: s.str.split(r'\\s'))\ndf2 = df2.apply(lambda s: s.str.split(r'\\s'))\nprint(df1.values)\nprint(df1.shape[1])\ndf1 = df1.apply(lambda s: s.str.strip([0, 2])) # s.str.strip('@')\nndarray = df1['comments'].astype(str).values\nfor i in ndarray:\n\ti = i.split(' ')\nprint(df1.shape[1])\nEdit the data with a non-greedy regex: (*?) means 0 or more non-\\n characters and (+?) means 1 or more digits; this removes the positive/negative score prefixes produced by ROSTCM6\ndf3 = pd.DataFrame(df1.astype(str).replace(r'\\d*\\s\\s', 'a'))\nPython 3 interprets string literals as Unicode strings, so \\d is treated as an escaped Unicode character\ndf4 = pd.DataFrame(df2.astype(str).replace(r'([-]\\d*\\s\\s).*?', 'b'))\n'''\n","repo_name":"DingBangBang/SHU-TMALL-comments-NLP","sub_path":"ThesisProject4/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4654361141","text":"a, b = list(map(int, input().split(\" \")))\ncondition = str(input())\ndef calc(a, b, condition):\n    if condition == \"freeze\":\n        if a < b:\n            return a\n        else:\n            return b\n    elif condition == \"heat\":\n        if a < b:\n            return b\n        else:\n            return a\n    elif condition == \"auto\":\n        return b\n    return a\nprint(calc(a=a, b=b, condition=condition))\n","repo_name":"Xrenya/Algorithms","sub_path":"yandex/Lecture_1/Contest/A/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70220455945","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"#########################################################\n############################################################\n### Define constants used by multiple source files. 
###\n### ###\n### Author: Daniel Dantas ###\n### Last edited: Sep 2022 ###\n############################################################\n#########################################################\"\"\"\n\nimport os\n\n## Canvas sizes\nSUBJ_W = 720\nSUBJ_H = 540\nOP_W = 640\nOP_H = 480\nPAD_X = 10\nPAD_Y = 10\n\n## Filenames\nEXT_TSV = \".tsv\"\nEXT_LIN = \"_linear.tsv\"\nEXT_NN = \"_nearest.tsv\"\nEXT_INFERRED = \"_inferred_rr.tsv\"\nEXT_FLOW = \"_flow.tsv\"\n\n## Used mostly by Polar.py, Data.py and Plot.py\nTYPE_RR = \"R\"\nTYPE_ECG = \"E\"\nTYPE_AUDIO = \"A\"\n\n## Used mostly by WinOp.py\nFOLDER_DATA = \"data\"\nFOLDER_DEBUG = os.path.join(FOLDER_DATA, \"DEBUG\")\n\nFILENAME_RANDOM = \"random.txt\"\nFILENAME_ROUTINE = \"routine.txt\"\nFILENAME_START_TIME = \"start_time.txt\"\nFILENAME_LOG = \"log.txt\"\n\nFILENAME_ECG = \"subj%d_ecg.tsv\"\nFILENAME_ECG_S1 = \"subj1_ecg.tsv\"\nFILENAME_ECG_S2 = \"subj2_ecg.tsv\"\n\nFILENAME_RR = \"subj%d_rr.tsv\"\nFILENAME_RR_S1 = \"subj1_rr.tsv\"\nFILENAME_RR_S2 = \"subj2_rr.tsv\"\n\nFILENAME_AUDIO = \"audio.wav\"\n\n# \"subj%d_ecg_inferred_rr.tsv\"\nFILENAME_ECG_RR = FILENAME_ECG.replace(EXT_TSV, EXT_INFERRED)\nFILENAME_ECG_RR_S1 = FILENAME_ECG_S1.replace(EXT_TSV, EXT_INFERRED)\nFILENAME_ECG_RR_S2 = FILENAME_ECG_S2.replace(EXT_TSV, EXT_INFERRED)\n\nFILENAME_VIDEO = \"subj%d.mp4\"\nFILENAME_VIDEO_S1 = \"subj1.mp4\"\nFILENAME_VIDEO_S2 = \"subj2.mp4\"\n\n\n## Used mostly by 01_sync/sync.py\nFOLDER_SYNC = \"01_sync\"\nFILENAME_VIDEO_SYNC = \"subj%d_sync.mp4\"\nFILENAME_VIDEO_SYNC_S1 = \"subj1_sync.mp4\"\nFILENAME_VIDEO_SYNC_S2 = \"subj2_sync.mp4\"\n\n## Used mostly by 02_preprocess/preprocess.py\nFOLDER_PREP = \"02_preprocess\"\nFOLDER_OUTPUT = \"output\"\n#FILENAME_DATASET = [\"dataset_jf.tsv\", \"dataset_dd.tsv\"]\nFILENAME_DATASET = \"dataset.tsv\"\nFILENAME_SLIDE = \"slides_time.tsv\"\n#FILENAME_ANNOTATION = [\"annotation_jf.eaf\", \"annotation_dd.eaf\"]\n# FILENAME_ANNOTATION = \"annotation_dd.eaf\"\nDATASET_HEADERS = ['folder', 'time', 'block', 'slide',\n 'hr_subj1_linear', 'hr_subj2_linear', 'hr_subj1_nearest', 'hr_subj2_nearest', \\\n 'hr_subj1_ecg_linear', 'hr_subj2_ecg_linear', 'hr_subj1_ecg_nearest', 'hr_subj2_ecg_nearest', \\\n 'rr_subj1_linear', 'rr_subj2_linear', 'rr_subj1_nearest', 'rr_subj2_nearest', \\\n 'rr_subj1_ecg_linear', 'rr_subj2_ecg_linear', 'rr_subj1_ecg_nearest', 'rr_subj2_ecg_nearest']\n\n# \"subj%d_rr_linear.tsv\"\nFILENAME_RR_LIN = FILENAME_RR.replace(EXT_TSV, EXT_LIN)\nFILENAME_RR_LIN_S1 = FILENAME_RR_S1.replace(EXT_TSV, EXT_LIN)\nFILENAME_RR_LIN_S2 = FILENAME_RR_S2.replace(EXT_TSV, EXT_LIN)\n\n# \"subj%d_rr_nearest.tsv\"\nFILENAME_RR_NN = FILENAME_RR.replace(EXT_TSV, EXT_NN)\nFILENAME_RR_NN_S1 = FILENAME_RR_S1.replace(EXT_TSV, EXT_NN)\nFILENAME_RR_NN_S2 = FILENAME_RR_S2.replace(EXT_TSV, EXT_NN)\n\n# \"subj%d_ecg_inferred_rr_linear.tsv\"\nFILENAME_ECG_RR_LIN = FILENAME_ECG_RR.replace(EXT_TSV, EXT_LIN)\nFILENAME_ECG_RR_LIN_S1 = FILENAME_ECG_RR_S1.replace(EXT_TSV, EXT_LIN)\nFILENAME_ECG_RR_LIN_S2 = FILENAME_ECG_RR_S2.replace(EXT_TSV, EXT_LIN)\n\n# \"subj%d_ecg_inferred_rr_nearest.tsv\"\nFILENAME_ECG_RR_NN = FILENAME_ECG_RR.replace(EXT_TSV, EXT_NN)\nFILENAME_ECG_RR_NN_S1 = FILENAME_ECG_RR_S1.replace(EXT_TSV, EXT_NN)\nFILENAME_ECG_RR_NN_S2 = FILENAME_ECG_RR_S2.replace(EXT_TSV, EXT_NN)\n\n## Used mostly by 03_optical_flow/optical_flow.py\nFOLDER_OPTI = \"03_optical_flow\"\nLEN_PREF_VIDEO = 
5\n","repo_name":"ddantas/img_hrv_lan","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"28038200354","text":"\nclass Position:\n \n def __init__(self):\n self.board = []\n self.macroboard = []\n \n def parse_field(self, fstr):\n flist = fstr.replace(';', ',').split(',')\n self.board = [ int(f) for f in flist ]\n \n def parse_macroboard(self, mbstr):\n mblist = mbstr.replace(';', ',').split(',')\n self.macroboard = [ int(f) for f in mblist ]\n \n def is_legal(self, x, y):\n mbx, mby = x/3, y/3\n return self.macroboard[3*mby+mbx] == -1 and self.board[9*y+x] == 0\n\n def legal_moves(self):\n return [ (x, y) for x in range(9) for y in range(9) if self.is_legal(x, y) ]\n\n def make_move(self, x, y, pid):\n mb_new = 3*(y%3)+(x%3) #new macroboard\n mbx, mby = x/3, y/3 # old macroboard\n mb_old = mby*3 + mbx\n\n self.board[9*y+x] = pid\n\n\n winner = self.get_winner(mb_old)\n\n if self.macroboard[mb_new] <= 0: #if new is valid\n #invalidate others\n self.macroboard = [0 if n == -1 else n for n in self.macroboard]\n self.macroboard[mb_new] = -1\n else:\n self.macroboard = [-1 if n == 0 else n for n in self.macroboard]\n \n if winner != 0:\n self.macroboard[mb_old] = winner\n\n def get_winner(self, mb_i):\n start_index = (mb_i/3)*27 + (mb_i % 3)*3\n board = self.board\n # check rows/columns\n for i in range(3):\n row_value = board[i*9+start_index]\n col_value = board[i+start_index]\n for j in range(3):\n if board[i*9+j+start_index] != row_value:\n row_value = -1\n if board[j*9+i+start_index] != col_value:\n col_value = -1\n if row_value > 0:\n return row_value\n if col_value > 0:\n return col_value\n\n # Check diagonals\n d1_val = board[start_index]\n d2_val = board[2+start_index]\n for i in [1, 2]:\n if board[i*10+start_index] != d1_val:\n d1_val = -1\n if board[i*8+2+start_index] != d2_val:\n d2_val = -1\n if d2_val > 0:\n return d2_val\n if d1_val > 0:\n return d1_val\n return 0\n\n def get_board(self):\n return ''.join(self.board, ',')\n\n def get_macroboard(self):\n return ''.join(self.macroboard, ',')\n\n","repo_name":"Macetodaface/tictactoe","sub_path":"position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9602234169","text":"from functools import update_wrapper\nfrom django.utils import six\nfrom django.conf.urls import patterns, url, include\nfrom django.contrib.admin.sites import AdminSite\nfrom django.contrib.contenttypes import views as contenttype_views\n\nfrom tribus.web.admin.views import (tribus_config, active_modules,\n logger_levels)\nfrom tribus.web.registration.forms import LoginForm\n\n\nclass TribusAdmin(AdminSite):\n\n login_template = 'registration/login_form.html'\n login_form = LoginForm\n\n def get_urls(self):\n\n def wrap(view, cacheable=False):\n def wrapper(*args, **kwargs):\n return self.admin_view(view, cacheable)(*args, **kwargs)\n return update_wrapper(wrapper, view)\n\n urlpatterns = patterns(\n '',\n url(regex=r'^$',\n view=wrap(self.index),\n name='index'),\n url(regex=r'^config/$',\n view=wrap(tribus_config)),\n url(regex=r'^config/active-modules/$',\n view=wrap(active_modules)),\n url(regex=r'^config/logger-levels/$',\n view=wrap(logger_levels)),\n url(regex=r'^r/(?P\\d+)/(?P.+)/$',\n view=wrap(contenttype_views.shortcut),\n name='view_on_site'),\n url(regex=r'^(?P\\w+)/$',\n 
view=wrap(self.app_index),\n name='app_list')\n )\n\n for model, model_admin in six.iteritems(self._registry):\n urlpatterns += patterns(\n '',\n url(regex=r'^%s/%s/' % (model._meta.app_label,\n model._meta.model_name),\n view=include(model_admin.urls))\n )\n\n return urlpatterns\n\ntribus_admin = TribusAdmin()\n","repo_name":"CanaimaKueka/tribus","sub_path":"tribus/web/admin/sites.py","file_name":"sites.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"74921640265","text":"\"\"\"\nThis file is part of the TheLMA (THe Laboratory Management Application) project.\nSee LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.\n\nThis handler converts the results of the library member parser into a\nmolecule design set (:class:`thelma.entities.molecule.MoleculeDesignSet`).\nAt this, the handler tries to determine the stock sample molecule design ID\nfor every molecule design set. If there is no ID, a new ID is created.\n\nAAB\n\"\"\"\nfrom everest.entities.utils import get_root_aggregate\nfrom everest.querying.specifications import cntd\nfrom thelma.tools.handlers.base import BaseParserHandler\nfrom thelma.tools.parsers.libmembers import LibraryMemberParser\nfrom thelma.interfaces import IMoleculeDesignPool\nfrom thelma.entities.moleculedesign import MoleculeDesignPoolSet\nfrom thelma.entities.moleculetype import MoleculeType\n\n\n__docformat__ = 'reStructuredText en'\n\n__all__ = ['LibraryMemberParserHandler',\n ]\n\n\nclass LibraryMemberParserHandler(BaseParserHandler):\n \"\"\"\n Converts the results of the :class:`LibraryMemberParser` into a\n :class:`MoleculeDesignSet`.\n\n **Return Value:** :class:`PoolSet`\n \"\"\"\n NAME = 'Library Member Parser Handler'\n\n _PARSER_CLS = LibraryMemberParser\n\n def __init__(self, stream, number_molecule_designs, molecule_type,\n parent=None):\n \"\"\"\n Constructor.\n\n :param int number_molecule_designs: The number of molecule designs\n per cell (list).\n :param molecule_type: The expected molecule type for all molecule\n designs in the library.\n :type molecule_type: :class:`thelma.entities.moleculetype.MoleculeType`\n \"\"\"\n BaseParserHandler.__init__(self, stream=stream, parent=parent)\n self.number_molecule_designs = number_molecule_designs\n self.molecule_type = molecule_type\n #: Map molecule design ID -> molecule design.\n self.__md_map = None\n #: Contains the stock sample molecule design sets for the final\n #: library pool set.\n self.__library_sets = None\n\n def reset(self):\n BaseParserHandler.reset(self)\n self.__md_map = dict()\n self.__library_sets = set()\n\n def _convert_results_to_entity(self):\n self.add_info('Convert parser results ...')\n self.__check_input()\n if not self.has_errors():\n self.__get_molecule_design_pools()\n if not self.has_errors():\n self.return_value = MoleculeDesignPoolSet(\n molecule_type=self.molecule_type,\n molecule_design_pools=self.__library_sets)\n self.add_info('Conversion completed.')\n\n def __check_input(self):\n self._check_input_class('number molecule designs',\n self.number_molecule_designs, int)\n self._check_input_class('molecule type', self.molecule_type,\n MoleculeType)\n\n def __get_molecule_design_pools(self):\n self.add_debug('Fetch molecule design pools for IDs ...')\n invalid_length = []\n found_md_ids = set()\n for md_ids in self.parser.molecule_design_lists:\n if len(md_ids) != self.number_molecule_designs:\n invalid_length.append(\n '-'.join([str(md_id) for 
md_id in md_ids]))\n                continue\n            for md_id in md_ids:\n                found_md_ids.add(md_id)\n        if len(invalid_length) > 0:\n            msg = 'Some of the specified molecule design pools do not ' \\\n                  'have the expected number of molecule designs (%i): %s.' \\\n                  % (self.number_molecule_designs, ', '.join(invalid_length))\n            self.add_error(msg)\n            return\n        invalid_type = []\n        agg = get_root_aggregate(IMoleculeDesignPool)\n        agg.filter = cntd(molecule_design_id=found_md_ids)\n        iterator = agg.iterator()\n        while True:\n            try:\n                md = next(iterator)\n            except StopIteration:\n                break\n            else:\n                if md.molecule_type != self.molecule_type:\n                    invalid_type.append(md.id)\n                self.__md_map[md.id] = md\n        # Search for missing molecule designs.\n        if len(found_md_ids) != len(self.__md_map):\n            diff = found_md_ids.symmetric_difference(self.__md_map.keys())\n            diff = sorted(list(diff))\n            msg = 'The following molecule designs could not be found in ' \\\n                  'the DB: %s.' % (', '.join([str(md_id) for md_id in diff]))\n            self.add_error(msg)\n        if len(invalid_type) > 0:\n            msg = 'The molecule designs in the list have different molecule ' \\\n                  'types. Expected: %s. Others (molecule designs): %s.' \\\n                  % (self.molecule_type,\n                     ', '.join([str(md_id) for md_id in sorted(invalid_type)]))\n            self.add_error(msg)\n","repo_name":"helixyte/TheLMA","sub_path":"thelma/tools/handlers/libmembers.py","file_name":"libmembers.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"351504479","text":"import requests\nimport configparser\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n# declarative to specify the route to take on my flask application\n# in this case it is the landing page/dashboard\n@app.route('/')\ndef weather_dashboard():\n    return render_template('landing_page.html')\n\n# declarative to specify the route to results page once a user inputs a zip code\n@app.route('/results', methods=['POST'])\ndef render_results():\n    zip_code = request.form['zipCode']\n\n    api_key = get_api_key()\n    data = get_weather_results(zip_code, api_key)\n\n    # Formats JSON dictionary by parsing response to key values below\n    # to render to the user\n    temp = \"{0:.2f}\".format(data[\"main\"][\"temp\"])\n    feels_like = \"{0:.2f}\".format(data[\"main\"][\"feels_like\"])\n    weather = data[\"weather\"][0][\"main\"]\n    location = data[\"name\"]\n\n    return render_template('results_page.html', location=location, temp=temp, \n                           feels_like=feels_like, weather=weather)\n\n# Function to store configurations to specify my application and to store my api key\ndef get_api_key():\n    config = configparser.ConfigParser()\n    config.read('config.ini')\n    return config['openweathermap']['api']\n\n# Function to get weather results based on zip code, and my api key.\n# Formatted to take in the parameters that user puts into the system\ndef get_weather_results(zip_code, api_key):\n    api_url = \"http://api.openweathermap.org/\" \\\n              \"data/2.5/weather?zip={}&units=imperial&appid={}\".format(zip_code, api_key)\n    r = requests.get(api_url)\n    return r.json()\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"ChichaRonin/Weather-API-Application","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27686383243","text":"from flask import Blueprint, Flask, redirect, render_template, request\n\nfrom models.student import Student\nfrom 
models.registration import Registration\n\nimport repositories.student_repository as student_repository\nimport repositories.course_repository as course_repository\nimport repositories.registration_repository as registration_repository\n\nstudent_blueprint = Blueprint(\"students\", __name__)\n\n@student_blueprint.route(\"/students\")\ndef list_students():\n    students = student_repository.select_all()\n    return render_template(\"students/index.html\", students = students)\n\n@student_blueprint.route(\"/students/new\")\ndef new_student():\n    courses = course_repository.select_all()\n    return render_template(\"students/new.html\", courses= courses)\n\n@student_blueprint.route(\"/students\", methods=[\"POST\"])\ndef create_student():\n    name = request.form[\"name\"]\n    dob = request.form[\"dob\"]\n    experience = request.form[\"experience\"]\n    email = request.form[\"email\"]\n    phone = request.form['phone']\n    membership= request.form[\"membership\"]\n    course_id = request.form[\"course_id\"]\n\n    course = course_repository.select(course_id)\n    new_student = Student(name, dob, experience, email, phone, membership)\n    new_registration = Registration(course, new_student)\n\n    student_repository.save(new_student)\n    registration_repository.save(new_registration)\n    return redirect(f\"/students/{new_student.id}\")\n\n@student_blueprint.route(\"/students/<id>\")\ndef show(id):\n    student = student_repository.select(id)\n    courses = registration_repository.select_courses(id)\n    return render_template(\"students/show.html\", student=student, courses= courses)\n\n@student_blueprint.route(\"/students/<id>/delete\", methods=[\"POST\"])\ndef delete_student(id):\n    student_repository.delete(id)\n    return redirect(\"/students\")\n\n@student_blueprint.route(\"/students/<id>/edit\")\ndef edit_student(id):\n    student = student_repository.select(id)\n    return render_template('students/edit.html', student= student)\n\n@student_blueprint.route(\"/students/<id>\", methods=[\"POST\"])\ndef update_student(id):\n    name = request.form[\"name\"]\n    dob = request.form[\"dob\"]\n    experience = request.form[\"experience\"]\n    email = request.form[\"email\"]\n    phone = request.form['phone']\n    membership= request.form[\"membership\"]\n\n    student_to_update = Student(name, dob, experience, email, phone, membership, id)\n\n    student_repository.update(student_to_update)\n    return redirect(f\"/students/{student_to_update.id}\")\n","repo_name":"mariamuzas/mariuca_school_app_project","sub_path":"controllers/student_controller.py","file_name":"student_controller.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33659934515","text":"# from jose import jws\n# contract=\"This is a sample contract if you are agree on this pls sign the doc\"\n# signed = jws.sign({\"contract\":contract}, 'secret', algorithm='HS256')\n# print(signed)\n\n\n# verification=jws.verify(signed, 'secret', algorithms=['HS256'])\n# print(verification)\nfrom Crypto.Cipher import AES\n\n\nkey = b'Sixteen byte key'\n# print(type(key))\ncipher = AES.new(key, AES.MODE_EAX)\ndata=bytes(\"hemendrasharma\",'utf-8')\nnonce = cipher.nonce\n# print(nonce)\n# n = \"4Y\\x89\\x7f\\x1dI\\x05}\\x19\\xa3\\xf2, \\rJ\\xe4}\"\n# print(nonce == bytes(n,\"utf-8\"))\nciphertext, tag = cipher.encrypt_and_digest(data)\nprint(str(ciphertext))\n# # plaintext = cipher.decrypt(ciphertext)\n# # print(plaintext)\n\ncipher1 = AES.new(key, AES.MODE_EAX,nonce)\nplaintext = cipher1.decrypt(ciphertext)\n# 
print(plaintext.decode('utf-8'))\n","repo_name":"adityadev11/Anti_tamper","sub_path":"dsa.py","file_name":"dsa.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1820674964","text":"# Execution Time (Problem 22947)\n\"\"\"\n Problem: https://www.acmicpc.net/problem/22947\n Brute force, topological sort\n I misread the problem and kept wondering why a seemingly correct answer was judged wrong.\n Excluding the start task and the finish task, the times of min(N - 2, K) tasks must be forced to 0,\n but I had been forcing the times of combinations of K tasks drawn from all tasks to 0.\n The last task is found through the reverse in-degree array (the node whose degree is 0 in the reverse in-degree array is the last task),\n and the nodes from 2 to N whose reverse in-degree is nonzero are collected into an array used to build the combinations of K.\n The rest finds the smallest time in which all tasks can be completed via topological sort.\n\"\"\"\nimport sys\nfrom collections import deque\nfrom itertools import combinations\ninput = sys.stdin.readline\n\nN, M, K = map(int, input().split()) # number of tasks, number of ordering constraints, number of tasks whose execution time becomes 0\ntime = [0] + list(map(int, input().split())) # execution times\ngraph = [[] for _ in range(N + 1)]\nindegree = [0] * (N + 1) # in-degree\nreverse_ind = [0] * (N + 1) # reverse in-degree (used to find the last task)\n\nfor _ in range(M):\n    s, e = map(int, input().split())\n    graph[s].append(e)\n    indegree[e] += 1\n    reverse_ind[s] += 1\n\narr = []\nfor i in range(2, N + 1):\n    if reverse_ind[i]:\n        arr.append(i)\n\ncombs = list(combinations(arr, K))\nmin_time = sys.maxsize\nfor comb in combs:\n    tmp_ind = indegree[:] # copy the in-degrees\n    tmp_t = time[:] # copy the execution times\n    T = [0] * (N + 1)\n    T[1] = tmp_t[1]\n    for n in comb:\n        tmp_t[n] = 0\n\n    q = deque([(T[1], 1)]) # task 1 is always the starting task\n    while q:\n        cost, cur = q.popleft()\n        for nxt in graph[cur]:\n            tmp_ind[nxt] -= 1\n            T[nxt] = max(cost + tmp_t[nxt], T[nxt])\n            if tmp_ind[nxt] == 0:\n                q.append((T[nxt], nxt))\n\n    min_time = min(min_time, max(T))\n\nprint(min_time)\n","repo_name":"siwon-park/Problem-Solving","sub_path":"Baekjoon_Solve/Topology/22947.py","file_name":"22947.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41212978388","text":"import torch\nimport torch.nn as nn\nclass LSTM_Intent(nn.Module):\n    def __init__(self, vocab_size, embedding_dim, hidden_units, output_size,n_layers):\n        super(LSTM_Intent, self).__init__()\n        self.hidden_units = hidden_units\n        self.embedding_dim = embedding_dim\n        self.vocab_size = vocab_size\n        self.output_size = output_size\n        self.n_layers = n_layers\n        \n        # layers\n        self.embedding = nn.Embedding(self.vocab_size, self.embedding_dim)\n        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_units,self.n_layers)\n        self.fc = nn.Linear(self.hidden_units, self.output_size)\n        self.sig = nn.Sigmoid()\n#         self.do = nn.Dropout(0.3)\n#         self.do1 = nn.Dropout(0.5)\n    def initialize_hidden_state(self):\n        return torch.zeros((self.n_layers, 1, self.hidden_units)),torch.zeros((self.n_layers,1, self.hidden_units))\n    \n    def forward(self, x):\n#         self.hidden = self.initialize_hidden_state()\n#         print(x.shape)\n        out = self.embedding(x)\n#         print(out)\n#         out = self.do(out)\n        out, self.hidden = self.lstm(out) # max_len X batch_size X hidden_units\n#         print(out.shape)\n#         out = self.do1(out)\n        out = self.fc(out)\n        out = out[-1,:,:] \n#         print(out)\n        out = self.sig(out)\n        return out","repo_name":"reyzaldindra/intun_implimin","sub_path":"app/bpjs_intent.py","file_name":"bpjs_intent.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15671579029","text":"# Input: text T\r\nT = input()\r\n\r\n# Input: pattern P\r\nP = input()\r\n\r\n# Import re 
module\r\nimport re\r\n\r\n# Find all occurrences of P in T using re.finditer\r\nmatches = re.finditer(P,T)\r\n\r\n# Initialize count of occurrences\r\ncount = 0\r\n\r\n# Initialize list of start and end indices\r\nindices = []\r\n\r\n# Loop through each match object\r\nfor match in matches:\r\n    # Increment count by 1\r\n    count += 1\r\n    \r\n    # Get start and end indices of match using match.start() and match.end()\r\n    start = match.start()\r\n    end = match.end() - 1\r\n    \r\n    # Append a tuple of start and end indices to indices list\r\n    indices.append((start,end))\r\n\r\n# Print the output \r\nif count == 0:\r\n    # If no occurrence is found, print 0\r\n    print(0)\r\nelse:\r\n    # If some occurrences are found, print count and indices list \r\n    print(count)\r\n    print(*indices)","repo_name":"dikaapranata/Praktikum-ASA","sub_path":"praktikum 3/pt.py","file_name":"pt.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4395012867","text":"# Given a sequence, output its next lexicographically greater permutation; if it is already the largest, wrap around to the smallest. In-place, O(1) extra space.\n# 1, 2, 3 -> 1, 3, 2\n\n# Idea:\n# scan from the right to find where the ascending run ends, swap in the smallest number larger than that element, then reverse the suffix\n\nclass Solution:\n    def nextPermutation(self, nums):\n        # find the first index i from the right with nums[i] < nums[i + 1]\n        i = len(nums) - 2\n        while i >= 0 and nums[i] >= nums[i + 1]:\n            i = i - 1\n        if i >= 0:\n            # find the rightmost element larger than nums[i] and swap\n            j = len(nums) - 1\n            while nums[j] <= nums[i]:\n                j = j - 1\n            nums[i], nums[j] = nums[j], nums[i]\n        # reverse the suffix so it becomes the smallest ordering\n        m = i + 1\n        n = len(nums) - 1\n        while m < n:\n            nums[m], nums[n] = nums[n], nums[m]\n            m = m + 1\n            n = n - 1\n        return nums\n\n\nif __name__ == '__main__':\n    nums = [1, 1, 5]\n    s = Solution()\n    result = s.nextPermutation(nums)\n    print(result)\n\n\n\n","repo_name":"huhuzwxy/leetcode","sub_path":"array/31_next_permutation.py","file_name":"31_next_permutation.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74585072263","text":"\"\"\"A collection of model configurations.\"\"\"\n\nimport optuna\nimport pytorch_lightning as pl\n\nfrom models.feedforward import FeedForward\nfrom models.tcn import TemporalConvNet\n\nfrom utils.pl_utils import get_training_config\nfrom utils.config_utils import BaseConfig\n\n\nclass Config(BaseConfig):\n    \"\"\"Global model configuration (does not change / is not tunable).\"\"\"\n    def __init__(self, study_name: str = 'study', batch_size: int = 10, *args, **kwargs):\n        \"\"\"Model config.\n\n        The batch size can be set manually as different models may require different sized batches.\n\n        Args:\n            study_name (str, optional): the study name. Defaults to `study`.\n            batch_size (int, optional): the batch size. Defaults to 10.\n        \"\"\"\n        super(Config, self).__init__(*args, **kwargs)\n\n        self.STUDY_NAME = study_name\n\n        self.NUM_INPUTS = 1\n        self.NUM_OUTPUTS = 1\n\n        self.BATCH_SIZE = batch_size\n        self.MIN_EPOCHS = 1\n        self.MAX_EPOCHS = 40\n\n        self.SEED = 23427\n\n        # Not saved (bc. 
lowercase).\n        self.log_freq = 10\n        self.num_workers = 0\n        self.val_loss_name = 'val_loss'\n\n\ndef feedforward(config: BaseConfig, trial: optuna.trial.Trial) -> pl.LightningModule:\n    \"\"\"Returns a tunable PyTorch lightning feedforward module.\n\n    Args:\n        config (BaseConfig): the hard-coded configuration.\n        trial (optuna.Trial): optuna trial.\n\n    Returns:\n        pl.LightningModule: a lightning module.\n    \"\"\"\n\n    training_config = get_training_config(\n        lr=trial.suggest_loguniform('lr', 1e-3, 1e-0),\n        weight_decay=trial.suggest_loguniform('weight_decay', 1e-5, 1e-1),\n        max_epochs=config.MAX_EPOCHS)\n\n    model = FeedForward(\n        training_config=training_config,\n        num_inputs=config.NUM_INPUTS,\n        num_outputs=config.NUM_OUTPUTS,\n        num_hidden=trial.suggest_int('num_hidden', 1, 4),\n        num_layers=trial.suggest_int('num_layers', 1, 2),\n        dropout=trial.suggest_float('dropout', 0.0, 0.5),\n        activation=trial.suggest_categorical('activation', ['relu', 'none'])\n    )\n\n    return model\n\n\ndef tcn(config: BaseConfig, trial: optuna.trial.Trial) -> pl.LightningModule:\n    \"\"\"Returns a tunable PyTorch lightning tcn module.\n\n    Args:\n        config (BaseConfig): the hard-coded configuration.\n        trial (optuna.Trial): optuna trial.\n\n    Returns:\n        pl.LightningModule: a lightning module.\n    \"\"\"\n\n    training_config = get_training_config(\n        lr=trial.suggest_loguniform('lr', 1e-3, 1e-0),\n        weight_decay=trial.suggest_loguniform('weight_decay', 1e-5, 1e-1),\n        max_epochs=config.MAX_EPOCHS)\n\n    tcn = TemporalConvNet(\n        training_config=training_config,\n        num_inputs=config.NUM_INPUTS,\n        num_outputs=config.NUM_OUTPUTS,\n        num_hidden=trial.suggest_int('num_hidden', 1, 4),\n        kernel_size=trial.suggest_int('kernel_size', 2, 4),\n        num_layers=trial.suggest_int('num_layers', 1, 2),\n        dropout=trial.suggest_float('dropout', 0.1, 0.3)\n    )\n\n    return tcn\n","repo_name":"bask0/q10hybrid","sub_path":"project/model_configs.py","file_name":"model_configs.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"4126862988","text":"from collections import deque\nN = int(input())\nd = deque()\nfor i in range(N):\n    a = input().split()\n    if a[0] == \"append\":\n        d.append(int(a[1]))\n    elif a[0] == \"pop\":\n        d.pop()\n    elif a[0] == \"popleft\":\n        d.popleft()\n    elif a[0] == \"appendleft\":\n        d.appendleft(int(a[1]))\nprint(' '.join(map(str, d)))","repo_name":"elisa2602/PCS2_HOMEWORK","sub_path":"pcs2_hm1/collections/collections_deque().py","file_name":"collections_deque().py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11175292540","text":"\"\"\"\npipeline designed to create/update the active roster table within the NBA stats database\n\"\"\"\n\nimport pymysql\nimport re\n\ndef drop_table(connection):\n    drop_table_statement = 'drop table active_rosters'\n    sql_execute(drop_table_statement, connection)\n\ndef get_cities(connection): # function to extract all cities from the team_info entity\n\n    city_extract_query = \"select team from team_info\"\n    cities_raw_extract = sql_execute(city_extract_query, connection)\n    cities = data_extract_from_raw_data(cities_raw_extract)\n    return cities\n\ndef get_team_names(connection):\n\n    team_names_extract_query = \"select distinct current_team 
from player_info\"\n    team_names_extract = sql_execute(team_names_extract_query, connection)\n    teams = data_extract_from_raw_data(team_names_extract)\n    return teams\n\ndef data_extract_from_raw_data(data):\n\n    regex_catch = re.compile('[A-Za-z].*\\\\w')\n    cleaned_data_list = []\n\n    for raw_line in data:\n        try:\n            data_string = re.search(regex_catch, str(raw_line))\n            cleaned_data_list.append(data_string.group(0))\n        except AttributeError:\n            print(\"Data not captured in regex expression\", raw_line)\n    return cleaned_data_list\n\ndef create_city_team_dict(teams, cities):\n\n    city_team_dict = {}\n    for city in cities:\n        for team in teams:\n            if city in team:\n                city_team_dict[city] = team\n    # hard-coding in LA Lakers\n    city_team_dict[\"LA Lakers\"] = \"Los Angeles Lakers\"\n    return city_team_dict\n\ndef create_update_statements(city_team_dict, connection):\n\n    for city_team in city_team_dict:\n        update_statement = \"update team_info set team = '\" + str(city_team_dict[city_team]) + \"' where team = '\" + str(city_team) + \"'\"\n        sql_execute(update_statement, connection)\n\ndef create_active_roster_table(connection):\n\n    create_table_statement = 'create table nba_stats.active_rosters( \\\n    select distinct reg.player_id, play.name, team.team_id, reg.team, stand.conference \\\n    from RegularSeasonAverages as reg \\\n    inner join player_info as play on play.player_id = reg.player_id \\\n    inner join team_info as team on (team.team = reg.team) \\\n    inner join team_standings as stand on ((team.team = stand.team) and (stand.season = reg.season)) \\\n    where stand.season = (select max(season) from team_standings))'\n    sql_execute(create_table_statement, connection)\n\n    add_primary_key = 'alter table nba_stats.active_rosters\\\n    add constraint active_roster_pk primary key(player_id, name, team_id, team)'\n    sql_execute(add_primary_key, connection)\n\ndef sql_execute(query, connection):\n\n    exe = connection.cursor()\n    exe.execute(query)\n    return exe.fetchall()\n\n## main function\nmyConnection = pymysql.connect(host=\"localhost\", user=\"root\", password=\"Sk1ttles\", db=\"nba_stats\", autocommit=True)\n\n#drop_table(myConnection)\ncities = get_cities(myConnection)\nteams = get_team_names(myConnection)\ncity_team_dict = create_city_team_dict(teams, cities)\ncreate_update_statements(city_team_dict, myConnection)\ncreate_active_roster_table(myConnection)\n","repo_name":"pstoeber/Personal-Projects","sub_path":"Pipeline Development/active_roster.py","file_name":"active_roster.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3265659409","text":"# Example 1: \r\n# Sorting the element names in alphabetic order \r\nearth_metals = [\"Beryllium\", \"Magnesium\", \"Calcium\", \"Strontium\", \"Barium\", \"Radium\"]\r\nearth_metals.sort() # by default, Python assumes you want to sort it in alphabetic order \r\n\r\nprint (earth_metals)\r\n\r\n# sort in reversed alphabetic order \r\nearth_metals.sort(reverse=True)\r\nprint (earth_metals)\r\n\r\n# remember that sorting changes things in place, so the list can be sorted; however, if you put the\r\n# above data into a tuple, then you cannot sort it directly because tuples are immutable. \r\n\r\n# the following will fail. \r\n# earth_metals = (\"Beryllium\", \"Magnesium\", \"Calcium\", \"Strontium\", \"Barium\", \"Radium\")\r\n# earth_metals.sort()\r\n\r\n# however you can use the sorted function instead to sort the tuples. 
\r\nearth_metals = (\"Beryllium\", \"Magnesium\", \"Calcium\", \"Strontium\", \"Barium\", \"Radium\")\r\nsorted_earth_metals = sorted(earth_metals)\r\nprint (sorted_earth_metals)\r\nprint (earth_metals)\r\n\r\ndata = (7, 2, 5, 6, 1, 3, 9, 10, 4, 8)\r\nprint (sorted(data))\r\n\r\n\r\n# Exmple 2: \r\n# we want to sort by the 2nd value of the tuple in the planet list. \r\nplanets = [(\"Mercury\", 2440, 5.43), \r\n (\"Venus\", 6052, 5.24), \r\n (\"Mars\", 3397, 1.530), \r\n (\"Jupiters\", 71452, 1.33, 5.21)]\r\n\r\n# use lambda to get the size of the planet. \r\nsize = lambda planet: planet[1]\r\n\r\n# use the lambda function in the sort \"key\" value, \r\nplanets.sort(key=size, reverse=True)\r\nprint (planets)","repo_name":"hsnyxfhyqhyh/python3","sub_path":"pythonExcise/pythonExcise/basics/SortingTest.py","file_name":"SortingTest.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35890659978","text":"import tensorflow as tf\nimport numpy as np\nimport tensorflow.keras\nfrom tensorflow.keras import Model, Sequential\nimport tensorflow.keras.utils\nfrom tensorflow.keras.regularizers import l2, l1\nfrom tensorflow.keras.layers import Dense, Input, BatchNormalization, LeakyReLU, Flatten, GaussianDropout, Lambda, LSTM, TimeDistributed, RepeatVector, Embedding, LayerNormalization, Dropout, MultiHeadAttention, Conv1D\nfrom tensorflow.keras.losses import MeanSquaredError\nimport tensorflow.keras.backend as K\nfrom plot_outputs import *\nimport time\n\n\nclass MLP():\n def __init__(self, mlp_parameters, autoencoder_parameters, mlp_optimizer, mlp_reg, num_classes):\n # mlp\n self.mlp_input = Dense(autoencoder_parameters['autoencoder_batch_size'], input_shape=(None, None))\n self.mlp_layer_1 = Dense(round(autoencoder_parameters['autoencoder_batch_size'] * 0.9), bias_initializer='zeros', kernel_initializer='glorot_uniform', activity_regularizer=l2(mlp_reg))\n self.mlp_layer_2 = Dense(round(autoencoder_parameters['autoencoder_batch_size'] * 0.7), bias_initializer='zeros', kernel_initializer='glorot_uniform', activity_regularizer=l2(mlp_reg))\n self.mlp_layer_3 = Dense(round(autoencoder_parameters['autoencoder_batch_size'] * 0.5), bias_initializer='zeros', kernel_initializer='glorot_uniform', activity_regularizer=l2(mlp_reg))\n self.mlp_layer_4 = Dense(round(autoencoder_parameters['autoencoder_batch_size'] * 0.3), bias_initializer='zeros', kernel_initializer='glorot_uniform', activity_regularizer=l2(mlp_reg))\n self.mlp_layer_5 = Dense(round(autoencoder_parameters['autoencoder_batch_size'] * 0.1), bias_initializer='zeros', kernel_initializer='glorot_uniform', activity_regularizer=l2(mlp_reg))\n self.mlp_output_sigmoid = Dense(num_classes, activation='sigmoid')\n self.mlp_output_softmax = Dense(num_classes, activation='softmax')\n # mlp_optimizer\n self.mlp_op = mlp_optimizer\n # leaky\n self.mlp_leaky = LeakyReLU(alpha=0.3)\n\n def run_mlp(self, reconstructed_latent):\n \"\"\"\n maybe think about having separate gradient for this + optimizer and looop through this\n need to add leaky relu activation\n \"\"\"\n mlp_input = self.mlp_input(reconstructed_latent)\n mlp1 = self.mlp_layer_1(mlp_input)\n mlp_leaky1 = self.mlp_leaky(mlp1)\n mlp2 = self.mlp_layer_2(mlp_leaky1)\n mlp_leaky2 = self.mlp_leaky(mlp2)\n mlp3 = self.mlp_layer_3(mlp_leaky2)\n mlp_leaky3 = self.mlp_leaky(mlp3)\n mlp4 = self.mlp_layer_4(mlp_leaky3)\n mlp_leaky4 = self.mlp_leaky(mlp4)\n mlp5 = self.mlp_layer_5(mlp_leaky4)\n mlp_leaky5 = 
self.mlp_leaky(mlp5)\n mlp_output_softmax = self.mlp_output_softmax(mlp_leaky5)\n return mlp_output_softmax\n\n def mlp_gradient(self, true_labels, reconstructed_latent, num_classes):\n with tf.GradientTape() as tape2:\n # mlp\n tape2.watch(self.mlp_input.variables)\n tape2.watch(self.mlp_layer_1.variables)\n tape2.watch(self.mlp_layer_2.variables)\n tape2.watch(self.mlp_layer_3.variables)\n tape2.watch(self.mlp_layer_4.variables)\n tape2.watch(self.mlp_layer_5.variables)\n tape2.watch(self.mlp_output_softmax.variables)\n class_acc, class_loss = self.get_mlp_loss(true_labels, reconstructed_latent, num_classes)\n mlp_gradient = tape2.gradient(class_loss, [self.mlp_input.variables[0], self.mlp_input.variables[1],\n self.mlp_layer_1.variables[0], self.mlp_layer_1.variables[1], self.mlp_layer_2.variables[0], self.mlp_layer_2.variables[1],\n self.mlp_layer_3.variables[0], self.mlp_layer_3.variables[1], self.mlp_layer_4.variables[0], self.mlp_layer_4.variables[1],\n self.mlp_layer_5.variables[0], self.mlp_layer_5.variables[1], self.mlp_output_softmax.variables[0], self.mlp_output_softmax.variables[1]])\n return mlp_gradient, class_acc, class_loss\n\n def mlp_learn(self, true_labels, reconstructed_latent, num_classes):\n mlp_gradient, class_acc, class_loss = self.mlp_gradient(true_labels, reconstructed_latent, num_classes)\n self.mlp_op.apply_gradients(zip(mlp_gradient, [self.mlp_input.variables[0], self.mlp_input.variables[1], self.mlp_layer_1.variables[0], self.mlp_layer_1.variables[1],\n self.mlp_layer_2.variables[0], self.mlp_layer_2.variables[1], self.mlp_layer_3.variables[0], self.mlp_layer_3.variables[1],\n self.mlp_layer_4.variables[0], self.mlp_layer_4.variables[1], self.mlp_layer_5.variables[0], self.mlp_layer_5.variables[1],\n self.mlp_output_softmax.variables[0], self.mlp_output_softmax.variables[1]]))\n return class_acc, class_loss\n\n def get_mlp_loss(self, true_labels, reconstructed_latent, num_classes):\n mlp_output_softmax = self.run_mlp(reconstructed_latent)\n # mlp_output = [200, 8] array of probability value for each feature or neuron belonging to a particular class --> find max prob for class\n # mlp_output_softmax = mlp_output_softmax.numpy()\n # mlp_output_softmax = mlp_output_softmax.reshape(mlp_output_softmax.shape[0]*mlp_output_softmax.shape[1], mlp_output_softmax.shape[2])\n class_acc = (np.sum(np.equal(np.argmax(mlp_output_softmax, axis=1), true_labels)) / len(true_labels)) * 100\n encoded_labels = self.one_hot_encoding(labels=true_labels, num_classes=num_classes, convert_back=False)\n class_loss = -tf.reduce_sum(tf.math.log(mlp_output_softmax) * encoded_labels)\n return class_acc, class_loss\n\n @staticmethod\n def one_hot_encoding(labels, num_classes, convert_back):\n if convert_back is False:\n hot_encoded_labels = np.zeros((len(labels), num_classes))\n for x in range(len(labels)):\n hot_encoded_labels[x, int(labels[x])] = 1\n return hot_encoded_labels\n else:\n return [x for x in np.argwhere(labels == 1)[:, 1]]\n","repo_name":"ankushgpta2/Vision2DeepManifold","sub_path":"models/mlp_model.py","file_name":"mlp_model.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7750215067","text":"from music21 import converter, clef, stream, pitch, note, meter, midi\nimport numpy as np\n\n\nKEY_SIG_OFFSET = 0\n\ndef parse_notes(midi_stream):\n melody_corpus = []\n\n last_pitch = 1\n chord_buffer = []\n prev_offset = 0.0\n for m in midi_stream.measures(1, None):\n time_sig = 
m.timeSignature\n for nr in m.flat.notesAndRests:\n offset_loc = nr.offset\n # pitch = nr.pitch.pitchClass + 1 if isinstance(nr, note.Note) else 0\n pitch = nr.pitch.midi if isinstance(nr, note.Note) else 0\n beat_strength = round(nr.beatStrength * 4.0, 0)\n duration = float(nr.quarterLength)\n\n note_repr = (pitch, beat_strength, duration)\n # note_repr = (pitch, duration)\n # Handle chords\n if nr.offset == prev_offset:\n if note_repr[0] > 0:\n chord_buffer.append(note_repr)\n else:\n if chord_buffer: # Choose tone from chord buffer closest to current note\n chord_melody_tone = sorted(chord_buffer, key=lambda x: abs(x[0] - pitch))[0]\n melody_corpus.append(chord_melody_tone)\n melody_corpus.append(note_repr)\n chord_buffer = []\n prev_offset = nr.offset\n\n return melody_corpus\n\n\ndef build_corpus(midi_files):\n melody_corpus = []\n for file in midi_files:\n midi_stream = converter.parse(file)\n midi_stream = midi_stream[0]\n if '1008' in file or '1011' in file:\n midi_stream.keySignature = midi_stream.keySignature.relative\n key_sig = midi_stream.keySignature\n print('Input file: {} ({})'.format(file, str(key_sig)))\n midi_stream.transpose(KEY_SIG_OFFSET - key_sig.tonic.pitchClass, inPlace=True)\n melody_corpus.extend(parse_notes(midi_stream))\n # map indices for constructing matrix representations\n melody_set = set(melody_corpus)\n notes_indices = {note: i for i, note in enumerate(melody_set)}\n indices_notes = {i: note for i, note in enumerate(melody_set)}\n\n return melody_corpus, melody_set, notes_indices, indices_notes\n","repo_name":"naoyak/JohaNN","sub_path":"corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"19119327814","text":"from rest_framework import serializers\nfrom rest_framework.reverse import reverse\nfrom drf_spectacular.utils import extend_schema_serializer, extend_schema_field, OpenApiTypes\nfrom events.models import OfficeHour, Event2019, CrewAttendanceRecord\nfrom accounts.models import Officer\nfrom data.models import ResizedRedirect\nfrom pages.models import Page\nfrom spotify.models import Session, SpotifyUser, SongRequest\nfrom spotify.api import get_playback_state, queue_estimate\n\nfrom .models import TokenRequest\n\n\n# Create your serializers here.\n@extend_schema_serializer(\n exclude_fields=['img', 'class_year']\n)\nclass OfficerSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n super(OfficerSerializer, self).__init__(*args, **kwargs)\n\n if self.context.get('request', None):\n fields = self.context['request'].query_params.get('options')\n if fields:\n fields = fields.split(',')\n fields.append('title')\n fields.append('name')\n allowed = set(fields)\n current = set(self.fields.keys())\n for field_name in current - allowed:\n self.fields.pop(field_name)\n else:\n self.fields.pop('img')\n self.fields.pop('class_year')\n\n class_year = serializers.IntegerField(source='user.class_year')\n img = serializers.SerializerMethodField()\n name = serializers.CharField(source='user.name')\n\n def get_img(self, obj):\n if obj.img:\n return obj.img.img.url\n else:\n return None\n\n class Meta:\n model = Officer\n fields = ('title', 'name', 'img', 'class_year')\n\n\nclass HourSerializer(serializers.ModelSerializer):\n officer = serializers.CharField(source='officer.exec_position.title')\n location = serializers.CharField(source='location.name')\n\n class Meta:\n model = OfficeHour\n fields = 
('officer', 'day', 'location', 'hour_start', 'hour_end')\n\n\nclass NotificationSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n ref_id = \"LNLWN-\" + str(instance.pk)\n class_type = 2\n if instance.target == \"all\" or instance.target == \"All\":\n if instance.dismissible is True and instance.format == \"alert\":\n class_type = 2\n else:\n class_type = 1\n elif instance.dismissible is True:\n class_type = 3\n return {\n 'id': ref_id,\n 'class': class_type,\n 'format': instance.format,\n 'type': instance.type,\n 'expires': instance.expires,\n 'title': instance.title,\n 'message': instance.message\n }\n\n\nclass EventSerializer(serializers.ModelSerializer):\n location = serializers.CharField(source='location.name')\n datetime_start = serializers.DateTimeField(format=\"%Y-%m-%dT%H:%M:%S%z\", read_only=True)\n datetime_end = serializers.DateTimeField(format=\"%Y-%m-%dT%H:%M:%S%z\", read_only=True)\n\n class Meta:\n model = Event2019\n fields = ('id', 'event_name', 'description', 'location', 'datetime_start', 'datetime_end',)\n\n\nclass AttendanceSerializer(serializers.ModelSerializer):\n class Meta:\n model = CrewAttendanceRecord\n fields = ('user', 'event', 'checkin', 'checkout', 'active')\n\n\nclass RedirectSerializer(serializers.ModelSerializer):\n class Meta:\n model = ResizedRedirect\n fields = ('name', 'old_path')\n\n def to_representation(self, instance):\n path = instance.old_path\n if instance.old_path[0] == '/':\n path = instance.old_path[1:]\n return {\n 'title': instance.name,\n 'path': path,\n 'category': 'Redirects'\n }\n\n\nclass CustomPageSerializer(serializers.ModelSerializer):\n class Meta:\n model = Page\n fields = ('title', 'slug', 'sitemap_category')\n\n def to_representation(self, instance):\n category = 'Redirects'\n if instance.sitemap_category is not None:\n category = instance.sitemap_category\n return {\n 'title': instance.title,\n 'path': instance.slug,\n 'category': category\n }\n\n\nclass SpotifyUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = SpotifyUser\n fields = ('id', 'display_name', 'spotify_id', 'personal')\n\n\nclass SpotifySessionReadOnlySerializer(serializers.ModelSerializer):\n id = serializers.CharField(source='slug')\n event = EventSerializer()\n user = SpotifyUserSerializer()\n urls = serializers.SerializerMethodField()\n\n is_playing = serializers.SerializerMethodField()\n device = serializers.SerializerMethodField()\n current_track = serializers.SerializerMethodField()\n runtime_ms = serializers.SerializerMethodField()\n\n def __new__(cls, *args, **kwargs):\n if kwargs.get('many', False) is True:\n context = kwargs.get('context', {})\n context.update({'has_many': True})\n kwargs.update({'context': context})\n\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n super(SpotifySessionReadOnlySerializer, self).__init__(*args, **kwargs)\n\n many = self.context.get('has_many', False)\n\n # If session is inactive, do not return playback state\n if isinstance(self.instance, Session):\n event = self.instance.event\n if not event.approved or event.cancelled or event.closed or event.reviewed or \\\n not self.instance.accepting_requests:\n many = True\n\n if many:\n self.fields.pop('is_playing')\n self.fields.pop('device')\n self.fields.pop('current_track')\n self.fields.pop('runtime_ms')\n elif isinstance(self.instance, Session):\n self.playback_state = get_playback_state(self.instance)\n\n # Do not show certain fields to 
unprivileged users\n if not self.request or not self.request.user or not self.request.user.has_perm('spotify.view_session'):\n self.fields.pop('user')\n self.fields.pop('private')\n self.fields.pop('auto_approve')\n not many and self.fields.pop('device')\n\n @extend_schema_field(OpenApiTypes.OBJECT)\n def get_urls(self, obj):\n urls = {\n 'request_form': reverse('spotify:request', args=[obj.slug], request=self.request),\n 'qr_code': reverse('spotify:qr', args=[obj.pk], request=self.request)\n }\n return urls\n\n @extend_schema_field(OpenApiTypes.BOOL)\n def get_is_playing(self, obj):\n if self.playback_state:\n return self.playback_state.get('is_playing', False)\n return False\n\n @extend_schema_field(OpenApiTypes.OBJECT)\n def get_device(self, obj):\n if self.playback_state:\n return self.playback_state.get('device', None)\n return None\n\n @extend_schema_field(OpenApiTypes.OBJECT)\n def get_current_track(self, obj):\n if self.playback_state:\n return self.playback_state.get('item', None)\n return None\n\n @extend_schema_field(OpenApiTypes.INT)\n def get_runtime_ms(self, obj):\n return queue_estimate(obj, True)\n\n class Meta:\n model = Session\n fields = ('id', 'event', 'user', 'allow_explicit', 'require_payment', 'private', 'accepting_requests',\n 'auto_approve', 'is_playing', 'runtime_ms', 'device', 'current_track', 'urls')\n depth = 1\n\n\nclass SpotifySessionWriteSerializer(serializers.ModelSerializer):\n event = serializers.PrimaryKeyRelatedField(\n queryset=Event2019.objects.filter(approved=True, reviewed=False, closed=False, cancelled=False),\n help_text=\"Primary key value of the corresponding event\"\n )\n user = serializers.PrimaryKeyRelatedField(\n queryset=SpotifyUser.objects.all(), help_text=\"Primary key value of the Spotify account to use\"\n )\n auto_approve = serializers.BooleanField(default=False, help_text=\"Attempt to automatically queue song requests\")\n private = serializers.BooleanField(default=False, help_text=\"Restrict session to LNL members\")\n\n def __init__(self, *args, **kwargs):\n partial = kwargs.get('partial', None)\n super(SpotifySessionWriteSerializer, self).__init__(*args, **kwargs)\n\n if partial:\n for field in self.fields:\n self.fields[field].required = False\n\n def update(self, instance, validated_data):\n validated_data['event'] = instance.event # Changing this field is not permitted\n return super(SpotifySessionWriteSerializer, self).update(instance, validated_data)\n\n class Meta:\n model = Session\n fields = ('event', 'user', 'accepting_requests', 'allow_explicit', 'auto_approve', 'private')\n\n\nclass SongRequestSerializer(serializers.ModelSerializer):\n session = serializers.CharField(source='session.slug')\n queued_ts = serializers.DateTimeField(source='queued')\n requestor = serializers.SerializerMethodField()\n urls = serializers.SerializerMethodField()\n\n @extend_schema_field(OpenApiTypes.OBJECT)\n def get_requestor(self, obj):\n return {\"name\": obj.submitted_by, \"email\": obj.email, \"phone\": obj.phone}\n\n @extend_schema_field(OpenApiTypes.OBJECT)\n def get_urls(self, obj):\n return {'spotify_url': 'https://open.spotify.com/track/{}'.format(obj.identifier)}\n\n class Meta:\n model = SongRequest\n fields = ('id', 'session', 'name', 'duration', 'approved', 'queued_ts', 'requestor', 'urls')\n\n\nclass TokenRequestSerializer(serializers.ModelSerializer):\n APIKey = serializers.CharField(help_text=\"Your application's API key\")\n username = serializers.CharField(source='user.username', help_text=\"The user's username\")\n code = 
serializers.IntegerField(help_text=\"The user's verification code\")\n\n class Meta:\n model = TokenRequest\n fields = ('code', 'APIKey', 'username')\n","repo_name":"WPI-LNL/lnldb","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":10090,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"43121003548","text":"from datetime import datetime\nimport unittest\nfrom eqcatalogue import models as catalogue\nimport geoalchemy\nfrom tests.test_utils import in_data_dir\n\n\nclass ShouldCreateAlchemyTestCase(unittest.TestCase):\n\n def setUp(self):\n self.catalogue = catalogue.CatalogueDatabase(memory=True, drop=True)\n self.session = self.catalogue.session\n\n def tearDown(self):\n self.session.commit()\n\n def test_drop(self):\n self.catalogue = catalogue.CatalogueDatabase(\n drop=True, filename=in_data_dir(\"test_drop.db\"))\n self.catalogue = catalogue.CatalogueDatabase(memory=True, drop=True)\n\n def test_eventsource(self):\n event_source = catalogue.EventSource(name=\"test1\")\n self.session.add(event_source)\n self.assertEqual(self.session.query(\n catalogue.EventSource).filter_by(name='test1').count(), 1)\n\n def test_agency(self):\n eventsource = catalogue.EventSource(name=\"test2\")\n self.session.add(eventsource)\n\n agency = catalogue.Agency(source_key=\"test\", eventsource=eventsource)\n self.session.add(agency)\n self.assertEqual(\n self.session.query(catalogue.Agency).filter_by(\n source_key='test').count(),\n 1)\n\n def test_event(self):\n eventsource = catalogue.EventSource(name=\"test3\")\n self.session.add(eventsource)\n\n event = catalogue.Event(source_key=\"test\", eventsource=eventsource)\n self.session.add(event)\n self.assertEqual(\n self.session.query(catalogue.Event).filter_by(\n source_key='test').count(), 1)\n\n def test_origin(self):\n eventsource = catalogue.EventSource(name=\"test4\")\n self.session.add(eventsource)\n\n origin = catalogue.Origin(source_key=\"test\", eventsource=eventsource,\n position=geoalchemy.WKTSpatialElement(\n 'POINT(-81.40 38.08)'),\n time=datetime.now(),\n depth=3)\n self.session.add(origin)\n self.assertEqual(\n self.session.query(catalogue.Origin).filter(\n catalogue.Origin.depth > 2).count(), 1)\n\n def test_magnitudemeasure(self):\n eventsource = catalogue.EventSource(name=\"test4\")\n self.session.add(eventsource)\n\n event = catalogue.Event(source_key=\"test\", eventsource=eventsource)\n self.session.add(event)\n\n agency = catalogue.Agency(source_key=\"test\", eventsource=eventsource)\n self.session.add(agency)\n\n origin = catalogue.Origin(\n source_key=\"test\", eventsource=eventsource,\n position=geoalchemy.WKTSpatialElement('POINT(-81.40 38.08)'),\n time=datetime.now(),\n depth=1)\n self.session.add(origin)\n\n measure = catalogue.MagnitudeMeasure(\n event=event, agency=agency, origin=origin, scale='mL', value=5.0)\n self.session.add(measure)\n\n self.assertEqual(\n self.session.query(catalogue.MagnitudeMeasure).count(), 1)\n\n def test_get_or_add(self):\n event_source1, created = self.catalogue.get_or_create(\n catalogue.EventSource, {'name': \"test_5\"})\n self.assertTrue(created)\n event_source2, created = self.catalogue.get_or_create(\n catalogue.EventSource, {'name': \"test_5\"})\n self.assertFalse(created)\n self.assertEqual(event_source1, event_source2)\n\n def create_test_fixture(self):\n eventsource = catalogue.EventSource(name=\"AnEventSource\")\n self.session.add(eventsource)\n\n first_event = 
catalogue.Event(source_key=\"1st\",\n eventsource=eventsource)\n second_event = catalogue.Event(source_key=\"2nd\",\n eventsource=eventsource)\n self.session.add(first_event)\n self.session.add(second_event)\n\n agency_one = catalogue.Agency(source_key=\"1st\",\n eventsource=eventsource, name='Tatooine')\n agency_two = catalogue.Agency(source_key=\"2nd\",\n eventsource=eventsource, name='Alderaan')\n agency_three = catalogue.Agency(source_key=\"3rd\",\n eventsource=eventsource, name='DeathStar')\n self.session.add(agency_one)\n self.session.add(agency_two)\n self.session.add(agency_three)\n\n origin = catalogue.Origin(\n source_key=\"test\", eventsource=eventsource,\n position=geoalchemy.WKTSpatialElement('POINT(-81.40 38.08)'),\n time=datetime.now(),\n depth=1)\n self.session.add(origin)\n\n measure_one = catalogue.MagnitudeMeasure(\n event=first_event, agency=agency_one,\n origin=origin, scale='mL', value=5.0)\n self.session.add(measure_one)\n\n measure_two = catalogue.MagnitudeMeasure(\n event=second_event, agency=agency_two,\n origin=origin, scale='mb', value=6.0)\n self.session.add(measure_two)\n\n def test_available_measures_agencies(self):\n self.create_test_fixture()\n\n self.assertEqual(set(['Tatooine', 'Alderaan', 'DeathStar']),\n self.catalogue.get_agencies())\n\n def test_available_measures_scales(self):\n self.create_test_fixture()\n\n self.assertEqual(set(['mL', 'mb']),\n self.catalogue.get_measure_scales())\n\n def test_get_summary(self):\n self.create_test_fixture()\n\n self.assertEqual({catalogue.CatalogueDatabase.MEASURE_AGENCIES:\n set(['Tatooine', 'Alderaan', 'DeathStar']),\n catalogue.CatalogueDatabase.MEASURE_SCALES:\n set(['mL', 'mb'])},\n self.catalogue.get_summary())\n","repo_name":"gvallarelli/oq-eqcatalogue-tool","sub_path":"tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40245616640","text":"import miscnn.cli.data_exploration as data_exp\nimport unittest\nimport tempfile\nimport os\nimport nibabel as nib\nimport numpy as np\nimport pandas as pd\nfrom miscnn import Data_IO\nfrom miscnn.data_loading.interfaces import NIFTI_interface\n\n\nclass MockArgParser():\n def __init__(self):\n self.default = \"\"\n self.shortName = []\n self.name = []\n \n def add_argument(self, shortname, name, *args, **kwargs):\n self.shortName.append(shortname)\n self.name.append(name)\n \n def set_defaults(self, *args, **kwargs):\n self.default = kwargs[\"which\"]\n\nclass ArgData():\n def __init__(self, data_dir, imagetype):\n self.data_dir = data_dir\n self.imagetype = imagetype\n\ndef write_sample(sample_data, path, name):\n if not os.path.exists(path):\n raise IOError(\n \"Data path, {}, could not be resolved\".format(path)\n )\n # Save segmentation to disk\n sample_path = os.path.join(path, name)\n os.mkdir(sample_path)\n nifti = nib.Nifti1Image(sample_data[0], None)\n nib.save(nifti, os.path.join(sample_path, \"imaging.nii.gz\"))\n nifti = nib.Nifti1Image(sample_data[1], None)\n nib.save(nifti, os.path.join(sample_path, \"segmentation.nii.gz\"))\n\n\nclass cliTEST(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n # Create imgaging and segmentation data set\n np.random.seed(1234)\n self.dataset = dict()\n for i in range(0, 10):\n img = np.random.rand(16, 16, 16) * 256\n self.img = img.astype(int)\n seg = np.random.rand(16, 16, 16) * 3\n self.seg = seg.astype(int)\n sample = (self.img, self.seg)\n self.dataset[\"TEST.sample_\" 
+ str(i)] = sample\n # Initialize the NIfTI IO interface\n # Initialize temporary directory\n self.tmp_dir = tempfile.TemporaryDirectory(prefix=\"tmp.miscnn.\")\n self.tmp_data = os.path.join(self.tmp_dir.name, \"data\")\n os.mkdir(self.tmp_data)\n \n for key, value in self.dataset.items():\n write_sample(value, self.tmp_data, key)\n \n self.dataio = Data_IO(NIFTI_interface(), self.tmp_data)\n \n # Perhaps this should be a test itself, but the tests below depend on this data\n \n # Generate the sample directory\n\n # Delete all temporary files\n @classmethod\n def tearDownClass(self):\n self.tmp_dir.cleanup()\n\n #-------------------------------------------------#\n # Base Functionality #\n #-------------------------------------------------#\n # Command Registration\n def test_checkRegistration(self):\n mockParser = MockArgParser()\n data_exp.register_commands(mockParser)\n \n assert mockParser.default == \"data_exp\"\n assert len(mockParser.shortName) == len(mockParser.name)\n assert len(mockParser.shortName) > 0\n \n \n # CLI Setup\n def test_CLI_setup(self):\n args = ArgData(self.tmp_data, \"Unknown\")\n \n self.context_data = data_exp.setup_execution(args)\n \n assert isinstance(self.context_data[\"dataio\"], Data_IO)\n \n assert self.context_data[\"cnt\"] == 10\n # Verifies that the scans work\n assert self.context_data[\"cnt\"] == len(self.context_data[\"indices\"])\n assert self.context_data[\"cnt\"] == len(self.context_data[\"images\"])\n assert self.context_data[\"cnt\"] == len(self.context_data[\"segmentations\"])\n \n self.assertCountEqual(self.context_data[\"indices\"], self.dataset.keys())\n self.assertCountEqual(self.context_data[\"images\"], self.dataset.keys())\n self.assertCountEqual(self.context_data[\"segmentations\"], self.dataset.keys())\n \n \n def test_CLIclassAnalysis(self):\n result = data_exp.execute_class_analysis({\"data_dir\": self.tmp_data, \"dataio\":self.dataio, \"segmentations\":self.dataset.keys()})\n assert len(result) <= 3\n assert len(result) > 0\n \n def test_CLIstructureAnalysis(self):\n dataframe = pd.DataFrame({\"name\":[]})\n dataframe = data_exp.execute_structure_analysis({\"data_dir\": self.tmp_data, \"dataio\":self.dataio, \"indices\":self.dataset.keys()}, dataframe)\n \n expectation = pd.DataFrame.from_dict({id: [name, (16, 16, 16, 1), [1, 1, 1]] for id, name in enumerate(self.dataset.keys())}, orient=\"index\",columns=[\"name\", \"shape\", \"voxel_spacing\"])\n \n pd.testing.assert_frame_equal(dataframe,expectation,check_names=False)\n \n def test_CLIminmaxAnalysis(self):\n df, minmax_data = data_exp.execute_minmax_analysis({\"data_dir\": self.tmp_data, \"dataio\":self.dataio, \"indices\":self.dataset.keys()}, pd.DataFrame({\"name\":[]}))\n \n assert (df[\"minimum\"] < df[\"maximum\"]).all()\n assert (df[\"minimum\"] >= minmax_data[\"glob_min\"]).all()\n \n assert (df[\"maximum\"] <= minmax_data[\"glob_max\"]).all()\n \n def test_CLIminaxSegAnalysis(self):\n df, minmax_data = data_exp.execute_minmax_seg_analysis({\"data_dir\": self.tmp_data, \"dataio\":self.dataio, \"segmentations\":self.dataset.keys()}, pd.DataFrame({\"name\":[]}))\n \n for cl, minmax in minmax_data.items():\n assert (df[\"minimum_c\" + str(cl)] < df[\"maximum_c\" + str(cl)]).all()\n assert (df[\"minimum_c\" + str(cl)] >= minmax[\"glob_min\"]).all()\n \n assert (df[\"maximum_c\" + str(cl)] <= minmax[\"glob_max\"]).all()\n \n def test_CLIratioAnalysis(self):\n df = data_exp.execute_ratio_analysis({\"data_dir\": self.tmp_data, \"dataio\":self.dataio, \"indices\":self.dataset.keys()}, 
pd.DataFrame({\"name\":[]}))\n \n assert df[\"class_frequency\"].apply(lambda x: len(x) == 3).all()\n \n def test_CLIbinningAnalysis(self):\n bins = data_exp.execute_binning({\"data_dir\": self.tmp_data, \"dataio\":self.dataio, \"indices\":self.dataset.keys(), \"images\":self.dataset.keys(), \"segmentations\":self.dataset.keys()}, pd.DataFrame({\"name\":[]}), 5)\n \n def test_CLIbinningSegAnalysis(self):\n bins = data_exp.execute_binning_seg({\"data_dir\": self.tmp_data, \"dataio\":self.dataio, \"segmentations\":self.dataset.keys()}, pd.DataFrame({\"name\":[]}), 5)\n ","repo_name":"frankkramer-lab/MIScnn","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":382,"dataset":"github-code","pt":"81"} +{"seq_id":"1217345536","text":"from contextInfo.context import Context\nimport cv2\nimport numpy as np\nimport json\nimport copy\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\ndef rectify_video(image, map1, map2):\n frame_rectified = cv2.remap(image, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)\n return frame_rectified\n\n\ndef calculate_para(K, D, width, height):\n # Optimize the camera intrinsics and distortion coefficients\n p = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(K, D, (width, height), None)\n # This computation is relatively expensive, so it is hoisted out of the main loop\n map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, None, p, (width, height), cv2.CV_32F)\n return map1, map2\n\ndef current_coord(detect_result,results):\n pen_coords = [item for item in detect_result if item[\"name\"] == \"pen\"]\n screen_coords = [item for item in detect_result if item[\"name\"] == \"screen\"]\n sorted_pen = sorted(pen_coords, key=lambda x: x.get(\"confidence\"),reverse=True)\n sorted_coord_screen = sorted(screen_coords, key=lambda x: x.get(\"confidence\"), reverse=True)\n\n screen_rect = []\n pen_rect = []\n pen_point = [-1,-1]\n if sorted_coord_screen:\n screen_x1 = int(sorted_coord_screen[0][\"xmin\"])-10\n screen_y1 = int(sorted_coord_screen[0][\"ymin\"])-10\n screen_x2 = int(sorted_coord_screen[0][\"xmax\"])+5\n screen_y2 = int(sorted_coord_screen[0][\"ymax\"])+5\n screen_rect = [screen_x1,screen_y1,screen_x2,screen_y2]\n\n if sorted_pen:\n # print(\"sort_pen = \", sorted_pen)\n # results.show() # show the annotations\n pen_x1 = int(sorted_pen[0][\"xmin\"])\n pen_y1 = int(sorted_pen[0][\"ymin\"])\n pen_x2 = int(sorted_pen[0][\"xmax\"])\n pen_y2 = int(sorted_pen[0][\"ymax\"])\n pen_point = [pen_x2,pen_y2]\n pen_rect = [pen_x1,pen_y1,pen_x2,pen_y2]\n return pen_point,pen_rect,screen_rect\n\nclass edgeDetectBase(object):\n\n def __init__(self):\n self.SN = \"002\"\n self.area_per_min = 0\n self.box_all = []\n self.count_keep_max=3\n self.dist_threshold =3\n self.count_keep = 0\n self.area_per_change_threshold = 0.005\n pass\n\n def edge_handle(self,img,screen_rect):\n # Crop to the screen region\n if screen_rect:\n mask = np.zeros(img.shape[:2], dtype=\"uint8\")\n cv2.rectangle(mask, (screen_rect[0], screen_rect[1]), (screen_rect[2], screen_rect[3]), 255, -1)\n\n screen_masked = cv2.bitwise_and(img, img, mask=mask)\n # cv2.imshow(\"rectangular mask\", masked)\n else:\n screen_masked = copy.deepcopy(img)\n\n img_handle = self.color_select(screen_masked)\n\n seg_img = np.zeros_like(img)\n img_copy = copy.deepcopy(img)\n # Convert to a grayscale image\n gray = cv2.cvtColor(img_handle, cv2.COLOR_BGR2GRAY)\n\n gray_copy = copy.deepcopy(gray)\n\n # Morphological processing on the grayscale image; returns the masks\n mask, closed_mask, mask_er = self.edge_detect_biology(img,gray_copy)\n\n gray_max, gray_mask_max, 
gray_mask_max_bool = self.find_mask_max(closed_mask, gray_copy)\n\n gray, gray_mask = self.rect_mask_filter(gray_mask_max, gray_max)\n\n kernel_5x5 = np.ones((5, 5), np.uint8)\n gray_mask_ode = cv2.erode(gray_mask_max, kernel_5x5, iterations=50)\n # cv2.imshow(\"closed_mask\",gray_mask_ode)\n # After masking, the largest region is the outer frame\n screen_contours = self.find_contours(gray_mask_ode)\n # screen_contours_old.append(screen_contours)\n\n # circumscribed (bounding) rectangle\n rect = cv2.minAreaRect(screen_contours[0])\n # rect_all.append(rect)\n box = cv2.boxPoints(rect)\n box = np.int64(box)\n #\n self.box_all.append(box) # not yet sorted\n #\n cv2.drawContours(img_copy, [box], -1, (255, 0, 0), 2)\n # cv2.imshow(\"xx\",img_copy)\n #\n area = cv2.contourArea(box)\n area_contour = cv2.contourArea(screen_contours[0])\n area_per = area_contour/area # area ratio\n\n # # Iterative refinement of the frame\n # # ------- stash when the area ratio grows ---------\n # if (self.area_per_min < area_per):\n # screen_contours_best_tmp = screen_contours\n # area_per_min_tmp = area_per\n # box_tmp = box\n # # ------- stash when the area ratio grows ---------\n # # 1. area_per_min > area_per_cur # keep: area_per_min = area_per_cur\n # # 2. corner change # new SN: area_per_min = area_per_cur\n # if len(self.box_all)>self.count_keep_max:\n #\n # box_past = self.box_all[-2] # bounding rectangle of the previous detection\n #\n # # check whether the detection box has stayed unchanged for a long time\n # M_past = cv2.moments(box_past)\n # cx_past = int(M_past['m10'] / M_past['m00'])\n # cy_past = int(M_past['m01'] / M_past['m00'])\n # point_past = np.array([cx_past, cy_past])\n # M = cv2.moments(box)\n # cx = int(M['m10'] / M['m00'])\n # cy = int(M['m01'] / M['m00'])\n # point = np.array([cx, cy])\n # dist = np.linalg.norm(point_past-point) # centroid shift\n # # ------- centroid held steady ---------\n # if (dist < self.dist_threshold):\n # self.count_keep += 1\n # else:\n # self.count_keep = 0\n # # ------- centroid held steady ---------\n #\n # # ------- stash while the centroid holds ---------\n # if (self.count_keep >= self.count_keep_max):\n # screen_contours_best_tmp = screen_contours\n # box_tmp = box\n # area_per_min_tmp = area_per\n # # ------- centroid held steady ---------\n #\n # # ------- handle the initial frames -----\n # else:\n # screen_contours_best = screen_contours_best_tmp\n # area_per_min = area_per_min_tmp\n # box_best = box_tmp\n # # ------- handle the first three frames -----\n #\n # # ------- overwrite while the centroid holds --------- conditions: the area ratio has not shrunk much, the centroid has barely moved, and this has held for a while\n # if (dist < self.dist_threshold) and (self.count_keep >= self.count_keep_max) and ((area_per_min - area_per) < area_per_change_threshold):\n #\n # screen_contours_best = screen_contours_best_tmp\n # box_best = box_tmp\n # area_per_min = area_per_min_tmp\n # count_keep = 0\n # # ------- overwrite while the centroid holds ---------\n #\n # area_per_list.append(area_per)\n # area_per_min_list.append(area_per_min)\n #\n # # sort the box corners so the usable area is easier to display later\n # box_best_xs = [i[0] for i in box_best]\n # box_best_ys = [i[1] for i in box_best]\n # box_best_xs.sort()\n # box_best_ys.sort()\n # box_best = np.array([[box_best_xs[1],box_best_ys[1]],[box_best_xs[2],box_best_ys[1]],\n # [box_best_xs[2],box_best_ys[2]],[box_best_xs[1],box_best_ys[2]]])\n #\n # # largest area ratio (the best frame) ----> frame unchanged for a long time: enable corner detection ----> corners changed: treat as a new SN detection but keep the track\n # screen_contours_new.append(screen_contours_best)\n # box_all_new.append(box_best) # after sorting\n #\n # ## do we need a new circumscribed rectangle?\n # # Create a new image and draw the segmentation contours\n # img_test = np.zeros_like(img_copy)\n # cv2.drawContours(img_test, screen_contours_best, -1, (0, 0, 255), 2)\n # cv2.drawContours(img_copy, screen_contours_best, -1, (0, 0, 255), 2)\n # # cv2.drawContours(img_copy, [box_best], -1, (255, 0, 0), 2)\n # # gray_copy[closed_mask == 255] = 255\n #\n # # inscribed rectangle\n # rect_in = order_points(screen_contours_best[0].reshape(screen_contours_best[0].shape[0], 2))\n # rect_in = np.int64(rect_in)\n # xs_in = [i[0] for i in 
rect_in]\n # ys_in = [i[1] for i in rect_in]\n # xs_in.sort()\n # ys_in.sort()\n # # the coordinates of the inscribed rectangle are\n # print(xs_in[1], xs_in[2], ys_in[1], ys_in[2])\n # box_in = np.array([[xs_in[1],ys_in[1]],[xs_in[2],ys_in[1]],\n # [xs_in[2],ys_in[2]],[xs_in[1],ys_in[2]]])\n # box_in_buffer = np.array([[xs_in[1]+buffer+buffer_in_right,ys_in[1]+buffer],[xs_in[2]-buffer,ys_in[1]+buffer],\n # [xs_in[2]-buffer,ys_in[2]-buffer],[xs_in[1]+buffer+buffer_in_right,ys_in[2]-buffer]])\n # # cv2.drawContours(img_copy, [box_in], -1, (0, 255, 255), 2)\n # x_inbuffer_left, x_inbuffer_right, y_inbuffer_up, y_inbuffer_down = box_in_buffer[0][0], box_in_buffer[1][0], \\\n # box_in_buffer[1][1], box_in_buffer[2][1]\n # valid region\n # box_buffer = np.array([[box_best[0][0] - buffer, box_best[0][1] - buffer], [box_best[1][0] + buffer+ buffer_right, box_best[1][1] - buffer],\n # [box_best[2][0] + buffer + buffer_right, box_best[2][1] + buffer], [box_best[3][0] - buffer, box_best[3][1] + buffer]])\n # xbuffer_left, xbuffer_right, ybuffer_up, ybuffer_down = box_buffer[0][0], box_buffer[1][0], \\\n # box_buffer[1][1], box_buffer[2][1]\n #\n # rectangle_out_mask = np.zeros(img.shape[0:2], dtype=\"uint8\")\n # cv2.rectangle(rectangle_out_mask, tuple(box_buffer[0]), tuple(box_buffer[2]), 255, -1)\n # rectangle_in_mask = np.zeros(img.shape[0:2], dtype=\"uint8\")\n # cv2.rectangle(rectangle_in_mask, tuple(box_in_buffer[0]), tuple(box_in_buffer[2]), 255, -1)\n # buffer_area = rectangle_out_mask-rectangle_in_mask\n # buffer_area_bool = (buffer_area==255)\n # area_zeros1 = 255-np.zeros_like(buffer_area)\n # area_zeros1[buffer_area_bool] = 0\n # area_zeros2 = 255-np.zeros_like(buffer_area)\n # area_zeros2[buffer_area_bool] = 0\n # area_zeros3 = 255 - np.zeros_like(buffer_area)\n # buffer_area_img = np.dstack((area_zeros1, area_zeros3, area_zeros2))\n\n @staticmethod\n def color_select(img):\n \"\"\"\n Threshold filter: remove unwanted color pixels\n :param img:\n :return:\n \"\"\"\n blur = cv2.blur(img, (5, 5))\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n\n # low_blue = np.array([100,100,100])\n # high_blue = np.array([180,180,180])\n low_blue = np.array([10, 2, 10])\n high_blue = np.array([220, 32, 230])\n # Performs binarization (similar to the threshold() function) and can operate on multiple channels at once\n mask = cv2.inRange(hsv, low_blue, high_blue)\n res = cv2.bitwise_and(img, img, mask=mask)\n return res\n\n @staticmethod\n def edge_detect_biology(img,gray):\n \"\"\"\n Morphological processing\n :param gray: grayscale image\n :return: mask\n \"\"\"\n # Threshold segmentation\n\n ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n # thresh = cv2.adaptiveThreshold(gray, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,\n # thresholdType=cv2.THRESH_BINARY_INV, blockSize=int(1080 / 3 - 1), C=20)\n # thresh = cv2.adaptiveThreshold(gray, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,\n # thresholdType=cv2.THRESH_BINARY_INV, blockSize=9, C=5)\n\n cv2.imshow(\"closed_mask\", thresh)\n # Morphological opening to remove noise and small regions\n kernel = np.ones((3, 3), np.uint8)\n opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2) # opening\n # Connected-component analysis\n cv2.imshow(\"closed_mask2\", opening)\n\n # Canny edges\n canny_edge = cv2.Canny(opening,128,255)\n\n cv2.imshow(\"canny\",canny_edge)\n\n # Find the contours\n # contours, hierarchy = cv2.findContours(opening, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n #\n # for cnt in contours:\n # if len(cnt)>4:\n # # rect = cv2.minAreaRect(cnt)\n # # box = cv2.boxPoints(rect)\n # # box = np.int0(box)\n # # cv2.drawContours(img,[box],0,(0,0,255),2)\n # epsilon = 0.001 * cv2.arcLength(cnt, True)\n # approx = cv2.approxPolyDP(cnt, epsilon, True)\n # 
cv2.drawContours(img, approx, -1, (0, 255, 0), 5)\n # cv2.polylines(img, [approx], True, (0, 0, 255), 2)\n\n lines = cv2.HoughLinesP(canny_edge, 1, np.pi / 180, 100, minLineLength=120, maxLineGap=50)\n\n # whichever side the pen is on, that edge is uncertain\n up = []\n down = []\n left = []\n right = []\n coords = []\n for i,line in enumerate(lines):\n x1, y1, x2, y2 = line[0]\n if abs(y2-y1)>abs(x2-x1):\n # vertical line: the y coordinate changes most\n if x1<960 and x2<960:\n coord = \"left\"\n left.append(line[0])\n elif x1>960 and x2>960:\n coord = \"right\"\n right.append(line[0])\n else:\n coord = \"\"\n else:\n # horizontal line: the x coordinate changes most\n if y1<540 and y2<540:\n coord = \"up\"\n up.append(line[0])\n elif y1>540 and y2>540:\n coord = \"down\"\n down.append(line[0])\n else:\n coord = \"\"\n coords.append(coord)\n\n\n print(coords)\n for line in lines:\n print(\"lines = \",line)\n # get the coordinates\n x1, y1, x2, y2 = line[0]\n cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), thickness=5)\n cv2.imshow(\"image\", img)\n\n\n\n\n\n n, labels, stats, centroids = cv2.connectedComponentsWithStats(opening)\n area_label_list = list(np.where((stats[:, 4] < 1000000) & (stats[:, 4] > 100000))[0])\n # # find the largest connected region\n # max_area = 0\n # max_label = 0\n # for i in range(0, n):\n # if stats[i, cv2.CC_STAT_AREA] > max_area:\n # max_area = stats[i, cv2.CC_STAT_AREA]\n # max_label = i\n # stats[np.argsort(-stats[:, 4])][1:3, :]\n # # extract the largest connected region\n # mask = np.zeros_like(gray)\n # mask[labels == max_label] = 255\n\n # Extract the selected connected regions\n mask = np.zeros_like(gray)\n for area_label in area_label_list:\n mask[labels == area_label] = 255\n cv2.imshow(\"mask\", mask)\n # Morphological operations to fill the region\n kernel_5x5 = np.ones((5, 5), np.uint8)\n kernel_3x3 = np.ones((3, 3), np.uint8)\n mask_er = cv2.dilate(mask, kernel_5x5, iterations=50)\n closed_mask = cv2.morphologyEx(mask_er, cv2.MORPH_CLOSE, kernel_5x5)\n\n # # Contour detection\n # contours, hierarchy = cv2.findContours(closed_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return mask, closed_mask, mask_er\n @staticmethod\n def getLinearEquation(p1x, p1y, p2x, p2y):\n sign = 1\n a = p2y - p1y\n if a < 0:\n sign = -1\n a = sign * a\n b = sign * (p1x - p2x)\n c = sign * (p1y * p2x - p1x * p2y)\n return [a, b, c]\n\n\n # largest mask region\n @staticmethod\n def find_mask_max(closed_mask, gray):\n gray_copy = copy.deepcopy(gray)\n y, x = gray_copy.shape\n img_center_x = np.int32(x / 2)\n img_center_y = np.int32(y / 2)\n img_center_point = np.array([img_center_x, img_center_y])\n n, labels, stats, centroids = cv2.connectedComponentsWithStats(255 - closed_mask, connectivity=4)\n centroids_list = centroids.astype(np.int32)\n diff_dist = np.expand_dims(np.linalg.norm(img_center_point - centroids_list, axis=1), axis=1)\n diff_dist_min = diff_dist[1:].min() # smallest distance\n max_label = np.where(diff_dist == diff_dist_min)[0][0]\n # max_label = np.where(stats[:,4]==stats[1:,4].max())[0][0]\n max_label = max_label.astype(np.int32)\n gray_mask_bool = ~(labels == max_label)\n gray_mask = np.zeros_like(gray)\n gray_mask[gray_mask_bool] = 255\n gray_copy[gray_mask_bool] = 255\n return gray_copy, gray_mask, gray_mask_bool\n\n # Filter out the area outside the rectangle\n @staticmethod\n def rect_mask_filter(gray_mask, gray):\n gray_mask_copy = copy.deepcopy(gray_mask)\n gray_copy = copy.deepcopy(gray)\n mask_num = np.sum(gray_mask == 0, 1)\n di = np.diff(mask_num)\n di = np.append(np.zeros(1, int), di)\n y_up = np.where(di == di.max())[0][0]\n y_down = np.where(di == di.min())[0][0]\n gray_copy[:y_up + 1, :] = 255\n gray_copy[y_down:, :] = 255\n gray_mask_copy[:y_up + 1, :] = 255\n gray_mask_copy[y_down:, :] = 255\n # plt.figure(1)\n # plt.subplot(1,2,1)\n # 
plt.plot(list(range(len(mask_num))),mask_num)\n # plt.subplot(1,2,2)\n # plt.plot(list(range(len(di))),di)\n # plt.show()\n return gray_copy, gray_mask_copy\n @staticmethod\n def find_contours(gray_mask_ode):\n contours, hierarchy = cv2.findContours(255 - gray_mask_ode, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n screen_contours = []\n for contour in contours:\n area = cv2.contourArea(contour)\n x, y, w, h = cv2.boundingRect(contour)\n aspect_ratio = float(w) / h\n if aspect_ratio > 1 and aspect_ratio < 5:\n screen_contours.append(contour)\n return tuple(screen_contours)\n def drop_context(self):\n pass\n\ndef main_logic(scan_info,model):\n # Determine the TV board info from the scan info\n # Use the scan info to decide whether it is the same board; if so, do not clear the info\n # If it is a different board, clear the context info\n # Open the video stream\n # Get the frame borders\n # Get the detections\n # Path analysis and decision\n # Control the device based on the result\n # At start the light goes from off -> yellow\n # When the operation completes: yellow -> green -> off\n\n # video = cv2.VideoCapture(0)\n # video = cv2.VideoCapture(\"./data/20230228_4mm.mp4\")\n video = cv2.VideoCapture(\"./data/0314_left.mp4\")\n ID = Context.getID()\n print(\"ID = \",ID)\n i = 0\n\n Context.change(scan_info)\n\n ID = Context.getID()\n print(\"ID new = \", ID)\n K = np.array(\n [[1201.3967181633418, 0.0, 909.7424436183744], [0.0, 1203.635467250557, 534.1590658991514], [0.0, 0.0, 1.0]])\n D = np.array([[-0.0978537375125563], [-0.03841501213366177], [-0.03612764818273854], [0.05276041355808103]])\n width, height = 1920, 1080\n # width, height = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n # print(width, height)\n map1,map2 = calculate_para(K,D,width,height)\n edgeDetect = edgeDetectBase()\n while (video.isOpened()):\n \"\"\"\n 0. rectification\n 1. get the detection boxes and cache them\n 2. get the detection points\n 3. determine the detection region\n 4. decision logic\n 5. device control\n \"\"\"\n success, image = video.read()\n if not success:\n break\n\n # cv2.imwrite(\"./data2/{}.jpg\".format(i), image)\n i = i + 1\n\n # Rectify the image\n frame = rectify_video(image, map1, map2)\n\n # frame = cv2.resize(frame, (960, 540))\n # cv2.imshow('frame', frame)\n\n # Frame detection\n\n # Inference model\n results = model(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n detect_result = json.loads(results.pandas().xyxy[0].to_json(orient=\"records\"))\n # for box in detect_result:\n # l, t, r, b = int(box[\"xmin\"]),int(box[\"ymin\"]),int(box[\"xmax\"]),int(box[\"ymax\"])\n # confidence = round(box[\"confidence\"],1)\n # cls_name = box[\"name\"]\n # # outside = t - h >= 3\n # if cls_name==\"pen\" or cls_name==\"screen\":\n # cv2.rectangle(frame, (l, t), (r, b), (0, 255, 0), 2)\n # cv2.putText(frame, cls_name + \"-\" + str(confidence), (l, t), cv2.FONT_ITALIC, 1, (255, 0, 0), 2)\n # cv2.imshow(\"ff\",frame)\n\n # Current pen coordinates, pen box, and screen box\n pen_point,pen_rect,screen_rect = current_coord(detect_result,results)\n\n # Frame extraction\n edgeDetect.edge_handle(frame,screen_rect)\n\n #\n\n\n\n\n if cv2.waitKey(0) & 0xFF == ord('q'):\n break\n # Context.add(i)\n # if i%2==1:\n # Context.sub(i)\n # if i>100:\n # break\n\n # Decision analysis\n\n\n video.release()\n\n cv2.destroyAllWindows()\n return {\"i\":i,\"ID\":ID}\n\nif __name__==\"__main__\":\n import torch\n\n model = torch.hub.load(\"D:\\project\\demo\", \"custom\", 'D:\\project\\demo\\model_save\\ybest_screen', source=\"local\")\n model.conf = 0.6 # NMS confidence threshold\n model.iou = 0.2\n main_logic(1,model)\n\n # A082\n","repo_name":"Zhengzhuo0309/segment_best_TCL","sub_path":"handle_pic.py","file_name":"handle_pic.py","file_ext":"py","file_size_in_byte":20911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21001122617","text":"import tkinter as tk\nfrom tkinter import ttk\n\nfrom db.sql_db import 
DatabaseService\n\nimport interface.get_bmi_screen as BMI\nfrom interface.entry_with_placeholder import EntryWithPlaceholder\n\n\nclass RecordBMIDisplay(tk.Frame):\n def __init__(self, master, *args, **kwargs):\n super().__init__(master, *args, **kwargs)\n self.db = DatabaseService()\n\n self.name_entry = EntryWithPlaceholder(self, \"Name\")\n self.name_entry.grid(row=0, column=0, padx=5, pady=5)\n self.name_entry.bind(\"<Return>\", lambda e: self.record_bmi(event=e))\n\n self.age_entry = EntryWithPlaceholder(self, \"Age\")\n self.age_entry.grid(row=1, column=0, padx=5, pady=5)\n self.age_entry.bind(\"<Return>\", lambda e: self.record_bmi(event=e))\n\n self.grade_section_entry = EntryWithPlaceholder(\n self, \"Grade & Section (Ex.: 12-Faraday)\")\n self.grade_section_entry.grid(row=2, column=0, padx=5, pady=(5, 20))\n self.grade_section_entry.bind(\n \"<Return>\", lambda e: self.record_bmi(event=e))\n\n self.lrn_entry = EntryWithPlaceholder(self, \"LRN\")\n self.lrn_entry.grid(row=3, column=0, padx=5, pady=(5, 20))\n self.lrn_entry.bind(\"<Return>\", lambda e: self.record_bmi(event=e))\n\n enter_button = ttk.Button(\n self, text=\"Enter\", command=self.record_bmi)\n enter_button.grid(row=3, column=0, padx=5, pady=(5, 30))\n\n self.label_error = ttk.Label(self, foreground='red')\n self.label_error.grid(row=10, column=0, sticky=tk.S, padx=5)\n\n def record_bmi(self, event=\"\"):\n if BMI.user_info == \"\" or BMI.user_info is None:\n print(\"Invalid user info\")\n return\n\n print(BMI.user_info)\n self.label_error[\"text\"] = \"\"\n\n name = self.name_entry.get().strip().upper()\n age = self.age_entry.get().strip()\n lrn = self.lrn_entry.get().strip()\n grade_section = self.grade_section_entry.get().strip().upper()\n\n print(name, age, lrn, grade_section)\n\n if (name == \"Name\" or name == \"\" or age == \"Age\" or age == \"\" or lrn == \"LRN\" or lrn == \"\" or grade_section == \"Grade & Section (Ex.: 12-Faraday)\" or grade_section == \"\"):\n self.label_error[\"text\"] = \"Please fill all entry fields.\"\n return\n\n BMI.user_info.name = name\n BMI.user_info.age = age\n BMI.user_info.lrn = lrn\n BMI.user_info.grade, BMI.user_info.section = grade_section.split(\"-\")\n\n self.db.insert_user(BMI.user_info)\n\n self.label_error = ttk.Label(\n self, text=\"DATA SAVED\", foreground='green')\n self.label_error.grid(row=9, column=0, sticky=tk.S, padx=5)\n","repo_name":"zatrodev/bmi-calculating-gui","sub_path":"interface/record_bmi_screen.py","file_name":"record_bmi_screen.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21643131464","text":"#!/usr/bin/python\n\nimport sqlite3\nimport json\n\n# SQLite DB Name\nDB_Name = \"../db.sqlite3\"\n\ndef Sensor(jsonData):\n\t#Parse Data \n\tjson_Dict = json.loads(jsonData)\n\tSensorID = json_Dict['Sensor_ID']\n\tDate_and_Time = json_Dict['Time']\n\tTemperature = json_Dict['Temperature']\n\tHumidity = json_Dict['Humidity']\n\t#Push into DB Table\n\tconn = sqlite3.connect(DB_Name)\n\tconn.execute(\"INSERT INTO sensor_sensor (SensorID,Date_and_Time, Temperature,Humidity) \\\n\t VALUES (?,?,?,?)\",[SensorID,Date_and_Time, Temperature,Humidity])\n\tconn.commit()\n\tprint(\"Sensor created new value\")\n\tconn.close()\n\n\n","repo_name":"nam2297ptit/DoAnThietKeHeThongNhung","sub_path":"DHT11/Get_Data_to_DB.py","file_name":"Get_Data_to_DB.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"12558186570","text":"# @Time : 2018/10/16 0016 16:01\n# @Author : lzc\n# @File : 猫眼数据处理.py\nimport time\n\nimport requests\nurl = 'https://box.maoyan.com/promovie/api/box/second.json'\n# Fetch the data from the url\ndef getfilmdata():\n res = requests.get(url)\n\n # Check whether the data was fetched successfully\n json_data = None\n if res.status_code==200:\n json_data = res.json()\n else:\n print('Failed to fetch data')\n\n if json_data: # Get the list of box-office entries from the film box-office data (json object)\n films = json_data.get('data').get('list')\n for item in films:\n film = {}\n film['影片名'] = item.get('movieName')\n film['上映天数'] = item.get('releaseInfo')\n film['总票房'] = item.get('movieName')\n film['综合票房'] = item.get('sumBoxInfo')\n film['票房占比'] = item.get('boxInfo')\n film['排片场次'] = item.get('boxRate')\n film['排片占比'] = item.get('avgShowView')\n film['场均人次'] = item.get('avgSeatView')\n print(film)\n\nwhile True:\n getfilmdata()\n time.sleep(3)","repo_name":"liuzhongchuang/demo","sub_path":"爬虫数据抓取的方式/猫眼数据处理.py","file_name":"猫眼数据处理.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7981146010","text":"import cx_Oracle\nimport pymongo\nimport os\n# mongodb database name\ndbname=\"petrol_data\"\n# whether the source is an Oracle database\nis_oracle=True\n# mongodb collection for the raw data\nrawcollection=\"petrol_raw\" \nmongourl=\"mongodb://localhost:27017/\" # mongodb address\nos.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'\n\nlinkstr = \"puguang/puguang@192.168.43.22:1521/ORCL\" # oracle connection string\n'''\noracle connection string\n'''\nsqlstr = '''SELECT JH, JT, \n QXWJM, ZBMC, KC, CJXMID, CYJG, YXJDDJSD1,\n YXJDDJSD2, BZ, LRSJ, VRUSERNAME, VRUNITNAME,\n COLLECT_JOB_S,GL_UUID, DATASTATUS, CJLX,\n ORDERTIME, UPLOADFLAG\n from WL18'''\n#Oracle to mongodb mapping\nfield_raw = [\n 'JH', 'JT', 'QXWJM',\n 'ZBMC', 'KC', 'CJXMID',\n 'CYJG', 'YXJDDJSD1', 'YXJDDJSD2',\n 'BZ', 'LRSJ', 'VRUSERNAME', 'VRUNITNAME',\n 'COLLECT_JOB_S', 'GL_UUID', 'DATASTATUS', 'CJLX',\n 'ORDERTIME', 'UPLOADFLAG'\n]\ncollectionames= ['crud_dict', 'crud_field', 'crud_table', 'crud_user', 'model_config', \n'oil_field_config', 'task', 'users']\n#create crud meta schema\ndef create_schema(mongourl, dbname, collectionames):\n myclient = pymongo.MongoClient(mongourl)\n db = myclient[dbname]\n if len(db.list_collection_names()) == 0:\n [db.create_collection(item) for item in collectionames]\n print(\"Creating collections in mongodb for the node.js main service\")\n print(db.list_collection_names())\n# transfer the source rows into the mongodb collection\ndef transfer_data(linkstr, mongourl, dbname, field_list, is_oracle, datarepo, sqlstr):\n myclient = pymongo.MongoClient(mongourl)\n db = myclient[dbname]\n dbcols = db[datarepo]\n sys_conn = None\n if is_oracle:\n sys_conn = cx_Oracle.connect(linkstr)\n else:\n return\n cursor = sys_conn.cursor()\n cursor.execute(sqlstr.encode('utf-8'))\n result = cursor.fetchall()\n for row in result:\n dictionary = dict(zip(field_list,list(row)))\n dbcols.insert_one(dictionary)\n\nif __name__ == \"__main__\":\n create_schema(mongourl, dbname, collectionames)\n transfer_data(linkstr, mongourl, dbname, field_raw, is_oracle, rawcollection, sqlstr)","repo_name":"mini-tiger/loggingchartplus","sub_path":"python/taskutil/mongocreate.py","file_name":"mongocreate.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10976146289","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms import widgets\n\nfrom .models import *\n\nfrom crispy_forms.helper import FormHelper\nfrom 
crispy_forms.layout import Layout, Div, Field\n\nclass AddPhoto(forms.ModelForm):\n existing_images = forms.ModelChoiceField(queryset=Photo.objects.all(), empty_label=None)\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n class Meta:\n model = Photo\n fields = ['photo']\n\n\nclass AddFilm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n class Meta:\n model = Films\n fields = ['name', 'content', 'big_photo', 'trailerURL', 'type1', 'type2', 'type3']\n widgets = {\n 'content': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),\n\n }\n\n\nclass AddSEO(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n class Meta:\n model = SEO\n fields = ['url', 'title', 'keywords', 'description']\n widgets = {\n 'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),\n }\n\n\nclass AddCinema(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n class Meta:\n model = Cinemas\n fields = ['name', 'content', 'logo', 'conditions', 'photo_banner']\n widgets = {\n 'content': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),\n 'conditions': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),\n }\n\nclass AddHall(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n class Meta:\n model = Halls\n fields = ['number', 'content', 'schema_hall', 'photo_banner']\n widgets = {\n 'content': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),\n }","repo_name":"romankylik/KinoCMS","sub_path":"cinemas/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41161782220","text":"import subprocess\nimport json\nfrom random import randint\nimport tweepy\nfrom mastodon import Mastodon\nimport datetime\nimport sqlite3\nimport requests\n\nclass photoDB:\n def __init__(self, db_name):\n self.con = sqlite3.connect(db_name)\n self.cur = self.con.cursor()\n\n def update_database(self, data):\n self.cur.execute('''UPDATE photos\n SET posted_date = ?\n WHERE\n id = ?\n '''\n , data\n )\n self.con.commit()\n\n def get_random_row(self, collection):\n d = self.cur.execute('''\n SELECT record FROM photos\n WHERE collection=? 
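-- select only rows that are not yet posted, not flagged dont_post, and not marked invalid\n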
AND\n posted_date IS NULL AND\n dont_post IS NOT 1 AND\n invalid_record IS NOT 1\n ''',\n [collection])\n\n records = d.fetchall()\n\n # if there are no records found\n if len(records) == 0:\n return -1\n else:\n return records[randint(0,len(records)-1)][0]\n\n def update_row_status(self, date, id_, dont_post):\n self.cur.execute('''\n UPDATE photos\n SET posted_date=?, dont_post=?\n WHERE id=?\n ''',\n [date, dont_post, id_])\n\n self.con.commit()\n\n# break up description into valid length parts\ndef post_parts(desc, length):\n desc_parts = []\n\n # desc = 'Description: ' + desc\n\n if len(desc) <= length:\n desc_parts = [desc]\n\n else:\n l_idx = 0\n r_idx = length - 4\n finished = False\n while finished == False:\n # finish if we've reached the end of the string\n if r_idx >= len(desc)-1:\n finished = True\n desc_parts.append(desc[l_idx:])\n else:\n r_idx = desc[l_idx:r_idx].rfind(' ') + l_idx\n desc_parts.append(desc[l_idx:r_idx] + ' ...')\n\n l_idx = r_idx + 1\n r_idx += length - 4\n\n return desc_parts\n\n# randomly choose index of collection given weights\ndef choose_collection(weights):\n sum_w = 0\n for i in weights: sum_w+= i\n\n # find out which set r is in\n r = randint(1,sum_w)\n for i in range(0, len(weights)):\n sum_ = 0\n for j in range(0,i+1): sum_+=weights[j]\n if r <= sum_:\n return(i)\n\ndef get_metadata(url):\n \n # get json \n t = json.loads(requests.get(url).text)\n\n # if this if from streetcar collection:\n if 'msn' in url:\n try:\n keys = list(t['response']['document'].keys())\n metadata = {}\n # if invalid record\n except:\n return {}\n \n # get title\n metadata.update({'title':t['response']['document']['title_ssi']})\n metadata.update({'permis':'Minnesota Streetcar Museum'})\n metadata.update({'id':t['response']['document']['id'].split(':')[1]})\n \n # get description\n if 'description_ts' in keys:\n metadata.update({'descri':t['response']['document']['description_ts']})\n #else:\n # metadata.update({'descri':''})\n \n if 'dat_tesi' in keys:\n metadata.update({'year':t['response']['document']['dat_tesi']})\n \n if 'city_ssim' in keys:\n cities = t['response']['document']['city_ssim']\n if 'Minneapolis' in cities:\n metadata.update({'city':'Minneapolis'})\n else:\n metadata.update({'city':t['response']['document']['city_ssim'][0]})\n \n #m.update{'title':t['response']['document']['title_ssi']}\n \n # university of minnesota archives\n elif 'p16022coll175' in url:\n try:\n keys = list(t.keys())\n metadata = {}\n # if invalid record\n except:\n return {}\n \n metadata.update({'title':t['title']})\n metadata.update({'permis':t['contributing_organization_name']})\n metadata.update({'id':t['id'].split(':')[1]})\n \n # get description\n if 'description' in keys:\n metadata.update({'descri':t['description']})\n \n if 'date_created' in keys:\n metadata.update({'year':t['date_created'][0]})\n \n if 'city' in keys:\n cities = t['city']\n # initialize\n metadata.update({'city':t['city'][0]})\n for city in cities:\n # format typically like 'Minneapolis; St Paul'\n if 'minneapolis' in city.lower():\n metadata.update({'city':'Minneapolis'})\n\n # else if this is hclib photo\n else:\n try:\n metadata = t\n \n fields_to_remove = []\n for field in t:\n if metadata[field] == {}:\n fields_to_remove.append(field)\n for field in fields_to_remove:\n metadata.pop(field)\n \n # manually add this field in as 'id' for compatibility purposes\n metadata.update({'id': metadata['dmrecord']})\n \n except Exception as e:\n print('failure to get metadata')\n print(e)\n print(metadata)\n return 
{}\n\n return metadata\n\ndef get_photo(url, out_image):\n\n cmd = 'wget --user-agent=\"Mozilla\" ' + url + ' --output-file=/dev/null -O ' + out_image\n\n proc = subprocess.Popen(cmd, shell=True)\n proc.wait()\n\n # check if photo doesn't exist from source and return 1 if it doesn't\n ls = subprocess.check_output('ls -lt ' + out_image, shell=True).decode('utf-8')\n\n # return false if photo not created for some reason\n try:\n size = ls.split(' ')[4]\n except:\n return False\n\n if size == '0':\n return False\n else:\n return True\n\n# return true if bad word in title of description string\ndef bad_word_in_post(title, descr, subj, input_file):\n\n bad_word_list = []\n with open(input_file) as f:\n for line in f:\n bad_word = line.strip('\\n')\n bad_word_list.append(bad_word) \n\n for word in bad_word_list:\n if word in title.lower() or word in descr.lower() or word in subj.lower():\n return True\n return False\n\ndef get_api_keys(input_file):\n keys = {}\n with open(input_file) as f:\n for line in f:\n l = line.strip('\\n')\n l = l.split(',')\n keys.update({l[0]:l[1]})\n\n return keys\n\ndef create_send_post(collection, photo_id):\n\n # connect to twitter\n keys = get_api_keys('api_keys.txt')\n auth = tweepy.OAuthHandler(keys['api_key'], keys['api_key_secret'])\n auth.set_access_token(keys['access_token'], keys['access_token_secret'])\n api = tweepy.API(auth)\n\n # connect to mastodon\n mast_key = get_api_keys('api_keys_mastodon.txt')\n mastodon = Mastodon(\n access_token = mast_key['access_token_secret'],\n api_base_url = 'https://botsin.space/'\n )\n\n # images we'll be pulling\n if collection == 'msn':\n full_url = 'https://cdm16022.contentdm.oclc.org/digital/iiif/msn/' + str(photo_id) + '/full/2000,2000/0/default.jpg'\n metadata_url = 'https://collection.mndigital.org//catalog/msn:' + str(photo_id) + '.json'\n\n out_image = 'images/' + collection + photo_id + '.jpg'\n \n # umn archives \n elif collection == 'p16022coll175':\n full_url = 'https://cdm16022.contentdm.oclc.org/digital/iiif/p16022coll175/' + str(photo_id) + '/full/2000,2000/0/default.jpg'\n metadata_url = 'https://umedia.lib.umn.edu/item/p16022coll175:' + str(photo_id) + '.json'\n\n out_image = 'images/' + collection + photo_id + '.jpg' \n \n # if hclib collection:\n else:\n base_url = 'https://digitalcollections.hclib.org/'\n full_url = base_url + 'digital/download/collection/' + collection + '/id/' + str(photo_id) + '/size/large'\n metadata_url = base_url + 'digital/bl/dmwebservices/index.php?q=dmGetItemInfo/' + collection + '/' + str(photo_id) + '/json'\n\n out_image = 'images/' + collection + photo_id + '.jpg'\n\n # try to create photo and get metadata\n photo_created = get_photo(full_url, out_image)\n metadata = get_metadata(metadata_url)\n len_metadata = len(metadata)\n\n # return false if photo or metadata weren't retrieved or \n # index of metadata doesn't match intended value\n if photo_created == True and len_metadata > 1 and str(metadata['id']) == str(photo_id):\n \n metadata_keys = list(metadata.keys())\n title = metadata['title']\n\n # get date of tweet, if exists\n if 'year' in metadata_keys:\n date = metadata['year']\n elif 'decade' in metadata_keys:\n date = metadata['decade']\n\n # get attribution\n perm_exists = False\n if 'permis' in metadata_keys:\n\n # assuming normal format for hclb\n if ':' in metadata['permis']:\n try:\n source = metadata['permis'].split(':')[1]\n source = source.strip(' ').strip('\"').strip(\"'\")\n perm_exists = True\n\n # if the format isn't as expected:\n except:\n perm_exists = 
False\n source = ''\n #source = 'Hennepin County Library'\n\n # different format for glanton\n if 'glanton' in metadata['permis'].lower():\n source = 'Hennepin County Library and the children of John Glanton'\n \n if 'Streetcar Museum' in metadata['permis']:\n source = metadata['permis']\n perm_exists = True\n \n if 'University Archives' in metadata['permis']:\n source = metadata['permis']\n perm_exists = True \n \n # if the permissions say you need to contact them, don't post\n if 'viewed' in metadata['permis'] and 'specialcoll@hclib.org' in metadata['permis']:\n source = ''\n perm_exists = False\n\n # if there is no permissions field \n else:\n perm_exists = True\n source = 'Hennepin County Library'\n\n # make main tweet\n tweet1 = title\n\n if 'year' in metadata_keys or 'decade' in metadata_keys:\n tweet1 += ' (' + date + ')'\n\n if 'addres' in metadata_keys:\n tweet1 += '\\n' + metadata['addres']\n\n tweet1 += '\\nSource: ' + source\n \n # trim long tweets\n # tweet1 = tweet1[0:280]\n\n if 'descri' in metadata_keys:\n description = metadata['descri']\n tweet1 += '\\n\\n' + description\n else:\n description = ''\n\n if 'subjec' in metadata_keys:\n subject = metadata['subjec']\n else:\n subject = ''\n \n print(tweet1)\n\n\n # check that the photo was taken in minneapolis\n # if there's no city field, assume it was in mpls\n in_mpls = False\n if 'city' in metadata_keys:\n city = metadata['city']\n cities = ['minneapolis','saint anthony and minneapolis', 'saint anthony', 'richfield', 'hopkins', 'saint louis park', 'st. louis park', 'robbinsdale', 'fort snelling', 'golden valley', 'columbia heights']\n if city.lower() in cities:\n in_mpls = True\n else:\n in_mpls = False\n\n # assume in minneapolis is minneapolis mentioned. this is\n # redundant for now\n elif 'minneapolis' in title.lower() or 'minneapolis' in description.lower() or 'minneapolis' in subject.lower():\n in_mpls = True\n\n # assume it's in minneapolis if no city provided\n else:\n in_mpls = True\n\n # check for offensive content\n # post if non-offensive and there are permissions\n dont_post = bad_word_in_post(title, description, subject, 'bad_words.txt')\n if dont_post == False and perm_exists == True and in_mpls == True:\n # don't tweet anymore\n # print('sending tweet')\n # status = api.update_with_media(out_image, tweet1)\n\n print('sending toot')\n toot_len = 500\n post_text = post_parts(tweet1, toot_len)\n mast_media = mastodon.media_post(out_image)\n toot = mastodon.status_post(post_text[0], media_ids=mast_media)\n \n prev_toot_id = toot.id\n for i in range(1, len(post_text)):\n # don't tweet anymore\n # tweet thread\n # reply = api.update_status(status=d, \n # in_reply_to_status_id=prev_id, \n # auto_populate_reply_metadata=True)\n # prev_id = reply.id\n\n # mastodon thread\n mast_reply = mastodon.status_post(status=post_text[i], in_reply_to_id=prev_toot_id)\n prev_toot_id = mast_reply.id\n\n return True\n\n # if there's a filtered word in the post\n else:\n print('bad word: ' + str(dont_post))\n print('permission to post: ' + str(perm_exists))\n print('in mpls: ' + str(in_mpls))\n return False\n\n # failed if couldn't get photo, metadata, or metadata id doesn't match\n else:\n print(' failed')\n return False\n\n# main loop for sending posts\n# try to send until we successfully get an image\nif __name__ == '__main__':\n\n time = datetime.datetime.now()\n\n # coll18 is really old photos, coll1 is glanton photos, coll 175 is university archives\n collections = ['CPED', 'MplsPhotos', 'FloydKelley', 'MPRB', 'p17208coll18', 
'p17208coll1', 'msn', 'p17208coll15', 'p16022coll175']\n max_idx = [21250, 60000, 212, 251, 1100, 820, 2776, 1406, 21899]\n weights = [20, 13, 1, 1, 5, 3, 10, 5, 10]\n\n # open connection to photo database\n db = photoDB('photoDB.db')\n\n sum_weights = 0\n for i in weights: sum_weights+=i\n\n # try until a photo is found and posted\n tries = 0\n posted = False\n while posted == False and tries < 10:\n\n # randomly choose collection based on weights given\n coll = choose_collection(weights)\n\n # randomly choose photo in collection\n # if it is -1, there are no records left\n photo_idx = db.get_random_row(collections[coll])\n \n #photo_idx = randint(1,max_idx[coll]) \n if int(photo_idx) != -1:\n posted = create_send_post(collections[coll], str(photo_idx))\n\n # update database with whether this was posted or not\n if posted == False:\n db.update_row_status(time.strftime('%d/%m/%y %H:%M:%S'), collections[coll] + '_' + str(photo_idx), 1)\n else:\n db.update_row_status(time.strftime('%d/%m/%y %H:%M:%S'), collections[coll] + '_' + str(photo_idx), 0)\n\n # write to log\n f = open('post_log.txt','a')\n f.write(time.strftime('%d/%m/%y %H:%M:%S') + ',' + collections[coll] + ',' + str(photo_idx) + '\\n')\n f.close()\n\n tries += 1\n\n db.con.close()\n","repo_name":"sampenders/photo_bot","sub_path":"send_tweet.py","file_name":"send_tweet.py","file_ext":"py","file_size_in_byte":15132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9690218684","text":"\ndb=\"\"\"\n11767,Polaris\n7588,Achernar\n21421,Aldebaran\n24436,Rigel\n24608,Capella\n27989,Betelgeuse\n30438,Canopus\n32349,Sirius\n37279,Procyon\n37826,Pollux\n49669,Regulus\n60718,Acrux\n62434,Mimosa\n65474,Spica\n68702,Hadar\n69673,Arcturus\n71683,Rigil Kent\n80763,Antares\n91262,Vega\n97649,Altair\n102098,Deneb\n113368,Fomalhaut\n\"\"\"\n\nimport numpy as np\nimport ephem\nimport stars as hip\nimport planisfunc as plf\n\noffset = np.pi /100.\n\nclass hip20_nameE:\n def __init__(self, observe, ax, zo, legend):\n\n nameE = {}\n s = []\n for line in db.split('\\n'):\n try:\n hipno = line.split(',')[0]\n name = line.split(',')[1]\n nameE[hipno]=name\n starz = hip.star(str(hipno))\n s.append(starz)\n\n except:\n pass\n [s[i].compute(observe) for i in range(len(s))]\n\n NAME = np.array([body.name for body in s])\n ALT = np.array([float(body.alt) for body in s])\n AZ = np.array([float(body.az) for body in s])\n\n lim = np.pi /12.\n\n X, Y, ret = plf.polarXY(AZ, ALT, lim, name=NAME)\n NAME = ret['name']\n\n X += offset\n Y += offset\n\n for n in range(len(NAME)):\n name = nameE[NAME[n]]\n\n ax.text(X[n], Y[n], name, color='white',\n fontsize=10, alpha=0.8, zorder=zo )\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import sky\n from observe import *\n\n fig, ax = plt.subplots()\n\n ax.set_aspect('equal')\n sf = sky.skyfield(obs, ax, zo=0, legend=True)\n na = hip20_nameE(obs, ax, zo=1, legend=True)\n\n plt.xlim(- np.pi/2., np.pi/2.)\n plt.ylim(- np.pi/2., np.pi/2.)\n\n ax.axis('off')\n plt.show()\n","repo_name":"IchiroYoshida/python_public","sub_path":"planis/nameE.py","file_name":"nameE.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"29968033254","text":"import os\nimport pytest\n\n\nfrom qcore.utils import load_sim_params as mocked_load_sim_params\n\nfrom workflow.automation.tests.test_common_set_up import get_fault_from_rel, set_up\n\nimport 
workflow.automation.submit.submit_hf\n\n\n@pytest.mark.usefixtures(\"init_scheduler\")\ndef test_main(set_up, mocker):\n \"\"\"No return value. Just check that it runs without crashing\"\"\"\n\n mocker.patch(\n \"workflow.automation.submit.submit_hf.est.est_HF_chours_single\",\n lambda *args, **kwargs: (2, 0.05, 40),\n )\n\n for root_path, realisation in set_up:\n rel_dir = os.path.join(\n root_path, \"CSRoot\", \"Runs\", get_fault_from_rel(realisation), realisation\n )\n # Fault will probably change on each set of data, so reset this every time\n mocker.patch(\n \"workflow.automation.submit.submit_hf.utils.load_sim_params\",\n lambda x: mocked_load_sim_params(os.path.join(rel_dir, x)),\n )\n\n workflow.automation.submit.submit_hf.main(\n submit=None,\n machine=\"default\",\n ncores=80,\n rel_dir=rel_dir,\n retries=0,\n seed=None,\n version=None,\n write_directory=None,\n )\n","repo_name":"ucgmsim/slurm_gm_workflow","sub_path":"workflow/automation/tests/test_submit_scripts/test_submit_hf.py","file_name":"test_submit_hf.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1314373843","text":"# pylint: disable=protected-access\n\"\"\"Testing script\"\"\"\nimport pytest\nfrom PIL import ImageChops, Image\nimport sys\nimport os\nimport os.path\nfrom hypothesis import given\nfrom hypothesis.strategies import text, binary, characters\nfrom shutil import copy2\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n# noinspection PyPep8\nfrom steganographer.steganographer import Steganographer\n# noinspection PyPep8\nfrom steganographer.steganographer import _unpack_image, _pack_image, _open_bin_file, _write_bin_file, \\\n _open_image_file, _write_image_file\n\nCLEAN_PNG_LOCATION = \"tests/cleanImage.png\"\n\n\ndef test_generate_header():\n \"\"\"The header is generated as expected\"\"\"\n stegs = Steganographer()\n header = bytes(stegs._header._HEADER_TITLE, 'utf-8') + \\\n bytes(stegs._header.data_len.to_bytes(stegs._header._HEADER_DATA_SIZE, \"little\")) + \\\n bytes(stegs._header.bits_used.to_bytes(stegs._header._HEADER_BITS_SIZE, \"little\")) + \\\n bytes(stegs._header.file_name_len.to_bytes(stegs._header._HEADER_FILE_LENGTH_SIZE, \"little\"))\n\n assert header == stegs._generate_header(stegs._header.data_len, stegs._header.bits_used, \"\")\n\n\ndef test_retrieve_header():\n \"\"\"The header is retrieved as expected\"\"\"\n stegs = Steganographer()\n test_message = \"12345\".encode('utf-8')\n test_data_len = len(test_message)\n test_bits_used = 1\n test_file_name = \"test_retrieve_header.txt\"\n test_file_name_len = len(test_file_name)\n test_data = bytes(b'\\x01' * 1000)\n\n test_header = stegs._generate_header(test_data_len, test_bits_used, test_file_name)\n hidden_data = stegs._hide_data(test_data[:stegs._header.header_length * stegs._BYTELEN], test_header)\n hidden_data += stegs._hide_data(test_data[stegs._header.header_length * stegs._BYTELEN:], test_message)\n header_retrieved = stegs._retrieve_header(hidden_data)\n\n assert header_retrieved is True\n assert stegs._header.data_len == test_data_len\n assert stegs._header.bits_used == test_bits_used\n assert stegs._header.file_name_len == test_file_name_len\n assert stegs._header.file_name.decode('utf-8') == test_file_name\n\n\ndef test_hide_byte():\n \"\"\"The _hide_byte function does hide a byte and returns the test_data with that byte hidden.\"\"\"\n stegs = Steganographer()\n test_data = bytes(b'\\x01' * stegs._BYTELEN)\n 
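# 'A' is 0x41 = 0b01000001, so only carrier bytes 1 and 7 below should end up with their low bit set.\n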
data_to_hide = bytes('A', 'utf-8')\n solution_data = bytearray(stegs._BYTELEN)\n solution_data[1] = 1\n solution_data[7] = 1\n\n assert stegs._hide_byte(test_data, data_to_hide[0]) == solution_data\n\n\ndef test_reveal_byte():\n \"\"\"The _reveal_byte function returns a bytes object of the hidden byte.\"\"\"\n stegs = Steganographer()\n test_data = bytearray(stegs._BYTELEN)\n test_data[1] = 1\n test_data[7] = 1\n solution_data = bytes('A', 'utf-8')\n\n assert stegs._reveal_byte(test_data) == solution_data\n\n\n@given(data_to_hide=binary(min_size=1, max_size=1))\ndef test_hide_reveal_byte_inverse(data_to_hide):\n \"\"\"Anything hidden by _hide_byte is revealed by _reveal_byte.\"\"\"\n clean_data = bytes(b'\\x01' * 8)\n\n stegs = Steganographer()\n revealed_byte = stegs._reveal_byte(stegs._hide_byte(clean_data, data_to_hide[0]))\n assert revealed_byte == data_to_hide\n\n\ndef test_hide_string():\n \"\"\"Takes in a string and a bytes object and hides the string in that bytes object.\"\"\"\n stegs = Steganographer()\n test_data = bytes(b'\\x01' * stegs._BYTELEN * 3)\n solution_data = bytearray(stegs._BYTELEN * 3)\n solution_data[1] = 1\n solution_data[7] = 1\n solution_data[9] = 1\n solution_data[14] = 1\n solution_data[17] = 1\n solution_data[22] = 1\n solution_data[23] = 1\n\n assert stegs._hide_string(test_data, 'ABC') == solution_data\n\n\ndef test_reveal_string():\n \"\"\"Returns a string representation of the data that was hidden in the test_data.\"\"\"\n solution = 'ABC'\n stegs = Steganographer()\n stegs._header.data_len = len(solution)\n test_data = bytearray(stegs._BYTELEN * 4)\n test_data[1] = 1\n test_data[7] = 1\n test_data[9] = 1\n test_data[14] = 1\n test_data[17] = 1\n test_data[22] = 1\n test_data[23] = 1\n\n assert stegs._reveal_string(test_data) == solution\n\n\n@given(string_to_hide=text(characters(min_codepoint=1, blacklist_categories=('Cc', 'Cs'))))\ndef test_hide_reveal_string_inverse(string_to_hide):\n \"\"\"Anything hidden by _hide_string is revealed by _reveal_string.\"\"\"\n clean_data = bytes(b'\\x01' * 5000)\n stegs = Steganographer()\n stegs._header.data_len = len(string_to_hide.encode('utf-8'))\n revealed_string = stegs._reveal_string(stegs._hide_string(clean_data, string_to_hide))\n assert revealed_string == string_to_hide\n\n\ndef test_hide_data():\n \"\"\"Will hide one bytes object inside another.\"\"\"\n stegs = Steganographer()\n test_data = bytes(b'\\x01' * stegs._BYTELEN * 4)\n data_to_hide = bytes('ABC', 'utf-8')\n stegs._header.data_len = len(data_to_hide)\n solution_data = bytearray(stegs._BYTELEN * 4)\n solution_data[1] = 1\n solution_data[7] = 1\n solution_data[9] = 1\n solution_data[14] = 1\n solution_data[17] = 1\n solution_data[22] = 1\n solution_data[23] = 1\n solution_data[24:] = b'\\x01' * stegs._BYTELEN\n\n assert stegs._hide_data(test_data, data_to_hide) == solution_data\n\n\ndef test_hide_data_partial():\n \"\"\"Will work when given a bytes object that is too short to contain the full data to be hidden.\"\"\"\n stegs = Steganographer()\n test_data = bytes(b'\\x01' * stegs._BYTELEN * 3)\n data_to_hide = bytes('ABC', 'utf-8')\n stegs._header.data_len = len(data_to_hide)\n solution_data = bytearray(stegs._BYTELEN * 3)\n solution_data[1] = 1\n solution_data[7] = 1\n solution_data[9] = 1\n solution_data[14] = 1\n solution_data[17] = 1\n solution_data[22] = 1\n solution_data[23] = 1\n\n # Testing when only half a byte is passed in for the data that contains the hidden text.\n assert stegs._hide_data(test_data[:4], data_to_hide) == 
solution_data[:4]\n\n\ndef test_reveal_data():\n \"\"\"Will return the correct data that is hidden inside the test_data.\"\"\"\n solution_data = bytes('ABC', 'utf-8')\n stegs = Steganographer()\n stegs._header.data_len = len(solution_data)\n test_data = bytearray(stegs._BYTELEN * 3)\n test_data[1] = 1\n test_data[7] = 1\n test_data[9] = 1\n test_data[14] = 1\n test_data[17] = 1\n test_data[22] = 1\n test_data[23] = 1\n\n assert stegs._reveal_data(test_data) == solution_data\n\n\ndef test_reveal_data_partial():\n \"\"\"\n Will return as much data as possible.\n\n When the container bytes object passed in is too small for all the data to be hidden.\n \"\"\"\n stegs = Steganographer()\n solution_data = bytes('AB@', 'utf-8')\n test_data = bytearray(stegs._BYTELEN * 3) # Will contain 'ABC' but will be truncated when passed to _reveal_data\n test_data[1] = 1\n test_data[7] = 1\n test_data[9] = 1\n test_data[14] = 1\n test_data[17] = 1\n test_data[22] = 1\n test_data[23] = 1\n stegs._data_len = len('ABC')\n\n assert stegs._reveal_data(test_data[:-stegs._BYTELEN // 2]) == solution_data\n\n\n@given(string_to_hide=text(characters(min_codepoint=1, blacklist_categories=('Cc', 'Cs'))))\ndef test_hide_reveal_data_inverse(string_to_hide):\n \"\"\"Anything hidden by _hide_data is revealed by _reveal_data.\"\"\"\n clean_data = bytes(b'\\x01' * 5000)\n data_to_hide = bytes(string_to_hide, 'utf-8')\n\n stegs = Steganographer()\n stegs._header.data_len = len(string_to_hide.encode('utf-8'))\n revealed_data = stegs._reveal_data(stegs._hide_data(clean_data, data_to_hide))\n\n assert revealed_data == data_to_hide\n\n\ndef test_exact_data_with_string_inverse():\n \"\"\"The string entered is the string returned. The storing data is the exact length needed.\"\"\"\n test_string = \"This is a test String\"\n stegs = Steganographer()\n stegs._header.data_len = len(test_string)\n blank_data = bytes(b'\\x01' * len(test_string) * stegs._BYTELEN)\n\n revealed_string = stegs._reveal_string(stegs._hide_string(blank_data, test_string))\n\n assert test_string == revealed_string\n\n\ndef test_exact_data_with_data_inverse():\n \"\"\"The data entered is the data returned. 
The storing data is the exact length needed.\"\"\"\n test_string = \"This is a test String\"\n test_data = bytes(test_string, 'utf-8')\n stegs = Steganographer()\n stegs._header.data_len = len(test_string)\n blank_data = bytes(b'\\x01' * len(test_string) * stegs._BYTELEN)\n\n revealed_data = stegs._reveal_data(stegs._hide_data(blank_data, test_data))\n\n assert test_data == revealed_data\n\n\ndef test_short_data_with_string_inverse():\n \"\"\"When the data is too small, by a full byte, everything that can be returned is returned.\"\"\"\n test_string = \"This is a test String\"\n stegs = Steganographer()\n stegs._header.data_len = len(test_string)\n blank_data = bytes(b'\\x01' * (len(test_string) * stegs._BYTELEN - stegs._BYTELEN))\n\n revealed_string = stegs._reveal_string(stegs._hide_string(blank_data, test_string))\n\n assert test_string[:-1] == revealed_string\n\n\ndef test_short_data_with_data_inverse():\n \"\"\"When the data is too small, by a full byte, everything that can be returned is returned.\"\"\"\n test_string = \"This is a test String\"\n test_data = bytes(test_string, 'utf-8')\n stegs = Steganographer()\n stegs._header.data_len = len(test_string)\n blank_data = bytes(b'\\x01' * (len(test_string) * stegs._BYTELEN - stegs._BYTELEN))\n\n revealed_data = stegs._reveal_data(stegs._hide_data(blank_data, test_data))\n\n assert test_data[:-1] == revealed_data\n\n\ndef test_short_partial_data_string_inverse():\n \"\"\"When the data is too small, by a half byte, everything that can be returned is returned.\"\"\"\n test_string = \"This is a test String\"\n stegs = Steganographer()\n stegs._header.data_len = len(test_string)\n solution_string = test_string[:-1] + chr(ord(test_string[-1]) >> stegs._BYTELEN // 2 << stegs._BYTELEN // 2)\n blank_data = bytes(b'\\x01' * (len(test_string) * stegs._BYTELEN - stegs._BYTELEN // 2))\n\n revealed_string = stegs._reveal_string(stegs._hide_string(blank_data, test_string))\n\n assert solution_string == revealed_string\n\n\ndef test_short_partial_data_w_data_inverse():\n \"\"\"When the data is too small, by a half byte, everything that can be returned is returned.\"\"\"\n test_string = \"This is a test String\"\n test_data = bytes(test_string, 'utf-8')\n solution_data = bytearray(test_data)\n stegs = Steganographer()\n stegs._header.data_len = len(test_string)\n solution_data[-1] = solution_data[-1] >> stegs._BYTELEN // 2 << stegs._BYTELEN // 2\n blank_data = bytes(b'\\x01' * (len(test_string) * stegs._BYTELEN - stegs._BYTELEN // 2))\n\n revealed_data = stegs._reveal_data(stegs._hide_data(blank_data, test_data))\n\n assert solution_data == revealed_data\n\n\ndef test_unpack_image():\n \"\"\"Unpacking returns a bytes object full of all the pixels flattened in one dimension.\"\"\"\n pixel = 1, 2, 3, 4\n pixel_length = len(pixel)\n solution_pixels = bytes(list(pixel * pixel_length))\n test_pixels = []\n\n for _ in range(pixel_length):\n test_pixels.append(pixel)\n\n unpacked = _unpack_image(test_pixels)\n\n assert unpacked[0] == len(pixel)\n assert unpacked[1] == solution_pixels\n\n\ndef test_pack_image():\n \"\"\"Packing returns a list with tuples of length 4.\"\"\"\n pixel = 1, 2, 3, 4\n pixel_length = len(pixel)\n test_pixels = pixel_length, list(pixel * pixel_length)\n solution_pixels = []\n\n for _ in range(pixel_length):\n solution_pixels.append(pixel)\n\n packed = _pack_image(test_pixels)\n\n assert packed == solution_pixels\n\n\ndef test_unpack_pack_inverse():\n \"\"\"Pixels unpacked by _unpack_image are correctly packed by _pack_image.\"\"\"\n 
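# Round-trip property on four identical RGBA pixels: _pack_image(_unpack_image(x)) == x.\n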
pixel = 1, 2, 3, 4\n test_pixels = []\n\n for _ in range(4):\n test_pixels.append(pixel)\n\n assert _pack_image(_unpack_image(test_pixels)) == test_pixels\n\n\ndef test_open_bin_file():\n \"\"\"Opening a file works.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n file_data = _open_bin_file(clean_file)\n\n with open(clean_file, 'rb') as file:\n assert file_data == file.read()\n\n with pytest.raises(SystemExit):\n _open_bin_file(\"OpenBinFileThatDoesNotExist.nope\")\n\n\ndef test_write_bin_diff_content():\n \"\"\"The file written is different from the one read, after hiding a message.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"tests/dirtyImage_test_write_bin_file_diff_content.png\"\n\n stegs = Steganographer()\n data = stegs._hide_string(_open_bin_file(clean_file), \"Hidden text from test_write_bin_diff_content.\")\n _write_bin_file(dirty_file, data)\n\n with open(clean_file, 'rb') as clean, open(dirty_file, 'rb') as dirty:\n assert clean.read() != dirty.read()\n\n os.remove(dirty_file)\n\n\ndef test_write_bin_file_size_same():\n \"\"\"The file written is the same size as the one read, after hiding a message.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"tests/dirtyImage_test_write_bin_file_size_same.png\"\n\n stegs = Steganographer()\n data = stegs._hide_string(_open_bin_file(clean_file), \"Hidden text from test_write_bin_file_size_same.\")\n _write_bin_file(dirty_file, data)\n\n # Getting the file sizes for the clean and dirty files.\n with open(clean_file, 'rb') as clean:\n clean.seek(0, 2)\n clean_file_size = clean.tell()\n\n with open(dirty_file, 'rb') as dirty:\n dirty.seek(0, 2)\n dirty_file_size = dirty.tell()\n\n assert clean_file_size == dirty_file_size\n\n os.remove(dirty_file)\n\n\ndef test_open_image_file():\n \"\"\"Opening an image file returns the data in the file in a one dimensional list.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n image_data = _open_image_file(clean_file)\n\n with Image.open(clean_file) as clean:\n pixels = clean.getdata()\n\n assert image_data[1] == _unpack_image(pixels)[1]\n\n with pytest.raises(SystemExit):\n _open_image_file(\"OpenImageFileThatDoesNotExist.nope\")\n\n\ndef test_write_image_file_valid():\n \"\"\"The image created is not corrupt.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"tests/dirtyImage_test_write_image_file_valid_image.png\"\n\n stegs = Steganographer()\n clean_data = _open_image_file(clean_file)\n dirty_data = clean_data[0], stegs._hide_string(clean_data[1], \"Hidden text from test_write_image_file_valid.\")\n output_file = _write_image_file(dirty_file, clean_file, dirty_data)\n\n try:\n Image.open(output_file)\n except OSError:\n pytest.fail(\"Image is corrupt \" + output_file)\n\n os.remove(output_file)\n\n\ndef test_write_image_diff_content():\n \"\"\"Writing out an image creates a different image at the bit level.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"tests/dirtyImage_test_write_image_file_diff_content.png\"\n\n stegs = Steganographer()\n clean_data = _open_image_file(clean_file)\n dirty_data = clean_data[0], stegs._hide_string(clean_data[1], \"Hidden text from test_write_image_diff_content.\")\n output_file = _write_image_file(dirty_file, clean_file, dirty_data)\n\n with open(clean_file, 'rb') as clean, open(output_file, 'rb') as dirty:\n assert clean.read() != dirty.read()\n\n os.remove(output_file)\n\n\ndef compare_images(img1, img2):\n \"\"\"Expects strings of the locations of two images. 
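(Editor's aside: the metric below is the sum of all channel values of PIL's
    ImageChops.difference image, i.e. a crude L1 distance between the two images;
    the `< 500`-style thresholds used throughout these tests bound how visibly
    LSB embedding may alter an image.)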
Will return an integer representing their difference\"\"\"\n with Image.open(img1) as img1, Image.open(img2) as img2:\n # Calculate a difference image that is the difference between the two images.\n diff = ImageChops.difference(img1, img2)\n\n return sum(_unpack_image(diff.getdata())[1])\n\n\ndef test_write_image_same_image():\n \"\"\"Writing out an image creates the same image when viewed generally.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"tests/dirtyImage_test_write_image_file_same_image.png\"\n\n stegs = Steganographer()\n clean_data = _open_image_file(clean_file)\n dirty_data = clean_data[0], stegs._hide_string(clean_data[1], \"Hidden text from test_write_image_same_image.\")\n output_file = _write_image_file(dirty_file, clean_file, dirty_data)\n\n assert compare_images(clean_file, output_file) < 500\n\n os.remove(output_file)\n\n\ndef test_write_image_diff_size():\n \"\"\"Writing out an image creates a file of a different size, if the file was not generated by PIL originally.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"tests/dirtyImage_test_write_image_file_diff_size.png\"\n\n stegs = Steganographer()\n clean_data = _open_image_file(clean_file)\n dirty_data = clean_data[0], stegs._hide_string(clean_data[1], \"Hidden text from test_write_image_diff_size.\")\n output_file = _write_image_file(dirty_file, clean_file, dirty_data)\n\n # Getting the file sizes for the clean and dirty files.\n with open(clean_file, 'rb') as clean:\n clean.seek(0, 2)\n clean_file_size = clean.tell()\n\n with open(output_file, 'rb') as dirty:\n dirty.seek(0, 2)\n dirty_file_size = dirty.tell()\n\n assert clean_file_size != dirty_file_size\n\n os.remove(output_file)\n\n\ndef test_write_image_diff_size_pil():\n \"\"\"Writing out an image creates a file of a different size, if the file was generated by PIL originally.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"tests/dirtyImage_test_write_image_file_diff_size_pil.png\"\n clean_file_pil = \"tests/cleanImagePIL.png\"\n with Image.open(clean_file) as pil_image:\n pil_image.save(clean_file_pil)\n\n stegs = Steganographer()\n clean_data = _open_image_file(clean_file_pil)\n dirty_data = clean_data[0], stegs._hide_string(clean_data[1], \"Hidden text from test_write_image_diff_size_pil.\")\n output_file = _write_image_file(dirty_file, clean_file_pil, dirty_data)\n\n # Getting the file sizes for the clean and dirty files.\n with open(clean_file_pil, 'rb') as clean:\n clean.seek(0, 2)\n clean_file_size = clean.tell()\n\n with open(output_file, 'rb') as dirty:\n dirty.seek(0, 2)\n dirty_file_size = dirty.tell()\n\n assert clean_file_size != dirty_file_size\n\n os.remove(output_file)\n\n\ndef test_write_image_exit_on_fail():\n \"\"\"When failing to write an image there is a system exit.\"\"\"\n clean_file = CLEAN_PNG_LOCATION\n dirty_file = \"WriteImageFileThatDoesNotExist.nope\"\n dirty_data = bytes(8)\n\n with pytest.raises(SystemExit):\n _write_image_file(clean_file, dirty_file, dirty_data)\n\n\ndef test_steganographer_hide_string():\n \"\"\"A string will correctly be hidden in a new image.\"\"\"\n clean_image = CLEAN_PNG_LOCATION\n dirty_image = \"tests/dirtyImage_test_steganographer_hide_string.png\"\n hidden_message = \"Hidden text from test_steganographer_hide_string.\"\n\n stegs = Steganographer()\n hidden_fname = stegs.steganographer_hide(clean_image, hidden_message, dirty_image)\n\n with open(clean_image, 'rb') as clean, open(hidden_fname, 'rb') as dirty:\n assert clean.read() != dirty.read()\n assert 
compare_images(clean_image, hidden_fname) < 500\n try:\n Image.open(hidden_fname)\n except OSError:\n pytest.fail(\"Image is corrupt \" + hidden_fname)\n\n os.remove(dirty_image)\n\n\ndef test_stegs_hide_string_nonsense():\n \"\"\"A random string, that can cause a decode error, will correctly be hidden in a new image.\"\"\"\n clean_image = CLEAN_PNG_LOCATION\n dirty_image = \"tests/dirtyImage_test_steganographer_hide_string_nonsense.png\"\n hidden_message = \"Äœð�¡‘ĜĜĜĜĜĜĜĜĜԬĜ\\U000fc423ĜĜĜĜԬĜĜĜԬԬĜԬ\\U000fc423ĜԬ\\U000fc423ԬԬĜ\\U000fc423ԬĜԬð�¡•ð�¡•ð�¡‘ð�¡•ð�¡•ð�¡•ð�¡‘ð�¡•ð�¡‘ð�¡•ð�¡‘\"\n\n stegs = Steganographer()\n hidden_fname = stegs.steganographer_hide(clean_image, hidden_message, dirty_image)\n\n with open(clean_image, 'rb') as clean, open(hidden_fname, 'rb') as dirty:\n assert clean.read() != dirty.read()\n assert compare_images(clean_image, hidden_fname) < 650\n try:\n Image.open(hidden_fname)\n except OSError:\n pytest.fail(\"Image is corrupt \" + hidden_fname)\n\n os.remove(dirty_image)\n\n\ndef test_steganographer_hide_file():\n \"\"\"A file can be hidden inside of an image and the image created is not corrupt\"\"\"\n clean_image = CLEAN_PNG_LOCATION\n dirty_image = \"tests/dirtyImage_test_steganographer_hide_file.png\"\n file_to_hide = \"tests/FileToHide.zip\"\n\n stegs = Steganographer()\n hidden_fname = stegs.steganographer_hide_file(clean_image, file_to_hide, dirty_image)\n\n with open(clean_image, 'rb') as clean, open(hidden_fname, 'rb') as dirty:\n assert clean.read() != dirty.read()\n assert compare_images(clean_image, hidden_fname) < 19000\n try:\n Image.open(hidden_fname)\n except OSError:\n pytest.fail(\"Image is corrupt \" + hidden_fname)\n\n os.remove(hidden_fname)\n\n\ndef test_steganographer_reveal_file():\n \"\"\"A file that has been hidden can be revealed.\"\"\"\n original_file = \"tests/FileToHide.zip\"\n dirty_image = \"tests/dirtyImageWFile.png\"\n revealed_file_name = \"tests/test_steganographer_reveal_file.zip\"\n\n stegs = Steganographer()\n revealed_file_data, _ = stegs.steganographer_reveal(dirty_image)\n\n with open(revealed_file_name, 'wb') as rev_file:\n rev_file.write(revealed_file_data)\n\n with open(original_file, 'rb') as original, open(revealed_file_name, 'rb') as revealed:\n assert original.read() == revealed.read()\n\n os.remove(revealed_file_name)\n\n\ndef test_steganographer_hide_name():\n \"\"\"The image a string is hidden in is the correct one.\"\"\"\n clean_image = CLEAN_PNG_LOCATION\n dirty_image = \"tests/dirtyImage_test_steganographer_hide_name.png\"\n hidden_message = \"Hidden text from test_steganographer_hide_name.\"\n\n stegs = Steganographer()\n hidden_fname = stegs.steganographer_hide(clean_image, hidden_message, dirty_image)\n\n assert hidden_fname == dirty_image\n\n os.remove(dirty_image)\n\n\ndef test_steganogrified_name():\n \"\"\"Data will be hidden in a file with steganogrified at the end, when no output file name is provided.\"\"\"\n clean_message_image = copy2(CLEAN_PNG_LOCATION, CLEAN_PNG_LOCATION[:-4] + \"_test_message_steganogrified_name.png\")\n clean_file_image = copy2(CLEAN_PNG_LOCATION, CLEAN_PNG_LOCATION[:-4] + \"_test_file_steganogrified_name.png\")\n hidden_message = \"Hidden text from test_steganogrified_name.\"\n file_to_hide = \"tests/FileToHide.zip\"\n\n stegs = Steganographer()\n hidden_message_fname = stegs.steganographer_hide(clean_message_image, hidden_message)\n steganogrified_message_fname = clean_message_image[:-4] + \"Steganogrified.png\"\n hidden_file_fname = 
stegs.steganographer_hide_file(clean_file_image, file_to_hide)\n steganogrified_file_fname = clean_file_image[:-4] + \"Steganogrified.png\"\n\n assert hidden_message_fname == steganogrified_message_fname\n assert os.path.isfile(steganogrified_message_fname)\n assert hidden_file_fname == steganogrified_file_fname\n assert os.path.isfile(steganogrified_file_fname)\n\n os.remove(clean_message_image)\n os.remove(hidden_message_fname)\n os.remove(clean_file_image)\n os.remove(hidden_file_fname)\n\n\n@given(hidden_message=text(characters(min_codepoint=1, blacklist_categories=('Cc', 'Cs'))))\ndef test_steganographer_inverse(hidden_message):\n \"\"\"Steganographer_reveal reveals what was hidden by steganographer_hide.\"\"\"\n clean_image = CLEAN_PNG_LOCATION\n dirty_image = \"tests/dirtyImage_test_steganographer_inverse.png\"\n\n stegs = Steganographer()\n revealed_message = stegs.steganographer_reveal(stegs.steganographer_hide(\n clean_image, hidden_message, dirty_image))[0].decode('utf-8')\n assert revealed_message == hidden_message\n\n os.remove(dirty_image)\n\n\ndef test_unicode_inverse():\n \"\"\"Unicode characters are hidden and revealed.\"\"\"\n message = \"test_unicode hidden message. Some random unicode characters: ð“�ˆ ᾨ Ô… Ô¹ Ø» Þ— ßš ङ ლ ጩ á�œ\"\n\n stegs = Steganographer()\n assert message == stegs.steganographer_reveal(stegs.steganographer_hide(CLEAN_PNG_LOCATION, message,\n \"tests/dirtyImage.png\"))[0].decode('utf-8')\n\n\ndef test_main_hide_msg_with_output(capfd):\n \"\"\"Command line calls to hide work when given an input image, a message, and an output file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n hidden_message = 'test_main_hide_msg_with_output hidden message'\n dirty_fname = \"tests/dirtyImage_test_main_hide_msg_with_output.png\"\n\n result = os.system('python -m steganographer ' + CLEAN_PNG_LOCATION + ' -m \"' + hidden_message +\n '\" -o ' + dirty_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == \"The message has been hidden in \" + dirty_fname + line_end\n assert compare_images(CLEAN_PNG_LOCATION, dirty_fname) < 500\n try:\n Image.open(dirty_fname)\n except OSError:\n pytest.fail(\"Image is corrupt \" + dirty_fname)\n\n os.remove(dirty_fname)\n\n\ndef test_main_hide_file_with_output(capfd):\n \"\"\"Command line calls to hide work when given an input image, a file to hide, and an output file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n file_to_hide = \"tests/FileToHide.zip\"\n dirty_fname = \"tests/dirtyImage_test_main_hide_file_with_output.png\"\n\n result = os.system('python -m steganographer ' + CLEAN_PNG_LOCATION + ' -f \"' + file_to_hide +\n '\" -o ' + dirty_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == \"The file \" + file_to_hide + \" has been hidden in \" + dirty_fname + line_end\n assert compare_images(CLEAN_PNG_LOCATION, dirty_fname) < 19000\n try:\n Image.open(dirty_fname)\n except OSError:\n pytest.fail(\"Image is corrupt \" + dirty_fname)\n\n os.remove(dirty_fname)\n\n\ndef test_main_hide_msg_no_output(capfd):\n \"\"\"Command line calls to hide work when given an input image, a message, and no output file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n hidden_message = 'test_main_hide_msg_no_output hidden message'\n clean_image = copy2(CLEAN_PNG_LOCATION, CLEAN_PNG_LOCATION[:-4] + \"test_main_hide_msg_no_output.png\")\n steganogrified_fname = clean_image[:-4] + \"Steganogrified.png\"\n\n result = 
os.system('python -m steganographer ' + clean_image + ' -m \"' + hidden_message + '\"')\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == \"The message has been hidden in \" + steganogrified_fname + line_end\n assert compare_images(clean_image, steganogrified_fname) < 500\n try:\n Image.open(steganogrified_fname)\n except OSError:\n pytest.fail(\"Image is corrupt \" + steganogrified_fname)\n\n os.remove(clean_image)\n os.remove(steganogrified_fname)\n\n\ndef test_main_hide_file_no_output(capfd):\n \"\"\"Command line calls to hide work when given an input image, a file to hide, and no output file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n file_to_hide = \"tests/FileToHide.zip\"\n clean_image = copy2(CLEAN_PNG_LOCATION, CLEAN_PNG_LOCATION[:-4] + \"test_main_hide_file_no_output.png\")\n steganogrified_fname = clean_image[:-4] + \"Steganogrified.png\"\n\n result = os.system('python -m steganographer ' + clean_image + ' -f \"' + file_to_hide + '\"')\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == \"The file \" + file_to_hide + \" has been hidden in \" + steganogrified_fname + line_end\n assert compare_images(clean_image, steganogrified_fname) < 19000\n try:\n Image.open(steganogrified_fname)\n except OSError:\n pytest.fail(\"Image is corrupt \" + steganogrified_fname)\n\n os.remove(clean_image)\n os.remove(steganogrified_fname)\n\n\ndef test_main_reveal_msg_no_output(capfd):\n \"\"\"Command line calls to reveal work when given an input image, and no output file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n hidden_message = 'test_main_reveal_msg_no_output hidden message'\n dirty_fname = \"tests/dirtyImage_test_main_reveal_msg_no_output.png\"\n\n os.system('python -m steganographer ' + CLEAN_PNG_LOCATION + ' -m \"' + hidden_message +\n '\" -o ' + dirty_fname)\n _, _ = capfd.readouterr()\n\n result = os.system(\"python -m steganographer \" + dirty_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == (\"The hidden message was...\" + line_end + hidden_message + line_end)\n\n os.remove(dirty_fname)\n\n\ndef test_main_reveal_file_no_output(capfd):\n \"\"\"Command line calls to reveal work when given an input image, the reveal file flag, and no output file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n file_to_hide = \"tests/FileToHide.zip\"\n dirty_fname = \"tests/dirtyImage_test_main_reveal_file_no_output.png\"\n generated_output_file = file_to_hide\n\n os.system('python -m steganographer ' + CLEAN_PNG_LOCATION + ' -f \"' + file_to_hide +\n '\" -o ' + dirty_fname)\n _, _ = capfd.readouterr()\n\n result = os.system(\"python -m steganographer -r \" + dirty_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == (\"The hidden file was revealed in \" + generated_output_file + line_end)\n\n os.remove(dirty_fname)\n\n\ndef test_main_reveal_msg_w_output(capfd):\n \"\"\"Command line calls to reveal work when given an input image, and an output file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n hidden_message = 'test_main_reveal_msg_w_output hidden message'\n dirty_fname = \"tests/dirtyImage_test_main_reveal_msg_w_output.png\"\n output_fname = \"tests/outputMessage.txt\"\n\n os.system('python -m steganographer ' + CLEAN_PNG_LOCATION + ' -m \"' + hidden_message +\n '\" -o ' + dirty_fname)\n _, _ = capfd.readouterr()\n\n result = os.system(\"python -m steganographer \" + dirty_fname + \" -o \" 
+ output_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n with open(output_fname, 'r') as output:\n assert output.read() == hidden_message\n\n assert out == (\"The hidden message was written to \" + output_fname + line_end)\n\n os.remove(dirty_fname)\n\n\ndef test_main_reveal_file_w_output(capfd):\n \"\"\"Command line calls to reveal work when given an input image, a reveal file flag, and an output image.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n file_to_hide = \"tests/FileToHide.zip\"\n dirty_fname = \"tests/dirtyImage_test_main_reveal_file_w_output.png\"\n output_fname = \"tests/outputFile.zip\"\n\n os.system('python -m steganographer ' + CLEAN_PNG_LOCATION + ' -f \"' + file_to_hide +\n '\" -o ' + dirty_fname)\n _, _ = capfd.readouterr()\n\n result = os.system(\"python -m steganographer \" + dirty_fname + \" -r -o \" + output_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n with open(output_fname, 'rb') as output, open(file_to_hide, 'rb') as original_file:\n assert output.read() == original_file.read()\n\n assert out == (\"The hidden file was revealed in \" + output_fname + line_end)\n\n os.remove(dirty_fname)\n\n\ndef test_main_reveal_no_msg(capfd):\n \"\"\"There should be an error returned when there is no message hidden in the image file.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n clean_fname = \"tests/cleanImage.jpg\"\n\n result = os.system(\"python -m steganographer \" + clean_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == \"This file %s has no hidden message.\" % clean_fname + line_end\n\n\ndef test_main_reveal_no_op_unicode(capfd):\n \"\"\"Command line calls to reveal work when the hidden message contains high value unicode.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n hidden_message = 'test_main_reveal_no_op_unicode hidden message, Unicode characters: ð“�ˆ ᾨ Ô… Ô¹ Ø» Þ— ßš ङ ლ ጩ á�œ'\n dirty_fname = \"tests/dirtyImage_test_main_reveal_no_op_unicode.png\"\n output_fname = \"tests/dirtyImage_test_main_reveal_no_op_unicode_message.txt\"\n\n os.system('python -m steganographer ' + CLEAN_PNG_LOCATION + ' -m \"' + hidden_message +\n '\" -o ' + dirty_fname)\n _, _ = capfd.readouterr()\n\n result = os.system(\"python -m steganographer \" + dirty_fname)\n out, _ = capfd.readouterr()\n\n assert result == 0\n if os.path.isfile(output_fname):\n with open(output_fname, 'r', encoding='utf-8') as output:\n assert output.read() == hidden_message\n\n assert (out == (\"The hidden message contains unsupported unicode characters and cannot be fully displayed \" +\n \"here. The correct message has been written to \" + output_fname + line_end +\n str(hidden_message.encode('utf-8')) + line_end)\n or out == (\"The hidden message was...\" + line_end + hidden_message + line_end))\n\n os.remove(dirty_fname)\n if os.path.isfile(output_fname):\n os.remove(output_fname)\n\n\ndef test_jpegs(capfd):\n \"\"\"Jpegs can have a message hidden and revealed. 
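(Editor's aside: the output is written as PNG even when a .jpg name is
    requested because JPEG is lossy -- re-encoding would destroy the
    least-significant-bit payload, so a lossless container is required.)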
Note they are converted to png.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n hidden_message = 'test_jpeg hidden message'\n dirty_fname = \"tests/dirtyImage_test_jpegs\"\n\n result = os.system('python -m steganographer tests/cleanImage.jpg -m \"' + hidden_message +\n '\" -o ' + dirty_fname + '.jpg')\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == (\"The message has been hidden in \" + dirty_fname + '.png' + line_end)\n\n result = os.system(\"python -m steganographer \" + dirty_fname + '.png')\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == (\"The hidden message was...\" + line_end + hidden_message + line_end)\n assert compare_images(\"tests/cleanImage.jpg\", dirty_fname + '.png') < 500\n\n\n@pytest.mark.xfail(strict=True, reason=\"Issue #59 bmp support is broken.\", run=True)\ndef test_bmps(capfd):\n \"\"\"Bmps can have a message hidden and revealed.\"\"\"\n line_end = '\\n'\n if sys.platform == 'win32':\n line_end = '\\r\\n'\n hidden_message = 'test_bmps hidden message'\n dirty_fname = \"tests/dirtyImage_test_bmps\"\n\n result = os.system('python -m steganographer tests/cleanImage.bmp -m \"' + hidden_message +\n '\" -o ' + dirty_fname + '.bmp')\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == (\"The message has been hidden in \" + dirty_fname + '.png' + line_end)\n\n result = os.system(\"python -m steganographer \" + dirty_fname + '.png')\n out, _ = capfd.readouterr()\n\n assert result == 0\n assert out == (\"The hidden message was...\" + line_end + hidden_message + line_end)\n assert compare_images(\"tests/cleanImage.bmp\", dirty_fname + '.png') < 500\n","repo_name":"MotaDan/steganographerPY","sub_path":"tests/test_steganographer.py","file_name":"test_steganographer.py","file_ext":"py","file_size_in_byte":34786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73662911946","text":"from django.urls import path\nfrom .views import InventoryMan, SignUp, Dashboard, FakaIzinto, Edit, Khipha\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n\tpath(\"\", InventoryMan.as_view(), name=\"inventory-man\"),\n\tpath(\"signup/\", SignUp.as_view(), name=\"signup\"),\n\tpath(\"dashboard/\", Dashboard.as_view(), name=\"dashboard\"),\n \tpath(\"add/\", FakaIzinto.as_view(), name=\"faka\"),\n \tpath(\"edit-item/\", Edit.as_view(), name=\"change\"),\n \tpath(\"delete-item/\", Khipha.as_view(), name=\"khipha\"),\n\tpath(\"login/\", auth_views.LoginView.as_view(template_name=\"login.html\"), name=\"login\"),\n\tpath(\"logout\", auth_views.LogoutView.as_view(template_name=\"logout.html\"), name=\"logout\")\n]","repo_name":"malwandemoyo/POSApp","sub_path":"inventory_man/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34057121006","text":"import pandas as pd\nfrom datetime import datetime\nimport quandl\n\n\ndados = pd.read_excel('/home/brenno/Downloads/ibov_sp.xlsx', na_values=\"-\")\ncdi_data = quandl.get('BCB/11', start_date='1994-06-01')\ncdi_data['Value'] = cdi_data['Value']/100\ncdi_data['cota'] = (1 + cdi_data['Value']).cumprod() \ncdi_data = cdi_data[['cota']]\ndados['Data'] = pd.to_datetime(dados['Data']).dt.date\ndados = dados.set_index('Data')\n\ndados = pd.merge(dados, cdi_data, left_index=True, right_index=True)\n\n\ndados = dados.dropna()\n\ndados = dados[dados.index > datetime(1999, 12, 31, 
0, 0, 0)]\n\ndados = dados.pct_change(periods=(252 * 10))\n\ndados = dados.dropna()\n\nprint('% janelas', sum(dados['ibov'] > dados['cota'])/len(dados))\nprint(\"Periodos positivos ibov:\", sum(dados['ibov'] > 0)/len(dados))\nprint(\"Periodos positivos sp:\", sum(dados['sp'] > 0)/len(dados))\nprint('Media retornos', dados.mean())\n\ndados.to_excel('/home/brenno/Downloads/dados_120m_ibov_sp.xlsx')\n","repo_name":"brennosullivan/instagram_reels","sub_path":"janelas_12m_ibov_sp.py","file_name":"janelas_12m_ibov_sp.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2453750157","text":"from lambda_signals.signals import lambda_handler\nimport boto3\nfrom botocore.exceptions import ClientError\nimport datetime\nimport time\n\ndebug = True\n\ndef process_event(event):\n return dict(\n stack_name = event['ResourceProperties']['StackName'],\n resource_name = event['LogicalResourceId'],\n autoscale_grp = event['ResourceProperties']['AutoScaleGrp'],\n aws_region = event['ResourceProperties']['AwsRegion'],\n )\n\ndef get_client(region_name):\n return boto3.client(\"ec2\", region_name = region_name)\n\ndef get_as_client(region_name):\n return boto3.client(\"autoscaling\", region_name = region_name)\n\ndef get_ec2_id(autoscale_group, region_name):\n as_client = get_as_client(region_name)\n as_grp_list = as_client.describe_auto_scaling_groups(AutoScalingGroupNames=[autoscale_group])\n if len(as_grp_list['AutoScalingGroups']) != 1:\n raise Exception(\"ERROR: multiple autoscaling groups found\")\n return as_grp_list['AutoScalingGroups'][0]['Instances'][0]['InstanceId']\n\ndef check_ami_exists(client):\n try:\n response = client.describe_images(Filters=[{'Name':'tag:custom:uuid','Values':[uuid]}])\n if response.get('ResponseMetadata').get('HTTPStatusCode') == 200:\n if len(response.get('Images',[])) > 0:\n return (True, response['Images'][0]['ImageId'])\n except:\n return (False, dict())\n\ndef create(stack_name, resource_name, autoscale_grp, aws_region):\n client = get_client(aws_region)\n success = False\n datenow = datetime.datetime.now().strftime(\"%Y_%m_%d_%H%M\")\n ec2_label = resource_name + datenow\n ec2_id = get_ec2_id(autoscale_grp, aws_region)\n make_response = client.create_image(InstanceId=ec2_id, Name=ec2_label, NoReboot=False)\n if make_response.get('ResponseMetadata').get('HTTPStatusCode') == 200:\n success = True\n ami_id = make_response.get('ImageId')\n client.create_tags(\n Resources=[ami_id],\n Tags = [{\n \"Key\":\"cloudformation:amimanager:stack-name\",\n \"Value\":stack_name,\n },\n {\n \"Key\":\"cloudformation:amimanager:logical-id\",\n \"Value\":resource_name,\n }\n ],\n )\n if success:\n print(\"Success\")\n time.sleep(60)\n client = boto3.client(\"autoscaling\", region_name = aws_region)\n response = client.update_auto_scaling_group(AutoScalingGroupName=autoscale_grp,\n MinSize=0, DesiredCapacity=0)\n return (True, dict(PhysicalResourceId=ami_id,ImageId=ami_id))\n #TODO: set autoscale group size to 0\n return (False, dict())\n\ndef delete(aws_region):\n #TODO: fix delete old AMIs\n return (True, dict())\n #client = get_client(aws_region)\n\n #(ami_exists, ami_id) = check_ami_exists(client)\n #if ami_exists:\n # client.deregister_image(ImageId = ami_id)\n # return (True, dict(DeletedAmiId = ami_id))\n #return (True, dict(Info = \"AMI not found\"))\n\ndef create_resource(event, context):\n if debug:\n print(\"event\")\n print(event)\n print(\"context\")\n print(vars(context))\n 
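    # Editor's note (added): create() snapshots the single instance in the
    # autoscaling group into an AMI (create_image with NoReboot=False), tags it
    # with the owning stack name and logical id, sleeps 60 seconds, then scales
    # the group down to zero. A more robust alternative to the fixed sleep would
    # be boto3's built-in waiter, e.g.:
    #     get_client(aws_region).get_waiter('image_available').wait(ImageIds=[ami_id])
    # Note also that check_ami_exists() references an undefined name `uuid`;
    # it appears to be dead code tied to the commented-out delete() path.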
event_details = process_event(event)\n return create(**event_details)\n\ndef update_resource(event, context):\n\n event_details = process_event(event)\n return create(**event_details)\n #TODO: delete old ami\n #if old_uuid != event_details['ami_uuid']:\n # (ami_existed, ami_id) = delete(old_uuid, event_details['aws_region'])\n # return create(**event_details)\n #else:\n # return (True, dict(Info=\"Ami uuid not changed\"))\n\ndef delete_resource(event, context):\n if debug:\n print(\"event\")\n print(event)\n print(\"context\")\n print(vars(context))\n event_details = process_event(event)\n\n return delete(event_details['aws_region'])\n\ndef test_resource(event, context):\n print(\"Add tests here\")\n return (True, dict())\n\ndef handler(event, context):\n lambda_handler(\n event,\n context,\n create_function = create_resource,\n delete_function = delete_resource,\n update_function = update_resource,\n test_function = test_resource,\n )\n\nif __name__ == \"__main__\":\n class FakeContext(object):\n def __init__(self):\n context = {\n 'aws_request_id': 'a3de505e-f16b-42f4-b3e6-bcd2e4a73903',\n 'log_stream_name': '2015/10/26/[$LATEST]c71058d852474b9895a0f221f73402ad',\n 'invoked_function_arn': 'arn:aws:lambda:us-west-2:123456789012:function:ExampleCloudFormationStackName-ExampleLambdaFunctionResourceName-AULC3LB8Q02F',\n 'client_context': None,\n 'log_group_name': '/aws/lambda/ExampleCloudFormationStackName-ExampleLambdaFunctionResourceName-AULC3LB8Q02F',\n 'function_name': 'ExampleCloudFormationStackName-ExampleLambdaFunctionResourceName-AULC3LB8Q02F',\n 'function_version': '$LATEST',\n 'identity': '<__main__.CognitoIdentity object at 0x7fd7042a2b90>',\n 'memory_limit_in_mb': '128'\n }\n self.__dict__.update(context)\n\n event = {\n 'StackId': 'Test',\n 'RequestId': '123',\n 'LogicalResourceId': '123',\n 'RequestType':'Test',\n 'ResponseURL':'Test',\n 'test':'test_value_1'\n }\n context = FakeContext()\n handler(event, context)\n","repo_name":"gotropo/gotropo","sub_path":"create/custom/ami_resource.py","file_name":"ami_resource.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12165890331","text":"#-*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, absolute_import\n\nimport os\n\nfrom pyramid.settings import aslist\nfrom pyramid_jinja2 import renderer_factory\n\nfrom caliop.helpers.config import Configuration\n\n\ndef includeme(config):\n settings = config.registry.settings\n\n config.include('pyramid_jinja2')\n config.add_renderer('.html', renderer_factory)\n\n # XXX should be removed\n rootpath = os.path.dirname(os.path.realpath(__file__))\n\n # configure templates dir (angular build dir)\n template_path = os.path.join(rootpath, settings['caliop.ng.path'])\n config.add_jinja2_search_path(template_path)\n\n # configure static dir on the same dir (angular build dir)\n static_path = os.path.join(rootpath, 'static')\n config.add_static_view('/static', template_path)\n\n for file in aslist(settings['caliop.config']):\n name, path = file.split(':', 1)\n Configuration.load(path, name)\n","repo_name":"LaurentChemla/CaliOpen","sub_path":"caliop/caliop/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"16470124279","text":"#!/usr/bin/env python\nimport pyglet\nfrom pyglet.window import key\nimport numpy as np\n\nfrom pyglet_gui.theme import Theme\nfrom 
pyglet_gui.buttons import Button\nfrom pyglet_gui.manager import Manager\nfrom pyglet_gui.containers import VerticalContainer\n\ntheme = Theme({\"font\": \"Lucida Grande\",\n \"font_size\": 12,\n \"text_color\": [255, 255, 255, 255],\n \"gui_color\": [255, 0, 0, 255],\n \"button\": {\n \"down\": {\n \"image\": {\n \"source\": \"button-down.png\",\n \"frame\": [8, 6, 2, 2],\n \"padding\": [18, 18, 8, 6]\n },\n \"text_color\": [0, 0, 0, 255]\n },\n \"up\": {\n \"image\": {\n \"source\": \"button.png\",\n \"frame\": [6, 5, 6, 3],\n \"padding\": [18, 18, 8, 6]\n }\n }\n }\n }, resources_path='')\n\nPI_180 = np.pi/180\nnp.random.seed(0)\n\n#\n# Parameters that control game behavior and difficulty\n#\n\nast_sizes = [1, 1/2, 1/4] # Factor by which asteroids shrink as they break\nast_points = [100, 50, 25] # How many points each asteroid is worth\np_acc = 100 # Acceleration of player's ship\np_rot_rate = 10.0 # Rotation rate of player's ship\nshoot_speed = 600 # Speed of bullets\nast_max_vel = 50 # Max of x and y components of velocity of asteroids\nast_break_max_vel = 50 # Max amount to add to asteroid vel components after break\nnum_ast = 10 # Initial number of asteroids to generate\nlives = 2 # Number of etra lives that player stars with\n\nwindow = pyglet.window.Window(800, 600)\n\ndef make_img(filename, anchor_x=None, anchor_y=None):\n\timg = pyglet.image.load(filename)\n\timg.anchor_x = img.width // 2\n\timg.anchor_y = img.height // 2\n\tif anchor_x is not None:\n\t\timg.anchor_x = anchor_x\n\tif anchor_y is not None:\n\t\timg.anchor_y = anchor_y\n\treturn img\n\ndef ang_to_vec(angle):\n\ta_norm_x = np.cos(angle*PI_180)\n\ta_norm_y = -np.sin(angle*PI_180)\n\treturn a_norm_x, a_norm_y\n\ndef wrap(xpos, ypos, xmin, xmax, ymin, ymax):\n\tif xpos < xmin:\n\t\txpos = xmax\n\tif xpos > xmax:\n\t\txpos = xmin\n\tif ypos < ymin:\n\t\typos = ymax\n\tif ypos > ymax:\n\t\typos = ymin\n\n\treturn xpos, ypos\n\ndef off_screen(xpos, ypos, xmin, xmax, ymin, ymax):\n\tnewx, newy = wrap(xpos, ypos, xmin, xmax, ymin, ymax)\n\tif (newx != xpos) or (newy != ypos):\n\t\treturn True\n\treturn False\n\nclass Bullet:\n\tdef __init__(self, game, xpos, ypos, xvel, yvel):\n\t\tself.xpos = xpos\n\t\tself.ypos = ypos\n\t\tself.xvel = xvel\n\t\tself.yvel = yvel\n\t\tself.game = game\n\n\t\tbullet_img = make_img('bullet.png')\n\t\tself.bullet = pyglet.sprite.Sprite(bullet_img, x=self.xpos, y=self.ypos, batch=game.batch)\n\n\t# Return True if the bullet went off the screen and needs to be deleted\n\tdef update(self, dt):\n\t\tself.xpos += self.xvel*dt\n\t\tself.ypos += self.yvel*dt\n\n\t\treturn off_screen(self.xpos, self.ypos, self.game.xmin, self.game.xmax,\n\t\t\t self.game.ymin, self.game.ymax)\n\nclass Asteroid:\n\tdef __init__(self, game, xpos, ypos, xvel, yvel, size):\n\t\tself.xpos = xpos\n\t\tself.ypos = ypos\n\t\tself.xvel = xvel\n\t\tself.yvel = yvel\n\t\tself.size = size\n\t\tself.rot = np.random.rand()*360\n\t\tself.exploding = False\n\t\tself.game = game\n\n\t\tasteroid_img = make_img('asteroid.png')\n\t\tself.asteroid = pyglet.sprite.Sprite(asteroid_img, x=self.xpos, y=self.ypos,\n\t\t\t batch=game.batch, group=game.background)\n\n\tdef update(self, dt):\n\t\tself.xpos += self.xvel*dt\n\t\tself.ypos += self.yvel*dt\n\n\t\tself.xpos, self.ypos = wrap(self.xpos, self.ypos, self.game.xmin,\n\t\t\t self.game.xmax, self.game.ymin, \n\t\t\t self.game.ymax)\n\nclass Game:\n\tdef __init__(self, window):\n\t\tself.window = window\n\t\tself.background = pyglet.graphics.OrderedGroup(0)\n\t\tself.foreground = 
pyglet.graphics.OrderedGroup(1)\n\t\tself.xmin, self.xmax = 0, self.window.width\n\t\tself.ymin, self.ymax = 0, self.window.height\n\t\tself.state = 'MENU'\n\t\tself.player_img = make_img('player.png')\n\t\tself.init_menu_state()\n\n\tdef gen_exp_anim(self):\n\t\texp_anim = pyglet.image.Animation.from_image_sequence(self.explosion_seq, 0.1, False)\n\t\treturn exp_anim\n\n\tdef init_menu_state(self):\n\t\tself.batch = pyglet.graphics.Batch()\n\n\t\tlabel = pyglet.text.Label('AsTeRoIdS', font_name='Times New Roman',\n font_size=36, x=window.width//2,\n y=3*window.height//4, anchor_x='center',\n anchor_y='center', batch=self.batch, group=self.foreground)\n\n\t\tdef callback1(is_pressed):\n\t\t\tself.init_game_state()\n\t\t\tself.state = 'PLAYING'\n\t\tbutton1 = Button('Start Game', on_press=callback1)\n\n\t\tdef callback2(is_pressed):\n\t\t\texit()\n\t\tbutton2 = Button('Quit', on_press=callback2)\n\t\tManager(VerticalContainer([button1, button2]), window=window,\n\t\t\t theme=theme, batch=self.batch)\n\n\t\tself.asteroids = self.gen_asteroids(num_ast)\n\n\tdef init_game_state(self):\n\t\tself.batch = pyglet.graphics.Batch()\n\n\t\tself.reset()\n\n\t\tself.shoot_start = self.player.width // 2\n\t\tflame_img = make_img('flame.png', anchor_x=55)\n\t\tself.flame = pyglet.sprite.Sprite(flame_img, x=self.p_xpos, y=self.p_ypos, group=self.background)\n\n\t\texplosion = pyglet.image.load('explosion.png')\n\t\tself.explosion_seq = pyglet.image.ImageGrid(explosion, 1, 7)\n\n\t\ts_fac = 0.5\n\t\tself.s_lives = []\n\t\tlive_img = make_img('player.png')\n\t\tfor i in range(lives):\n\t\t\tlive_sprite = pyglet.sprite.Sprite(live_img, x=0, y=0, batch=self.batch,\n\t\t\t\t group=self.foreground)\n\t\t\txpos = i*(live_sprite.width*s_fac) + live_sprite.width // 2\n\t\t\typos = self.ymax - live_sprite.height // 2\n\t\t\tlive_sprite.update(xpos, ypos, scale=s_fac, rotation=-90)\n\t\t\tself.s_lives.append(live_sprite)\n\n\t\tself.score = 0\n\t\tself.score_prefix = 'Score: '\n\t\tself.score_label = pyglet.text.Label('', font_name='Times New Roman',\n font_size=18, batch=self.batch,\n anchor_x='left', anchor_y='center',\n group=self.foreground)\n\t\tself.score_label.x = live_sprite.width*(lives+1)\n\t\tself.score_label.y = live_sprite.y\n\t\tself.update_score_ui()\n\n\t\tself.key_left = False\n\t\tself.key_right = False\n\t\tself.key_up = False\n\t\tself.key_space = False\n\n\tdef init_dead_state(self):\n\t\tself.score_label.delete()\n\t\t\n\t\tgameover_label = pyglet.text.Label('Game Over', font_name='Times New Roman',\n font_size=36, x=window.width//2,\n y=3*window.height//4, anchor_x='center',\n anchor_y='center', batch=self.batch)\n\n\t\ttext = 'Final Score: ' + str(self.score)\n\t\tfinalscore_label = pyglet.text.Label(text, font_name='Times New Roman',\n font_size=18, x=window.width//2,\n y=3*window.height//4-50, anchor_x='center',\n anchor_y='center', batch=self.batch)\n\n\t\tdef callback1(is_pressed):\n\t\t\tself.init_game_state()\n\t\t\tself.state = 'PLAYING'\n\t\tbutton1 = Button('Start Game', on_press=callback1)\n\n\t\tdef callback2(is_pressed):\n\t\t\texit()\n\t\tbutton2 = Button('Quit', on_press=callback2)\n\t\tManager(VerticalContainer([button1, button2]), window=window,\n\t\t\t theme=theme, batch=self.batch)\n\n\tdef done_exploding_player(self):\n\t\tif len(self.s_lives) == 0:\n\t\t\tself.init_dead_state()\n\t\t\tself.state = 'DEAD'\n\t\telse:\n\t\t\tself.s_lives.remove(self.s_lives[-1])\n\t\t\tself.reset()\n\n\tdef reset(self):\n\t\tself.p_xpos = (self.xmax - self.xmin)/2\n\t\tself.p_ypos = 
(self.ymax - self.ymin)/2\n\t\tself.p_xvel, self.p_yvel = 0, 0\n\t\tself.p_rot = 0\n\t\tself.player = pyglet.sprite.Sprite(self.player_img, x=self.p_xpos, y=self.p_ypos, batch=game.batch)\n\t\tself.exploded = False\n\t\tself.bullets = []\n\t\tself.asteroids = self.gen_asteroids(num_ast)\n\n\tdef update_score_ui(self):\n\t\tscore_str = self.score_prefix + '{:4d}'.format(self.score)\n\t\tself.score_label.text = score_str\n\n\tdef gen_asteroids(self, N):\n\t\tasteroids = []\n\t\txpos = np.random.rand(N)*self.xmax\n\t\typos = np.random.rand(N)*self.ymax\n\t\txvel = np.random.rand(N)*ast_max_vel*2 - ast_max_vel\n\t\tyvel = np.random.rand(N)*ast_max_vel*2 - ast_max_vel\n\t\tsize_inds = np.asarray(np.random.rand(N)*len(ast_sizes), dtype=int)\n\t\tsizes = []\n\t\tfor i, idx in enumerate(size_inds):\n\t\t\tsizes.append(ast_sizes[idx])\n\n\t\tfor i in range(N):\n\t\t\tast = Asteroid(self, xpos[i], ypos[i], xvel[i], yvel[i], sizes[i])\n\t\t\tasteroids.append(ast)\n\n\t\treturn asteroids\n\n\t# Check every bullet and asteroid pair for a collision and return\n\t# the indices of both in the list if a collision occurs\n\tdef check_bullet_ast_coll(self):\n\t\tfor idx1 in range(len(self.bullets)):\n\t\t\tfor idx2 in range(len(self.asteroids)):\n\t\t\t\tif self.asteroids[idx2].exploding:\n\t\t\t\t\tcontinue\n\t\t\t\tx1 = self.bullets[idx1].xpos\n\t\t\t\tx2 = self.asteroids[idx2].xpos\n\t\t\t\ty1 = self.bullets[idx1].ypos\n\t\t\t\ty2 = self.asteroids[idx2].ypos\n\t\t\t\tradius = self.asteroids[idx2].asteroid.width // 2\n\n\t\t\t\tdist = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n\n\t\t\t\tif dist <= radius:\n\t\t\t\t\treturn idx1, idx2\n\n\t\treturn -1, -1\n\n\t# Check for a collision between the player and every asteroid\n\t# and return the index of the asteroid if one occurs\n\tdef check_player_ast_coll(self):\n\t\tfor idx in range(len(self.asteroids)):\n\t\t\tif self.asteroids[idx].exploding:\n\t\t\t\t\tcontinue\n\t\t\tx1 = self.p_xpos\n\t\t\tx2 = self.asteroids[idx].xpos\n\t\t\ty1 = self.p_ypos\n\t\t\ty2 = self.asteroids[idx].ypos\n\t\t\tradius = self.player.width // 2\n\n\t\t\tdist = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n\n\t\t\tif dist <= radius:\n\t\t\t\treturn idx\n\n\t\treturn -1\n\n\tdef shoot(self):\n\t\ta_norm_x, a_norm_y = ang_to_vec(self.p_rot)\n\t\tb_xpos = a_norm_x*self.shoot_start\n\t\tb_ypos = a_norm_y*self.shoot_start\n\t\tb_xvel = a_norm_x*shoot_speed\n\t\tb_yvel = a_norm_y*shoot_speed\n\t\tb = Bullet(self, self.p_xpos + b_xpos, self.p_ypos + b_ypos,\n\t\t\t self.p_xvel + b_xvel, self.p_yvel + b_yvel)\n\t\tself.bullets.append(b)\n\n\t# Break an asteroid into smaller pieces\n\tdef break_asteroid(self, idx):\n\t\ta = self.asteroids[idx]\n\t\tself.asteroids[idx].asteroid.image = self.gen_exp_anim()\n\t\tself.asteroids[idx].exploding = True\n\t\tdef asteroid_done():\n\t\t\tself.asteroids.remove(a)\n\t\tself.asteroids[idx].asteroid.on_animation_end = asteroid_done\n\n\t\tif a.size == ast_sizes[-1]:\n\t\t\treturn\n\t\t\n\t\tsize_idx = ast_sizes.index(a.size)\n\t\txv_add = np.random.rand(2)*ast_break_max_vel*2 - ast_break_max_vel\n\t\tyv_add = np.random.rand(2)*ast_break_max_vel*2 - ast_break_max_vel\n\t\ta1_new = Asteroid(self, a.xpos, a.ypos, a.xvel + xv_add[0],\n\t\t\t a.yvel + yv_add[0], ast_sizes[size_idx+1])\n\t\ta2_new = Asteroid(self, a.xpos, a.ypos, a.xvel + xv_add[1],\n\t\t\t a.yvel + yv_add[1], ast_sizes[size_idx+1])\n\n\t\tself.asteroids.append(a1_new)\n\t\tself.asteroids.append(a2_new)\n\n\tdef update(self, dt):\n\t\tif self.state == 'MENU':\n\t\t\tself.menu_update(dt)\n\t\telif 
self.state == 'PLAYING':\n\t\t\tself.playing_update(dt)\n\n\tdef menu_update(self, dt):\n\t\tfor a in self.asteroids:\n\t\t\ta.update(dt)\n\n\tdef playing_update(self, dt):\n\t\t# Update player position\n\t\tif not self.exploded:\n\t\t\tself.p_xpos, self.p_ypos = wrap(self.p_xpos, self.p_ypos, self.xmin,\n\t\t\t\t self.xmax, self.ymin, self.ymax)\n\n\t\t\tself.p_xpos += self.p_xvel*dt\n\t\t\tself.p_ypos += self.p_yvel*dt\n\n\t\t# Update asteroid and bullet positions\n\t\tfor a in self.asteroids:\n\t\t\ta.update(dt)\n\n\t\tfor b in self.bullets:\n\t\t\tcleanup = b.update(dt)\n\t\t\tif cleanup:\n\t\t\t\tself.bullets.remove(b)\n\n\t\t# Check for player-asteroid and bullet-asteroid collisions\n\t\tif not self.exploded:\n\t\t\ti1 = self.check_player_ast_coll()\n\t\t\tif i1 >= 0:\n\t\t\t\tself.exploded = True\n\t\t\t\tself.release_all_keys()\n\t\t\t\tself.player.image = self.gen_exp_anim()\n\t\t\t\tself.player.on_animation_end = self.done_exploding_player\n\n\t\ti1, i2 = self.check_bullet_ast_coll()\n\t\tif i1 >= 0 and i2 >= 0:\n\t\t\tself.bullets.remove(self.bullets[i1])\n\t\t\tpoints = ast_points[ast_sizes.index(self.asteroids[i2].size)]\n\t\t\tself.score += points\n\t\t\tself.break_asteroid(i2)\n\n\t\tself.update_score_ui()\n\n\t\t# Handle keyboard input\n\t\tif self.key_left:\n\t\t\tself.p_rot -= p_rot_rate\n\t\t\tif self.p_rot < 0:\n\t\t\t\tself.p_rot += 360\n\t\tif self.key_right:\n\t\t\tself.p_rot += p_rot_rate\n\t\t\tif self.p_rot > 360:\n\t\t\t\tself.p_rot -= 360\n\t\tif self.key_up:\n\t\t\ta_norm_x, a_norm_y = ang_to_vec(self.p_rot)\n\t\t\tself.p_xvel += a_norm_x*p_acc*dt\n\t\t\tself.p_yvel += a_norm_y*p_acc*dt\n\t\tif self.key_space:\n\t\t\tself.shoot()\n\t\t\tself.key_space = False # Prevent player from holding down the shoot button\n\n\tdef release_all_keys(self):\n\t\tself.key_space = False\n\t\tself.key_up = False\n\t\tself.key_left = False\n\t\tself.key_right = False\n\ngame = Game(window)\n\n@window.event\ndef on_key_press(symbol, modifiers):\n\tif game.state == 'PLAYING':\n\t\tif game.exploded:\n\t\t\treturn\n\n\t\tif symbol == key.LEFT:\n\t\t\tgame.key_left = True\n\t\tif symbol == key.RIGHT:\n\t\t\tgame.key_right = True\n\t\tif symbol == key.UP:\n\t\t\tgame.key_up = True\n\t\tif symbol == key.SPACE:\n\t\t\tgame.key_space = True\n\n@window.event\ndef on_key_release(symbol, modifiers):\n\tif game.state == 'PLAYING':\n\t\tif game.exploded:\n\t\t\treturn\n\n\t\tif symbol == key.LEFT:\n\t\t\tgame.key_left = False\n\t\tif symbol == key.RIGHT:\n\t\t\tgame.key_right = False\n\t\tif symbol == key.UP:\n\t\t\tgame.key_up = False\n\t\tif symbol == key.SPACE:\n\t\t\tgame.key_space = False\n\n@window.event\ndef on_draw():\n\twindow.clear()\n\n\tif game.state == 'MENU':\n\t\tfor a in game.asteroids:\n\t\t\ta.asteroid.update(a.xpos, a.ypos, scale=a.size, rotation=a.rot)\n\n\t\tgame.batch.draw()\n\n\tif game.state == 'PLAYING':\n\t\tfor a in game.asteroids:\n\t\t\ta.asteroid.update(a.xpos, a.ypos, scale=a.size, rotation=a.rot)\n\n\t\tgame.player.update(x=game.p_xpos, y=game.p_ypos, rotation=game.p_rot)\n\n\t\tfor b in game.bullets:\n\t\t\tb.bullet.update(b.xpos, b.ypos)\n\n\t\tif game.key_up:\n\t\t\tgame.flame.update(x=game.p_xpos, y=game.p_ypos,\n\t\t\t\t rotation=game.p_rot)\n\t\t\tgame.flame.draw()\n\n\t\tgame.batch.draw()\n\n\tif game.state == 'DEAD':\n\t\tgame.batch.draw()\n\nif __name__ == \"__main__\":\n\tpyglet.clock.schedule_interval(game.update, 1 / 120.0)\n\n\t# Tell pyglet to do its 
thing\n\tpyglet.app.run()","repo_name":"spencerw/asteroids_pyglet","sub_path":"asteroids.py","file_name":"asteroids.py","file_ext":"py","file_size_in_byte":14210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12405722749","text":"import sqlite3\n\n\ndef buscar_dato_pais(pais):\n conn = sqlite3.connect('../db.sqlite3')\n c = conn.cursor()\n c.execute(\"SELECT idpais from basedatos_pais WHERE nombrep =\"+str(pais))\n rows = c.fetchall()\n for row in rows:\n print(row[0])\n\n conn.commit()\n conn.close()\n\ndef lista_pais():\n conn = sqlite3.connect('../db.sqlite3')\n c = conn.cursor()\n c.execute(\"SELECT nombrep from basedatos_pais\")\n rows = c.fetchall()\n conn.commit()\n conn.close()\n return rows\n\n\n\n","repo_name":"marceloibarran/conmanzanas","sub_path":"Scrap/consulta.py","file_name":"consulta.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21921737630","text":"\"\"\"\nSalt Fetching Function for Password Hashing\n- Query the salt in DB with EmailId\n- Pass the salt to Client\n\nAuthor: Mingfei Yang (Mingfei.Yang.1@asu.edu)\n\"\"\"\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\n# Global Table Name\nsalt_table = 'Salts'\n\n\ndef salt_fetcher(event, context):\n \"\"\"\n\n :param event: Event data passed by AWS Lambda, it is usually of the Python dict type.\n (It can also be list, str, int, float, or NoneType type)\n :param context: Runtime information provided by AWS Lambda, it is of the LambdaContext type.\n :return:\n \"\"\"\n # EmailId\n user_email = event['EmailId'].lower()\n\n # Query corresponding salt to DB\n db_client = boto3.client('dynamodb')\n\n # Try - Except - Else Block\n try:\n # DB Salt Query\n response = db_client.query(TableName=salt_table,\n KeyConditionExpression=\"EmailId = :id\",\n ExpressionAttributeValues={':id': {'S': user_email}})\n except ClientError:\n raise Exception(\"Bad Request: Query Failed.\")\n else:\n # If Items retrieved successfully\n if 'Items' in response.keys() and len(response['Items']) != 0:\n # Fetch the unique salt\n item = response['Items'][0]\n\n return {\n 'Salt': item['Salt']['B'].decode()\n }\n # End of Try - Except - Else\n\n raise Exception(\"Bad Request: Query Failed.\")\n","repo_name":"vedant511/DrinkRank","sub_path":"Back-end/PasswordHashing/SaltFetcher.py","file_name":"SaltFetcher.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42362412695","text":"import Customer\n\n\nclass Order():\n def __init__(self):\n self.orders = []\n self.exit = False\n self.in_stock = [\"Beer\", \"Wine\", \"Juice\"]\n\n def get_order(self):\n # manually inserting Drink objects into self.orders for smoother testing\n cust = Customer.Customer(\"Christer\", \"Cash\")\n self.orders.append(cust.customer_one_order(\"Beer\", \"Salikatt\", 2))\n self.orders.append(cust.customer_one_order(\"Wine\", \"Radicon\", 2))\n self.orders.append(cust.customer_one_order(\"Juice\", \"Apple\", 3))\n\n return self.orders\n\n def get_order_from_users(self):\n while self.exit == False:\n for i in self.in_stock:\n print(\"In stock: %s\" % i)\n beverage = input(\"What type of beverage do you want? \")\n beverageName = input(\"Name of beverage: \")\n amount = int(input(\"How many? 
\"))\n\n self.orders.append(Customer.Customer(\"Christer\", \"Cash\").customer_one_order(\n beverage, beverageName, amount))\n answer = input(\"Do you want to order something else y/n \")\n if answer == \"n\":\n self.exit = True\n return self.orders\n","repo_name":"christeriordan/Project","sub_path":"Task1/Orders.py","file_name":"Orders.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23490037217","text":"import random\nfrom django.contrib.auth import get_user_model, authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\n# from django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\n\n# from django.views import generic as views\n\nUserModel = get_user_model()\n\n\n# Create your views here.\n\n\n@login_required\ndef index(request):\n user = random.randint(1, 1000)\n\n # good practice\n # UserModel.objects.create_user(\n # username=f'admin{user}',\n # email=f'admin{user}@example.com',\n # password='1234',\n # )\n # bad practice\n # UserModel.objects.create(\n # username=f'admin{user}',\n # email=f'admin{user}@example.com',\n # password='1234',\n # )\n some_object = UserModel.objects.get(username=f'admin63')\n # some_object = User.objects.get(username='admin')\n context = {\n \"user\": some_object,\n 'permission': request.user.has_perm('web.view_user')\n }\n return render(request, 'index.html', context)\n\n\ndef login_user(request):\n user = authenticate(\n username='admin327',\n password='1234',\n )\n login(request, user)\n print(f\"test result -->{user}\")\n\n return redirect('index')\n\n\ndef logout_user(request):\n logout(request)\n return redirect('index')\n","repo_name":"qceka88/Python-Web-Basics-and-FrameWork","sub_path":"Python-Web-Framework/04 Authentication and Authorization/autetications_authorisation/autetications_authorisation/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10047655212","text":"import logging\nimport queue\nimport threading\nfrom typing import Dict, List, Tuple, Callable, Optional, Union # For self-documenting typing\nimport uuid\nimport os\nimport asyncio\n\nfrom .tracker import TrackingPoller, TrackerBase\nfrom .job import JobStatus, Job, ExternalJob\nfrom ..exceptions import JobNotFoundException\nfrom ..notifications.notifiers import Notifier\n\n# Do not expose anything by default (internal module)\n__all__ = [] # type: List[str]\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass QueueProcessor:\n \"\"\"\n Process items from queue in a new thread. Usage:\n 1) queue_processor.start(queue_=..., process_item=...)\n 2) queue_processor.schedule_stop()\n 3) queue_processor.wait_stop()\n The two shutdown steps ensure that the caller can cancel the currently running task between them,\n ensuring that processor exits without processing new jobs.\n \"\"\"\n def __init__(self):\n self._stop_event = threading.Event()\n self._queue = None\n self._thread = None\n\n def start(self, queue_, process_item):\n \"\"\"\n Read and handle tasks from queue `queue_` until (1) queue item is None or (2) stop_event is set. Note that\n the currently running job is not forced to cancel: that should be done from another thread, letting queue reader\n to check loop condition. 
`QueueProcessor` is NOT safe for reuse for processing other queues.\n :param queue_: Synchronized queue\n :param process_item: Callback called with queue item as argument\n :return:\n \"\"\"\n self._queue = queue_\n self._thread = threading.Thread(target=self.__process, args=(process_item,))\n self._stop_event.clear()\n self._thread.start()\n\n def __process(self, process_item):\n if not self.is_running():\n raise RuntimeError(\"QueueProcessor must be started first.\")\n while not self._stop_event.is_set():\n item = self._queue.get(block=True)\n if item is None or self._stop_event.is_set():\n break\n process_item(item)\n self._queue.task_done()\n\n def schedule_stop(self):\n if not self.is_running():\n return\n self._stop_event.set() # Signal exit to worker thread, required as \"None\" may not be next task\n self._queue.put(None, block=False) # Signal exit if thread is blocking\n\n def is_running(self):\n return self._thread is not None and self._thread.is_alive()\n\n def wait_stop(self):\n if self.is_running():\n self._thread.join()\n self._thread = None\n self._queue = None\n\n\nclass Scheduler:\n def __init__(self, queue_processor: QueueProcessor, notifier: Notifier = None,\n event_loop: Optional[asyncio.AbstractEventLoop] = None):\n self._queue_processor = queue_processor\n self.submitted_jobs = dict() # type: Dict[uuid.UUID, Job]\n self.external_jobs = dict() # type: Dict[uuid.UUID, ExternalJob]\n self._job_queue = queue.Queue() # type: queue.Queue\n self._running_job = None # type: Optional[Job]\n self._job_poller = TrackingPoller(self.__query_and_report)\n self._event_loop = event_loop or asyncio.get_event_loop() # Save the event loop for out-of-thread operations\n self._notifier = notifier # type: Optional[Notifier]\n self.active_external_job_id = None # type: Optional[uuid.UUID] # TODO Allow one per process ID\n self.external_job_polling_tasks = dict() # type: Dict[uuid.UUID, asyncio.Task]\n\n # Properties and Python magic\n\n @property\n def jobs(self): # Needed to access internal list of jobs as object parameters are unexposable, only methods\n return list(self.submitted_jobs.values()) + list(self.external_jobs.values())\n\n @property\n def is_running(self):\n return self._queue_processor.is_running()\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop()\n\n # Job handling methods\n def _handle_job(self, job: Job) -> None:\n LOGGER.debug(\"Handling job: %s\", job)\n if job.status.stale:\n return\n self._running_job = job\n\n task = None # type: Optional[asyncio.Task]\n if job.poll_time:\n # Create and schedule a task from the Polling job, so we can cancel it without killing the event loop\n task = self._event_loop.create_task(self._job_poller.poll(job.id, job.poll_time))\n if self._notifier:\n self._notifier.notify_job_start(job)\n try:\n job.launch_and_wait()\n except Exception: # pylint:disable=broad-except\n LOGGER.exception(\"Running job failed\")\n finally:\n if task:\n task.cancel()\n\n if self._notifier:\n self._notifier.notify_job_end(job)\n self._running_job = None\n LOGGER.debug(\"Finished handling job: %s\", job)\n\n def submit_job(self, job: Job):\n job.status = JobStatus.QUEUED\n self.submitted_jobs[job.id] = job\n self._job_queue.put(job) # TODO Blocks if queue full\n LOGGER.debug(\"Job submitted: %s\", job)\n\n def stop_job(self, job_id: uuid.UUID):\n if job_id not in self.submitted_jobs:\n LOGGER.debug(\"Ignoring stopping unknown job with ID: %s\", str(job_id))\n return\n 
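        # Editor's note (added): cancellation is cooperative. stop_job() only
        # cancels the Job object; the QueueProcessor worker thread notices the
        # stale/cancelled status on its next iteration (_handle_job returns
        # early for stale jobs), and a full shutdown uses the two-phase
        # schedule_stop()/wait_stop() sequence documented above so that no new
        # job starts after stop is requested.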
self.submitted_jobs[job_id].cancel()\n\n def register_external_job(self, job_id: uuid.UUID):\n self.active_external_job_id = job_id\n job = self.external_jobs[job_id] # type: ExternalJob\n LOGGER.debug(\"Handling job: %s\", job)\n\n if job.poll_time:\n task = self._event_loop.create_task(self._job_poller.poll(job.id, job.poll_time))\n self.external_job_polling_tasks[job_id] = task\n if self._notifier:\n self._notifier.notify_job_start(job)\n\n def unregister_external_job(self, job_id: uuid.UUID):\n job = self.external_jobs[job_id] # type: ExternalJob\n self.active_external_job_id = None\n if self._notifier:\n self._notifier.notify_job_end(job)\n task = self.external_job_polling_tasks.pop(job.id, None) # type: Optional[asyncio.Task]\n if task is not None:\n task.cancel()\n\n def __get_job_by_pid(self, pid) -> uuid.UUID:\n jobs = [job for job in self.jobs if job.pid == pid]\n if not jobs:\n raise JobNotFoundException(job_id=str(pid))\n if len(jobs) == 1:\n return jobs[0].id\n # Check if one of them is active\n active_jobs = [job for job in jobs if self.active_external_job_id == job.id]\n if not active_jobs:\n return jobs[0].id\n return active_jobs[0].id\n\n def add_condition(self, pid: int, *vals: str, condition: Callable[[float], bool], only_relevant: bool):\n \"\"\"Adds a new condition for a job that matches the given process id.\n :param pid: process id\n :param vals: list of scalar names (strings)\n :param condition: a callable that accepts as many values as vals, and returns a boolean whether a condition has\n been met\n :param only_relevant: a boolean, whether or not only the values relevant to the condition should be plotted when\n this condition is met\n \"\"\"\n job_id = self.__get_job_by_pid(pid)\n job = self.get_job_by_id(job_id=job_id)\n job.add_condition(*vals, condition=condition, only_relevant=only_relevant)\n\n def report_scalar(self, pid, name, val):\n # Find the right job id\n job_id = self.__get_job_by_pid(pid)\n job = self.get_job_by_id(job_id)\n condition = job.add_scalar_to_history(scalar_name=name, scalar_value=val)\n if condition and self._notifier:\n # TODO - we can add the condition that triggered the notification...\n names = condition.names if condition.only_relevant else list()\n vals, imgpath = self.query_scalars(*names, job_id=job.id, latest_only=False, plot=True)\n self._notifier.notify(job, imgpath, n_iterations=-1)\n if imgpath is not None:\n os.remove(imgpath)\n\n def get_job_by_id(self, job_id: uuid.UUID):\n job = self.submitted_jobs.get(job_id) or self.external_jobs.get(job_id)\n if not job:\n raise JobNotFoundException(job_id=str(job_id))\n return job\n\n def query_scalars(self, *names: Tuple[str, ...], job_id, latest_only: bool = True, plot: bool = False):\n job = self.get_job_by_id(job_id=job_id)\n return job.get_updates(*names, plot=plot, latest=latest_only)\n\n def __query_and_report(self, job_id: uuid.UUID):\n if self._notifier:\n job = self.get_job_by_id(job_id)\n # Get updates; TODO - vals should be reported once we update schema...\n vals, imgpath = self.query_scalars(job_id=job_id, latest_only=True, plot=True)\n if vals: # Only send updates if there exists any updates\n self._notifier.notify(job, imgpath, n_iterations=-1)\n if imgpath is not None:\n os.remove(imgpath)\n\n # Scheduler service methods\n\n def start(self):\n if not self._queue_processor.is_running():\n LOGGER.debug(\"Start queue processor\")\n self._queue_processor.start(queue_=self._job_queue, process_item=self._handle_job)\n LOGGER.debug(\"Queue processor started\")\n\n def 
stop(self):\n        # self._job_poller.stop()\n        self._queue_processor.schedule_stop()\n        if self._running_job is not None:\n            # TODO Add an option to not cancel the currently running job?\n            self._running_job.cancel()\n        if self._queue_processor.is_running():\n            # Wait for the thread to finish\n            self._queue_processor.wait_stop()\n","repo_name":"meeshkan/meeshkan-client","sub_path":"meeshkan/core/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":9841,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"29821603111","text":"import pytest\nimport os\nfrom utils import aws_s3\nimport boto3\n\n\nclass Tests:\n    @pytest.fixture\n    def example_fixture(self):\n        '''\n        An example of a pytest fixture - a function that can be used for setup and teardown before and after test functions are run.\n        '''\n\n        # place any setup you want to do before any test function that uses this fixture is run\n\n        yield # at the yield point, the test function will run and do its business\n\n        # place any teardown you want to do after any test function that uses this fixture has completed\n\n    #\n    # Test functions\n    #\n\n    def test_sanity_check(self, example_fixture):\n        \"\"\"\n        Test debugging... making sure that we can run a simple test that always passes.\n        Note the use of the example_fixture in the parameter list - any setup and teardown in that fixture will be run before and after this test function executes\n        From the main project directory, run the `python3 -m pytest` command to run all tests.\n        \"\"\"\n        expected = True # the value we expect to be present\n        actual = True # the value we see in reality\n        assert actual == expected, \"Expected True to be equal to True!\"\n\n    def test_upload_file(self, example_fixture):\n        \"\"\"\n        Test the upload_file function from the aws_s3 module\n        \"\"\"\n        expected = \"s3://software-eng-project-4/sample_audio.mp3\"\n        file_path = os.path.join(os.getcwd(), \"recordings\", \"sample_audio.mp3\")\n        actual = aws_s3.upload_file(file_path, 'software-eng-project-4')\n        assert actual == expected, f\"Expected {actual} to be equal to s3://software-eng-project-4/sample_audio.mp3!\"\n\n    def test_wrong_file(self, example_fixture):\n        \"\"\"\n        Test that the upload_file function throws an exception when the file does not exist\n        \"\"\"\n        try:\n            with pytest.raises(FileNotFoundError):\n                aws_s3.upload_file('not_a_file.mp5', 'software-eng-project-4')\n        except Exception as e:\n            assert False, \"Expected FileNotFoundError to be thrown!\"\n\n    def test_file_exists_in_bucket(self, example_fixture):\n        \"\"\"\n        Test whether the file exists in the bucket after the upload_file function runs\n        \"\"\"\n        s3 = boto3.resource('s3')\n        bucket = s3.Bucket('software-eng-project-4')\n        file_path = os.path.join(os.getcwd(), \"recordings\", \"sample_audio.mp3\")\n        file_name = os.path.basename(file_path)\n        actual = aws_s3.upload_file(file_path, 'software-eng-project-4')\n        objs = list(bucket.objects.filter(Prefix=file_name))\n        if any([w.key == file_name for w in objs]):\n            assert True, \"File exists in Bucket!\"\n        else:\n            assert False, \"File does not exist in Bucket!\"\n","repo_name":"software-students-fall2022/containerized-app-exercise-team3","sub_path":"machine-learning-client/tests/test_s3.py","file_name":"test_s3.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12204230812","text":"# -*- coding: utf-8 -*-\n\nfrom django.http import HttpResponse\nfrom userprofile.models import 
Profile\nfrom django.contrib.auth.models import User\nimport settings\nfrom uuid import uuid5, UUID\nfrom core.portal.render import render_to_portal, render_ajax\nfrom django.template import RequestContext, loader as template_loader\nfrom core.portal.exceptions import Http302\n\n@render_to_portal(template='profile/index.html')\ndef index(request):\n\tcontext = {}\n\tuser = request.user\n\tif not user.is_authenticated():\n\t\traise Http302('/login/')\n\ttry:\n\t\tcontext['profile'] = user.get_profile()\n\texcept:\n\t\tcontext['profile'] = None\n\tcontext['user'] = user\n\treturn context\n\n@render_ajax(type=\"html\", template='profile/view.html')\ndef update(request):\n\tcontext = {}\n\tuser = request.user\n\tif not user.is_authenticated():\n\t\traise Http302('/login/')\n\ttry:\n\t\tcontext['profile'] = user.get_profile()\n\texcept:\n\t\tcontext['profile'] = None\n\treturn context\n\n@render_ajax(type=\"html\")\ndef edit(request):\n\tfrom forms import ProfileForm\n\tcontext = {}\n\tuser = request.user\n\tif not user.is_authenticated():\n\t\traise Http302('/login/')\n\ttry:\n\t\tprofile = user.get_profile()\n\texcept:\n\t\tprofile = None\n\tif request.method == 'POST':\n\t\tform = ProfileForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tdata = form.cleaned_data\n\t\t\tuser.last_name = data['last_name']\n\t\t\tuser.first_name = data['first_name']\n\n\t\t\tdrop_email = user.email != data['email']\n\n\t\t\tuser.email = data['email']\n\t\t\tuser.save()\n\t\t\tif data['middle_name'] or data['subscription']:\n\t\t\t\tif profile:\n\t\t\t\t\tprofile.middle_name = data['middle_name']\n\t\t\t\t\tprofile.subscription = data['subscription']\n\t\t\t\telse:\n\t\t\t\t\tprofile = Profile(user=user,\n\t\t\t\t\t\t\tmiddle_name=data['middle_name'],\n\t\t\t\t\t\t\tsubscription = data['subscription'])\n\n\t\t\t\tif drop_email:\n\t\t\t\t\tprofile.is_email_confirmed = False\n\n\t\t\t\tprofile.save()\n\t\t\treturn u'Данные успешно сохранены'\n\t\tcontext['form'] = form\n\telse:\n\t\tdata = {'last_name':user.last_name,\n\t\t\t\t'first_name': user.first_name,\n\t\t\t\t'email':user.email}\n\t\tif profile:\n\t\t\tdata['middle_name'] = profile.middle_name\n\t\t\tdata['subscription'] = profile.subscription\n\t\tcontext['form'] = ProfileForm(initial=data)\n\tcontext['profile'] = profile\n\treturn template_loader.get_template(\"profile/edit.html\").render(\n\t\t\tRequestContext(request, context))\n\n@render_ajax()\ndef send_email_confirm(request):\n\tfrom django.core.mail import send_mail\n\tfrom uuid import uuid5, UUID\n\timport settings\n\n\tif not request.user.is_authenticated():\n\t\traise Http302('/login/')\n\n\tcode = uuid5(UUID(settings.UUID_NAMESPACE_FOR_EMAIL_CONFIRM), str(request.user.email))\n\tsubj = u'[www.tsogu.ru] Подтверждение email'\n\tbody = u'Доброго времени суток.\\n\\nПользователь %s отправил запрос на подтверждение электронной почты. Для подтверждения электронной почты на сайте ТюмГНГУ пройдите по ссылке:\\nhttp://www.tsogu.ru/userprofile/email_confirm/%s/%s/\\nЕсли данное письмо попало к Вам по ошибке - удалите его.\\n\\n---\\nС уважением,\\nАдминистрация портала www.tsogu.ru,\\nwebmaster@tsogu.ru' % (request.user.username, request.user.id, code)\n\n\ttry:\n\t\tsend_mail(subj, body, settings.DEFAULT_FROM_EMAIL, [request.user.email], fail_silently=False)\n\t\treturn u'Пароль подтверждения отправлен на электронную почту'\n\texcept:\n\t\treturn u'Произошел сбой при отправке почты. 
Повторите попытку позже или сообщите вебмастеру о сбое.'\n\ndef student_status_confirm(request):\n\timport urllib\n\timport simplejson as json\n\tfrom hashlib import md5\n\n\timport settings\n\n\tuser = request.user\n\tif not user.is_authenticated():\n\t\traise Http302('/login/')\n\ttry:\n\t\tuserprofile = user.get_profile()\n\texcept:\n\t\tuserprofile = Profile(user=user)\n\tlogin = request.POST.get('login', '')\n\tpassword = md5(request.POST.get('password', '').encode('utf-8')).hexdigest()\n\tparams = \"login=%s&password=%s\"%(login.encode('utf-8'), password)\n\tresult = urllib.urlopen(settings.EDUCON_URL, params).read() or u\"Произошла досадная ошибка, попробуйте позже.\"\n\ttry:\n\t\tuserprofile.student_id = int(result)\n\t\tuserprofile.save()\n\t\treturn HttpResponse(u\"Статус студента подтверждён, обновите профиль.\", mimetype=\"text/plain\")\n\texcept:\n\t\treturn HttpResponse(content=result, mimetype=\"text/plain\", status=500)\n\ndef get_student_info(student_id):\n\timport urllib2, base64\n\timport simplejson as json\n\n\timport settings\n\n\tparams = \"field=group\"\n\trequest = urllib2.Request('%sstudents/student/%d/?%s'%(settings.API_TSOGU, student_id, params))\n\tbase64string = base64.encodestring('%s:%s' % (settings.API_TSOGU_USERNAME, settings.API_TSOGU_PASSWORD))[:-1]\n\tauthheader = \"Basic %s\" % base64string\n\trequest.add_header(\"Authorization\", authheader)\n\tdata = json.loads(urllib2.urlopen(request).read())\n\treturn data\n\n\ndef email_confirm(request, user_id, code):\n\tuser = User.objects.get(id=user_id)\n\ttrue_code = uuid5(UUID(settings.UUID_NAMESPACE_FOR_EMAIL_CONFIRM), str(user.email))\n\tif str(true_code) == str(code):\n\t\ttry:\n\t\t\tprofile = user.get_profile()\n\t\texcept:\n\t\t\tprofile = Profile(user=user)\n\t\tprofile.is_email_confirmed = True\n\t\tprofile.save()\n\t\treturn HttpResponse(u'Ваш email подтвержден')\n\telse:\n\t\treturn HttpResponse(u'Код подтверждения не верен')\n","repo_name":"vden/TsoguNG","sub_path":"userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5925141351","text":"# 주식가격\n\ndef solution(prices):\n plen = len(prices)\n answer = [0] * plen\n for i in range(plen):\n for j in range(i + 1, plen):\n answer[i] += 1\n if prices[j] < prices[i]:\n break\n return answer\n","repo_name":"KimHS0915/programmers-learn-challenges","sub_path":"lv2/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73251392264","text":"from transformers import BertTokenizerFast, EncoderDecoderModel\nimport torch\nfrom bs4 import BeautifulSoup\nimport ssl\nimport urllib.request, urllib.parse, urllib.error\nimport requests\nimport sqlite3\nimport timeit\n\n_start_time=timeit.default_timer()\n#print(_start_time)\ndef end_timer(suffix = None):\n evalTime = timeit.default_timer() - _start_time\n #suffix = \"\" if suffix is None else \"_\" + suffix\n print(\"time: \",evalTime)\n text = (str(evalTime))\n with open('runtime.txt', 'w') as f:\n f.write(text)\n\nconn = sqlite3.connect('sumapp.sqlite')\ncur = conn.cursor()\n\n#Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nstarturl = input('Enter web url or enter: ')\nif ( len(starturl) < 1 ) : starturl = 
'https://en.wikipedia.org/wiki/Portal:Speculative_fiction'\nif ( starturl.endswith('/') ) : starturl = starturl[:-1]\n\nhtml = urllib.request.urlopen(starturl, context=ctx).read()\n\n#summarizer = pipeline(\"summarization\")\n#URL = \"http://galactanet.com/oneoff/theegg_mod.html\"\n#URL = \"https://en.wikipedia.org/wiki/Kawaii\"\n\nr = requests.get(starturl)\nsoup = BeautifulSoup(r.text, 'html.parser')\nresults = soup.find_all(['h1', 'p'])\ntext = [result.text for result in results]\nARTICLE = ' '.join(text)\n#print(ARTICLE)\n\nweb = starturl\nif ( starturl.endswith('.htm') or starturl.endswith('.html') ) :\n    pos = starturl.rfind('/')\n    web = starturl[:pos]\n\nif ( len(web) > 1 ) :\n    cur.execute('INSERT OR IGNORE INTO Websites (url) VALUES ( ? )', ( web, ) )\n    cur.execute('INSERT OR IGNORE INTO Pages (url, html, new_rank) VALUES ( ?, ?, 1.0 )', ( starturl, html) )\n    conn.commit()\n\ntry:\n    document = urllib.request.urlopen(starturl, context=ctx)\n\n    html = document.read()\n    if document.getcode() != 200 :\n        print(\"Error on page: \",document.getcode())\n        cur.execute('UPDATE Pages SET error=? WHERE url=?', (document.getcode(), web) )\n\n    if 'text/html' != document.info().get_content_type() :\n        print(\"Ignore non text/html page\")\n        cur.execute('DELETE FROM Pages WHERE url=?', ( web, ) )\n        conn.commit()\n    \n\n    print('('+str(len(html))+')', end=' ')\n\n    \nexcept KeyboardInterrupt:\n    print('')\n    print('Program interrupted by user...')\n    \nexcept:\n    #print(\"Unable to retrieve or parse page\")\n    cur.execute('UPDATE Pages SET error=-1 WHERE url=?', (web, ) )\n    conn.commit()\n\nmax_chunk = 500\n\n# mark sentence boundaries with an <eos> token so the text can be split into chunks\nARTICLE = ARTICLE.replace('.', '.<eos>')\nARTICLE = ARTICLE.replace('?', '?<eos>')\nARTICLE = ARTICLE.replace('!', '!<eos>')\n\nsentences = ARTICLE.split('<eos>')\ncurrent_chunk = 0 \nchunks = []\nfor sentence in sentences:\n    if len(chunks) == current_chunk + 1: \n        if len(chunks[current_chunk]) + len(sentence.split(' ')) <= max_chunk:\n            chunks[current_chunk].extend(sentence.split(' '))\n        else:\n            current_chunk += 1\n            chunks.append(sentence.split(' '))\n    else:\n        #print(current_chunk)\n        chunks.append(sentence.split(' '))\n\nfor chunk_id in range(len(chunks)):\n    chunks[chunk_id] = ' '.join(chunks[chunk_id])\n\nprint(\"Number of chunks:\", len(chunks), \", summarizing...\")\n\n#res = summarizer(chunks, max_length=120, min_length=30, do_sample=False)\n    \ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ntokenizer = BertTokenizerFast.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization')\nmodel = EncoderDecoderModel.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization').to(device)\n\ndef generate_summary(text):\n    # cut off at BERT max length 512\n    inputs = tokenizer([text], padding=\"max_length\", truncation=True, max_length=512, return_tensors=\"pt\")\n    input_ids = inputs.input_ids.to(device)\n    attention_mask = inputs.attention_mask.to(device)\n\n    output = model.generate(input_ids, attention_mask=attention_mask)\n    \n    return tokenizer.decode(output[0], skip_special_tokens=True)\n\n#fname = input('Enter file name: ')\n#if (len(fname) < 1): fname = 'romeo.txt' \n#txt = [\n    #(open(fname)).read()\n#  str(ARTICLE)\n\n#]\nsum = generate_summary(str(ARTICLE))\nprint(sum)\n\n# Retrieve all of the title tags\ntxt = list()\ntags_second = soup('title')\nfor title in tags_second:\n    txt.append(title.get_text())\n    #print(title.get_text())\n\ncur.execute('INSERT OR IGNORE INTO urlIndexes (url, url_title, url_full_txt, url_sum) VALUES ( ?, ?, ?, ?)',\n            ( (starturl, 
title.get_text().strip() ,str(ARTICLE).strip() , str(sum)) ) )\n\nconn.commit()\ncur.close()\n\nend_timer()\n","repo_name":"LehnerL/sumApp","sub_path":"sumApp/article_sum.py","file_name":"article_sum.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42875188484","text":"'''\nGiven an array of positive numbers and a positive number ‘k’, find the maximum sum of any contiguous subarray of size ‘k’.\n\nExample 1:\n\nInput: [2, 1, 5, 1, 3, 2], k=3 \nOutput: 9\nExplanation: Subarray with maximum sum is [5, 1, 3].\nExample 2:\n\nInput: [2, 3, 4, 1, 5], k=2 \nOutput: 7\nExplanation: Subarray with maximum sum is [3, 4].\n'''\n\n\ndef max_sub_array(nums, k):\n\n windowSum = 0\n windowStart = 0\n max_sum = 0\n\n for windowEnd in range(0, len(nums)):\n\n windowSum += nums[windowEnd]\n\n if windowEnd >= k-1:\n max_sum = max(max_sum, windowSum)\n windowSum -= nums[windowStart]\n windowStart += 1\n\n return max_sum\n\n\nprint(max_sub_array([5, 1, 3], 3))\n","repo_name":"prashantchanne12/Leetcode","sub_path":"maximum sum sub array of size k.py","file_name":"maximum sum sub array of size k.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"24692843724","text":"\"\"\"\n@Coding: uft-8\n@Time: 2019-08-25 18:12\n@Author: Ryne Chen\n@File: CoinCombination.py \n@Python Version: 3.6\n\"\"\"\n\nimport numpy as np\n\n\ndef combination(coins, n):\n m = len(coins)\n dp = np.zeros((m + 1, n + 1))\n\n for i in range(m + 1):\n dp[i][0] = 1\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n for k in range(j // coins[i - 1] + 1):\n dp[i][j] += dp[i - 1][j - k * coins[i - 1]]\n\n return int(dp[m][n])\n\n\ndef main():\n coins = [1, 2, 5, 10]\n n = 5\n\n comb = combination(coins, n)\n print(comb)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pchen12567/Leetcode","sub_path":"Mock/瓜子2019秋/硬币组合/CoinCombination.py","file_name":"CoinCombination.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72016148104","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 25 18:18:41 2021\r\n\r\n@author: mdt20\r\n\"\"\"\r\n\r\n#UFO scrape\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef get_links():\r\n \"\"\"gather links to scrape\"\"\"\r\n \r\n lst_of_links = []\r\n url_for_links = \"http://www.nuforc.org/webreports/ndxevent.html\"\r\n fhand = requests.get(url_for_links)\r\n soup = BeautifulSoup(fhand.content, 'html.parser') \r\n\r\n for link in soup.findAll('a'):\r\n urls = link.get('href')\r\n lst_of_links.append(urls)\r\n \r\n #don't return the first value, its not a useful link\r\n return lst_of_links[1:] \r\n \r\ndef build_urls_list(lst_of_links):\r\n \"\"\"build list of complete urls for scraping\"\"\"\r\n \r\n url_lst = []\r\n base = \"http://www.nuforc.org/webreports/\"\r\n \r\n for partial in lst_of_links:\r\n full_url = base + partial\r\n url_lst.append(full_url)\r\n \r\n with open(\"ufo_full_urls.txt\", \"w\") as write_file:\r\n for link in url_lst:\r\n write_file.write(link + \"\\n\")\r\n\r\n\r\ndef main():\r\n gather = get_links()\r\n build_urls_list(gather)\r\n 
\r\nmain()\r\n\r\n","repo_name":"blunderfist/py-ufo-scrape","sub_path":"ufo_scrape_prep_links_list.py","file_name":"ufo_scrape_prep_links_list.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5594354914","text":"from __future__ import division\n\nimport os\n\nimport torch\nfrom PIL import Image\nfrom tensorboard_logger import configure, log_value\nfrom torch.autograd import Variable\nfrom torch.utils import data\nfrom torchvision.transforms import Compose, Normalize, ToTensor\nfrom tqdm import tqdm\n\nfrom argmyparse import get_src_only_training_parser, add_additional_params_to_args, fix_img_shape_args\nfrom datasets import get_dataset\nfrom loss import CrossEntropyLoss2d\nfrom models.model_util import get_optimizer, get_full_model # check_training\nfrom transform import ReLabel, ToLabel, Scale, RandomSizedCrop, RandomHorizontalFlip, RandomRotation\nfrom util import check_if_done, save_checkpoint, adjust_learning_rate, emphasize_str, get_class_weight_from_file\nfrom util import mkdir_if_not_exist, save_dic_to_json\n\nparser = get_src_only_training_parser()\nargs = parser.parse_args()\nargs = add_additional_params_to_args(args)\nargs = fix_img_shape_args(args)\n\nif args.resume:\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if not os.path.exists(args.resume):\n raise OSError(\"%s does not exist!\" % args.resume)\n\n indir, infn = os.path.split(args.resume)\n\n old_savename = args.savename\n args.savename = infn.split(\"-\")[0]\n print (\"savename is %s (original savename %s was overwritten)\" % (args.savename, old_savename))\n\n checkpoint = torch.load(args.resume)\n args = checkpoint['args'] # Load args!\n\n model = get_full_model(net=args.net, res=args.res, n_class=args.n_class, input_ch=args.input_ch)\n optimizer = get_optimizer(model.parameters(), opt=args.opt, lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}'\".format(args.resume))\n\n json_fn = os.path.join(args.outdir, \"param_%s_resume.json\" % args.savename)\n check_if_done(json_fn)\n args.machine = os.uname()[1]\n save_dic_to_json(args.__dict__, json_fn)\n\nelse:\n model = get_full_model(net=args.net, res=args.res, n_class=args.n_class, input_ch=args.input_ch)\n optimizer = get_optimizer(model.parameters(), opt=args.opt, lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n args.outdir = os.path.join(args.base_outdir, \"%s-%s_only_%sch\" % (args.src_dataset, args.split, args.input_ch))\n args.pth_dir = os.path.join(args.outdir, \"pth\")\n\n if args.net in [\"fcn\", \"psp\"]:\n model_name = \"%s-%s-res%s\" % (args.savename, args.net, args.res)\n else:\n model_name = \"%s-%s\" % (args.savename, args.net)\n\n args.tflog_dir = os.path.join(args.outdir, \"tflog\", model_name)\n mkdir_if_not_exist(args.pth_dir)\n mkdir_if_not_exist(args.tflog_dir)\n\n json_fn = os.path.join(args.outdir, \"param-%s.json\" % model_name)\n check_if_done(json_fn)\n args.machine = os.uname()[1]\n save_dic_to_json(args.__dict__, json_fn)\n\ntrain_img_shape = tuple([int(x) for x in args.train_img_shape])\n\nimg_transform_list = [\n Scale(train_img_shape, Image.BILINEAR),\n ToTensor(),\n Normalize([.485, .456, .406], [.229, .224, .225])\n]\n\nif args.augment:\n aug_list = [\n RandomRotation(),\n # RandomVerticalFlip(), # non-realistic\n RandomHorizontalFlip(),\n 
RandomSizedCrop()\n ]\n img_transform_list = aug_list + img_transform_list\n\nimg_transform = Compose(img_transform_list)\n\nlabel_transform = Compose([\n Scale(train_img_shape, Image.NEAREST),\n ToLabel(),\n ReLabel(255, args.n_class - 1),\n])\n\nsrc_dataset = get_dataset(dataset_name=args.src_dataset, split=args.split, img_transform=img_transform,\n label_transform=label_transform, test=False, input_ch=args.input_ch)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}\ntrain_loader = torch.utils.data.DataLoader(src_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)\n\nweight = get_class_weight_from_file(n_class=args.n_class, weight_filename=args.loss_weights_file,\n add_bg_loss=args.add_bg_loss)\n\nif torch.cuda.is_available():\n model.cuda()\n weight = weight.cuda()\n\ncriterion = CrossEntropyLoss2d(weight)\n\nconfigure(args.tflog_dir, flush_secs=5)\n\nmodel.train()\n\nfor epoch in range(args.epochs):\n epoch_loss = 0\n for ind, (images, labels) in tqdm(enumerate(train_loader)):\n\n imgs = Variable(images)\n lbls = Variable(labels)\n if torch.cuda.is_available():\n imgs, lbls = imgs.cuda(), lbls.cuda()\n\n # update generator and classifiers by source samples\n optimizer.zero_grad()\n preds = model(imgs)\n if args.net == \"psp\":\n preds = preds[0]\n\n loss = criterion(preds, lbls)\n loss.backward()\n c_loss = loss.data[0]\n epoch_loss += c_loss\n\n optimizer.step()\n\n if ind % 100 == 0:\n print(\"iter [%d] CLoss: %.4f\" % (ind, c_loss))\n\n if ind > args.max_iter:\n break\n\n print(\"Epoch [%d] Loss: %.4f\" % (epoch + 1, epoch_loss))\n log_value('loss', epoch_loss, epoch)\n log_value('lr', args.lr, epoch)\n\n if args.adjust_lr:\n args.lr = adjust_learning_rate(optimizer, args.lr, args.weight_decay, epoch, args.epochs)\n\n if args.net == \"fcn\" or args.net == \"psp\":\n checkpoint_fn = os.path.join(args.pth_dir, \"%s-%s-res%s-%s.pth.tar\" % (\n args.savename, args.net, args.res, epoch + 1))\n else:\n checkpoint_fn = os.path.join(args.pth_dir, \"%s-%s-%s.pth.tar\" % (\n args.savename, args.net, epoch + 1))\n\n args.start_epoch = epoch + 1\n save_dic = {\n 'args': args,\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()\n }\n\n save_checkpoint(save_dic, is_best=False, filename=checkpoint_fn)\n","repo_name":"mil-tokyo/MCD_DA","sub_path":"segmentation/source_trainer.py","file_name":"source_trainer.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","stars":528,"dataset":"github-code","pt":"81"} +{"seq_id":"39262031598","text":"'''\n Chef's computer has N GB of free space. \n He wants to save X files, each of size 1 GB and Y files, each of size 2 GB on his computer. 
\n    Will he be able to do so?\n    Chef can save all the files on his computer only if the total size of the files is less than or equal to the space available on his computer.\n'''\n\n# Running Loop for Test Cases\nfor t in range(int(input())):\n    \n    # Taking Input For N = Free Space In Chef's Computer; X = Files Of 1 GB That Chef Wants To Save; Y = Files Of 2 GB That Chef Wants To Save\n    N,X,Y = map(int, input().split())\n    \n    # If Total Space Occupied By Files <= Total Free Space Available; Then Chef Can Save All The Files\n    EnoughSpace = \"YES\" if (X+(Y*2)) <= N else \"NO\"\n    \n    # Printing Result\n    print(EnoughSpace)\n","repo_name":"masquerade28/Codechef-Solution-Python-300-Series","sub_path":"Enough Space.py","file_name":"Enough Space.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74368179465","text":"def array_quadruplet(arr, s):\n    if len(arr) < 4:\n        return []\n\n    arr = sorted(arr)\n    for i in range(len(arr) - 3):  # optimization accounting for # of pointers\n        for j in range(i + 1, len(arr) - 2):\n            x = j + 1\n            y = len(arr) - 1\n            remainder = s - arr[i] - arr[j]\n            while x < y:\n                # compare the values at the two pointers, not the indices themselves\n                if arr[x] + arr[y] > remainder:\n                    y -= 1\n                elif arr[x] + arr[y] < remainder:\n                    x += 1\n                else:\n                    return [arr[i], arr[j], arr[x], arr[y]]\n\n    return []\n\n\nprint(array_quadruplet([2, 7, 4, 0, 9, 5, 1, 3], 20))\n","repo_name":"Royal4224/LeetCode-Practice","sub_path":"array_quadruplet.py","file_name":"array_quadruplet.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3813843831","text":"import sys\nimport shelve\nimport os\ndef outSave(filename,ls):\n    s=shelve.open(filename,writeback=True)\n    if 'db' in s:\n        s['db']+=(' '.join(ls[0:3])+os.linesep+ls[3]+os.linesep)\n    else:\n        s['db']=(' '.join(ls[0:3])+os.linesep+ls[3]+os.linesep)\n\n    s.close()\ndef output(filename):\n    s=shelve.open(filename)\n    print(s['db'])\n    s.close()\n    \ndef operate(N1,op,N2):\n    N1=float(N1)\n    N2=float(N2)\n    s=['+','-','*','/','%','**']\n    if op==s[0]:\n        return N1+N2\n    if op==s[1]:\n        return N1-N2\n    if op==s[2]:\n        return N1*N2\n    if op==s[3]:\n        return N1/N2\n    if op==s[4]:\n        return N1%N2\n    if op==s[5]:\n        return N1**N2\n    print(N1,op,N2)\n    return 'error'\n\nif __name__=='__main__':\n    if sys.argv[1]=='print':\n        output('data.shl')\n    else:\n        re=operate(sys.argv[1],sys.argv[2],sys.argv[3])\n        ls=sys.argv[1:]\n        ls.append(str(re))\n        outSave('data.shl',ls)\n        print(re)\n\n\n\n\n\n","repo_name":"sdzr/python_for_study","sub_path":"chapter09/practice9_14.py","file_name":"practice9_14.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929257546","text":"from typing import List\nfrom bisect import bisect_right\n\n\nclass Solution:\n    def findContentChildren(self, g: List[int], s: List[int]) -> int:\n        g.sort()\n        s.sort()\n        res = 0\n\n        for i in s:\n            idx = bisect_right(g, i)\n            if idx > res:\n                res += 1\n\n        return res\n\n\nprint(Solution().findContentChildren([1,2], [1,2,3]))","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/2회차_new/B82_Assign Cookies2.py","file_name":"B82_Assign Cookies2.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20317233151","text":"#!/usr/bin/env python\n\n# Author: John Hawkins (jsh) [really@gmail.com]\nimport itertools\nimport logging\nimport colorcet as cc\nimport 
matplotlib.pyplot as plt\nimport numpy as np\nimport os.path\nimport pandas as pd\nimport re\nimport seaborn as sns\nimport scipy.stats as st\nimport sys\n\nfrom sklearn import decomposition\nfrom sklearn import preprocessing\n\nimport global_config as gcf\n\nimport IPython\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')\n\nnp.set_printoptions(precision=4, suppress=True)\n\nPREFIX = os.path.splitext(os.path.basename(__file__))[0]\n\n#############\n\ndiffdatafile = os.path.join(gcf.OUTPUT_DIR, 'lib234.diffdata.tsv')\ndiffdata = pd.read_csv(diffdatafile, sep='\\t', header=0)\nsolodatafile = os.path.join(gcf.OUTPUT_DIR, 'lib234.data.tsv')\nsolodata = pd.read_csv(solodatafile, sep='\\t', header=0)\n\n#############\n\ndef widen_groups(data):\n indexed = data.set_index(['variant', 'sample_s', 'sample_e'])\n return indexed.gamma.unstack(level=[1,2])\n\ndef impute_over_nans(data):\n imp = preprocessing.Imputer(strategy='median', axis=1)\n imp.fit(data)\n filled = imp.transform(data)\n return pd.DataFrame(filled, columns=data.columns, index=data.index)\n\nwidediffdata = widen_groups(diffdata)\ncleaned = impute_over_nans(widediffdata)\n\ntaggers = dict()\ntaggers['glob'] = lambda s, e: True\ntaggers['early'] = lambda s, e: (s[1] == '1' and e[1] == '1')\ntaggers['mid'] = lambda s, e: (s[1] == '2' and e[1] == '2')\ntaggers['late'] = lambda s, e: (s[1] == '3' and e[1] == '3')\ntaggers['none'] = lambda s, e: (s[0] == 'a' or s[2:] == 'd1')\ntaggers['low'] = lambda s, e: (s[0] != 'a' and s[2:] == 'd2')\ntaggers['high'] = lambda s, e: (s[0] != 'a' and s[2:] == 'd3')\n\nstarts = cleaned.columns.get_level_values(0)\nends = cleaned.columns.get_level_values(1)\npairs = list(zip(starts, ends))\n\ndef D_from_taggers(names):\n tags = [[taggers[t](s, e) for t in names] for (s, e) in pairs]\n return np.asarray(tags)\n\nD = dict()\nD['exp'] = D_from_taggers(['glob',\n 'early', 'mid', 'late',\n 'none', 'low', 'high'])\nD['dose'] = D_from_taggers(['low', 'high'])\nD['glob'] = D_from_taggers(['glob'])\nD['early'] = D_from_taggers(['early'])\nD['mid'] = D_from_taggers(['mid'])\nD['late'] = D_from_taggers(['late'])\nA = cleaned\n\ndef rebase(A, D):\n U_, s_, Vt_ = np.linalg.svd(D, full_matrices=True)\n rank_ = (~np.isclose(s_, 0)).sum()\n basis_ = U_[:, :rank_]\n return np.dot(A, np.dot(basis_, basis_.T))\n\nA_exp = rebase(A, D['exp'])\nA_exp_nodose = A_exp - rebase(A_exp, D['dose'])\n\naligned = dict()\nfor span in ['early', 'mid', 'late']:\n aligned[span] = rebase(A_exp_nodose, D[span])\n\nglob_projection = rebase(A_exp_nodose, D['glob'])\nfor name, residue in aligned.items():\n relevant = [taggers[name](s, e) for (s, e) in pairs]\n fig = plt.figure(figsize=(6,6))\n g = sns.jointplot(glob_projection[:, relevant][:, 0],\n residue[:, relevant][:, 0],\n s=2, linewidth=0.5, alpha=0.5)\n plt.suptitle(\n 'global vs. 
aligned of {name} span'.format(**vars()),\n fontsize=16)\n g.set_axis_labels('global', name + ' aligned')\n graphflat = os.path.join(gcf.OUTPUT_DIR, '.'.join([PREFIX, name, 'png']))\n plt.tight_layout()\n logging.info('Writing flat graph to {graphflat}'.format(**vars()))\n plt.savefig(graphflat)\n plt.close()\n\nfor xname, yname in [('early', 'mid'), ('early', 'late'), ('mid', 'late')]:\n xrelevant = [taggers[xname](s, e) for (s, e) in pairs]\n yrelevant = [taggers[yname](s, e) for (s, e) in pairs]\n xrep = aligned[xname][:, xrelevant][:, 0]\n yrep = aligned[yname][:, yrelevant][:, 0]\n fig = plt.figure(figsize=(6,6))\n g = sns.jointplot(xrep, yrep, s=2, linewidth=0.5, alpha=0.5)\n plt.suptitle(\n '{xname} aligned vs. {yname} aligned'.format(**vars()),\n fontsize=16)\n g.set_axis_labels(xname + ' aligned', yname + ' aligned')\n graphflat = os.path.join(gcf.OUTPUT_DIR,\n '.'.join([PREFIX, xname, yname, 'png']))\n plt.tight_layout()\n logging.info('Writing flat graph to {graphflat}'.format(**vars()))\n plt.savefig(graphflat)\n plt.close()\n","repo_name":"traeki/lowficrispri","sub_path":"docs/20171205_lib2_ind/code/20180321.explore_spans.sub.py","file_name":"20180321.explore_spans.sub.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10549549942","text":"from bs4 import BeautifulSoup\nimport requests\n\ndef categoryIKEA():\n site = \"https://www.ikea.com/fr/fr/\"\n category = []\n\n page_response = requests.get(site, headers={'User-Agent': 'Mozilla/5.0'})\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n cat = page_content.find(\"div\",{\"class\":\"departmentLinkBlock\"}).findAll(\"li\")\n\n for item in cat:\n urlCat = \"https://www.ikea.com\" + item.find(\"a\").get(\"href\")\n nameCat = item.find(\"a\").text.strip()\n\n category.append({\n 'name':nameCat,\n 'url':urlCat\n })\n\n return category\n\nprint(categoryIKEA())\n\ndef subcategoryIKEA():\n category = categoryIKEA()\n subcat = []\n\n for item in category[0:1]:\n page_response = requests.get(item[\"url\"], headers={'User-Agent': 'Mozilla/5.0'})\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n try:\n subcategory = page_content.findAll(\"div\",{\"class\":\"visualNavContainer\"})\n for elem in subcategory:\n subcatUrl = \"https://www.ikea.com\" + elem.find(\"a\",{\"class\":\"categoryName\"}).get(\"href\")\n subcatName = elem.find(\"a\",{\"class\":\"categoryName\"}).text\n\n subcat.append({\n \"url\":subcatUrl,\n \"name\":subcatName\n })\n except:\n continue\n\n return subcat\n\n#print(subcategoryIKEA())\n\n\ndef linkProduit():\n subcat = subcategoryIKEA()\n links = []\n\n for item in subcat[0:1]:\n page_response = requests.get(item[\"url\"], headers={'User-Agent': 'Mozilla/5.0'})\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n try:\n post = page_content.find('div',{'id':'productLists'}).findAll('a',{\"class\":\"productLink\"})\n for elem in post:\n url = \"https://www.ikea.com\" + elem.get(\"href\")\n\n links.append({\n 'url':url,\n 'cat':item[\"name\"]\n })\n except:\n continue\n\n return links\n\n#print(linkProduit())\n\n\ndef scrapIKEA(origin):\n site = \"https://www.ikea.com/fr/fr/\"\n products = linkProduit()\n produits = []\n\n for link in products[0:1]:\n page_response = requests.get(link[\"url\"], headers={'User-Agent': 'Mozilla/5.0'})\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n logo = \"\"\n logoS = \"\"\n\n try:\n lib = 
page_content.find('div',{\"id\":\"productInfoWrapper1\"}).find(\"h1\").text.strip()\n url = link[\"url\"]\n img = \"https://www.ikea.com\" + page_content.find('img',{\"id\":\"productImg\"}).get(\"src\")\n prix = page_content.find('div',{\"id\":\"prodPrice\"}).find(\"span\",{\"id\":\"price1\"}).text.strip()\n desc = page_content.find(\"div\",{\"id\":\"productInfo1\"})\n\n produits.append(\n {\n 'libProduct': lib,\n 'slug': '',\n 'descProduct': desc,\n 'priceProduct': prix,\n 'imgProduct': img,\n 'numSeller': '',\n 'src': site,\n 'urlProduct': url,\n 'logo': logo,\n 'logoS': logoS,\n 'origin': origin,\n \"country\": \"\",\n 'subcategory': link['cat'],\n })\n\n except:\n continue\n\n return produits\n\nprint(scrapIKEA(origin=0))","repo_name":"sysall/WebScrapping","sub_path":"Sites/International/IKEA.py","file_name":"IKEA.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72248269704","text":"import numpy as np\nimport scipy.stats as stats \nimport math\nimport mysklearn.myutils as myutils\nimport mysklearn.myevaluation as myevaluation\nfrom mysklearn.myclassifiers import MySimpleLinearRegressor, MyKNeighborsClassifier, MyNaiveBayesClassifier, MyDecisionTreeClassifier, MyRandomForestClassifier\n\ninterview_header = [\"level\", \"lang\", \"tweets\", \"phd\", \"interviewed_well\"]\ninterview_table = [\n [\"Senior\", \"Java\", \"no\", \"no\", \"False\"],\n [\"Senior\", \"Java\", \"no\", \"yes\", \"False\"],\n [\"Mid\", \"Python\", \"no\", \"no\", \"True\"],\n [\"Junior\", \"Python\", \"no\", \"no\", \"True\"],\n [\"Junior\", \"R\", \"yes\", \"no\", \"True\"],\n [\"Junior\", \"R\", \"yes\", \"yes\", \"False\"],\n [\"Mid\", \"R\", \"yes\", \"yes\", \"True\"],\n [\"Senior\", \"Python\", \"no\", \"no\", \"False\"],\n [\"Senior\", \"R\", \"yes\", \"no\", \"True\"],\n [\"Junior\", \"Python\", \"yes\", \"no\", \"True\"],\n [\"Senior\", \"Python\", \"yes\", \"yes\", \"True\"],\n [\"Mid\", \"Python\", \"no\", \"yes\", \"True\"],\n [\"Mid\", \"Java\", \"yes\", \"no\", \"True\"],\n [\"Junior\", \"Python\", \"no\", \"yes\", \"False\"]\n ]\n\nM = 7\nN = 20\nF = 2\ninterviewTest = MyRandomForestClassifier(M, N, F)\ninterviewData, interviewClasses = myutils.separate(interview_table, interview_header, \"interviewed_well\")\n\n\ndef test_decision_tree_classifier_fit():\n\n X_tr, X_t, y_tr, y_t = myevaluation.train_test_split(interviewData,interviewClasses)\n interviewTest.fit(X_tr, y_tr)\n assert len(interviewTest.best_M_trees) == M\n\ndef test_decision_tree_classifier_predict():\n results = interviewTest.predict([[\"Junior\", \"Java\", \"yes\", \"no\"], [\"Junior\", \"Java\", \"yes\", \"yes\"]])\n actual = ['True', 'False']\n assert len(results) == len(actual)\n print(str(results) + \" vs \" + str(actual))","repo_name":"caleb-cramer/MLFinalProject","sub_path":"test_myclassifiers.py","file_name":"test_myclassifiers.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1242655746","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 2 09:30:30 2016\n\n@author: vf140245\nCopyrignt : CEA NeuroSpin - 2014\n\"\"\"\nfrom nilearn import plotting\nimport nibabel\nimport argparse\nimport os\nfrom clindmri.registration.fsl import flirt\nfrom glob import glob\n\nfrom hopla.converter import hopla\n\n# Parameters to keep trace\n__hopla__ = [\"tool\", \"version\"]\n\ndoc = \"\"\"\nHopla context to invoke 
register_pitie.py. Perform unit registration of the\nT2 on the T1 image using the fsl/flirt command. Use mutual information and sinc resampling.\n\nOrganize results according to the project rules.\n\nCommand:\n========\n\npython hl_register_pitie.py \\\n    -b /volatile/frouin/radiomique_radiogenomique \\\n    -r AxT1enhanced \\\n    -i AxT2\n    \n\npython hl_register_pitie.py \\\n    -b /volatile/frouin/radiomique_radiogenomique \\\n    -r AxT1enhanced \\\n    -i AxT2 \\\n    -p\n\n\"\"\"\n\n# Parsing \ndef is_dir(dirarg):\n    \"\"\" Type for argparse - checks that output dir exists.\n    \"\"\"\n    if not os.path.isdir(dirarg):\n        raise argparse.ArgumentTypeError(\n            \"The dir '{0}' does not exist!\".format(dirarg))\n    return dirarg\n\nparser = argparse.ArgumentParser(description=doc)\nparser.add_argument(\n    \"-v\", \"--verbose\", dest=\"verbose\", type=int, choices=[0, 1, 2], default=0,\n    help=\"increase the verbosity level: 0 silent, [1, 2] verbose.\")\nparser.add_argument(\n    \"-b\", \"--basedir\", dest=\"basedir\", required=True, metavar=\"PATH\",\n    help=\"the subject main directory to parse files\",\n    type=is_dir)\nparser.add_argument(\n    \"-r\", \"--ref\", dest=\"refile\", metavar=\"STRING\",\n    help=\"Motif (regular expression) to search for ref images\")\nparser.add_argument(\n    \"-s\", \"--subject\", dest=\"subject\", metavar=\"STRING\",\n    help=\"An existing subject in the data tree\")\nparser.add_argument(\n    \"-i\", \"--in\", dest=\"infile\", metavar=\"STRING\",\n    help=\"Motif (regular expression) to search for images to register\")\nparser.add_argument(\n    \"-p\", \"--process\", dest=\"process\", action='store_true',\n    help=\"if activated, execute.\")\nargs = parser.parse_args()\n\n\n# glob the file to analyze\n# list subjects from basedir\n#print glob(os.path.join(args.basedir, '*', args.infile))\n\nif args.subject is not None:\n    infiles = set([os.path.dirname(os.path.abspath(i)) for i in \\\n                glob(os.path.join(args.basedir, '{}'.format(args.subject), args.infile))])\n    refiles = set([os.path.dirname(os.path.abspath(i)) for i in \\\n                glob(os.path.join(args.basedir, '{}'.format(args.subject), args.refile))])\nelse:\n    infiles = set([os.path.dirname(os.path.abspath(i)) for i in \\\n                glob(os.path.join(args.basedir, '*', args.infile))])\n    refiles = set([os.path.dirname(os.path.abspath(i)) for i in \\\n                glob(os.path.join(args.basedir, '*', args.refile))])\nsubjects = set.intersection(infiles, refiles)\nsubjects = [os.path.basename(i) for i in subjects]\n# get infiles and filter them\ninfiles = []\nrefiles = []\nfor s in subjects:\n    infiles.extend(glob(os.path.join(args.basedir, '{}'.format(s),\n                        args.infile, '{}.nii.gz'.format(args.infile))))\n    refiles.extend(glob(os.path.join(args.basedir, '{}'.format(s),\n                        args.refile, '{}.nii.gz'.format(args.refile))))\noutfiles = [i.replace(args.basedir, os.path.join(args.basedir, 'preprocess')) for i in infiles]\noutfiles = [i.replace(args.infile,'',1) for i in outfiles]\noutfiles = [os.path.splitext(os.path.splitext(i)[0])[0] for i in outfiles]\noutfiles = [os.path.join('{}'.format(os.path.dirname(i)),\n                         'r{}.nii.gz'.format(os.path.basename(i)))\n            for i in outfiles]\n\nif args.process:\n    logfile = \"{}/preprocess/log.txt\".format(args.basedir)\n    if not os.path.isdir(os.path.dirname(logfile)):\n        os.makedirs(os.path.dirname(logfile))\n    #\n    status, exitcodes = hopla(\n        os.path.join('/volatile','frouin','radiomique_radiogenomique',\n                     'register_pitie.py'),\n        i=infiles,\n        r=refiles,\n        o=outfiles,\n        hopla_iterative_kwargs=[\"i\", \"r\", \"o\"],\n        hopla_cpus=2,\n        hopla_logfile=logfile,\n        
hopla_verbose=1)\n","repo_name":"neurospin/scripts","sub_path":"2017_rr/metastasis/hl_register_pitie.py","file_name":"hl_register_pitie.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73369155465","text":"import json\n\nimport httpx\nfrom fastapi import status\n\nfrom marvin.loaders.web import SitemapLoader, URLLoader\nfrom marvin.plugins import Plugin\nfrom marvin.utilities.strings import html_to_content, slice_tokens\nfrom marvin.utilities.web import url_is_ok\n\n\nclass VisitURL(Plugin):\n name: str = \"visit-url\"\n description: str = (\n \"Visit a URL and return its contents. Don't provide a URL unless you're\"\n \" absolutely sure it exists.\"\n )\n\n async def run(self, url: str) -> str:\n if not url.startswith(\"http\"):\n url = f\"http://{url}\"\n async with httpx.AsyncClient(follow_redirects=True, timeout=2) as client:\n try:\n response = await client.get(url)\n except httpx.ConnectTimeout:\n return \"Failed to load URL: Connection timed out\"\n if response.status_code == status.HTTP_200_OK:\n text = response.text\n\n # try to parse as JSON in case the URL is an API\n try:\n content = str(json.loads(text))\n # otherwise parse as HTML\n except json.JSONDecodeError:\n content = html_to_content(text)\n return slice_tokens(content, 1000)\n else:\n return f\"Failed to load URL: {response.status_code}\"\n\n\nclass LoadAndStoreURL(Plugin):\n name: str = \"load-and-store-url\"\n description: str = (\n \"Visit a URL and use a loader to load its contents. Don't provide a URL unless\"\n \" you're absolutely sure it exists. A topic name can be provided to store the\"\n \" documents in a particular topic.\"\n )\n\n async def run(self, url: str, topic_name: str = None) -> str:\n \"\"\"\n Load and store the contents of a URL into a topic. If no topic name is provided,\n the Marvin default topic will be used.\n \"\"\"\n if not url.startswith(\"http\"):\n url = f\"http://{url}\"\n\n if url.endswith(\".pdf\"):\n return f\"URL {url} is a PDF. Use the `load-and-store-pdf` plugin instead.\"\n if url.endswith(\".xml\"):\n return (\n f\"URL {url} is a sitemap. Use the `load-and-store-sitemap` plugin\"\n \" instead.\"\n )\n\n if not await url_is_ok(url):\n return (\n \"URL was not reachable - make sure it exists and is publicly accessible\"\n )\n\n loader = URLLoader(urls=[url])\n await loader.load_and_store(topic_name=topic_name)\n return f\"Loaded {url} into topic {topic_name!r}\"\n\n\nclass LoadAndStorePDF(Plugin):\n name: str = \"load-and-store-pdf\"\n description: str = (\n \"Load and store the contents of a PDF URL into a topic. if no topic name is\"\n \" provided, the Marvin default topic will be used.\"\n )\n\n async def run(self, pdf_url: str, topic_name: str = None) -> str:\n from marvin.loaders.pdf import PDFLoader\n\n loader = PDFLoader(file_path=pdf_url)\n await loader.load_and_store(topic_name=topic_name)\n return f\"Loaded {pdf_url} into topic {topic_name!r}\"\n\n\nclass LoadAndStoreSitemap(Plugin):\n name: str = \"load-and-store-sitemap\"\n description: str = (\n \"Load and store the contents of a sitemap URL into a topic. 
if no topic name is\"\n \" provided, the Marvin default topic will be used.\"\n )\n\n async def run(self, sitemap_url: str, topic_name: str = None) -> str:\n loader = SitemapLoader(urls=[sitemap_url])\n await loader.load_and_store(topic_name=topic_name)\n return f\"Loaded {sitemap_url} into topic {topic_name!r}\"\n","repo_name":"rehmatsg/StarfishGPT","sub_path":"venv/lib/python3.10/site-packages/marvin/plugins/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6703102806","text":"# edges = [['A','B'],['A','C'],['A','D'],['D','E'],['B','E']]\n# nodes = ['A','B','C','D','E']\nedges = [['A','B'],['A','C'],['A','D'],['D','E'],\n ['B','E'],['F','H'],['F','G'],['I','J']]\nnodes = ['A','B','C','D','E','F','G','H','I','J','K']\n\n# def dfs(graph,node,visited = set()):\n# print(node)\n# visited.add(node)\n# for child in graph[node]:\n# if child not in visited:\n# dfs(graph,child,visited) \n\n\n# def no_of_node(graph,node,visited = set()):\n# # print(node)\n# visited.add(node)\n# sm = 0\n# for child in graph[node]:\n# if child not in visited:\n# sm += no_of_node(graph,child,visited) \n# return sm+1\n\ndef dfs(graph,node,visited = set()):\n print(node)\n visited.add(node)\n sm = 0\n for child in graph[node]:\n if child not in visited:\n sm += dfs(graph,child,visited) \n return sm+1\n\ngraph = {}\nfor i in nodes:\n graph[i] = []\n\nfor (u,v) in edges:\n graph[u].append(v) \n graph[v].append(u)\n\nfor i in graph.items():\n print(i) \n\n# dfs(graph,'A') \n# print(no_of_node(graph,'A'))\nanswer = []\nvisited = set()\nfor item in nodes:\n if item not in visited:\n temp = dfs(graph,item,visited)\n answer.append(temp)\n\nprint(answer)","repo_name":"AseemGupta39/Graph_Data_Structure_in_Python","sub_path":"connected_graph.py","file_name":"connected_graph.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28252926921","text":"def desenha_lista(lista, marcar_meio, meio, exibir_de, exibir_ate):\n for index,x in enumerate(lista):\n if(index > exibir_ate):\n continue\n if(index < exibir_de):\n print(\" \", end=\"\")\n elif(marcar_meio == True and x == meio):\n print(\"+=====\", end=\"\")\n else:\n print(\"+-----\", end=\"\")\n print(\"+\")\n for index,x in enumerate(lista):\n if(index > exibir_ate):\n continue\n if(index < exibir_de):\n print(\" \", end=\"\")\n elif(marcar_meio == True and x == meio):\n print(\"||{}|\".format(x), end=\"\")\n else:\n print(\"| {} \".format(x), end=\"\")\n print(\"|\")\n for index,x in enumerate(lista):\n if(index > exibir_ate):\n continue\n if(index < exibir_de):\n print(\" \", end=\"\")\n elif(marcar_meio == True and x == meio):\n print(\"+=====\", end=\"\")\n else:\n print(\"+-----\", end=\"\")\n print(\"+\")\n\ndef busca_binaria(lista,elemento):\n posicao_primeiro_elem = 0\n posicao_ultimo_elem =len(lista)-1\n posicao_meio = (posicao_primeiro_elem+posicao_ultimo_elem)//2\n desenha_lista(lista,False,\"\",posicao_primeiro_elem,posicao_ultimo_elem)\n if(lista != sorted(lista)):\n return -2\n else:\n desenha_lista(lista,True,lista[posicao_meio],posicao_primeiro_elem,posicao_ultimo_elem)\n while(posicao_ultimo_elem >= posicao_primeiro_elem):\n if(lista[posicao_meio] == elemento):\n print(\"Encontrado na posicao: {}\".format(posicao_meio))\n return True\n else:\n if elemento < lista[posicao_meio]:\n posicao_ultimo_elem = posicao_meio - 1\n else:\n 
posicao_primeiro_elem = posicao_meio + 1\n            posicao_meio = (posicao_primeiro_elem+posicao_ultimo_elem)//2\n            desenha_lista(lista, True, lista[posicao_meio],posicao_primeiro_elem,posicao_ultimo_elem)\n            if(posicao_primeiro_elem == posicao_ultimo_elem and lista[posicao_meio] != elemento):\n                return False\n        return False\n\n\n\n\n\nelemento = input().zfill(3)\nlista = [e.zfill(3) for e in input().split()]\n\nprint(\"Elemento procurado: {}\".format(elemento))\n\nresultado = busca_binaria(lista, elemento)\n\nif resultado == False:\n    print(\"O elemento nao foi encontrado\")\nelif resultado == -2:\n    print(\"Lista nao ordenada\")\n","repo_name":"matheusconceicao7/mc102z","sub_path":"Lab13/lab13.py","file_name":"lab13.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13810252444","text":"from flask import Blueprint\nfrom api.controllers.server_controller import ServerController\n\nserver_bp = Blueprint ('server_bp', __name__)\n\nserver_bp.route('/server/<int:id>', methods = ['GET'])(ServerController.get_server)\nserver_bp.route('/servers', methods = ['GET'])(ServerController.get_servers)\nserver_bp.route('/create_server', methods = ['POST'])(ServerController.create_server)\nserver_bp.route('/update_server/<int:id>', methods = ['PUT'])(ServerController.update_server)\nserver_bp.route('/delete_server/<int:id>', methods = ['DELETE'])(ServerController.delete_server)\nserver_bp.route('/search/<name>', methods = ['GET'])(ServerController.search_servers)","repo_name":"Llane4/Proyecto-Backend","sub_path":"api/routes/server_bp.py","file_name":"server_bp.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12147735607","text":"from rest_framework import serializers\nfrom .models import Overview\nimport ipdb\n\n\nclass OverviewSerializer(serializers.ModelSerializer):\n    bmi_classification = serializers.SerializerMethodField()\n\n    whr_classification = serializers.SerializerMethodField()\n\n    def get_bmi_classification(self, obj):\n        if float(obj.bmi) < 18.5:\n            return 'underweight'\n\n        if float(obj.bmi) >= 18.5 and float(obj.bmi) < 25:\n            return 'normal'\n\n        if float(obj.bmi) >= 25 and float(obj.bmi) < 30:\n            return 'overweight'\n\n        if float(obj.bmi) >= 30 and float(obj.bmi) < 35:\n            return 'obese'\n\n        if float(obj.bmi) >= 35:\n            return 'extremely obese'\n    \n\n    def get_whr_classification(self, obj):\n        if obj.sex == 'female' and float(obj.whr) <= 0.80:\n            return 'low'\n\n        if obj.sex == 'female' and float(obj.whr) > 0.80 and float(obj.whr) <= 0.85:\n            return 'moderate'\n\n        if obj.sex == 'female' and float(obj.whr) > 0.85:\n            return 'high'\n        \n        if obj.sex == 'male' and float(obj.whr) <= 0.95:\n            return 'low'\n\n        if obj.sex == 'male' and float(obj.whr) > 0.95 and float(obj.whr) < 1.0:\n            return 'moderate'\n\n        if obj.sex == 'male' and float(obj.whr) >= 1.0:\n            return 'high'\n\n    \n\n    class Meta:\n        model = Overview\n\n        fields = [\n            'id',\n            'sex',\n            'height',\n            'weight',\n            'bmi',\n            'bmi_classification',\n            'whr',\n            'whr_classification',\n            'lean_mass',\n            'fat_mass',\n            'body_fat',\n            'created_at',\n            'created_by'\n        ]\n\n        read_only_fields = [\n            'bmi_classification',\n            'whr_classification',\n            'created_at',\n            'created_by'\n        ]\n\n\n    def create(self, validated_data: dict) -> Overview:\n        return 
Overview.objects.create(**validated_data)\n","repo_name":"FitnessFormula/fitness-formula-api","sub_path":"overview/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8965769579","text":"import django_filters\r\nfrom django_filters import CharFilter, NumberFilter\r\nfrom .models import *\r\n\r\n\r\nclass ProductFilter(django_filters.FilterSet):\r\n name = CharFilter(field_name='name',\r\n lookup_expr='icontains', label='what are you looking for?')\r\n\r\n class Meta:\r\n model = Product\r\n fields = '__all__'\r\n exclude = ['description', 'price', 'image', 'vendor','category']\r\n","repo_name":"Kihara-Njoroge/tronic-electronics","sub_path":"shop/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70392436746","text":"from __future__ import division\nimport matplotlib\nimport numpy as np \nimport time\nimport math\nimport numpy as np\nfrom numpy.lib.npyio import savetxt\nimport scipy.io\nfrom numpy.core.numeric import NaN\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import rcParams\nimport matplotlib.animation\n\nfrom numpy import ones,vstack\nfrom numpy.linalg import lstsq\n\nplt.close('all')\ndata_array = np.load('StandingTestData.npy')\n\nend = time.time()\n\n#print(\"\\nData summary:\\n\", data_array)\nprint(\"\\nData shape:\\n\", data_array.shape)\n\n\n\n# plt.xlim([-600,0])\n# plt.ylim([600,2500])\ni = 0\nskips = 0\nfor i in range(213):\n \n for g in range(10,17):\n \n data_array2 = data_array[:,g:(g+2),:]\n\n def count_nans(nans, one, two):\n \n count = 0 \n z = -3\n for p in range(6):\n print(\"count\", count)\n print(\"nans: \", nans)\n print(\"i\", i)\n print(\"i+z\", (i+z))\n if np.isnan(nans):\n count = count + 1\n nans = data_array2[(i+z), one, two]\n \n z = z+1\n print(\"count\", count)\n return count\n\n\n\n def replace_nans(cord, first, second, j):\n if np.isnan(cord):\n total_nans = count_nans(cord, first, second)\n if (total_nans > 4):\n cord = 0\n pass\n else:\n z = -3\n new_cord = 0\n for p in range(6):\n cord2 = data_array2[(j+z), first, second]\n if np.isnan(cord2):\n pass\n else:\n \n new_cord = new_cord + cord2\n print(\"cord+cord2\", new_cord)\n cord = new_cord\n\n z = z+1\n cord = cord / (6 - total_nans)\n print(\"end of replace nans: \", cord)\n \n\n\n \n return cord\n\n\n\n\n \n \n X2 = data_array2[i, 0, 0]\n X2 = replace_nans(X2, 0,0, i)\n print(\"X2\", X2)\n\n Y2 = data_array2[i, 0, 1]\n Y2 = replace_nans(Y2, 0,1, i)\n print(\"Y2\", Y2)\n\n X3 = data_array2[i, 1, 0]\n X3 = replace_nans(X3, 1,0, i)\n print(\"X3\", X3)\n Y3 = data_array2[i, 1, 1]\n Y3 = replace_nans(Y3, 1,1, i)\n print(\"Y3\", Y3)\n\n points = [(X2,Y2),(X3,Y3)]\n x_coords, y_coords = zip(*points)\n A = vstack([x_coords,ones(len(x_coords))]).T\n m, c = lstsq(A, y_coords)[0]\n print(\"Line Solution is y = {m}x + {c}\".format(m=m,c=c))\n\n\n\n X = X2 - ((X2-X3) *.8) \n Y = Y2 - ((Y2-Y3) *.8)\n\n\n print(\"X\", X)\n print(\"Y\", Y)\n\n print(\" \")\n\n\n plt.axes()\n \n plt.xlim(-500,500)\n plt.ylim(-500,500)\n\n scatterplot1 = plt.plot(X2, Y2, 'bo')\n scatterplot1 = plt.plot(X3, Y3, 'bo')\n scatterplot1 = plt.plot(X, Y, 'ro')\n \n \n \n#plt.show(scatterplot1)\n\nplt.close('all')\n\n\n\n\n\n\n\n\n\n\nprint('skips', 
skips)\n\n\n\n\n\n","repo_name":"rhettd33/FreemocapCOM-","sub_path":"freemocaptest5.py","file_name":"freemocaptest5.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15971164415","text":"from PIL import Image\nfrom src.models.cc0 import patcher\nimport numpy as np\nimport skimage.io as io\nfrom src.utils.imgproc import *\nfrom skimage.color import rgb2hsv, hsv2rgb\n\n\nclass patcher(patcher):\n def __init__(self, body='./body/body_sakurana.png', **options):\n try:\n options = options['options']\n except:\n pass\n options['is_4k'] = False\n super().__init__(options=options)\n self.name = 'サクラナ'\n self.body = Image.open(body)\n self.body_size = self.body.size\n self.pantie_position = [3433, 1782]\n try:\n self.with_bra = options['with_bra']\n except:\n self.with_bra = self.ask(question='With bra?', default=True)\n if self.with_bra:\n self.bra_position = [2017, 1491]\n self.bra = np.float32(io.imread('./mask/bra_sakurana.png') / 255)\n self.bra_center = np.float32(io.imread('./mask/bra_sakurana_center.png') / 255)\n self.bra_shade = np.float32(io.imread('./material/bra_sakurana_shade.png') / 255)\n self.bra_frill = np.float32(io.imread('./material/bra_sakurana_frill.png') / 255)\n self.bra_shade_alpha = self.bra_shade[:, :, -1]\n self.bra_frill_mask = self.bra_frill[:, :, -1] > 0.5\n\n def gen_bra(self, image):\n def pick_color(arr):\n return np.mean(np.mean(arr, axis=0), axis=0)\n pantie = np.array(image)\n\n # pickup colors\n front = pantie[20:100, 30:80, :3] / 255.0\n front_shade = pantie[130:150, 0:40, :3] / 255.0\n front_color = pick_color(front)\n front_shade_color = pick_color(front_shade)\n front_shade_color = rgb2hsv(front_shade_color[None, None])\n front_shade_color[0, 0, 1] *= front_shade_color[0, 0, 2] / 0.3\n if front_shade_color[0, 0, 1] > 0.7:\n front_shade_color[0, 0, 1] *= 0.7\n front_shade_color[0, 0, 2] *= front_shade_color[0, 0, 2] / 0.4\n front_shade_color = np.clip(hsv2rgb(front_shade_color)[0, 0], 0, 1)\n ribbon = pantie[24:32, 15:27, :3] / 255.0\n ribbon_color = pick_color(ribbon)\n\n # making a center texture\n center = pantie[20:170, -200:-15, :3][:, ::-1]\n center = resize(center, [2.3, 2.5])\n\n bra_center = np.copy(self.bra_center)\n bra_center[:center.shape[0], :center.shape[1], :3] = center * np.float32(bra_center[:center.shape[0], :center.shape[1], :3] > 0)\n bra = self.bra[:, :, :3] * front_color\n bra_shade = (self.bra_shade[:, :, -1])[:, :, None] * front_shade_color\n bra_frill = self.bra_frill[:, :, :3] * ribbon_color\n\n # overlaying layers\n bra = alpha_brend(bra_center[:, :, :3], bra[:, :, :3], bra_center[:, :, 0] > 0.1)\n bra = alpha_brend(bra_frill, bra, self.bra_frill_mask)\n bra = alpha_brend(bra_shade, bra, self.bra_shade_alpha)\n bra = np.dstack((bra, self.bra[:, :, 0] > 0))\n return Image.fromarray(np.uint8(np.clip(bra, 0, 1) * 255))\n\n def patch(self, image, transparent=False):\n pantie = self.convert(image)\n if transparent:\n patched = Image.new(\"RGBA\", (4096, 4096))\n else:\n patched = self.body.copy()\n pantie = pantie.resize((int(pantie.width * .75), int(pantie.height * .77)), resample=Image.BICUBIC)\n pantie = pantie.rotate(-90, expand=True)\n patched = self.paste(patched, pantie, self.pantie_position)\n if self.with_bra:\n patched = self.paste(patched, self.gen_bra(image), self.bra_position)\n return 
patched\n","repo_name":"TenteEEEE/quiche_pantie_patch","sub_path":"src/models/sakurana.py","file_name":"sakurana.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"81"} +{"seq_id":"39204293436","text":"\"\"\"\n!!!DISCLAIMER!!!\nALL INFORMATION CONTAINED IN THIS FILE HAS NOTHING TO DO WITH REAL LIFE.\nALL CHARACTERS DESCRIBED HERE ARE FICTIONARY.\nANY AND ALL SIMILARITIES ARE COMPLETELY COINCIDENTAL.\n\"\"\"\n\nEN_LIBERTY_WORDS = [\"liberty\", \"rights\", \"freedom\", \"choice\", \"will\"]\nEN_LIBERTY_OFFENSIVE_WORDS = [\"liberals\", \"popped\"]\n\nEN_AUTHORITY_WORDS = [\"authority\", \"laws\", \"governance\", \"guidance\", \"control\",\n \"submission\"]\nEN_AUTHORITY_OFFENSIVE_WORDS = [\"opressors\", \"enslavers\", \"megalomaniac\", \"bootlicker\", \"sheeple\"]\n\nEN_PACIFISM_WORDS = [\"pacifism\", \"peace\", \"peacemaker\", \"understanding\", \"tolerance\",\n \"agreements\", \"friendship\"]\nEN_PACIFISM_OFFENSIVE_WORDS = [\"peacemonger\", \"coward\", \"spineless\"]\n\nEN_MILITARISM_WORDS = [\"militarism\", \"war\", \"defense\", \"assault\", \"weapon\",\n \"protection\", \"enemies\", \"attack\", \"capture\", \"bloody\"]\nEN_MILITARISM_OFFENSIVE_WORDS = [\"warmonger\", \"bloodletter\"]\n\nEN_INTENSIVISM_WORDS = [\"intensivism\", \"distribution\", \"economy\", \"efficency\", \"management\"]\nEN_INTENSIVISM_OFFENSIVE_WORDS = [\"scrooge\"]\n\nEN_EXTENSIVISM_WORDS = [\"extensivism\", \"acquisition\", \"resources\", \"accession\", \"expansion\"]\nEN_EXTENSIVISM_OFFENSIVE_WORDS = [\"wastrel\"]\n\n# EN_MATERIALISM_WORDS = [\"materialism\", \"prosperity\", \"resources\", \"acquisition\", \"science\",\n# \"education\", \"cognition\", \"naturalness\", \"universe\", \"objective\"]\n# EN_MATERIALISM_OFFENSIVE_WORDS = [\"scrooge\", \"faithless\"]\n\n# EN_SPIRITUALISM_WORDS = [\"spiritualism\", \"deity\", \"worship\", \"extrasensory\", \"insight\",\n# \"faith\", \"religion\", \"prophecy\", \"spirit\", \"paranormal\"]\n# EN_SPIRITUALISM_OFFENSIVE_WORDS = [\"fanatic\", \"zealot\", \"bigot\"]\n\nEN_INDIVIDUALISM_WORDS = [\"individualism\", \"personality\", \"uniqueness\", \"speciality\", \"eccentric\",\n \"expression\", \"entrepreneurship\", \"private\"]\nEN_INDIVIDUALISM_OFFENSIVE_WORDS = [\"egocentric\", \"snowflake\"]\n\nEN_COLLECTIVISM_WORDS = [\"collectivism\", \"unity\", \"gathering\", \"coherent\", \"collectively\",\n \"together\", \"union\", \"alliance\", \"public\"]\nEN_COLLECTIVISM_OFFENSIVE_WORDS = [\"crowd\", \"herd\"]\n\nEN_REFORMISM_WORDS = [\"reformism\", \"legislation\", \"reform\", \"stability\", \"discussion\",\n \"compromise\", \"gradually\"]\nEN_REFORMISM_OFFENSIVE_WORDS = [\"indecisive\"]\n\nEN_REVOLUTIONISM_WORDS =[\"revolutionism\", \"negation\", \"coup\", \"change\", \"riot\",\n \"protest\", \"abruptly\", \"revolution\"]\nEN_REVOLUTIONISM_OFFENSIVE_WORDS = [\"reactionery\"]\n\nEN_CONSTRUCTIVISM_WORDS = [\"constructivism\", \"industry\", \"goal\", \"artificial\", \"crafted\",\n \"custom\", \"synthesis\", \"elaborate\"]\nEN_CONSTRUCTIVISM_OFFENSIVE_WORDS = [\"geek\", \"pervert\"]\n\nEN_ESSENTIALISM_WORDS = [\"essentialism\", \"nature\", \"essence\", \"natural\", \"genuine\",\n \"normal\", \"life\", \"simple\"]\nEN_ESSENTIALISM_OFFENSIVE_WORDS = [\"hillbilly\", \"retrograde\"]\n\nEN_DENIAL_WORDS = [\"denial\", \"absence\", \"void\", \"nothing\", \"non-existance\", \"non-conformity\", \"wordly\", \"destructive\"]\nEN_DENIAL_OFFENSIVE_WORDS = [\"cynic\", \"degenerate\", 
\"unscrupulous\"]\n\nEN_ACCEPTANCE_WORDS = [\"acceptance\", \"presence\", \"embrace\", \"everything\", \"existance\", \"conformity\", \"sacred\", \"constructive\"]\nEN_ACCEPTANCE_OFFENSIVE_WORDS = [\"idealist\"]\n\n# EN_HEDONISM_WORDS = [\"hedonism\", \"lust\", \"joy\", \"excess\", \"pleasure\", \"happiness\"]\n# EN_HEDONISM_OFFENSIVE_WORDS = [\"deviant\", \"snob\", \"nymphomaniac\", \"sybarite\", \"glutton\", \"chad\"]\n\n# EN_ASCETICISM_WORDS = [\"asceticism\", \"purity\", \"moderation\", \"abstinence\", \"cleanliness\", \"restraint\"]\n# EN_ASCETICISM_OFFENSIVE_WORDS = [\"prude\", \"virgin\"]\n\n\n\nRU_LIBERTY_WORDS = [\"свобода\", \"права\", \"воля\", \"выбор\", \"желания\"]\nRU_LIBERTY_OFFENSIVE_WORDS = [\"либерахи\", \"порвало\"]\n\nRU_AUTHORITY_WORDS = [\"власть\", \"законы\", \"руководство\", \"управление\", \"контроль\",\n \"подчинение\"]\nRU_AUTHORITY_OFFENSIVE_WORDS = [\"угнетатели\", \"рабовладельцы\", \"мегаломан\", \"овечка\"]\n\nRU_PACIFISM_WORDS = [\"пацифизм\", \"мир\", \"миротворец\", \"понимание\", \"терпимость\",\n \"соглашения\", \"дружба\"]\nRU_PACIFISM_OFFENSIVE_WORDS = [\"миролюб\", \"трусишка\", \"мягкотелый\"]\n\nRU_MILITARISM_WORDS = [\"милитаризм\", \"война\", \"защита\", \"нападение\", \"оружие\",\n \"оборона\", \"враги\", \"атака\", \"захват\", \"кровавый\"]\nRU_MILITARISM_OFFENSIVE_WORDS = [\"варвар\", \"кровопроливец\"]\n\nRU_INTENSIVISM_WORDS = [\"интенсивизм\", \"распределение\", \"экономия\", \"эффективность\", \"менеджмент\"]\nRU_INTENSIVISM_OFFENSIVE_WORDS = [\"скряга\"]\n\nRU_EXTENSIVISM_WORDS = [\"экстенсивизм\", \"добыча\", \"ресурсы\", \"доступ\", \"экспансия\"]\nRU_EXTENSIVISM_OFFENSIVE_WORDS = [\"транжира\"]\n\n# RU_MATERIALISM_WORDS = [\"материализм\", \"процветание\", \"ресурсы\", \"добыча\", \"наука\",\n# \"образование\", \"познание\", \"естественность\", \"вселенная\", \"объективный\"]\n# RU_MATERIALISM_OFFENSIVE_WORDS = [\"скряга\", \"безбожник\"]\n\n# RU_SPIRITUALISM_WORDS = [\"спиритуализм\", \"божество\", \"поклонение\", \"эзотерика\", \"прозрение\",\n# \"вера\", \"религия\", \"пророчество\", \"дух\", \"сверхестественность\"]\n# RU_SPIRITUALISM_OFFENSIVE_WORDS = [\"фанатик\", \"изувер\", \"мракобес\"]\n\nRU_INDIVIDUALISM_WORDS = [\"индивидуализм\", \"личность\", \"уникальность\", \"эксцентричный\",\n \"самовыражение\", \"предпринимательство\", \"частный\"]\nRU_INDIVIDUALISM_OFFENSIVE_WORDS = [\"эгоцентрист\", \"снежинка\"]\n\nRU_COLLECTIVISM_WORDS = [\"коллективизм\", \"единство\", \"собрание\", \"согласованность\", \"коллективно\",\n \"вместе\", \"объединение\", \"союз\", \"публично\"]\nRU_COLLECTIVISM_OFFENSIVE_WORDS = [\"толпа\", \"стая\"]\n\nRU_REFORMISM_WORDS = [\"реформизм\", \"законопроект\", \"реформа\", \"стабильность\", \"обсуждение\",\n \"компромисс\", \"постепенно\"]\nRU_REFORMISM_OFFENSIVE_WORDS = [\"нерешительный\"]\n\nRU_REVOLUTIONISM_WORDS =[\"революционизм\", \"переговоры\", \"переворот\", \"перемены\", \"восстание\",\n \"протест\", \"резко\", \"революция\"]\nRU_REVOLUTIONISM_OFFENSIVE_WORDS = [\"реакционер\"]\n\nRU_CONSTRUCTIVISM_WORDS = [\"конструктивизм\", \"урбанизация\", \"эффективность\", \"индустрия\", \"бизнес\"]\nRU_CONSTRUCTIVISM_OFFENSIVE_WORDS = [\"задрот\", \"извращенец\"]\n\nRU_ESSENTIALISM_WORDS = [\"эссенциализм\", \"природа\", \"экология\", \"животные\", \"вымирание\"]\nRU_ESSENTIALISM_OFFENSIVE_WORDS = [\"деревенщина\", \"ретроград\"]\n\nRU_DENIAL_WORDS = [\"отрицание\", \"отсутствие\", \"пустота\", \"ничего\", \"несуществование\", \"нонконформизм\", \"мирской\", 
\"деструктивный\"]\nRU_DENIAL_OFFENSIVE_WORDS = [\"циник\", \"дегенерат\", \"бессовестный\"]\n\nRU_ACCEPTANCE_WORDS = [\"принятие\", \"присутствие\", \"принимание\", \"всё\", \"существование\", \"конформизм\", \"сакральный\", \"конструктивный\"]\nRU_ACCEPTANCE_OFFENSIVE_WORDS = [\"идеалист\", \"мракобес\"]\n\n# RU_HEDONISM_WORDS = [\"гедонизм\", \"блуд\", \"радость\", \"излишество\", \"удовольствие\", \"счастье\"]\n# RU_HEDONISM_OFFENSIVE_WORDS = [\"девиант\", \"сноб\", \"нимфоман\", \"сибарит\", \"чревоугодец\", \"чад\"]\n\n# RU_ASCETICISM_WORDS = [\"аскетизм\", \"непорочность\", \"скромность\", \"воздержание\", \"чистота\", \"сдержанность\"]\n# RU_ASCETICISM_OFFENSIVE_WORDS = [\"святоша\", \"девственник\"]\n\n\n\nLA_LIBERTY_WORDS = [\"libertas\", \"juris\", \"autem\", \"delectu\", \"voluntatis\"]\nLA_LIBERTY_OFFENSIVE_WORDS = [\"liberacha\", \"rumpere\"]\n\nLA_AUTHORITY_WORDS = [\"imperium\", \"legis\", \"administratio\", \"curatio\", \"custodia\",\n \"dominatio\"]\nLA_AUTHORITY_OFFENSIVE_WORDS = [\"oppressionici\", \"servipossessores\", \"magnautorus\", \"ovila\"]\n\nLA_PACIFISM_WORDS = [\"pacifismus\", \"pax\", \"pacificatorum\", \"intelligentia\", \"tolerantia\",\n \"pacti\", \"amicitia\"]\nLA_PACIFISM_OFFENSIVE_WORDS = [\"pacificarus\", \"timidus\", \"molliformis\"]\n\nLA_MILITARISM_WORDS = [\"militarismus\", \"bellum\", \"defensio\", \"aggressura\", \"arma\",\n \"protectio\", \"hostibus\", \"impetum\", \"occupatio\", \"crudus\"]\nLA_MILITARISM_OFFENSIVE_WORDS = [\"barbarus\", \"suco\"]\n\nLA_INTENSIVISM_WORDS = [\"интенсивизм\", \"распределен��е\", \"экономия\", \"эффективность\", \"менеджмент\"]\nLA_INTENSIVISM_OFFENSIVE_WORDS = [\"скряга\"]\n\nLA_EXTENSIVISM_WORDS = [\"экстенсивизм\", \"добыча\", \"ресурсы\", \"доступ\", \"экспансия\"]\nLA_EXTENSIVISM_OFFENSIVE_WORDS = [\"транжира\"]\n\n# LA_MATERIALISM_WORDS = [\"materialismus\", \"prosperitas\", \"copiae\", \"extraction\", \"scientia\",\n# \"educatio\", \"cognitio\", \"naturalitas\", \"universum\", \"objectivus\"]\n# LA_MATERIALISM_OFFENSIVE_WORDS = [\"trahax\", \"impius\"]\n\n# LA_SPIRITUALISM_WORDS = [\"spiritualismus\", \"divinitatem\", \"idololatria\", \"immaterialismus\", \"viderio\",\n# \"fides\", \"religio\", \"divinaria\", \"spiritus\", \"extranaturalis\"]\n# LA_SPIRITUALISM_OFFENSIVE_WORDS = [\"fanaticum\", \"falsus\", \"cacodaemon\"]\n\nLA_INDIVIDUALISM_WORDS = [\"individualismus\", \"persona\", \"unictatem\", \"eccentricus\",\n \"auto-expressio\", \"mercatorum\", \"privatus\"]\nLA_INDIVIDUALISM_OFFENSIVE_WORDS = [\"egocentriista\", \"nixilla\"]\n\nLA_COLLECTIVISM_WORDS = [\"collegiismus\", \"unitas\", \"conventus\", \"consensus\", \"collegialis\",\n \"simul\", \"complexio\", \"unio\", \"vulgo\"]\nLA_COLLECTIVISM_OFFENSIVE_WORDS = [\"vulgus\", \"turba\"]\n\nLA_REFORMISM_WORDS = [\"reformationismus\", \"rogatio\", \"reformatio\", \"stabilitas\", \"discussio\",\n \"compromissum\", \"gradatim\"]\nLA_REFORMISM_OFFENSIVE_WORDS = [\"cunctator\"]\n\nLA_REVOLUTIONISM_WORDS =[\"revolutionismus\", \"actiones\", \"eversio\", \"mutationis\", \"tumultus\",\n \"interdictum\", \"acriter\", \"revolutio\"]\nLA_REVOLUTIONISM_OFFENSIVE_WORDS = [\"reactiator\"]\n\nLA_CONSTRUCTIVISM_WORDS = [\"constructivismus\", \"urbanisatio\", \"effectivus\", \"industria\", \"negotium\"]\nLA_CONSTRUCTIVISM_OFFENSIVE_WORDS = [\"technomator\", \"извращенец\"]\n\nLA_ESSENTIALISM_WORDS = [\"essentialismus\", \"natura\", \"oecologia\", \"animali\", \"exstinctionem\"]\nLA_ESSENTIALISM_OFFENSIVE_WORDS = [\"biberius\", \"ретроград\"]\n\nLA_DENIAL_WORDS = 
[\"отрицание\", \"отсутствие\", \"пустота\", \"ничего\", \"несуществование\", \"нонконформизм\", \"сакральный\", \"дестркутивный\"]\nLA_DENIAL_OFFENSIVE_WORDS = [\"циник\", \"дегенерат\", \"бессовестный\"]\n\nLA_ACCEPTANCE_WORDS = [\"принятие\", \"присутствие\", \"принимание\", \"всё\", \"существование\", \"конформизм\", \"мирской\", \"конструктивный\"]\nLA_ACCEPTANCE_OFFENSIVE_WORDS = [\"идеалист\", \"мракобес\"]\n\n# LA_HEDONISM_WORDS = [\"гедонизм\", \"блуд\", \"радость\", \"излишество\", \"удовольствие\", \"счастье\"]\n# LA_HEDONISM_OFFENSIVE_WORDS = [\"девиант\", \"сноб\", \"нимфоман\", \"сибарит\", \"чревоугодец\", \"чад\"]\n\n# LA_ASCETICISM_WORDS = [\"аскетизм\", \"непорочность\", \"скромность\", \"воздержание\", \"чистота\", \"сдержанность\"]\n# LA_ASCETICISM_OFFENSIVE_WORDS = [\"святоша\", \"девственник\"]\n\n\n\nLOCALE = {\"en\": {\"Liberty\": EN_LIBERTY_WORDS, \"Authority\": EN_AUTHORITY_WORDS,\n \"Liberty_Offensive\": EN_LIBERTY_OFFENSIVE_WORDS, \"Authority_Offensive\": EN_AUTHORITY_OFFENSIVE_WORDS,\n \"Pacifism\": EN_PACIFISM_WORDS, \"Militarism\": EN_MILITARISM_WORDS,\n \"Pacifism_Offensive\": EN_PACIFISM_OFFENSIVE_WORDS, \"Militarism_Offensive\": EN_MILITARISM_OFFENSIVE_WORDS,\n \"Intensivism\": EN_INTENSIVISM_WORDS, \"Extensivism\": EN_EXTENSIVISM_WORDS,\n \"Intensivism_Offensive\": EN_INTENSIVISM_OFFENSIVE_WORDS, \"Extensivism_Offensive\": EN_EXTENSIVISM_OFFENSIVE_WORDS,\n # \"Materialism\": EN_MATERIALISM_WORDS, \"Spiritualism\": EN_SPIRITUALISM_WORDS,\n # \"Materialism_Offensive\": EN_MATERIALISM_OFFENSIVE_WORDS, \"Spiritualism_Offensive\": EN_SPIRITUALISM_OFFENSIVE_WORDS,\n \"Individualism\": EN_INDIVIDUALISM_WORDS, \"Collectivism\": EN_COLLECTIVISM_WORDS,\n \"Individualism_Offensive\": EN_INDIVIDUALISM_OFFENSIVE_WORDS, \"Collectivism_Offensive\": EN_COLLECTIVISM_OFFENSIVE_WORDS,\n \"Reformism\": EN_REFORMISM_WORDS, \"Revolutionism\": EN_REVOLUTIONISM_WORDS,\n \"Reformism_Offensive\": EN_REFORMISM_OFFENSIVE_WORDS, \"Revolutionism_Offensive\": EN_REVOLUTIONISM_OFFENSIVE_WORDS,\n \"Constructivism\": EN_CONSTRUCTIVISM_WORDS, \"Essentialism\": EN_ESSENTIALISM_WORDS,\n \"Constructivism_Offensive\": EN_CONSTRUCTIVISM_OFFENSIVE_WORDS, \"Essentialism_Offensive\": EN_ESSENTIALISM_OFFENSIVE_WORDS,\n \"Denial\": EN_DENIAL_WORDS, \"Acceptance\": EN_ACCEPTANCE_WORDS,\n \"Denial_Offensive\": EN_DENIAL_OFFENSIVE_WORDS, \"Acceptance_Offensive\": EN_ACCEPTANCE_OFFENSIVE_WORDS,\n # \"Hedonism\": EN_HEDONISM_WORDS, \"Asceticism\": EN_ASCETICISM_WORDS,\n # \"Hedonism_Offensive\": EN_HEDONISM_OFFENSIVE_WORDS, \"Asceticism_Offensive\": EN_ASCETICISM_OFFENSIVE_WORDS,\n\n \"Book_Titles\": [\"The %s Manifesto\", \"The Book of %s\", \"The %s Book\", \"A Short Guide to %s\", \"%s Manual\"],\n\n \"Announcement_end\": \"\", # A very specific feature for latin.\n\n \"Reads\": \"reads\", \"Writes\": \"writes\", \"Gives\": \"gives\",\n\n \"Says\": \"says\", \"Mumbles\": \"mumbles\", \"Exclaims\": \"exclaims\", \"Asks\": \"asks\", \"Quotes\": \"quotes\",\n \"All\": \"all\",\n \"Convinced\": \"convinced\",\n \"Changed Ideology From\": \"has changed their ideology from\",\n\n \"Relationship Changed To\": \"changed their relationship to\",\n \"From\": \"from\", \"To\": \"to\",\n \"Reverence\": \"Reverence\", \"Respect\": \"Respect\", \"Sympathy\": \"Sympathy\",\n \"None\": \"None\",\n \"Distaste\": \"Distaste\", \"Hate\": \"Hate\", \"Feud\": \"Feud\",\n\n \"cry\": \"cries\", \"cry_target\": \"cries because of\",\n \"lecture\": \"lectures\", \"lecture_target\": \"lectures\",\n \"awe\": \"is in awe\", 
\"awe_target\": \"is in awe of\",\n \"praise\": \"praises\", \"praise_target\": \"praises\",\n \"dissapointment\": \"is dissapointed\", \"dissapointment_target\": \"is dissapointed in\",\n \"curse\": \"curses\", \"curse_target\": \"curses\",\n \"preach\": \"preaches\", \"preach_target\": \"preaches\",\n\n # \"\"\"Centrists\"\"\".\n \"Centrism\": \"Centrism\",\n\n # Wacky.\n \"Post-cosmic anarcho-nihilism\": \"Post-cosmic anarcho-nihilism\",\n \"Trans anarcho-pacifism\": \"Trans anarcho-pacifism\",\n\n # Communists.\n \"Communism\": \"Communism\",\n \"Anarcho-communism\": \"Anarcho-communism\",\n \"Trotskyism\": \"Trotskyism\",\n \"Anarcho-trotskyism\": \"Anarcho-trotskyism\",\n \"Posadism\": \"Posadism\",\n \"Anarcho-posadism\": \"Anarcho-posadism\",\n\n # Capitalists.\n \"Capitalism\": \"Capitalism\",\n \"Liberal capitalism\": \"Liberal capitalism\",\n \"Anarcho-capitalism\": \"Anarcho-capitalism\",\n\n # Fascists.\n \"Fascism\": \"Fascism\",\n \"Anarcho-fascism\": \"Anarcho-fascism\",\n \"Strasserism\": \"Strasserism\",\n \"Ecofascism\": \"Ecofascism\",\n \"Esoteric fascism\": \"Esoteric fascism\",\n \"Techno-fascism\": \"Techno-fascism\",\n\n # Syndicalists.\n \"Syndicalism\": \"Syndicalism\",\n \"Anarcho-syndicalism\": \"Anarcho-syndicalism\",\n \"Fascist syndicalism\": \"Fascist syndicalism\",\n\n # Government enthusiasts.\n \"Monarchism\": \"Monarchism\",\n \"Imperialism\": \"Imperialism\",\n \"Statism\": \"Statism\",\n \"Minarchism\": \"Minarchism\",\n \"Social democracy\": \"Social democracy\",\n\n # Peaceful protesters.\n \"Agorism\": \"Agorism\",\n\n # \"Greens\".\n \"Green politics\": \"Green politics\",\n \"Anarcho-primitivism\": \"Anarcho-primitivism\",\n\n # EGO.\n \"Anarcho-individualism\": \"Anarcho-individualism\",\n \"Meritocracy\": \"Meritocracy\",\n \"Technocracy\": \"Technocracy\",\n\n # Spiritualists.\n \"Theocracy\": \"Theocracy\",\n \"Liberation theology\": \"Liberation theology\",\n\n # Radicals vulgaris.\n \"Liberalism\": \"Liberalism\",\n\n # Extremes vulgaris.\n \"Anarchism\": \"Anarchism\",\n \"Conservatism\": \"Conservatism\",\n \"Accelerationism\": \"Accelerationism\",\n \"Transhumanism\": \"Transhumanism\",\n \"Nihilism\": \"Nihilism\",\n },\n\n \"ru\": {\"Liberty\": RU_LIBERTY_WORDS, \"Authority\": RU_AUTHORITY_WORDS,\n \"Liberty_Offensive\": RU_LIBERTY_OFFENSIVE_WORDS, \"Authority_Offensive\": RU_AUTHORITY_OFFENSIVE_WORDS,\n \"Pacifism\": RU_PACIFISM_WORDS, \"Militarism\": RU_MILITARISM_WORDS,\n \"Pacifism_Offensive\": RU_PACIFISM_OFFENSIVE_WORDS, \"Militarism_Offensive\": RU_MILITARISM_OFFENSIVE_WORDS,\n \"Intensivism\": RU_INTENSIVISM_WORDS, \"Extensivism\": RU_EXTENSIVISM_WORDS,\n \"Intensivism_Offensive\": RU_INTENSIVISM_OFFENSIVE_WORDS, \"Extensivism_Offensive\": RU_EXTENSIVISM_OFFENSIVE_WORDS,\n # \"Materialism\": RU_MATERIALISM_WORDS, \"Spiritualism\": RU_SPIRITUALISM_WORDS,\n # \"Materialism_Offensive\": RU_MATERIALISM_OFFENSIVE_WORDS, \"Spiritualism_Offensive\": RU_SPIRITUALISM_OFFENSIVE_WORDS,\n \"Individualism\": RU_INDIVIDUALISM_WORDS, \"Collectivism\": RU_COLLECTIVISM_WORDS,\n \"Individualism_Offensive\": RU_INDIVIDUALISM_OFFENSIVE_WORDS, \"Collectivism_Offensive\": RU_COLLECTIVISM_OFFENSIVE_WORDS,\n \"Reformism\": RU_REFORMISM_WORDS, \"Revolutionism\": RU_REVOLUTIONISM_WORDS,\n \"Reformism_Offensive\": RU_REFORMISM_OFFENSIVE_WORDS, \"Revolutionism_Offensive\": RU_REVOLUTIONISM_OFFENSIVE_WORDS,\n \"Constructivism\": RU_CONSTRUCTIVISM_WORDS, \"Essentialism\": RU_ESSENTIALISM_WORDS,\n \"Constructivism_Offensive\": 
RU_CONSTRUCTIVISM_OFFENSIVE_WORDS, \"Essentialism_Offensive\": RU_ESSENTIALISM_OFFENSIVE_WORDS,\n \"Denial\": RU_DENIAL_WORDS, \"Acceptance\": RU_ACCEPTANCE_WORDS,\n \"Denial_Offensive\": RU_DENIAL_OFFENSIVE_WORDS, \"Acceptance_Offensive\": RU_ACCEPTANCE_OFFENSIVE_WORDS,\n # \"Hedonism\": RU_HEDONISM_WORDS, \"Asceticism\": RU_ASCETICISM_WORDS,\n # \"Hedonism_Offensive\": RU_HEDONISM_OFFENSIVE_WORDS, \"Asceticism_Offensive\": RU_ASCETICISM_OFFENSIVE_WORDS,\n\n \"Book_Titles\": [\"Манифест %s\", \"Книга о %s\", \"Книга %s\", \"Краткое Пособие к %s\", \"Мануал %s\"],\n\n \"Announcement_end\": \"\", # A very specific feature for latin.\n\n \"Reads\": \"читает\", \"Writes\": \"пишет\", \"Gives\": \"даёт\",\n\n \"Says\": \"говорит\", \"Mumbles\": \"бубнит\", \"Exclaims\": \"восклицает\", \"Asks\": \"спрашивает\", \"Quotes\": \"цитирует\",\n \"All\": \"все\",\n \"Convinced\": \"убедил\", \"Changed Ideology From\": \"сменил свою идеологию с\",\n\n \"Relationship Changed To\": \"изменил своё отношение к\", \n \"From\": \"с\", \"To\": \"на\",\n \"Reverence\": \"Почтение\", \"Respect\": \"Уважение\", \"Sympathy\": \"Симпатия\",\n \"None\": \"Никакое\",\n \"Distaste\": \"Неприязнь\", \"Hate\": \"Ненависть\", \"Feud\": \"Вражда\",\n\n \"cry\": \"плачет\", \"cry_target\": \"плачет из-за\",\n \"lecture\": \"отчитывает\", \"lecture_target\": \"отчитывает\",\n \"awe\": \"восхищён\", \"awe_target\": \"восхищён\",\n \"praise\": \"фанатично почитает\", \"praise_target\": \"фанатично почитает\",\n \"dissapointment\": \"разочарован\", \"dissapointment_target\": \"разочарован в\",\n \"curse\": \"проклинает\", \"curse_target\": \"проклинает\",\n \"preach\": \"поучает\", \"preach_target\": \"поучает\",\n\n # \"\"\"Centrists\"\"\".\n \"Centrism\": \"Центризм\",\n\n # Wacky.\n \"Post-cosmic anarcho-nihilism\": \"Пост-космический анархо-нигилизм\",\n \"Trans anarcho-pacifism\": \"Транс анархо-пацифизм\",\n\n # Communists.\n \"Communism\": \"Коммунизм\",\n \"Anarcho-communism\": \"Анархо-коммунизм\",\n \"Trotskyism\": \"Троцкизм\",\n \"Anarcho-trotskyism\": \"Анархо-троцкизм\",\n \"Posadism\": \"Посадизм\",\n \"Anarcho-posadism\": \"Анархо-посадизм\",\n\n # Capitalists.\n \"Capitalism\": \"Капитализм\",\n \"Liberal capitalism\": \"Либеральный капитализм\",\n \"Anarcho-capitalism\": \"Анархо-капитализм\",\n\n # Fascists.\n \"Fascism\": \"Фашизм\",\n \"Anarcho-fascism\": \"Анархо-фашизм\",\n \"Strasserism\": \"Штрассеризм\",\n \"Ecofascism\": \"Экофашизм\",\n \"Esoteric fascism\": \"Эзотерический фашизм\",\n \"Techno-fascism\": \"Техно-фашизм\",\n\n # Syndicalists.\n \"Syndicalism\": \"Синдикализм\",\n \"Anarcho-syndicalism\": \"Анархо-синдикализм\",\n \"Fascist syndicalism\": \"Фашистский синдикализм\",\n\n # Government enthusiasts.\n \"Monarchism\": \"Монархизм\",\n \"Imperialism\": \"Империализм\",\n \"Statism\": \"Этатизм\",\n \"Minarchism\": \"Минархизм\",\n \"Social democracy\": \"Социальная-демократия\",\n\n # Peaceful protesters.\n \"Agorism\": \"Агоризм\",\n\n # \"Greens\".\n \"Green politics\": \"Зелёная политика\",\n \"Anarcho-primitivism\": \"Анархо-примитивизм\",\n\n # EGO.\n \"Anarcho-individualism\": \"Анархо-индивидуализм\",\n \"Meritocracy\": \"Меритократия\",\n \"Technocracy\": \"Технократия\",\n\n # Spiritualists.\n \"Theocracy\": \"Теократия\",\n \"Liberation theology\": \"Теология освобождения\",\n\n # Radicals vulgaris.\n \"Liberalism\": \"Либерализм\",\n\n # Extremes vulgaris.\n \"Anarchism\": \"Анархизм\",\n \"Conservatism\": \"Консерватизм\",\n \"Accelerationism\": \"Акселерационизм\",\n 
\"Transhumanism\": \"Трансгуманизм\",\n \"Nihilism\": \"Нигилизм\"\n },\n\n \"la\": {\"Liberty\": LA_LIBERTY_WORDS, \"Authority\": LA_AUTHORITY_WORDS,\n \"Liberty_Offensive\": LA_LIBERTY_OFFENSIVE_WORDS, \"Authority_Offensive\": LA_AUTHORITY_OFFENSIVE_WORDS,\n \"Pacifism\": LA_PACIFISM_WORDS, \"Militarism\": LA_MILITARISM_WORDS,\n \"Pacifism_Offensive\": LA_PACIFISM_OFFENSIVE_WORDS, \"Militarism_Offensive\": LA_MILITARISM_OFFENSIVE_WORDS,\n \"Intensivism\": LA_INTENSIVISM_WORDS, \"Extensivism\": LA_EXTENSIVISM_WORDS,\n \"Intensivism_Offensive\": LA_INTENSIVISM_OFFENSIVE_WORDS, \"Extensivism_Offensive\": LA_EXTENSIVISM_OFFENSIVE_WORDS,\n # \"Materialism\": LA_MATERIALISM_WORDS, \"Spiritualism\": LA_SPIRITUALISM_WORDS,\n # \"Materialism_Offensive\": LA_MATERIALISM_OFFENSIVE_WORDS, \"Spiritualism_Offensive\": LA_SPIRITUALISM_OFFENSIVE_WORDS,\n \"Individualism\": LA_INDIVIDUALISM_WORDS, \"Collectivism\": LA_COLLECTIVISM_WORDS,\n \"Individualism_Offensive\": LA_INDIVIDUALISM_OFFENSIVE_WORDS, \"Collectivism_Offensive\": LA_COLLECTIVISM_OFFENSIVE_WORDS,\n \"Reformism\": LA_REFORMISM_WORDS, \"Revolutionism\": LA_REVOLUTIONISM_WORDS,\n \"Reformism_Offensive\": LA_REFORMISM_OFFENSIVE_WORDS, \"Revolutionism_Offensive\": LA_REVOLUTIONISM_OFFENSIVE_WORDS,\n \"Constructivism\": LA_CONSTRUCTIVISM_WORDS, \"Essentialism\": LA_ESSENTIALISM_WORDS,\n \"Constructivism_Offensive\": LA_CONSTRUCTIVISM_OFFENSIVE_WORDS, \"Essentialism_Offensive\": LA_ESSENTIALISM_OFFENSIVE_WORDS,\n \"Denial\": LA_DENIAL_WORDS, \"Acceptance\": LA_ACCEPTANCE_WORDS,\n \"Denial_Offensive\": LA_DENIAL_OFFENSIVE_WORDS, \"Acceptance_Offensive\": LA_ACCEPTANCE_OFFENSIVE_WORDS,\n # \"Hedonism\": LA_HEDONISM_WORDS, \"Asceticism\": LA_ASCETICISM_WORDS,\n # \"Hedonism_Offensive\": LA_HEDONISM_OFFENSIVE_WORDS, \"Asceticism_Offensive\": LA_ASCETICISM_OFFENSIVE_WORDS,\n\n \"Book_Titles\": [\"Манифест %s\", \"Книга о %s\", \"Книга %s\", \"Краткое Пособие к %s\", \"Мануал %s\"],\n\n \"Announcement_end\": \" est\", # A very specific feature for latin.\n\n \"Reads\": \"читает\", \"Writes\": \"пишет\", \"Gives\": \"даёт\",\n\n \"Says\": \"dicit\", \"Mumbles\": \"murmurit\", \"Exclaims\": \"clamit\", \"Asks\": \"quaerit\", \"Quotes\": \"цитирует\",\n \"All\": \"omnia\",\n \"Convinced\": \"persuasit\", \"Changed Ideology From\": \"vertit ille doctrina ab\",\n\n \"Relationship Changed To\": \"vertit ille relatio ad\",\n \"From\": \"ab\", \"To\": \"ad\",\n \"Reverence\": \"Veneratio\", \"Respect\": \"Respectus\", \"Sympathy\": \"Sympathia\",\n \"None\": \"Nihili\",\n \"Distaste\": \"Inimitia\", \"Hate\": \"Odium\", \"Feud\": \"Hostilitas\",\n\n \"cry\": \"flarit\", \"cry_target\": \"flarit ob\",\n \"lecture\": \"citatit\", \"lecture_target\": \"vituperarit\",\n \"awe\": \"in admirationem\", \"awe_target\": \"in admirationem ab\",\n \"praise\": \"veneratit\", \"praise_target\": \"veneratit\",\n \"dissapointment\": \"non dulcedit\", \"dissapointment_target\": \"non dulcedit ad\",\n \"curse\": \"vomicit\", \"curse_target\": \"vomicit\",\n \"preach\": \"поучает\", \"preach_target\": \"поучает\",\n\n # \"\"\"Centrists\"\"\".\n \"Centrism\": \"Centrismus\",\n\n # Wacky.\n \"Post-cosmic anarcho-nihilism\": \"Пост-космический анархо-нигилизм\",\n \"Trans anarcho-pacifism\": \"Транс анархо-пацифизм\",\n\n # Communists.\n \"Communism\": \"Communismus\",\n \"Anarcho-communism\": \"Communismus irauctoris\",\n \"Trotskyism\": \"Trotskyismus\",\n \"Anarcho-trotskyism\": \"Trotskyismus irauctoris\",\n \"Posadism\": \"Posadismus\",\n \"Anarcho-posadism\": \"Posadismus 
irauctoris\",\n\n # Capitalists.\n \"Capitalism\": \"Capitalismus\", # Capitalismus vulgaris. As in, classical Capitalism. Change to this if you want to be more fancy.\n \"Liberal capitalism\": \"Capitalismus liberalis\",\n \"Anarcho-capitalism\": \"Capitalismus irauctoris\",\n\n # Fascists.\n \"Fascism\": \"Fascismus\",\n \"Anarcho-fascism\": \"Fascismus irauctoris\",\n \"Strasserism\": \"Strasserismus\",\n \"Ecofascism\": \"Domofascismus\",\n \"Esoteric fascism\": \"Fascismus immaterialis\",\n \"Techno-fascism\": \"Arsi-fascismus\",\n\n # Syndicalists.\n \"Syndicalism\": \"Syndicalismus\",\n \"Anarcho-syndicalism\": \"Syndicalismus irauctoris\",\n \"Fascist syndicalism\": \"Syndicalismus fascistis\",\n\n # Government enthusiasts.\n \"Monarchism\": \"Монархизм\",\n \"Imperialism\": \"Империализм\",\n \"Statism\": \"Civitismus\",\n \"Minarchism\": \"Minauctorismus\",\n \"Social democracy\": \"Populiauctorismus socialis\",\n\n # Peaceful protesters.\n \"Agorism\": \"Agorismus\",\n\n # \"Greens\".\n \"Green politics\": \"Politica virida\",\n \"Anarcho-primitivism\": \"Primitivismus irauctoris\",\n\n # EGO.\n \"Anarcho-individualism\": \"Individualismus irauctoris\",\n \"Meritocracy\": \"Dignauctoria\",\n \"Technocracy\": \"Arsauctoria\",\n\n # Spiritualists.\n \"Theocracy\": \"Deiauctoria\",\n \"Liberation theology\": \"Deiscientia liberatae\",\n\n # Radicals vulgaris.\n \"Liberalism\": \"Liberalismus\",\n\n # Extremes vulgaris.\n \"Anarchism\": \"Irauctorismus\",\n \"Conservatism\": \"Conservatismus\",\n \"Accelerationism\": \"Accelerationismus\",\n \"Transhumanism\": \"Transhumanismus\",\n \"Nihilism\": \"Nihilismus\"\n }\n}\n","repo_name":"LudwigVonChesterfield/Totally-Accurate-Political-Simulator","sub_path":"source_code/locale_game.py","file_name":"locale_game.py","file_ext":"py","file_size_in_byte":31330,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"32038652774","text":"\"\"\"\nTags for performing basic value comparisons in templates.\n\"\"\"\nfrom django import template\n\n\nfrom template_utils import blocks\n\nclass BlockNode(template.Node):\n def __init__(self, name, nodelist, varname, *vars):\n self.name = name\n self.vars = list(map(template.Variable, vars))\n self.nodelist = nodelist\n self.varname = varname\n \n def render(self, context):\n result = blocks[self.name](context, self.nodelist, *self.vars)\n if self.varname:\n context[self.varname] = result\n return ''\n return result\n\ndef do_block(parser, token):\n bits = token.contents.split()\n nodelist = parser.parse(('end%s' % bits[0],))\n parser.delete_first_token()\n varname = None\n if len(bits) > 2 and bits[-2] == 'as':\n varname = bits[-1]\n bits = bits[:-2]\n return BlockNode(bits[0], nodelist, varname, *bits[1:])\n\nregister = template.Library()\nfor tag_name in blocks:\n register.tag(tag_name, do_block)\n","repo_name":"AlphacrucisCollege/django-template-utils","sub_path":"template_utils/templatetags/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21170947869","text":"\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\n\ndef index_to_onehot(index, num_indices):\n onehot = torch.zeros(*index.size(), num_indices)\n onehot.scatter_(-1, index.unsqueeze(dim=-1), 1)\n return onehot\n\nclass FlattenInput:\n def __call__(self, x):\n return x.view(-1)\n\ndef MNISTDataset(batch_size, 
num_workers):\n\n    transform = transforms.Compose([\n        transforms.ToTensor(),\n        #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n        FlattenInput(),\n    ])\n\n    trainset = torchvision.datasets.MNIST(\n        root='./data', train=True, download=True, transform=transform)\n    testset = torchvision.datasets.MNIST(\n        root='./data', train=False, download=True, transform=transform)\n    trainloader = torch.utils.data.DataLoader(\n        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n    testloader = torch.utils.data.DataLoader(\n        testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n    return trainloader, testloader\n\n\n\n\n\n\n\n\n","repo_name":"zeligism/eqprop","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"73222098186","text":"#Given a list of numbers and a number k, return whether any two numbers from the list add up to k.\r\n\r\n#[10, 15, 3, 7]\r\n#17\r\n\r\ndef yes(nums, k):\r\n    # compare positions rather than values, so duplicate numbers\r\n    # (e.g. [5, 5] with k=10) are still counted as a valid pair\r\n    for i in range(len(nums)):\r\n        for j in range(len(nums)):\r\n            if i == j:\r\n                continue\r\n            if nums[i] + nums[j] == k:\r\n                return True\r\n    return False\r\n\r\n\r\nnums = []\r\n\r\nsize = int(input(\"Number of elements: \"))\r\nfor _ in range(0, size):\r\n    elem = int(input(\"Element of list: \"))\r\n    nums.append(elem)\r\nprint(nums)\r\n\r\nk = int(input(\"Input k: \"))\r\nprint(yes(nums, k))\r\n","repo_name":"kittykaty/dailycoding","sub_path":"12.03.py","file_name":"12.03.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38593448231","text":"import os\nimport logging\nimport random\n\nlogger = logging.getLogger()\n\n\nseed = os.environ[\"SEED\"]\nseed = int(seed)\nrandom.seed(seed)\n\n\ndef handler(event, context):\n    logger.info(\"received processing event {!r}\".format(event))\n\n    # in pharmacy benefits, the prices are just made up\n    return {\n        \"price\": random.randint(1, 10000),\n    }\n","repo_name":"capitalrx/devops","sub_path":"terraform/pricing.py","file_name":"pricing.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5639151491","text":"# create a dictionary\nmydict = {\n    'waqt': 'time',\n    'intezar':'wait for someone',\n    'kahani': 'story',\n    'itafaq' : 'by chance'\n}\nprint('-------',mydict.keys(),'--------\\n' )\n\nw = input('Enter a Hindi word : \\n')\nprint('\\n',mydict.get(w))\n","repo_name":"Sujit-sahoo3571/python_programming","sub_path":"01_question_dict.py","file_name":"01_question_dict.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28881528812","text":"from typing import Dict, Optional, Union\nfrom uuid import uuid4\n\nimport webview\n\nfrom kanmail.log import logger\nfrom kanmail.server.app import server\nfrom kanmail.settings.constants import FRAMELESS, IS_APP, SERVER_HOST, SESSION_TOKEN\n\nID_TO_WINDOW = {} # internal ID -> window object\nUNIQUE_NAME_TO_ID = {} # name -> internal ID for unique windows\n\nUNIQUE_KEY_TO_LOCALIZATION = {\n    \"settings\": {\n        \"global.quit\": \"Close without saving\",\n        \"global.cancel\": \"Return to settings\",\n        \"global.quitConfirmation\": (\n            \"Any changes will be lost, do you still want to close the window?\"\n        ),\n    },\n}\n\n\ndef create_window(\n    endpoint: str = \"/\",\n    unique_key: Optional[str] = None,\n    
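# note: when unique_key is set, any previous window registered under that key\n    # is destroyed before the new one is created (see UNIQUE_NAME_TO_ID below)\n    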
**kwargs,\n) -> Union[str, bool]:\n    if not IS_APP:\n        logger.warning(\"Cannot open window in server mode!\")\n        return False\n\n    internal_id = str(uuid4())\n    link = (\n        f\"http://{SERVER_HOST}:{server.get_port()}{endpoint}\"\n        f\"?window_id={internal_id}\"\n        f\"&Kanmail-Session-Token={SESSION_TOKEN}\"\n    )\n\n    logger.debug(\n        f\"Opening window ({internal_id}) \" f\"url={endpoint} kwargs={kwargs}\",\n    )\n\n    # Nuke any existing unique window\n    if unique_key and unique_key in UNIQUE_NAME_TO_ID:\n        old_window_id = UNIQUE_NAME_TO_ID.get(unique_key)\n        if old_window_id:\n            destroy_window(old_window_id)\n\n    window = webview.create_window(\n        \"Kanmail\",\n        link,\n        frameless=FRAMELESS,\n        easy_drag=False,\n        text_select=True,\n        localization=UNIQUE_KEY_TO_LOCALIZATION.get(unique_key),\n        **kwargs,\n    )\n\n    ID_TO_WINDOW[internal_id] = window\n\n    if unique_key:\n        UNIQUE_NAME_TO_ID[unique_key] = internal_id\n\n    return internal_id\n\n\ndef destroy_window(internal_id: str) -> None:\n    window = ID_TO_WINDOW.pop(internal_id, None)\n\n    if window:\n        try:\n            window.destroy()\n        except KeyError: # happens if the window has already been destroyed (user close)\n            pass\n        else:\n            return\n\n    logger.warning(f\"Tried to destroy non-existent window: {internal_id}\")\n\n\ndef resize_window(internal_id: str, width: int, height: int) -> None:\n    # use .get() so a missing ID falls through to the warning instead of raising\n    window = ID_TO_WINDOW.get(internal_id)\n\n    if window:\n        window.resize(width, height)\n        return\n\n    logger.warning(f\"Tried to resize non-existent window: {internal_id}\")\n\n\ndef reload_main_window() -> None:\n    if IS_APP:\n        window = get_main_window()\n        window.evaluate_js(\"window.location.reload()\")\n\n\ndef get_main_window() -> webview.Window:\n    return ID_TO_WINDOW[UNIQUE_NAME_TO_ID[\"main\"]]\n\n\ndef destroy_main_window() -> None:\n    destroy_window(UNIQUE_NAME_TO_ID[\"main\"])\n\n\ndef get_main_window_size_position() -> Dict[str, int]:\n    window = get_main_window()\n    return {\n        \"left\": window.x,\n        \"top\": window.y,\n        \"width\": window.width,\n        \"height\": window.height,\n    }\n\n\ndef init_window_hacks() -> None:\n    try:\n        from webview.platforms import cocoa\n\n        from .macos import reposition_traffic_light_buttons, show_traffic_light_buttons\n    except ImportError:\n        pass\n    else:\n        # This cocoa specific hack shows the traffic light buttons (pywebview hides these\n        # in frameless mode by default) and also moves them so they look better placed\n        # in the sidebar header.\n\n        class CustomBrowserView(cocoa.BrowserView):\n            def first_show(self, *args, **kwargs):\n                show_traffic_light_buttons(self.window)\n                reposition_traffic_light_buttons(self.window)\n                super().first_show(*args, **kwargs)\n\n        class CustomWindowDelegate(cocoa.BrowserView.WindowDelegate):\n            def windowDidResize_(self, notification):\n                reposition_traffic_light_buttons(notification.object())\n\n        cocoa.BrowserView = CustomBrowserView\n        cocoa.BrowserView.WindowDelegate = CustomWindowDelegate\n","repo_name":"Oxygem/Kanmail","sub_path":"kanmail/window/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":1242,"dataset":"github-code","pt":"81"} +{"seq_id":"70125546504","text":"#!/usr/bin/python3\n# -*-coding:utf-8 -*-\n\"\"\"Unit tests.\"\"\"\n\nimport pytest # noqa: F401\n\nfrom utils.gif_json import GifJson\n\n\ndef test_get_gif():\n    \"\"\"Test get_gif.\"\"\"\n    gifs = GifJson(\"gifs.sample.json\")\n    my_gif: dict = gifs.get_gif(\"your_gif_name\")\n    assert my_gif['url'] == \"your_gif_url\"\n\n\ndef test_get_gif_fail():\n    \"\"\"Test get_gif fail.\"\"\"\n    gifs = 
GifJson(\"gifs.sample.json\")\n my_gif = gifs.get_gif(\"toto\")\n assert my_gif is None # gif toto return none\n\n\ndef test_get_names_string():\n \"\"\"Test get_gif fail.\"\"\"\n gifs_json = GifJson(\"gifs.sample.json\")\n # gif_json_mock = MagicMock(GifJson)\n gifs_json.gifs = {\"foo\": {\"public\": True,\n \"url\": \"https://foo.gif\"},\n \"bar\": {\"public\": True,\n \"url\": \"http://bar.gif\"},\n \"foobar\": {\"public\": True,\n \"url\": \"http://bar.gif\"}\n }\n gif_string = gifs_json.get_names_string(private=False)\n assert gif_string == \"foo\\nbar\\nfoobar\"\n\n\ndef test_get_names_string_private():\n \"\"\"Test get_gif fail.\"\"\"\n gifs_json = GifJson(\"gifs.sample.json\")\n # gif_json_mock = MagicMock(GifJson)\n gifs_json.gifs = {\"foo\": {\"public\": True,\n \"url\": \"https://foo.gif\"},\n \"bar\": {\"public\": True,\n \"url\": \"http://bar.gif\"},\n \"fooba\": {\"public\": False,\n \"url\": \"http://bar.gif\"}\n }\n gif_string = gifs_json.get_names_string(private=True)\n assert gif_string == \"foo\\nbar\"\n","repo_name":"Azrood/DCTbot","sub_path":"tests/test_func_gifs.py","file_name":"test_func_gifs.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"22096044162","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\n\nfrom .models import (\n UserProfile, UserProfileQuestionnaireAnswer, PollutantSensitivity)\n\nUserModel = get_user_model()\n\n\nclass PollutantSensitivitySerializer(serializers.ModelSerializer):\n level_display = serializers.CharField(source='get_level_display')\n\n class Meta:\n model = PollutantSensitivity\n fields = ('pollutant_id', 'level', 'level_display', )\n\n\nclass UserProfileQuestionnaireAnswerSerializer(serializers.ModelSerializer):\n\n def to_internal_value(self, data):\n question_id = data.get('question_id')\n answer_id = data.get('answer_id')\n\n # Perform the data validation.\n if not answer_id:\n raise serializers.ValidationError({\n 'answer_id': 'This field is required.'\n })\n\n # Return the validated values. 
This will be available as\n        # the `.validated_data` property.\n        return {\n            'question_id': question_id,\n            'answer_id': answer_id\n        }\n\n    class Meta:\n        model = UserProfileQuestionnaireAnswer\n        fields = ('question_id', 'answer_id', )\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n    questionnaire_answers = UserProfileQuestionnaireAnswerSerializer(\n        required=False, many=True)\n\n    sensitivity_levels = PollutantSensitivitySerializer(\n        many=True, read_only=True)\n\n    class Meta:\n        model = UserProfile\n        fields = (\n            'age', 'colour_blindness', 'aqi_scale', 'modified', 'vis_tool',\n            'questionnaire_answers', 'sensitivity_levels', )\n        read_only_fields = ('modified', )\n\n    def update(self, instance, validated_data):\n        questionnaire_answers = validated_data.pop(\n            'questionnaire_answers', [])\n        answers_changed = instance.answers_will_change(\n            questionnaire_answers)\n        for a in questionnaire_answers:\n            UserProfileQuestionnaireAnswer.objects.update_or_create(\n                profile=instance, question_id=a['question_id'], defaults={\n                    'answer_id': a['answer_id']\n                })\n\n        # Update sensitivity levels\n        if answers_changed:\n            instance.update_sensitivity_levels()\n        return super(UserProfileSerializer, self).update(\n            instance, validated_data)\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n\n    def create(self, validated_data):\n        user = super(UserSerializer, self).create(validated_data)\n        user.set_password(validated_data['password'])\n        user.save()\n        return user\n\n    class Meta:\n        model = UserModel\n        fields = (\n            'id', 'url', 'username', 'email', 'password', 'first_name',\n            'last_name')\n        extra_kwargs = {\n            'url': {\n                'view_name': 'api:accounts:user-detail'\n            },\n            'password': {'write_only': True}\n        }\n\n\nclass ExtendedUserSerializer(UserSerializer):\n\n    profile = UserProfileSerializer(required=False)\n\n    class Meta(UserSerializer.Meta):\n        fields = (\n            'id', 'url', 'username', 'email', 'first_name',\n            'last_name', 'profile', 'is_staff', 'is_active',\n            'is_superuser', 'date_joined', )\n        read_only_fields = (\n            'is_staff', 'is_superuser', 'is_active', 'date_joined', )\n\n    def update(self, instance, validated_data):\n        profile = validated_data.pop('profile', None)\n        if profile is not None:\n            # pop the answers only when a profile payload was sent;\n            # profile may be None here and None has no .pop()\n            questionnaire_answers = profile.pop(\n                'questionnaire_answers', [])\n            upProfile, created = UserProfile.objects.update_or_create(\n                user=instance, defaults=profile)\n            answers_changed = upProfile.answers_will_change(\n                questionnaire_answers)\n            for a in questionnaire_answers:\n                UserProfileQuestionnaireAnswer.objects.update_or_create(\n                    profile=upProfile, question_id=a['question_id'], defaults={\n                        'answer_id': a['answer_id']\n                    })\n\n            # Update sensitivity levels\n            if answers_changed:\n                upProfile.update_sensitivity_levels()\n        return super(ExtendedUserSerializer, self).update(\n            instance, validated_data)\n","repo_name":"dschurholz/myaqi-backend","sub_path":"aqi_backend/accounts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74921529545","text":"\"\"\"\nThis file is part of the TheLMA (THe Laboratory Management Application) project.\nSee LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.\n\nReservoir specs mapper.\n\"\"\"\nfrom sqlalchemy.orm import relationship\n\nfrom everest.repositories.rdb.utils import as_slug_expression\nfrom everest.repositories.rdb.utils import mapper\nfrom thelma.entities.liquidtransfer import ReservoirSpecs\nfrom 
thelma.entities.rack import RackShape\n\n\n__docformat__ = 'reStructuredText en'\n__all__ = ['create_mapper']\n\n\ndef create_mapper(reservoir_specs_tbl):\n \"Mapper factory.\"\n tbl = reservoir_specs_tbl\n m = mapper(ReservoirSpecs, reservoir_specs_tbl,\n id_attribute='reservoir_specs_id',\n slug_expression=lambda cls: as_slug_expression(cls._name), # pylint: disable=W0212\n properties=dict(\n _rack_shape=relationship(RackShape, uselist=False,\n# lazy='joined'\n ),\n _name=tbl.c.name,\n _description=tbl.c.description,\n _max_volume=tbl.c.max_volume,\n _min_dead_volume=tbl.c.min_dead_volume,\n _max_dead_volume=tbl.c.max_dead_volume)\n )\n return m\n","repo_name":"helixyte/TheLMA","sub_path":"thelma/repositories/rdb/mappers/reservoirspecs.py","file_name":"reservoirspecs.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41256768817","text":"#import sys;\n#sys.path.append(\"../cuqipy\")\n\nimport numpy as np\nimport torch as xp\nimport matplotlib.pyplot as plt\nfrom inclusion import U_class, image, full_tensor_sino\n\nimport arviz\nfrom cuqi.distribution import Gaussian, Uniform, JointDistribution\nfrom cuqi.sampler import Gibbs, MH, pCN\n\nclass forward_map():\n def __init__(self,N,view_angles,s=2):\n self.U = U_class(N=N)\n\n self.radon = full_tensor_sino()\n self.view_angles = xp.asarray(view_angles)\n\n def forward(self,s,v):\n if isinstance(s, np.ndarray):\n s = xp.asarray(s)\n if isinstance(v, np.ndarray):\n v = xp.asarray(v)\n self.U.compute_sqrt_lambda(s)\n u = self.U.make_u_from_v( v )\n sino = self.radon.make_sino(u,self.view_angles)\n return sino.numpy().flatten()\n\ndef F(s,v):\n if isinstance(s, np.ndarray):\n s = xp.asarray(s)\n if isinstance(v, np.ndarray):\n v = xp.asarray(v)\n out = forward.forward(s,v)\n if isinstance(out, xp.Tensor):\n out = out.numpy()\n return out.flatten()\n\nclass Metro(MH):\n def step(self, x):\n self.x0 = x\n self.scale = 0.8\n return self.sample(20, ).samples[:,-1]\n\n def _print_progress(*args, **kwargs):\n pass\n\nclass PCN(pCN):\n def step(self, x):\n self.x0 = x\n self.scale = 0.07\n return self.sample(20).samples[:,-1]\n\n def _print_progress(*args, **kwargs):\n pass\n\ndef run_gibbs():\n # loading observation file\n obs_data = np.load('./obs/obs_gear_90.npz')\n N = obs_data['N'] # discretization size\n sino = xp.asarray( obs_data['sino'] )\n noise_vec = xp.asarray( obs_data['noise_vec'] ) # noise vector\n view_angles = obs_data['view_angles'] # view angles\n\n # defining the forward problem\n forward = forward_map(N,view_angles)\n\n sigma = np.linalg.norm(sino)/100\n sigma2 = sigma*sigma\n y_obs = sino + sigma*noise_vec\n y_obs_flat = y_obs.flatten()\n\n log_like = lambda v,s: - ( 0.5*np.sum( (forward.forward(s,v) - y_obs)**2)/sigma2 )\n\n m = len(y_obs_flat)\n Im = np.ones(m)\n\n # Bayesian model\n s = Uniform(0.5,5)\n v = Gaussian(np.zeros(N) , 1)\n y = Gaussian(forward.forward, sigma2*Im)\n\n # joint distribution\n P = JointDistribution(s,v,y)\n\n # Gibbs sampler\n sampler = Gibbs(P(y=y_obs_flat), {'s':Metro, 'v':PCN})\n\n # run sampler\n samples = sampler.sample(10000)\n\n np.savez('./stats/stat_gear.npz',samp1=samples['v'].samples.T,samp2=samples['s'].samples.T)\n\nif __name__ == '__main__':\n 
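# entry point: loads ./obs/obs_gear_90.npz, draws 10000 Gibbs samples and\n    # writes them to ./stats/stat_gear.npz for later analysis\n    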
run_gibbs()\n\n\n","repo_name":"babakmaboudi/uncertain_roughness","sub_path":"CT/inverse_CUQIpy.py","file_name":"inverse_CUQIpy.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2556481253","text":"# coding=utf-8\nimport sys\nfile = sys.argv[1]\nimport json\narray = []\n# fakenumber = 0\n# truenumber = 0\n# negnumber = 0\n# posnumber = 0\nnumber=[0,0,0,0]\ntotal = 0\ntrue={}\nfake={}\nneg={}\npos={}\ndef trueOrFake(word):\n if word[1]=='Fake':\n number[0]+=1\n for x in range(3,len(word)):\n if word[x] in fake:\n fake[word[x]]+=1\n else:\n fake[word[x]]=1\n\n else:\n number[1] += 1\n for x in range(3,len(word)):\n if word[x] in true:\n true[word[x]]+=1\n else:\n true[word[x]]=1\n\n\n if word[2]=='Neg':\n number[2] += 1\n for x in range(3,len(word)):\n if word[x] in neg:\n neg[word[x]]+=1\n else:\n neg[word[x]]=1\n else:\n number[3] += 1\n for x in range(3,len(word)):\n if word[x] in pos:\n pos[word[x]]+=1\n else:\n pos[word[x]]=1\n\n\n# with open(\"train-labeled.txt\",\"r\") as f:\nwith open(file, \"r\") as f:\n content = f.readlines()\n for x in content:\n total += 1\n x = x.strip('\\n')\n word = x.split(\" \")\n array.append(word[0]);\n trueOrFake(word);\n print(number)\n print(total)\n print(true)\n print(fake)\n print(neg)\n print(pos)\n\n\nimport json\nlistdic={}\nlistdic[\"number\"]=number\njsObj1 = json.dumps(true)\njsObj2= json.dumps(fake)\njsObj3 = json.dumps(neg)\njsObj4 = json.dumps(pos)\njsObj5 = json.dumps(listdic)\n\nfileObject = open(\"nbmodel.txt\",\"w\")\n#fileObject = open(\"dict_f.json\",\"w\")\nfileObject.write(jsObj1)\nfileObject.write('\\n')\nfileObject.write(jsObj2)\nfileObject.write('\\n')\nfileObject.write(jsObj3)\nfileObject.write('\\n')\nfileObject.write(jsObj4)\nfileObject.write('\\n')\nfileObject.write(jsObj5)","repo_name":"FanZhangSara/NLP-naive-Bayes-classifier","sub_path":"nblearn3.py","file_name":"nblearn3.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14006280645","text":"import typing\nimport copy\nimport random\nimport json\nimport gzip\nimport markovify\nimport spacy\nfrom app.caching import cache\nfrom flask import current_app\n\n\nclass POSifiedText(markovify.Text):\n '''\n POSified text is used for both construction and sentence sythesis\n Includes POS (parts of speech) to allow for more sensible sentence structure.\n '''\n def word_split(self, sentence):\n nlp_model = get_nlp_model('en_core_web_sm')\n return ['::'.join((word.orth_, word.pos_)) for word in nlp_model(sentence)]\n\n def word_join(self, words):\n sentence = ' '.join(word.split('::')[0] for word in words)\n return sentence\n\n\ndef markov_generate(persona: str='Trump', params: dict={}) -> str:\n '''\n Generate a quote from initialized markov models\n Models have already been calibrated; this auto-generates something from \n the Markov model and returns.\n\n Parameters:\n - persona\n - params \n '''\n print('Generating markov text for persona: {}'.format(persona))\n\n # Retreive list fo models applicable\n models = get_models(persona, {k:params[k] for k in ['MODEL_DIRECTORY', 'MARKOV_MODELS', 'PERSONAS']}) \n model_spec = models[0] # models[random.randint(0, 100) % 2]\n model = model_spec['model']\n text = model.make_short_sentence(max_chars = model_spec['max_chars'])\n print('Markov text ({}): {}'.format(persona, text))\n return text\n\n\n@cache.memoize()\ndef 
get_nlp_model(lang_module='en_core_web_sm'):\n return spacy.load(lang_module)\n\n\n@cache.memoize() # Cache decorator; save to memory\ndef get_models(persona: str='Trump', params: dict={}) -> list:\n '''\n Initializes list of models specified for this persona\n Models are pre-calibrated and saved into json format\n Read into POSifiedText markovify model extension\n \n Params:\n persona: \n params: \n '''\n # Initialize all available models specified\n model_specs = copy.deepcopy(params['MARKOV_MODELS'][persona])\n model_dir = params['MODEL_DIRECTORY']\n \n print('Initializing markov model for persona: {}'.format(persona))\n models = []\n for model_spec in model_specs:\n model_json = read_model_file(model_dir, model_spec['filename'])\n model_obj = POSifiedText.from_json(model_json)\n model_spec['model'] = model_obj\n models.append(model_spec)\n\n return models\n\n\ndef read_model_file(model_dir, model_file):\n '''\n The models should be in compressed (zipped) format)\n Additionally, we are reading from within the application\n '''\n model_file_path = '{app_root}/{resource_dir}{model_file}'.format(app_root=current_app.root_path,\n resource_dir=model_dir, model_file=model_file)\n with gzip.GzipFile(model_file_path, 'r') as fin:\n model_json = json.loads(fin.read().decode('utf-8'))\n \n return model_json\n\n\ndef sentence_topic_extract(text: str, rules: typing.List[typing.Dict], short_circuit: bool=True) -> list:\n '''\n Find the \"topic\" of a given sentence\n Based on a set of rules (lambda functions), operate on either document-level \n named entities, or on tokens in the document\n This will inform image search + document title\n\n Parameters:\n text (str): the sentence / string to search\n rules (list): a list of rules (defined in default.py configuration)\n short_circuit (bool) - if a prioritied match is found, return first matching\n '''\n # TODO: if a named ent is all caps, check that the lower() version is not a verb (i.e. 
\"DO\" classified as org)\n\n nlp = get_nlp_model('en_core_web_sm')\n doc = nlp(text)\n matches = []\n for rule in rules:\n search_elem = [ent for ent in doc.ents] if rule['type'] == 'ent' else [tok for tok in doc]\n curr_matches = [elem for elem in search_elem if rule['lambda'](elem, nlp)]\n matches += curr_matches\n if short_circuit and len(curr_matches) > 0:\n break\n\n return matches\n\n\n","repo_name":"liangjh/tspire-api","sub_path":"app/spire/spiremodel.py","file_name":"spiremodel.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28784847608","text":"import torch \nimport torch.nn as nn\nfrom torchvision.models.feature_extraction import get_graph_node_names\n\nclass SiameseModel(nn.Module):\n \n # potential configurable arguments \n # channels: int, n_classes: int, dim_sizes: List[int], kernel_size: int, stride: int, padding: int, **kwargs\n # pretrained model and weights \n def __init__(self, base_model, base_model_weights): \n super(SiameseModel, self).__init__()\n self.emb_size = 20\n self.weights = base_model_weights\n self.siamese = base_model(weights=self.weights)\n train_nodes, eval_nodes = get_graph_node_names(self.siamese)\n # self.classifier = nn.Sequential(\n # nn.Linear(self.siamese.get_submodule(train_nodes[-1]).out_features, 1),\n # nn.Sigmoid()\n # )\n # self.classifier = nn.Linear(self.siamese.get_submodule(train_nodes[-1]).out_features * 2, 1)\n # final model \n self.classifier = nn.Linear(self.siamese.get_submodule(train_nodes[-1]).out_features, 1)\n # model with embedding \n # self.feature_extractor = nn.Sequential(\n # nn.Linear(self.siamese.get_submodule(train_nodes[-1]).out_features, 512),\n # nn.ReLU(inplace = True),\n # nn.Linear(512, self.emb_size)\n # )\n # self.classifier = nn.Linear(self.emb_size, 1)\n\n def forward(self, img1, img2): \n preprocess = self.weights.transforms()\n x1 = preprocess(img1)\n x2 = preprocess(img2)\n out1 = self.siamese(x1)\n out2 = self.siamese(x2)\n # multiply to get combined feature vector representing the similarity btwn the two\n # combined_features = torch.cat((out1, out2), 1)\n # output = self.classifier(combined_features)\n # model with embedding \n # out1 = self.feature_extractor(out1)\n # out2 = self.feature_extractor(out2)\n diff = torch.abs(torch.sigmoid(out1) - torch.sigmoid(out2))\n output = self.classifier(diff)\n return output","repo_name":"rchllllll/cp5105-capstone-proj","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9233612878","text":"import pandas as pd\nimport numpy as np\nimport streamlit as st\nfrom google.oauth2 import service_account\nfrom google.cloud import bigquery\nfrom io import StringIO\n#from streamlit import caching\nimport time\n\n# col=0 (advertisingsystem), 1 (PubAccId) , 2 (Relationship), \ndef check(df,col,keyword):\n list=df[col][~df[col].str.contains(keyword)].tolist()\n if len(list)>0:\n return list\n else:\n return False\n\n# Check if AvertisingSystem contains '.' 
or Relationship is not DIRECT or RESELLER\ndef return_input_error(input):\n if check(input,0,'\\.'):\n st.sidebar.write('Check AdvertisingSystem:')\n st.sidebar.write(check(input,0,'\\.'))\n if check(input,2,'DIRECT|RESELLER'):\n st.sidebar.write('Check Relationship:')\n st.sidebar.write(check(input,2,'DIRECT|RESELLER'))\n\n\n# df[0] (advertisingsystem), df[1] (PubAccId) , df[2] (Relationship), \ndef check_row(df,input_data,row):\n df_filtered=df[(df['AdvertisingSystem']==input_data[0][row])&(df['PubAccId']==input_data[1][row])&(df['Relationship']==input_data[2][row])]\n if df_filtered.shape[0]>0:\n return df_filtered\n else:\n return None\n\t\n#download\ndef download(output_data):\n if output_data.shape[0]>0: \n csv = output_data.to_csv(index=False).encode('utf-8')\n st.download_button(\n \t\tlabel=\"Download ouput as CSV\",\n \t\tdata=csv,\n \t\tfile_name='data.csv',\n \t\tmime='text/csv',\n\t\t)\n\t\n st.dataframe(output_data.reset_index(drop=True),2000,1000)\n\n else:\n st.write('')\n st.write('No output found')\n\t\n\n\t\n\t\n\t\nst.set_page_config(layout=\"wide\")\n\n\n# streamlit_app.py\n\n\n# initial setting\nuploaded_file=None\nlist_lines='Ex: google.com, 12335, DIRECT'\nif 'count' not in st.session_state:\n\tst.session_state.count = 0\n\n#if 'BI_team' not in st.session_state:\n # Password = st.text_input('Password', 'Type here')\n # st.session_state.Password=Password\n # st.write('The password is', Password)\n#else:\n # st.write('Nothing')\n\t\ncol1, col2,col3 = st.columns(3)\nwith col1:\n st.write('')\nwith col2:\n text_input_container = st.empty()\n text_input_container.text_input(\"Enter Password\", key=\"text_input\")\nwith col3:\n st.write('')\n\n\nif (st.session_state.text_input != \"\"):\n if st.session_state.text_input != 'BI_team':\n if st.session_state.count <6:\n st.session_state.count += 1\n\n col1a, col2a,col3a = st.columns(3)\n with col1:\n st.write('')\n with col2a:\n st.write('Wrong password')\n with col3a:\n st.write('')\n else:\n text_input_container.empty()\n st.write('Please contact admin')\n \n \n else:\n text_input_container.empty()\n #st.info(st.session_state.text_input)\n\nif st.session_state.text_input =='BI_team':\n choice = st.sidebar.radio(\"Select invironment\",('WEB','APP', 'Test', 'Test2'), horizontal=True)\n\n\n choice2 = st.sidebar.radio(\"Insert input\",('Upload','Type/Paste'), horizontal=True)\n\n if choice2=='Upload':\n uploaded_file = st.sidebar.file_uploader(\"Choose a .csv file\")\n\n if uploaded_file is not None:\n bytes_data = uploaded_file.getvalue()\n\t\n try:\n upload_input=pd.read_csv(uploaded_file,header=None)\n n=upload_input.shape[0]\n\t\n\t # Clean\n upload_input[0]=upload_input[0].str.replace(' ', '').str.replace('\\t','').str.lower() \n upload_input[1]=upload_input[1].astype('string').str.replace(' ', '').str.replace('\\t','').str.lower()\n upload_input[2]=upload_input[2].str.replace(' ', '').str.replace('\\t','').str.upper()\n\t \n return_input_error(upload_input)\n st.sidebar.dataframe(upload_input)\n\t\t\n except Exception as ex:\n st.sidebar.write('Please check the input format')\n uploaded_file=None\n \n\n elif choice2=='Type/Paste':\n list_lines= st.sidebar.text_area('Put lines here', 'Ex: google.com, 12335, DIRECT')\n \n try:\n input=pd.read_table(StringIO(list_lines),sep=\",\", header=None)\n\t\n # Clean\n input[0]=input[0].str.replace(' ','').str.replace('\\t','').str.lower()\n input[1]=input[1].astype('string').str.replace(' ','').str.replace('\\t','').str.lower()\n input[2]=input[2].str.replace(' 
','').str.replace('\\t','').str.upper()\n input=input.drop_duplicates()\n if list_lines !='Ex: google.com, 12335, DIRECT' and list_lines.strip()!='':\n return_input_error(input)\n st.sidebar.write('Input data',input)\n except:\n st.sidebar.write('Please check the input format')\n list_lines=''\n\n\n\n\n\n col4, col5,col6 = st.columns(3)\n\n with col4:\n st.image(\"images.png\", width=80)\n\n with col5:\n st.title(\"📊 IAB dataset\")\n with col6:\n st.write('')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\t\n if (uploaded_file is None) and ((list_lines=='Ex: google.com, 12335, DIRECT') or (list_lines.strip()=='')):\n st.markdown(f'

{\"Please insert input!\"}
', unsafe_allow_html=True)\n\n # Create API client.\n credentials = service_account.Credentials.from_service_account_info(st.secrets[\"gcp_service_account\"])\n client = bigquery.Client(credentials=credentials)\n\n\n\n if ('Time1' not in st.session_state) and ('Time2' not in st.session_state):\n query_time1=\"SELECT Date FROM `showheroes-bi.bi.bi_adstxt_join_sellerjson_with_count_domains` limit 1\"\n df_time1= client.query(query_time1).to_dataframe()\n st.session_state['Time1']=df_time1['Date'][0]\n\n query_time2=\"SELECT Date FROM `showheroes-bi.bi.bi_appadstxt_join_sellersjson_with_count_domains` limit 1\"\n df_time2= client.query(query_time2).to_dataframe()\n st.session_state['Time2']=df_time2['Date'][0]\n\n\t\n\n @st.cache(max_entries=1)\n def load_data1(time): \n query1=\"SELECT * except(Date) FROM `showheroes-bi.bi.bi_adstxt_join_sellerjson_with_count_domains`\"\n query_job1 = client.query(query1)\n return client.query(query1).to_dataframe().fillna('-')\n\n\n\n\n @st.cache(max_entries=1)\n def load_data2(time):\n query2=\"SELECT * except(Date) FROM `showheroes-bi.bi.bi_appadstxt_join_sellersjson_with_count_domains`\"\n query_job2 = client.query(query2)\n return client.query(query2).to_dataframe().fillna('-')\n\t\n df1=load_data1(st.session_state['Time1']).copy()\n df2=load_data2(st.session_state['Time2']).copy()\n\n\n if (choice==\"WEB\") and (uploaded_file is not None):\n # first filter before looping\n df1=df1[(df1['AdvertisingSystem'].isin(upload_input[0])) & (df1['PubAccId'].isin(upload_input[1]))]\n df1=df1.reset_index(drop=True)\n\n # Initial setting\n data1=pd.DataFrame(columns=df1.columns.tolist())\n\t\n for row in range(upload_input.shape[0]):\n data1=pd.concat([data1, check_row(df1,upload_input,row)]) \n \n \n # Download \t\n download(data1)\n\t\n elif (choice==\"WEB\") and (list_lines!='Ex: google.com, 12335, DIRECT') and (list_lines.strip()!=''):\n # first filter \n df1=df1[(df1['AdvertisingSystem'].isin(input[0])) & (df1['PubAccId'].isin(input[1]))]\n df1=df1.reset_index(drop=True)\n \n data1=pd.DataFrame(columns=df1.columns.tolist())\n\t\n for row in range(input.shape[0]):\n data1=pd.concat([data1, check_row(df1,input,row)]) \n \n\n # Download \n download(data1)\n \n\t\n elif (choice==\"APP\") and (uploaded_file is not None): \n # first filter \n df2=df2[(df2['AdvertisingSystem'].isin(upload_input[0])) & (df2['PubAccId'].isin(upload_input[1]))]\n df2=df2.reset_index(drop=True)\n \n\n # Initial setting\n data2=pd.DataFrame(columns=df2.columns.tolist())\n\t\n for row in range(upload_input.shape[0]):\n data2=pd.concat([data2, check_row(df2,upload_input,row)]) \n \n\n # Download \t\n download(data2)\n\n\t\n elif (choice==\"APP\") and (list_lines!='Ex: google.com, 12335, DIRECT') and (list_lines.strip()!=''):\n # first filter\n df2=df2[(df2['AdvertisingSystem'].isin(input[0])) & (df2['PubAccId'].isin(input[1]))]\n df2=df2.reset_index(drop=True)\n\t\n data2=pd.DataFrame(columns=df2.columns.tolist())\n\t\n for row in range(input.shape[0]):\n data2=pd.concat([data2, check_row(df2,input,row)]) \n\n # Download\n download(data2)\t\n\t \t\n elif choice=='Test':\n # Store the initial value of widgets in session state\n if \"visibility\" not in st.session_state:\n st.session_state.visibility = \"visible\"\n st.session_state.disabled = False\n\n col1, col2 = st.columns(2)\n\n with col1:\n \t st.checkbox(\"Disable selectbox widget\", key=\"disabled\")\n \t st.radio(\"Set selectbox label visibility 👉\",key=\"visibility\",options=[\"visible\", \"hidden\", \"collapsed\"],)\n\n with 
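The WEB/APP branches above match each input row against the BigQuery dataframe in a Python loop (`check_row` plus repeated `pd.concat`); an inner merge performs the same triple-equality match in one vectorised call. A sketch reusing the record's key column names ("Count" is an illustrative extra column):

```python
import pandas as pd

KEYS = ["AdvertisingSystem", "PubAccId", "Relationship"]

def match_rows(df: pd.DataFrame, user_input: pd.DataFrame) -> pd.DataFrame:
    """Vectorised equivalent of the check_row loop: keep df rows whose
    (system, account, relationship) triple appears in the user input."""
    keys = user_input.rename(columns=dict(zip(user_input.columns, KEYS)))
    return df.merge(keys.drop_duplicates(), on=KEYS, how="inner")

df = pd.DataFrame([["google.com", "12335", "DIRECT", 7]], columns=KEYS + ["Count"])
inp = pd.DataFrame([["google.com", "12335", "DIRECT"]])
print(match_rows(df, inp))
```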
col2:\n \t option = st.selectbox(\"How would you like to be contacted?\",(\"Email\", \"Home phone\", \"Mobile phone\"),label_visibility=st.session_state.visibility,disabled=st.session_state.disabled,)\n\t\n\n elif choice=='Test2':\n st.write('Hello')\n st.caching.clear_cache() \n\t\n @st.cache(suppress_st_warning=True)\n def expensive_computation(a, b):\n st.session_state[\"cache_updated\"] = True\n time.sleep(2) # This makes the function take 2s to run\n return a * b\n\n a = 3\n b = 21\n res = expensive_computation(a, b)\n st.write(res)\n st.write((st.session_state[\"cache_updated\"]))\n#else:\n# st.write('WRONG PASSWORD')\n # st.button('Try again')\n# text_input_container = st.empty()\n # text_input_container.text_input(\"Enter Password\", key=\"text_input\")\n","repo_name":"VT-Do/Bi_new","sub_path":"backup_v2.py","file_name":"backup_v2.py","file_ext":"py","file_size_in_byte":9998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71141944584","text":"# accounts/forms.py\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.core.exceptions import ValidationError\n\nfrom .models import CustomUser, Team, Bet, Match\n\nclass CustomUserCreationForm(UserCreationForm):\n\n class Meta:\n model = CustomUser\n fields = (\"username\", \"email\", \"points\")\n\nclass CustomUserChangeForm(UserChangeForm):\n\n class Meta:\n model = CustomUser\n fields = (\"username\", \"email\", \"points\")\n\nclass PlaceBetForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n super(PlaceBetForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Bet\n fields = ['choice', 'amount', 'match']\n \n def clean(self):\n cleaned_data = super(PlaceBetForm, self).clean()\n if self.cleaned_data['amount'] > getattr(self.user, 'points'):\n raise ValidationError(\"Error! You cannot bet more points than you have!\")\n if self.cleaned_data['amount'] < 0:\n raise ValidationError(\"You can't bet negative points!\")\n # can't bet on yourself\n if getattr(self.user, 'team') == self.cleaned_data['choice']:\n raise ValidationError(\"You can't bet on your own team!\")\n # cant bet on a team in a match you're in\n match = self.cleaned_data['match']\n match = Match.objects.filter(id=getattr(match,'id'))\n teams = list(match.values_list('teams__name', flat=True))\n if str(getattr(self.user, 'team')) in teams:\n raise ValidationError(\"You can't bet on a match you're in!\")\n # cant bet on team if youve already bet on a different team for the same match\n # first, get all bets for this match placed by this player\n placed_bets = Bet.objects.filter(match=self.cleaned_data['match']) # all bets for this match\n placed_bets = placed_bets.filter(player=self.user) # just by this player\n # check the team of the 1st one\n if len(placed_bets) > 0:\n print(self.cleaned_data)\n team_choice = getattr(placed_bets[0], \"choice\")\n if self.cleaned_data['choice'] != team_choice:\n raise ValidationError(\"You can't bet for a different team! 
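One fragile spot in `PlaceBetForm.clean` above: it indexes `self.cleaned_data['amount']` directly, but when the `amount` field itself failed validation that key is absent and `clean()` raises `KeyError` instead of reporting a form error. `.get()` with a `None` check is the usual defensive pattern; a sketch of the method body covering only the first two checks:

```python
from django.core.exceptions import ValidationError

def clean(self):  # method body sketch for the PlaceBetForm defined above
    cleaned = super(PlaceBetForm, self).clean()
    amount = cleaned.get("amount")  # None if the field already failed
    if amount is not None:
        if amount > getattr(self.user, "points"):
            raise ValidationError("Error! You cannot bet more points than you have!")
        if amount < 0:
            raise ValidationError("You can't bet negative points!")
    return cleaned
```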
You've already placed a bet!\")\n return self.cleaned_data","repo_name":"sanjerine/midyearmania","sub_path":"bets/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70981789704","text":"from django.shortcuts import render, redirect\nfrom django.conf import settings\nfrom .forms import UserRegisterForm, UserProfileForm\nfrom django.contrib import messages\nfrom .models import UserType\nfrom django.contrib.auth.models import User\n\n# Create your views here.\napp = settings.APP_NAME\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n print(\"hello1\")\n form.save()\n username = form.cleaned_data['username']\n user = User.objects.filter(username=username).first()\n UserType.objects.create(user=user, user_type=\"user\")\n user.usertype.save()\n print(\"hello2\")\n messages.success(request, f'Your Account has been created Successfully! You are now able to Log In')\n return redirect('login')\n else:\n form = UserRegisterForm()\n return render(request, 'users/register.html', {'title': 'sign up', 'app': app, 'form': form})\n\n\ndef profile(request):\n if request.method == 'POST':\n p_form = UserProfileForm(request.POST, request.FILES, instance=request.user.profile)\n if p_form.is_valid():\n p_form.save()\n messages.success(request, f'Your Profile has been Updated Successfully!')\n return redirect('profile')\n else:\n p_form = UserProfileForm(instance=request.user.profile)\n return render(request, 'users/profile.html', {'title': 'profile', 'app': app, 'form': p_form})\n","repo_name":"jdshah98/Food-Express","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32572811299","text":"import sqlite3\r\nfrom Classes.dbClasses import *\r\n\r\n\r\nclass AccessDatabase():\r\n def __init__(self):\r\n self.conn = sqlite3.connect(\"Database/database.db\")\r\n self.cursor = self.conn.cursor()\r\n # self.create_tables()\r\n\r\n ###########################################################################################################################################################\r\n # CREATE DATA TABLES\r\n def create_tables(self):\r\n try:\r\n self.cursor.execute('''\r\n CREATE TABLE IF NOT EXISTS User(\r\n user_id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n name VARCHAR(100) NOT NULL,\r\n email VARCHAR(100) NOT NULL,\r\n debts INTEGER DEFAULT 0,\r\n aptNo INTEGER,\r\n FOREIGN KEY(aptNo) REFERENCES Apartment(aptNo) \r\n )\r\n ''')\r\n print(\"user table created successfully\")\r\n except:\r\n print(\"failed to create user table\")\r\n\r\n try:\r\n self.cursor.execute('''\r\n CREATE TABLE IF NOT EXISTS Bill(\r\n bill_id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n amount INTEGER NOT NULL,\r\n due_date REAL NOT NULL,\r\n details VARCHAR(100) NOT NULL,\r\n user_id INTEGER,\r\n FOREIGN KEY(user_id) REFERENCES User(user_id)\r\n )\r\n ''')\r\n print(\"Bill table created successfully\")\r\n except:\r\n print(\"failed to create Bill table\")\r\n\r\n try:\r\n self.cursor.execute('''\r\n CREATE TABLE IF NOT EXISTS Payments(\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n amount INTEGER NOT NULL,\r\n date REAL NOT NULL,\r\n bill_id INTEGER,\r\n FOREIGN KEY(bill_id) REFERENCES Bill(bill_id)\r\n )\r\n ''')\r\n print(\"Payments table created successfully\")\r\n except:\r\n print(\"failed 
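In the users/views.py record above, `register` saves the form and then re-queries the user by username before creating the `UserType`; `ModelForm.save()` already returns the created instance, so the lookup (and the redundant `user.usertype.save()`) can go. A condensed sketch assuming the same imports as the record:

```python
def register(request):
    form = UserRegisterForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        user = form.save()  # save() returns the new User directly
        UserType.objects.create(user=user, user_type="user")
        messages.success(request, 'Your Account has been created Successfully! You are now able to Log In')
        return redirect('login')
    return render(request, 'users/register.html', {'title': 'sign up', 'app': app, 'form': form})
```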
to create Payments table\")\r\n\r\n try:\r\n self.cursor.execute('''\r\n CREATE TABLE IF NOT EXISTS Apartment(\r\n aptNo INTEGER PRIMARY KEY AUTOINCREMENT,\r\n status VARCHAR(20) NOT NULL\r\n )\r\n ''')\r\n print(\"Apartment table created successfully\")\r\n except:\r\n print(\"failed to create Apartment table\")\r\n\r\n\r\n ############################################################################################################################################################\r\n # GETTERS\r\n def getUser(self, id):\r\n self.cursor.execute(\" SELECT aptNo FROM User WHERE user_id = ?\",(id,))\r\n aptNo = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT debts FROM User WHERE user_id = ?\", (id,))\r\n debts = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT email FROM User WHERE user_id = ?\", (id,))\r\n email = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT name FROM User WHERE user_id = ?\", (id,))\r\n name = self.cursor.fetchone()[0]\r\n\r\n user = User(user_id=id, name=name, aptNo=aptNo, debts=debts, email=email)\r\n return user\r\n\r\n def getBill(self, id):\r\n self.cursor.execute(\" SELECT amount FROM Bill WHERE bill_id = ?\", (id,))\r\n amount = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT due_date FROM Bill WHERE bill_id = ?\", (id,))\r\n due_date = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT user_id FROM Bill WHERE bill_id = ?\", (id,))\r\n user_id = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT details FROM Bill WHERE bill_id = ?\", (id,))\r\n details = self.cursor.fetchone()[0]\r\n\r\n bill = Bill(bill_id=id, amount=amount, due_date=due_date, user_id=user_id, details=details)\r\n return bill\r\n\r\n def getPayments(self, id):\r\n self.cursor.execute(\" SELECT amount FROM Payments WHERE id = ?\", (id,))\r\n amount = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT date FROM Payments WHERE id = ?\", (id,))\r\n date = self.cursor.fetchone()[0]\r\n self.cursor.execute(\" SELECT bill_id FROM Payments WHERE id = ?\", (id,))\r\n bill_id = self.cursor.fetchone()[0]\r\n\r\n payment = Payment(id=id, amount=amount, date=date, bill_id=bill_id)\r\n return payment\r\n \r\n\r\n def getApartment(self, id):\r\n self.cursor.execute(\" SELECT status FROM Apartment WHERE aptNo = ?\", (id,))\r\n status = self.cursor.fetchone()[0]\r\n \r\n\r\n def getAllUsers(self):\r\n self.cursor.execute(\"SELECT * FROM User\")\r\n all_users = self.cursor.fetchall()\r\n return all_users\r\n\r\n def getAllApartments(self):\r\n self.cursor.execute(\"SELECT * FROM Apartment\")\r\n all_apts = self.cursor.fetchall()\r\n return all_apts\r\n \r\n def getAllBills(self):\r\n self.cursor.execute(\"SELECT * FROM Bill\")\r\n all_bills = self.cursor.fetchall()\r\n return all_bills\r\n \r\n def getAllPayments(self):\r\n self.cursor.execute(\"SELECT * FROM Payments\")\r\n all_payments = self.cursor.fetchall()\r\n return all_payments\r\n \r\n\r\n def getAllBillAmounts(self):\r\n self.cursor.execute(\"SELECT amount FROM Bill\")\r\n all_amounts = self.cursor.fetchall()\r\n return all_amounts\r\n\r\n def getAllPaymentAmounts(self):\r\n self.cursor.execute(\"SELECT amount FROM Payments\")\r\n all_amounts = self.cursor.fetchall()\r\n return all_amounts\r\n \r\n def getAllUsersDebts(self):\r\n self.cursor.execute(\"SELECT debts FROM User\")\r\n all_debts = self.cursor.fetchall()\r\n return all_debts\r\n \r\n def getAllUserBills(self, user_id):\r\n self.cursor.execute(\"SELECT * FROM Bill WHERE user_id = ?\", (user_id,))\r\n all_user_bills = 
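Each getter in the dao.py record issues one SELECT per column, so `getUser` makes four round trips; a single SELECT with `sqlite3.Row` returns all fields at once. A minimal sketch against the same schema:

```python
import sqlite3

def get_user_row(conn: sqlite3.Connection, user_id: int):
    """Fetch all User columns in one query instead of one query per field."""
    conn.row_factory = sqlite3.Row
    row = conn.execute(
        "SELECT user_id, name, email, debts, aptNo FROM User WHERE user_id = ?",
        (user_id,),
    ).fetchone()
    return dict(row) if row is not None else None
```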
self.cursor.fetchall()\r\n return all_user_bills\r\n \r\n def getAllUserPayments(self, bill_id):\r\n self.cursor.execute(\"SELECT * FROM Payments WHERE bill_id = ?\", (bill_id,))\r\n all_user_payments = self.cursor.fetchall()\r\n return all_user_payments\r\n","repo_name":"ThommysArt/Apartment-management-system-PYQT6-SQLite","sub_path":"Database/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39907456764","text":"from flask_app import app\nfrom flask import render_template,redirect,request,session,flash\nfrom flask_app.models.user import User\nfrom flask_app.models.archive import Archive\n\n\n#==================================\n# Archive Routes\n#==================================\n@app.route('/archive')\ndef archive():\n if 'user_id' not in session:\n flash(\"You must log in to do this.\")\n session['flash'] = \"archive\"\n return render_template('login.html')\n\n data = {\n 'user_id': session['user_id']\n }\n\n dates = Archive.get_all(data)\n return render_template('archive.html', dates = dates)\n\n@app.route('//archive')\ndef add_to_archive(date):\n if 'user_id' not in session:\n flash(\"You must log in to do this.\")\n session['flash'] = \"add to archive\"\n return render_template('login.html', date = date)\n\n data = {\n 'date': date,\n 'user_id': session['user_id']\n }\n \n if not Archive.get_one(data):\n Archive.create(data)\n\n return redirect(f'/apod/{date}')\n\n@app.route('/apod//delete')\ndef delete_from_archive(date):\n if ('user_id' not in session):\n return redirect('/')\n\n data = {\n 'date': date,\n 'user_id': session['user_id']\n }\n\n Archive.delete(data)\n\n return redirect('/archive')","repo_name":"Scheunemann-Matt/NASA_APOD","sub_path":"flask_app/controllers/archives.py","file_name":"archives.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39060510129","text":"shopping_list = []\r\n\r\nprint(\"Enter the items in the list\")\r\nprint(\"Enter DONE once item adding is completed\")\r\n\r\nwhile(True):\r\n new_item = input(\"Enter the item:\")\r\n if(new_item == 'DONE'):\r\n break\r\n \r\n shopping_list.append(new_item)\r\n \r\nprint(\"Here is your list items\")\r\n\r\nfor item in shopping_list:\r\n print(item)\r\n \r\n \r\n\r\n \r\n \r\n \r\n","repo_name":"rikki45678/forskinternship","sub_path":"Shopping list.py","file_name":"Shopping list.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3142769394","text":"import sys\n\n\n\"\"\"\ndef print_star(n):\n string = \"\"\n k = n - 1\n for y in range(1, n):\n string += \" \" * k + \"*\" * (2 * y - 1) + \"\\n\"\n k -= 1\n string += \"*\" * (2 * n - 1)\n k += 1\n for y in range(n - 1, 0, -1):\n string += \"\\n\" + \" \" * k + \"*\" * (2 * y - 1)\n k += 1\n return string\n\"\"\"\n\n\ndef print_star(n, k):\n if k == n:\n return \"*\" * (2 * n - 1) + \"\\n\"\n else:\n string = \"\"\n string += \" \" * (n - k) + \"*\" * (2 * k - 1) + \"\\n\"\n string += print_star(n, k + 1)\n string += \" \" * (n - k) + \"*\" * (2 * k - 1) + \"\\n\"\n return string\n\n\nn = int(sys.stdin.readline())\nsys.stdout.write(print_star(n, 
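The archive routes in the Flask record above repeat the `'user_id' not in session` guard in every view; a small decorator centralises it. A sketch in which the flash message and template follow the record while the decorator itself is my addition:

```python
from functools import wraps
from flask import flash, render_template, session

def login_required(view):
    """Run the view only when a user is logged in, as the routes above check."""
    @wraps(view)
    def wrapped(*args, **kwargs):
        if 'user_id' not in session:
            flash("You must log in to do this.")
            return render_template('login.html')
        return view(*args, **kwargs)
    return wrapped
```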
1))\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"swea/print_star/star_7.py","file_name":"star_7.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8533101750","text":"\nimport asyncio\nimport time\nfrom functools import partial, wraps\n\nimport aiohttp\nfrom num2words import num2words\nfrom pyrogram import filters, emoji\nfrom pyrogram.types import CallbackQuery, ChatPermissions, InlineKeyboardButton, InlineKeyboardMarkup, Message\n\nfrom ..main import Assistant\n\ncommand = partial(filters.command, prefixes=list(\"#!\"))\n\nasync def reply_and_delete(message: Message, text: str):\n await asyncio.gather(\n message.delete(),\n message.reply(\n text,\n quote=False,\n reply_to_message_id=getattr(\n message.reply_to_message,\n \"message_id\", None\n ),\n disable_web_page_preview=True\n )\n )\n\n\ndef admins_only(func):\n @wraps(func)\n async def decorator(bot: Assistant, message: Message):\n if bot.is_admin(message):\n await func(bot, message)\n\n await message.delete()\n\n decorator.admin = True\n\n return decorator\n\n\n################################\n\nPING_TTL = 5\n\n\n@Assistant.on_message(command(\"ping\"))\nasync def ping(_, message: Message):\n \"\"\"Ping the assistant\"\"\"\n start = time.time()\n reply = await message.reply_text(\"...\")\n delta_ping = time.time() - start\n await reply.edit_text(f\"**Pong!** `{delta_ping * 1000:.3f} ms`\")\n\n\n################################\n\n\nLOG = \"\"\"\n**حالت لاگ را فعال کنید: کد زیر را در بالای کد خود پیست کنید و دوباره کد را اجرا کنید.**\n\n```import logging\nlogging.basicConfig(level=logging.INFO)```\nبرای گرفتن لاگ دقیق تر؛ از\n`level=logging.DEBUG`\nاستفاده کنید.\n\"\"\"\n\n@Assistant.on_message(command(\"log\"))\nasync def log(_, message: Message):\n \"\"\"Enable debug logging\"\"\"\n await reply_and_delete(message, LOG)\n\n\n################################\n\n\nEX = \"\"\"\nPlease, provide us a **minimal** and **reproducible** example in order to easily understand and reproduce the problem.\n[How do I create a minimal, reproducible example?](https://stackoverflow.com/help/minimal-reproducible-example)\n\"\"\"\n\n\n@Assistant.on_message(command(\"ex\"))\nasync def ex(_, message: Message):\n \"\"\"Ask for minimal example\"\"\"\n await reply_and_delete(message, EX)\n\n\n################################\n\n\nOT = \"\"\"\n**گفت و گوی شما خارج از موضوع گروه میباشد.‌**\n\nبرای تست ربات ها از گروه @PyrogramTesting استفاده کنید\n\"\"\"\n\n\n@Assistant.on_message(command(\"ot\"))\nasync def ot(_, message: Message):\n \"\"\"offtopic conversation\"\"\"\n answer = OT\n await reply_and_delete(message, answer)\n\n\n################################\n\n\nASK = \"\"\"\n**متاسفانه سوال شما قابل فهم نیست. لطفا با سناریو و مثال دیگری مشکل خود را توضیح دهید.**\n[چطور سوال بهتری بپرسم؟](https://www.chetor.com/154278-%D8%AA%DA%A9%D9%86%DB%8C%DA%A9-%D8%B3%D9%88%D8%A7%D9%84-%D9%BE%D8%B1%D8%B3%DB%8C%D8%AF%D9%86/)\n\"\"\"\n\n\n@Assistant.on_message(command(\"ask\"))\nasync def ask(_, message: Message):\n \"\"\"How to ask questions\"\"\"\n await reply_and_delete(message, ASK)\n\n\n################################\n\n\nLEARN = \"**مشکل شما به پایروگرام مربوط نمیشود. 
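The recursive `print_star` that closes above builds a diamond of width 2n-1; the same figure falls out of a list comprehension without recursion (so no recursion-limit concern for large n). A quick equivalent:

```python
def print_star_iterative(n: int) -> str:
    """Same diamond as the recursive version, built line by line."""
    rows = [" " * (n - k) + "*" * (2 * k - 1) for k in range(1, n)]
    middle = "*" * (2 * n - 1)
    return "\n".join(rows + [middle] + rows[::-1]) + "\n"

print(print_star_iterative(3), end="")
```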
لطفا اول بر زبان پایتون مسلط شوید و سپس اقدام به مطرح کردن مشکلات خود کنید.**\"\n\n\n@Assistant.on_message(command(\"learn\"))\nasync def learn(_, message: Message):\n \"\"\"Tell to learn Python\"\"\"\n await reply_and_delete(message, LEARN)\n\n\n################################\n\n\n# One place for all rules, the docs.\nRULES = '''\n1️⃣ − رعایت ادب از مهم ترین ارکان این گروه میباشد و در صورت مشاهده، بدون اخطار مجبور به حذف خواهیم شد.\n\n2️⃣ − درپاسخ به سوال سایرین ، هر چند ساده و ابتدایی، اگر قصد جواب دادن دارید با صبر و حوصله اقدام به این عمل نمایید و از کنایه و توهین بپرهیزید\n\n3️⃣ − تبلیغات در این گروه فقط در زمینه برنامه نویسی و خرید و فروش سورس کد هایی بر پایه پایروگرام مجاز خواهد بود. پس اگر قصد خرید یا فروش دارید محتوای خود را در قالب یک پیام ارسال و در انتها کاربر را به سمت چت خصوصی جهت شروع همکاری هدایت، کنید.\n\n4️⃣ − وظیفه ما کمک به همدیگه هست؛ مسئولیت کدی که شما میزنین و پروژه ای که انجام میدین به عهده خودتون هست.\n\n5️⃣ − شما میتونین هر نوع ربات و سورس کدی که توسط پایروگرام نوشته شده باشه رو داخل گروه در قالب یک پیام معرفی کنین. افراد گروه میتونن استفاده کنن و یا یک بخشی از توسعه دهنده های ربات شما بشن.\n\n6️⃣ − گروه برای فارسی زبانان هست و ملزمه که فقط فارسی صحبت کنید؛ لطف کنید از انگلیسی صحبت کردن و تا حد امکان از فینگلیش صحبت کردن خودداری کنید. اگه هر فردی خارجی حتی به اشتباه وارد گروه شد شما ملزم هستید به زبان فارسی صحبت کنید و در صورتی که حرف های شما اهمیت خاصی برای طرف مقابل داشته باشه قطعا میتونه از برنامه های ترجمه استفاده کنه.\n\n7️⃣ - صحبت در مورد ربات تبچی - اسپمر و از این دسته ربات های مخرب ممنوعه لطفا رعایت کنید.\n\n8️⃣ − این گروه؛ گروه رسمی پایروگرام فارسی هست؛‌ لطف کنید سوالاتتون رو فقط در مورد کتابخونه پایروگرام بپرسید‌ (‌ در غیر این صورت؛ داخل گروه محدود میشین‌) ؛ برای بحث های خارج از موضوع میتونید از گروه بین المللی @PyrogramLounge استفاده کنید.\n\n9⃣ برای سوال پرسیدن، پیام خود را در یک متن بفرستید و تیکه تیکه نفرستید تا حجم پیام ها زیاد نشه اگر خواستید کد طولانی بفرستید از nekobin.com استفاده کنید یا عکس واضح بفرستید \n'''\n\nRULES_INDEX = [\n '1️⃣ − رعایت ادب از مهم ترین ارکان این گروه میباشد و در صورت مشاهده، بدون اخطار مجبور به حذف خواهیم شد.',\n '2️⃣ − درپاسخ به سوال سایرین ، هر چند ساده و ابتدایی، اگر قصد جواب دادن دارید با صبر و حوصله اقدام به این عمل نمایید و از کنایه و توهین بپرهیزید',\n '3️⃣ − تبلیغات در این گروه فقط در زمینه برنامه نویسی و خرید و فروش سورس کد هایی بر پایه پایروگرام مجاز خواهد بود. پس اگر قصد خرید یا فروش دارید محتوای خود را در قالب یک پیام ارسال و در انتها کاربر را به سمت چت خصوصی جهت شروع همکاری هدایت، کنید.',\n '4️⃣ − وظیفه ما کمک به همدیگه هست؛ مسئولیت کدی که شما میزنین و پروژه ای که انجام میدین به عهده خودتون هست.',\n '5️⃣ − شما میتونین هر نوع ربات و سورس کدی که توسط پایروگرام نوشته شده باشه رو داخل گروه در قالب یک پیام معرفی کنین. افراد گروه میتونن استفاده کنن و یا یک ب��شی از توسعه دهنده های ربات شما بشن.',\n '6️⃣ − گروه برای فارسی زبانان هست و ملزمه که فقط فارسی صحبت کنید؛ لطف کنید از انگلیسی صحبت کردن و تا حد امکان از فینگلیش صحبت کردن خودداری کنید. 
اگه هر فردی خارجی حتی به اشتباه وارد گروه شد شما ملزم هستید به زبان فارسی صحبت کنید و در صورتی که حرف های شما اهمیت خاصی برای طرف مقابل داشته باشه قطعا میتونه از برنامه های ترجمه استفاده کنه.',\n '7️⃣ - صحبت در مورد ربات تبچی - اددر - اسپمر و از این دسته ربات های مخرب ممنوعه لطفا رعایت کنید.',\n '8️⃣ − این گروه؛ گروه رسمی پایروگرام فارسی هست؛‌ لطف کنید سوالاتتون رو فقط در مورد کتابخونه پایروگرام بپرسید‌ (‌ در غیر این صورت؛ داخل گروه محدود میشین‌) ؛ برای بحث های خارج از موضوع میتونید از گروه بین المللی @PyrogramLounge استفاده کنید.',\n '9⃣ برای سوال پرسیدن، پیام خود را در یک متن بفرستید و تیکه تیکه نفرستید تا حجم پیام ها زیاد نشه اگر خواستید کد طولانی بفرستید از nekobin.com استفاده کنید یا عکس واضح بفرستید ',\n]\n\n\n@Assistant.on_message(command(\"rules\"))\nasync def rules(_, message: Message):\n \"\"\"Show Pyrogram rules\"\"\"\n try:\n index = int(message.command[1])\n text = '⚠️ متن قوانین :\\n'\n text += RULES_INDEX[index - 1]\n except Exception:\n text = RULES\n\n await reply_and_delete(message, text)\n\n################################\n\nFAQ = (\n \"**سوال شما در حال حاضر در قسمت سوالات رایج پاسخ داده شده است.**\\n\"\n \"لطفا در قسمت [سوالات رایج](https://docs.pyrogram.org/faq) جستجو کنید.\"\n)\n\n\n@Assistant.on_message(command(\"faq\"))\nasync def faq(_, message: Message):\n \"\"\"Answer is in the FAQ\"\"\"\n await reply_and_delete(message, FAQ)\n\n\n################################\n\n\nRTD = \"لطفا؛ داکیومنت را مطالعه کنید : https://docs.pyrogram.org\"\n\n\n@Assistant.on_message(command(\"rtd\"))\nasync def rtd(_, message: Message):\n \"\"\"Tell to RTD (gentle)\"\"\"\n await reply_and_delete(message, RTD)\n\n\n################################\n\n\nFMT = (\n \"لطفا کد خود را با استفاده از بک-تیک ارسال کنید تا خوانایی بیشتری داشته باشد..\\n\"\n \"```your code here```\"\n)\n\n\n@Assistant.on_message(command(\"fmt\"))\n@admins_only\nasync def fmt(_, message: Message):\n \"\"\"Tell to format code\"\"\"\n await asyncio.gather(\n message.delete(),\n message.reply(\n FMT,\n quote=False,\n parse_mode=\"html\",\n disable_web_page_preview=True,\n reply_to_message_id=getattr(\n message.reply_to_message,\n \"message_id\", None\n ),\n )\n )\n\n################################\n\nEVIL = '''\n✨ پایروگرام ابزاری متن باز - رایگان و تحت نظر انجمن است.\nاین به این معناست که شما میتوانید هرگونه استفاده ای از این ابزار را داشته باشید.\nاما راهنمایی های افراد به شما یک مزیت برای شماست و کسی مکلف به کمک کردن به شما نیست؛\nبه خصوص اگر میخواهید رفتار درستی نشان ندهید و یا آسیبی به تلگرام و کاربر ها برسانید.\n'''\n\n@Assistant.on_message(command(\"evil\"))\nasync def evil(_, message: Message):\n \"\"\"No help for evil actions\"\"\"\n await reply_and_delete(message, EVIL)\n\n################################\n\n@Assistant.on_message(command(\"up\"))\nasync def up(bot: Assistant, message: Message):\n \"\"\"Show Assistant's uptime\"\"\"\n uptime = time.monotonic_ns() - bot.uptime_reference\n\n us, ns = divmod(uptime, 1000)\n ms, us = divmod(us, 1000)\n s, ms = divmod(ms, 1000)\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n\n try:\n arg = message.command[1]\n except IndexError:\n await reply_and_delete(message, f\"**Uptime**: `{d}d {h}h {m}m {s}s`\")\n else:\n if arg == \"-v\":\n await reply_and_delete(\n message,\n f\"**Uptime**: `{d}d {h}h {m}m {s}s {ms}ms {us}μs {ns}ns`\\n\"\n f\"**Since**: `{bot.start_datetime} UTC`\"\n )\n elif arg == \"-p\":\n await reply_and_delete(\n message,\n f\"**Uptime**: \"\n f\"`{num2words(d)} days, {num2words(h)} hours, {num2words(m)} minutes, \"\n 
f\"{num2words(s)} seconds, {num2words(ms)} milliseconds, \"\n f\"{num2words(us)} microseconds, {num2words(ns)} nanoseconds`\\n\"\n f\"\"\n f\"**Since**: `year {num2words(bot.start_datetime.year)}, \"\n f\"month {bot.start_datetime.strftime('%B').lower()}, day {num2words(bot.start_datetime.day)}, \"\n f\"hour {num2words(bot.start_datetime.hour)}, minute {num2words(bot.start_datetime.minute)}, \"\n f\"second {num2words(bot.start_datetime.second)}, \"\n f\"microsecond {num2words(bot.start_datetime.microsecond)}, Coordinated Universal Time`\"\n )\n else:\n await message.delete()\n\n\n################################\n\nnl = \"\\n\"\n\nHELP = f\"\"\"\n**دستورات قابل استفاده‌ :**\n```\nping\nlog\nex\nask\nlearn\nrules\nfaq\nrtd\nfmt\nevil\nup\n```\n\"\"\"\n\n@Assistant.on_message(command(\"help\") & filters.private)\nasync def help(bot: Assistant, message: Message):\n \"\"\"Show this message\"\"\"\n await asyncio.gather(\n message.delete(),\n message.reply(\n HELP,\n quote=False,\n reply_to_message_id=getattr(\n message.reply_to_message,\n \"message_id\", None\n ),\n )\n )\n","repo_name":"iMat1n/PyrogramIR","sub_path":"assistantir/plugins/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":14243,"program_lang":"python","lang":"fa","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31539654929","text":"import musicbrainzngs\nimport requests\n\ndef sources(recordingid):\n res = requests.get('http://185.97.32.250:8468/mbid:{}'.format(recordingid))\n #import pdb;pdb.set_trace()\n if not b'None' in res.content:\n return res.content.decode('utf-8')\nmusicbrainzngs.set_useragent(\"mb.py\", \"0\", contact=\"mikael@frykholm.com\")\nres = musicbrainzngs.search_artists(\"Ablaze\")\nfor artist in res['artist-list']:\n print(artist['name'], artist['id'])\n#import pdb;pdb.set_trace()\nres = musicbrainzngs.browse_releases(artist='d2c0d69e-e3ca-45a4-a540-6ce42c617599', limit=100)\nfor release in res['release-list']:\n print(release['title'])\n recordings = musicbrainzngs.browse_recordings(release=release['id'])\n for rec in recordings['recording-list']:\n# import pdb;pdb.set_trace()\n print(\"\\t\\t\",rec['title'],'\\t', end='')\n print(sources(rec['id']))\n#import pdb;pdb.set_trace()\n","repo_name":"mikaelfrykholm/uniqueshare","sub_path":"mb.py","file_name":"mb.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28314683740","text":"import requests\r\nfrom datetime import datetime, timedelta\r\n\r\n\r\nclass Client:\r\n API_URL = 'https://api.binance.com/api/{v}/{path}'\r\n\r\n def __init__(self, api_key=None, secret_key=None):\r\n self.API_KEY = api_key\r\n self.SECRET_KEY = secret_key\r\n\r\n self.session = self.init_session()\r\n\r\n def get_headers(self):\r\n \"\"\"Получение нужных хедеров для работы с биржей\"\"\"\r\n headers = {\r\n 'Accept': 'application/json',\r\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Mobile Safari/537.36'\r\n }\r\n\r\n if self.API_KEY:\r\n headers['X-MBX-APIKEY'] = self.API_KEY\r\n return headers\r\n\r\n def init_session(self):\r\n \"\"\"Инициализация сессии\"\"\"\r\n headers = self.get_headers()\r\n\r\n session = requests.session()\r\n session.headers.update(headers)\r\n return session\r\n\r\n def make_api_url(self, path):\r\n url = self.API_URL.format(\r\n v='v3',\r\n path=path\r\n )\r\n return url\r\n\r\n @staticmethod\r\n def 
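The `#up` handler above unfolds the `time.monotonic_ns()` delta with a chain of `divmod` calls; extracted as a function the chain is easy to sanity-check:

```python
def split_duration(ns: int):
    """Unfold nanoseconds into d/h/m/s/ms/us/ns, as the #up handler does."""
    us, ns = divmod(ns, 1000)
    ms, us = divmod(us, 1000)
    s, ms = divmod(ms, 1000)
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    return d, h, m, s, ms, us, ns

# 1 day, 1 hour, 1 minute, 1 second expressed in nanoseconds:
assert split_duration(90_061_000_000_000) == (1, 1, 1, 1, 0, 0, 0)
```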
handle_response(self, response: requests.Response):\r\n return response.json()\r\n\r\n def request(self, method, path, **kwargs):\r\n \"\"\"Сделать запрос к бирже\"\"\"\r\n url = self.make_api_url(path)\r\n response = getattr(self.session, method)(url, **kwargs)\r\n return self.handle_response(self, response)\r\n\r\n def get(self, path, **kwargs):\r\n \"\"\"Гет-запрос\"\"\"\r\n return self.request('get', path, **kwargs)\r\n\r\n def get_price(self, **params):\r\n \"\"\"\r\n Получить текущую цену\r\n params = {\r\n 'symbol': ...\r\n }\r\n\r\n https://binance-docs.github.io/apidocs/spot/en/#symbol-price-ticker\r\n \"\"\"\r\n\r\n path = f'ticker/price'\r\n js = self.get(path, params=params)\r\n return float(js['price'])\r\n\r\n def get_ticker_24hr(self, **params):\r\n \"\"\"\r\n Получить изменение цены символа за последние 24 часа в процентом соотоношении\r\n params = {\r\n 'symbol': ...\r\n }\r\n\r\n https://binance-docs.github.io/apidocs/spot/en/#24hr-ticker-price-change-statistics\r\n \"\"\"\r\n\r\n path = 'ticker/24hr'\r\n js = self.get(path, params=params)\r\n return js['priceChangePercent']\r\n\r\n def get_aggregate_trades(self, **params):\r\n \"\"\"\r\n Получить список общий список трейдов\r\n params = {\r\n 'symbol': ...,\r\n 'fromId': ...,\r\n 'startTime': ...,\r\n 'endTime': ...,\r\n 'limit': ...\r\n }\r\n\r\n https://binance-docs.github.io/apidocs/spot/en/#compressed-aggregate-trades-list\r\n \"\"\"\r\n\r\n path = 'aggTrades'\r\n js = self.get(path, params=params)\r\n return js\r\n\r\n def get_klines(self, **params):\r\n \"\"\"\r\n Получить список klines/candlesticks для символа\r\n params = {\r\n 'symbol': ...,\r\n 'interval': ...,\r\n 'startTime': ...,\r\n 'endTime': ...,\r\n 'limit': ...\r\n }\r\n\r\n https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data\r\n \"\"\"\r\n\r\n path = 'klines'\r\n js = self.get(path, params=params)\r\n return js\r\n\r\n def get_ticker(self, data: datetime, **params):\r\n \"\"\"\r\n Получить цену коина за определенную дату\r\n params = {\r\n 'symbol' : ...,\r\n }\r\n \"\"\"\r\n\r\n start_timestamp = int(data.timestamp() * 1000)\r\n end_timestamp = int((data + timedelta(minutes=10)).timestamp() * 1000)\r\n\r\n trades = self.get_aggregate_trades(\r\n symbol=params['symbol'],\r\n startTime=start_timestamp,\r\n endTime=end_timestamp\r\n )\r\n\r\n print(trades)\r\n\r\n if len(trades) == 0:\r\n # Вероятно, была введена слишком ранняя дата\r\n return 0\r\n return float(trades[0]['p'])\r\n\r\n def get_ticker_1hr(self, **params):\r\n \"\"\"\r\n Получить изменение цены символа за последний час в процентом соотношении\r\n params = {\r\n 'symbol': ...\r\n }\r\n \"\"\"\r\n current_price = self.get_price(**params)\r\n\r\n hour_ago = datetime.now() - timedelta(hours=1)\r\n hour_ago_price = self.get_ticker(hour_ago, **params)\r\n\r\n return (current_price - hour_ago_price) * 100 / hour_ago_price\r\n\r\n def get_ticker_7days(self, **params):\r\n \"\"\"\r\n Получить изменение цены символа за последние 7 дней в процентом соотношении\r\n params = {\r\n 'symbol': ...\r\n }\r\n \"\"\"\r\n\r\n current_price = self.get_price(**params)\r\n\r\n week_ago = datetime.now() - timedelta(days=7)\r\n week_ago_price = self.get_ticker(week_ago, **params)\r\n\r\n return (current_price - week_ago_price) * 100 / week_ago_price\r\n\r\n","repo_name":"mikunak/-MatMeh-MMM-practice-in-programming-2-semester-","sub_path":"2 
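A note on the Client above: `handle_response` is declared `@staticmethod` yet keeps a `self` parameter, and the call site passes `self` explicitly (`self.handle_response(self, response)`), which happens to line up but is easy to break. The conventional form drops the parameter; sketched below with an added `raise_for_status` call that is my addition, not in the record:

```python
import requests

class Client:
    @staticmethod
    def handle_response(response: requests.Response) -> dict:
        # No self on a true staticmethod; call it as self.handle_response(resp).
        response.raise_for_status()  # surface HTTP errors before parsing
        return response.json()
```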
семестр/project/binance/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1313069119","text":"from test_framework import generic_test\nimport heapq\n\ndef merge_sorted_arrays(sorted_arrays):\n arrays = [iter(array) for array in sorted_arrays]\n min_heap = []\n for index, array_iter in enumerate(arrays):\n smallest = next(array_iter, None)\n if smallest is not None:\n heapq.heappush(min_heap, (smallest, index))\n\n result = []\n while min_heap:\n smallest, index = heapq.heappop(min_heap)\n array_iter = arrays[index]\n result.append(smallest)\n next_element = next(array_iter, None)\n if next_element is not None:\n heapq.heappush(min_heap, (next_element, index))\n return result\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main(\"sorted_arrays_merge.py\",\n \"sorted_arrays_merge.tsv\",\n merge_sorted_arrays))\n","repo_name":"tonywang124/EPIJudge","sub_path":"epi_judge_python/sorted_arrays_merge.py","file_name":"sorted_arrays_merge.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"70282136893","text":"import datetime\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\n\r\nimport tensorflow as tf\r\nfrom keras import backend as K\r\nfrom keras.engine import Layer, InputSpec\r\nfrom keras.initializers import RandomNormal\r\nfrom keras.layers import Input, Dropout, Concatenate\r\nfrom keras.layers import BatchNormalization, Activation, GaussianNoise\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.layers.convolutional import UpSampling3D, Conv3D\r\nfrom keras.models import Model\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils import multi_gpu_model\r\nfrom keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\r\n\r\nclass My3dResize(Layer):\r\n def __init__(self, sizes, nn=False, **kwargs):\r\n super(My3dResize, self).__init__(**kwargs)\r\n self.sizes = sizes\r\n self.nn = nn\r\n\r\n def compute_output_shape(self, input_shape):\r\n output_shape = list(input_shape)\r\n output_shape[1] = output_shape[1]*self.sizes[0]\r\n output_shape[2] = output_shape[2]*self.sizes[1]\r\n output_shape[3] = output_shape[3]*self.sizes[2]\r\n return tuple(output_shape)\r\n \r\n def call(self, inputs):\r\n input_shape = inputs.get_shape().as_list()\r\n output_shape = input_shape.copy()\r\n output_shape[1] = output_shape[1]*self.sizes[0]\r\n output_shape[2] = output_shape[2]*self.sizes[1]\r\n output_shape[3] = output_shape[3]*self.sizes[2]\r\n \r\n # resize rows and columns\r\n \r\n u = tf.reshape(inputs, shape=[-1]+input_shape[1:-2]+[input_shape[-2]*input_shape[-1]])\r\n if self.nn:\r\n u = tf.image.resize_images(u, size=output_shape[1:3], method=tf.image.ResizeMethod.BILINEAR)\r\n else:\r\n u = tf.image.resize_images(u, size=output_shape[1:3], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\r\n u = tf.reshape(u, shape=[-1]+output_shape[1:-2]+input_shape[-2:])\r\n \r\n # repeat depth-wise\r\n outputs = K.repeat_elements(u, self.sizes[2], axis=3) \r\n return outputs\r\n \r\n## modified from https://github.com/eriklindernoren/Keras-GAN/tree/master/pix2pix\r\n\r\nclass My3dPix2Pix():\r\n def __init__(self, data_loader, savepath='result/pilot', L_weights=(1,100), opt='adam', lrs=(0.0002,0.0), \\\r\n randomshift=0.1, resoutput=0.0, dropout=0.0, smoothlabel=True, gennoise=0,\\\r\n 
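The EPI solution above hand-rolls the k-way merge with a heap of (value, array-index) pairs; the standard library ships the same algorithm as `heapq.merge`, which also consumes the input iterators lazily:

```python
import heapq

def merge_sorted_arrays(sorted_arrays):
    # Stdlib k-way merge, equivalent to the manual heap bookkeeping above.
    return list(heapq.merge(*sorted_arrays))

assert merge_sorted_arrays([[1, 4], [2, 3], [0, 5]]) == [0, 1, 2, 3, 4, 5]
```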
fmloss=False, coordconv=False, resizeconv=False, multigpu=None):\r\n '''\r\n PatchGAN's receptive field = 70x70x4\r\n '''\r\n # Configure data loader\r\n self.data_loader = data_loader\r\n self.savepath = savepath\r\n self.coordconv = coordconv\r\n self.resizeconv = resizeconv\r\n self.opt = opt\r\n self.lr_ini, self.lr_decay = lrs\r\n self.smoothlabel = smoothlabel\r\n self.gennoise = gennoise\r\n self.resoutput=resoutput\r\n self.dropout = dropout\r\n self.fmloss = fmloss # discarded\r\n self.randomshift=randomshift\r\n \r\n self.reswindow = (self.data_loader.window2[0][0]*self.resoutput,\\\r\n self.data_loader.window2[0][1]-0.5*(1-self.resoutput)*self.data_loader.window2[0][0])\r\n \r\n ### from data_loader\r\n # Input shape --- x,y >=64, depth >=16.\r\n self.img_shape = tuple(data_loader.img_shape) + (len(data_loader.window1),)\r\n self.img_rows, self.img_cols, self.depth = data_loader.img_shape\r\n self.channels = len(data_loader.window1)\r\n\r\n # Calculate output shape of D (PatchGAN)\r\n patch = max(int(self.img_rows / 2**3), 1)\r\n dpatch = max(int(self.depth / 2**5), 2)\r\n self.disc_patch = (patch, patch, dpatch, 1)\r\n\r\n # Number of filters in the first layer of G and D\r\n self.gf = 64\r\n self.df = 64\r\n\r\n optimizer = Adam(0.0002, 0.5)\r\n if self.opt=='adam':\r\n optimizer = Adam(self.lr_ini, 0.5)\r\n\r\n # Build and compile the discriminator\r\n self.discriminator, self.discriminator_feat = self.build_discriminator()\r\n self.discriminator.compile(loss='binary_crossentropy',\r\n optimizer=optimizer, loss_weights=[0.5],\r\n metrics=['accuracy'])\r\n self.discriminator_feat.compile(loss='mae',\r\n optimizer=optimizer)\r\n\r\n #-------------------------\r\n # Construct Computational\r\n # Graph of Generator\r\n #-------------------------\r\n\r\n # Build the generator\r\n self.generator = self.build_generator()\r\n\r\n # Input images and their conditioning images\r\n img_A = Input(shape=self.img_shape)\r\n img_B = Input(shape=self.img_shape)\r\n\r\n # By conditioning on B generate a fake version of A\r\n fake_A = self.generator(img_B)\r\n\r\n # For the combined model we will only train the generator\r\n self.discriminator.trainable = False\r\n self.discriminator_feat.trainable = False\r\n\r\n # Discriminators determines validity of translated images / condition pairs\r\n if self.fmloss:\r\n # discarded\r\n '''\r\n valid = self.discriminator([fake_A, img_B])\r\n valid_feat = self.discriminator_feat([fake_A, img_B])\r\n\r\n self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A, valid_feat])\r\n if multigpu is not None:\r\n self.combined = multi_gpu_model(self.combined, gpus=multigpu)\r\n\r\n self.combined.compile(loss=['binary_crossentropy', 'mae', 'mae'],\r\n loss_weights=list(L_weights),\r\n optimizer=optimizer)\r\n '''\r\n valid = self.discriminator([fake_A, img_B])\r\n\r\n self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])\r\n if multigpu is not None:\r\n self.combined = multi_gpu_model(self.combined, gpus=multigpu)\r\n\r\n self.combined.compile(loss=['binary_crossentropy', ssim_mae_loss],\r\n loss_weights=list(L_weights),\r\n optimizer=optimizer)\r\n \r\n \r\n else:\r\n valid = self.discriminator([fake_A, img_B])\r\n\r\n self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])\r\n if multigpu is not None:\r\n self.combined = multi_gpu_model(self.combined, gpus=multigpu)\r\n\r\n self.combined.compile(loss=['binary_crossentropy', 'mae'],\r\n loss_weights=list(L_weights),\r\n optimizer=optimizer)\r\n\r\n def 
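The `disc_patch` shape computed in `__init__` above encodes the PatchGAN output grid: the x/y extent shrinks by three effective stride-2 stages and the depth is floored at 2 slices. Pulled out as a function for a quick check (the 256×256×16 input size is just an example):

```python
def disc_patch(img_rows: int, depth: int):
    """Mirror of the PatchGAN target-shape arithmetic in __init__ above."""
    patch = max(int(img_rows / 2 ** 3), 1)  # x/y shrink by three stride-2 stages
    dpatch = max(int(depth / 2 ** 5), 2)    # depth is floored at 2 slices
    return (patch, patch, dpatch, 1)

print(disc_patch(256, 16))  # -> (32, 32, 2, 1)
```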
build_generator(self):\r\n \"\"\"U-Net Generator\"\"\"\r\n\r\n def conv3d(layer_input, filters, kernel_size=(4,4,2), strides=(2,2,2), bn=True):\r\n \"\"\"Layers used during downsampling\"\"\"\r\n init = RandomNormal(stddev=0.02)\r\n d = Conv3D(filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer=init)(layer_input)\r\n d = LeakyReLU(alpha=0.2)(d)\r\n if bn:\r\n #d = BatchNormalization(momentum=0.8)(d)\r\n d = InstanceNormalization()(d)\r\n return d\r\n\r\n def deconv3d(layer_input, skip_input, filters, kernel_size=(4,4,2), strides=(2,2,2), dropout_rate=0, bn=True):\r\n \"\"\"Layers used during upsampling\"\"\"\r\n if self.resizeconv:\r\n u = My3dResize(strides)(layer_input)\r\n else:\r\n u = UpSampling3D(size=strides)(layer_input)\r\n init = RandomNormal(stddev=0.02)\r\n u = Conv3D(filters, kernel_size=kernel_size, strides=1, padding='same', kernel_initializer=init, activation='relu')(u)\r\n if dropout_rate:\r\n u = Dropout(dropout_rate)(u)\r\n if bn:\r\n #u = BatchNormalization(momentum=0.8)(u)\r\n u = InstanceNormalization()(u)\r\n u = Concatenate()([u, skip_input])\r\n u = Activation('relu')(u)\r\n return u\r\n\r\n # Image input\r\n d00 = Input(shape=self.img_shape)\r\n \r\n if self.coordconv:\r\n d0 = CoordinateChannel3D()(d00)\r\n else:\r\n d0 = d00\r\n \r\n n_layers = 7\r\n encoders = []\r\n decoders = []\r\n \r\n # Downsampling\r\n for i in range(n_layers):\r\n z = 1\r\n if i < self.depth.bit_length()-1: z = 2\r\n if i==0:\r\n encoders.append(conv3d(d0, self.gf, kernel_size=(4,4,z), strides=(2,2,z), bn=False))\r\n else:\r\n encoders.append(conv3d(encoders[-1], self.gf*(2**min(i,3)), kernel_size=(4,4,z), strides=(2,2,z)))\r\n \r\n # Upsampling\r\n for i in range(n_layers-1):\r\n z = 1\r\n if i+self.depth.bit_length()>n_layers: z = 2\r\n if i==0:\r\n decoders.append(deconv3d(encoders[-(i+1)], encoders[-(i+2)], self.gf*(2**min(n_layers-2-i,3)), \r\n kernel_size=(4,4,z), strides=(2,2,z)))\r\n else:\r\n decoders.append(deconv3d(decoders[-1], encoders[-(i+2)], self.gf*(2**min(n_layers-2-i,3)), \r\n kernel_size=(4,4,z), strides=(2,2,z), dropout_rate=self.dropout))\r\n\r\n if self.resizeconv:\r\n u7 = My3dResize((2,2,2))(decoders[-1])\r\n else:\r\n u7 = UpSampling3D(size=2)(decoders[-1])\r\n init = RandomNormal(stddev=0.02)\r\n output_img = Conv3D(self.channels, kernel_size=(4,4,2), strides=1, padding='same', kernel_initializer=init, activation='tanh')(u7)\r\n\r\n return Model(d00, output_img)\r\n\r\n def build_discriminator(self):\r\n def d_layer(layer_input, filters, kernel_size=(4,4,2), strides=(2,2,2), bn=True):\r\n \"\"\"Discriminator layer\"\"\"\r\n init = RandomNormal(stddev=0.02)\r\n d = Conv3D(filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer=init)(layer_input)\r\n d = LeakyReLU(alpha=0.2)(d)\r\n if bn:\r\n #d = BatchNormalization(momentum=0.8)(d)\r\n d = InstanceNormalization()(d)\r\n return d\r\n \r\n img_A = Input(shape=self.img_shape)\r\n img_B = Input(shape=self.img_shape)\r\n\r\n # Concatenate image and conditioning image by channels to produce input\r\n combined_imgs = Concatenate(axis=-1)([img_A, img_B])\r\n \r\n ## testing\r\n #combined_imgs = GaussianNoise(0.02)(combined_imgs)\r\n \r\n if self.coordconv:\r\n combined_imgs = CoordinateChannel3D()(combined_imgs)\r\n \r\n n_d_layer = 4\r\n d_layers = []\r\n df = self.df\r\n dout = self.depth\r\n for i in range(n_d_layer):\r\n if i==0:\r\n d_layers.append(d_layer(combined_imgs, df, kernel_size=(4,4,2), strides=(2,2,2)))\r\n else:\r\n z = 2\r\n s = 2\r\n if 
dout == 2:\r\n z = 1\r\n if i == n_d_layer-1:\r\n s = 1\r\n df = min(df*2, self.df*8)\r\n d_layers.append(d_layer(d_layers[-1], df, kernel_size=(4,4,z), strides=(s,s,z)))\r\n dout = max(2, int(0.5*dout))\r\n\r\n init = RandomNormal(stddev=0.02)\r\n features = Conv3D(1, kernel_size=(4,4,1), strides=1, padding='same', kernel_initializer=init)(d_layers[-1])\r\n validity = Activation('sigmoid')(features)\r\n \r\n return Model([img_A, img_B], validity), Model([img_A, img_B], features)\r\n\r\n def convert_resoutput(self, imgs_A, imgs_B):\r\n ## imgs 5D tensor (batch, rows, cols, depth, channels)\r\n if self.resoutput:\r\n new_A = []\r\n for i in range(imgs_B.shape[0]):\r\n a = rWND(255*(0.5*imgs_A[i]+0.5),self.data_loader.window2)\r\n b = rWND(255*(0.5*imgs_B[i]+0.5),self.data_loader.window1)\r\n c = a-b\r\n #c[b>100] = 0\r\n #c = WND(c,self.reswindow)\r\n M = np.max(c)\r\n m = np.min(c)\r\n if M==m:\r\n c = np.zeros(c.shape)\r\n else:\r\n c = 2.*(c-m)/(M-m) - 1\r\n new_A.append(c)\r\n imgs_A = np.array(new_A)\r\n return imgs_A\r\n \r\n def invert_resoutput(self, fake_A, imgs_B):\r\n if self.resoutput:\r\n '''\r\n new_A = []\r\n for i in range(fake_A.shape[0]):\r\n a = rWND(255*(0.5*fake_A[i]+0.5),self.reswindow)\r\n b = rWND(255*(0.5*imgs_B[i]+0.5),self.data_loader.window1)\r\n c = a+b\r\n c = WND(c,self.data_loader.window2)\r\n new_A.append(c/127.5 - 1.)\r\n fake_A = np.array(new_A)\r\n '''\r\n pass\r\n return fake_A\r\n \r\n def generate_noise(self, mode, imgs_A, imgs_B):\r\n if mode==0:\r\n pass\r\n elif mode==1:\r\n r = np.random.uniform(0,1)\r\n if r>=0.5:\r\n row,col,dep,ch= imgs_A.shape[1:]\r\n mean = 0\r\n sigma = 0.05\r\n gauss = np.random.normal(mean,sigma,(row,col,dep,ch))\r\n gauss = gauss.reshape(row,col,dep,ch)\r\n gauss = np.expand_dims(gauss, axis=0)\r\n imgs_A = imgs_A + gauss\r\n imgs_B = imgs_B + gauss\r\n else:\r\n pass\r\n elif mode==2:\r\n r = np.random.uniform(0,1)\r\n r2 = np.random.uniform(0,1)\r\n if r>=0.5:\r\n row,col,dep,ch= imgs_A.shape[1:]\r\n mean = 0\r\n sigma = 0.05\r\n gauss = np.random.normal(mean,sigma,(row,col,dep,ch))\r\n gauss = gauss.reshape(row,col,dep,ch)\r\n gauss = np.expand_dims(gauss, axis=0)\r\n imgs_A = imgs_A + gauss\r\n imgs_B = imgs_B + gauss\r\n else:\r\n pass\r\n if r2>=0.75:\r\n imgs_A = gaussian_filter(imgs_A, sigma=0.05)\r\n imgs_B = gaussian_filter(imgs_B, sigma=0.05)\r\n else:\r\n pass\r\n else:\r\n pass\r\n \r\n return imgs_A, imgs_B\r\n \r\n def train(self, epochs, batch_size=1, sample_interval=200, model_interval=-1):\r\n\r\n start_time = datetime.datetime.now()\r\n\r\n # Adversarial loss ground truths\r\n patch_shape = (batch_size,) + self.disc_patch\r\n if self.smoothlabel:\r\n valid = np.ones(patch_shape) - 0.1*np.random.rand(*patch_shape)\r\n else:\r\n valid = np.ones(patch_shape)\r\n \r\n fake = np.zeros(patch_shape)\r\n \r\n # log file\r\n f = open(os.path.join(self.savepath, 'log.txt'), 'w')\r\n f.close()\r\n \r\n for epoch in range(epochs):\r\n if epoch<10:\r\n lr = self.lr_ini\r\n else:\r\n lr = self.lr_ini/(1+(epoch-10)*self.lr_decay)\r\n K.set_value(self.discriminator.optimizer.lr, lr)\r\n K.set_value(self.combined.optimizer.lr, lr)\r\n \r\n for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size=batch_size)):\r\n\r\n # ---------------------\r\n # Train Discriminator\r\n # --------------------- \r\n imgs_A = self.convert_resoutput(imgs_A, imgs_B)\r\n \r\n # randomshift\r\n non = lambda s: s if s<0 else None\r\n mom = lambda s: max(0,s)\r\n\r\n shift_A = np.full(imgs_A.shape, -1.)\r\n shift_B = 
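`train()` above holds the learning rate flat for ten epochs and then decays it hyperbolically before pushing it into both optimizers with `K.set_value`. Isolated, the schedule is the function below; note the record's default `lr_decay` is 0.0 (no decay), 0.5 here is only for illustration:

```python
def lr_schedule(epoch: int, lr_ini: float = 2e-4, lr_decay: float = 0.5) -> float:
    """Flat warm-up for 10 epochs, then lr_ini / (1 + (epoch - 10) * lr_decay)."""
    if epoch < 10:
        return lr_ini
    return lr_ini / (1 + (epoch - 10) * lr_decay)

print([round(lr_schedule(e), 7) for e in (0, 9, 10, 12, 20)])
# [0.0002, 0.0002, 0.0002, 0.0001, 3.33e-05]
```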
np.full(imgs_A.shape, -1.)\r\n sx = int(self.randomshift*shift_A.shape[2])\r\n sy = int(self.randomshift*shift_A.shape[1])\r\n for i in range(shift_A.shape[0]):\r\n ox = np.random.randint(2*sx+1, size=1)[0] - sx\r\n oy = np.random.randint(2*sy+1, size=1)[0] - sy\r\n shift_A[i,mom(oy):non(oy),mom(ox):non(ox),:,:] = imgs_A[i,mom(-oy):non(-oy),mom(-ox):non(-ox),:,:]\r\n shift_B[i,mom(oy):non(oy),mom(ox):non(ox),:,:] = imgs_B[i,mom(-oy):non(-oy),mom(-ox):non(-ox),:,:]\r\n imgs_A = shift_A\r\n imgs_B = shift_B\r\n \r\n imgs_A, imgs_B = self.generate_noise(self.gennoise, imgs_A, imgs_B)\r\n \r\n # Condition on B and generate a translated version\r\n fake_A = self.generator.predict(imgs_B)\r\n\r\n # Train the discriminators (original images = real / generated = Fake)\r\n d_loss_real = self.discriminator.train_on_batch([imgs_A, imgs_B], valid)\r\n d_loss_fake = self.discriminator.train_on_batch([fake_A, imgs_B], fake)\r\n d_loss = np.add(d_loss_real, d_loss_fake)\r\n\r\n # -----------------\r\n # Train Generator\r\n # -----------------\r\n\r\n # Train the generators\r\n '''\r\n if self.fmloss:\r\n valid_feat = self.discriminator_feat.predict([imgs_A, imgs_B])\r\n g_loss = self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A, valid_feat])\r\n else:\r\n g_loss = self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A])\r\n '''\r\n g_loss = self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A])\r\n\r\n elapsed_time = datetime.datetime.now() - start_time\r\n \r\n # Plot the progress\r\n newlog = \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, D fake: %f] [G loss: %f] time: %s\" % (\r\n epoch+1, epochs, batch_i+1, self.data_loader.n_batches,\r\n d_loss[0], d_loss_fake[0], #100*d_loss[1],\r\n g_loss[0],\r\n elapsed_time\r\n )\r\n \r\n print(newlog)\r\n f = open(os.path.join(self.savepath, 'log.txt'), 'a')\r\n f.write(newlog+'\\n')\r\n f.close() \r\n \r\n # If at save interval => save generated image samples\r\n if (batch_i+1) % sample_interval == 0:\r\n self.sample_images(epoch, batch_i+1)\r\n # save weights\r\n if model_interval>0:\r\n if (epoch+1) % model_interval == 0:\r\n self.save_weights('{}'.format(str(epoch+1)))\r\n \r\n # final sample image & save weights\r\n self.sample_images(epochs, 'final')\r\n if model_interval>0:\r\n self.save_weights('final{}'.format(str(epochs)))\r\n \r\n def predict_on_batch(self, imgs_A, imgs_B):\r\n fake_A = self.generator.predict(imgs_B)\r\n gen_imgs = np.concatenate([imgs_B, fake_A, imgs_A])\r\n return gen_imgs\r\n\r\n def sample_images(self, epoch, batch_i):\r\n r, c = 3, 3\r\n\r\n imgs_A, imgs_B = self.data_loader.load_data(batch_size=3)\r\n fake_A = np.concatenate([self.generator.predict(np.expand_dims(x,axis=0)) for x in imgs_B], axis=0)\r\n fake_A = self.invert_resoutput(fake_A, imgs_B)\r\n\r\n gen_imgs = np.concatenate([imgs_B[:,:,:,0,-1], fake_A[:,:,:,0,-1], imgs_A[:,:,:,0,-1]])\r\n\r\n # Rescale images 0 - 1\r\n gen_imgs = 0.5 * gen_imgs + 0.5\r\n\r\n titles = ['Condition', 'Generated', 'Original']\r\n plt.style.use('default')\r\n fig, axs = plt.subplots(r, c)\r\n cnt = 0\r\n for i in range(r):\r\n dr = imgs_B.shape[1]//4+imgs_B.shape[1]//8\r\n dc = imgs_B.shape[2]//4+imgs_B.shape[2]//8\r\n pr = np.random.choice(imgs_B.shape[1]//2-dr)+imgs_B.shape[1]//4\r\n pc = np.random.choice(imgs_B.shape[2]//2-dc)+imgs_B.shape[2]//4\r\n \r\n for j in range(c):\r\n axs[j,i].imshow(gen_imgs[cnt], cmap='gray', vmin=0, vmax=1)\r\n axs[j,i].set_title(titles[i])\r\n axs[i,j].axis([pc,pc+dc,pr+dr,pr])\r\n cnt += 1\r\n samplepath = 
self.make_directory('samples')\r\n fig.savefig(os.path.join(samplepath, '{}_{}.png'.format(epoch, batch_i)))\r\n plt.close()\r\n \r\n def make_directory(self, dirname):\r\n dirpath = os.path.join(self.savepath, dirname)\r\n if not os.path.isdir(dirpath):\r\n os.mkdir(dirpath)\r\n return dirpath\r\n \r\n def load_weights(self, weightfile, summary=True):\r\n loadweightspath = os.path.join(self.savepath, 'models', '{}.h5'.format(weightfile))\r\n self.combined.load_weights(loadweightspath)\r\n if summary:\r\n self.combined.summary()\r\n \r\n def load_final_weights(self, *args, **kwargs):\r\n wdir = os.path.join(self.savepath, 'models')\r\n wlist = [os.path.splitext(x)[0] for x in os.listdir(wdir) if x.lower().endswith('.h5')]\r\n wlist.sort()\r\n\r\n s = None\r\n for x in wlist:\r\n if 'final' in x:\r\n s = x\r\n if s is None:\r\n s = wlist[-1] \r\n \r\n self.load_weights(s,*args, **kwargs)\r\n \r\n def save_weights(self, weightfile): \r\n modelpath = self.make_directory('models')\r\n self.combined.save_weights(os.path.join(modelpath, '{}.h5'.format(weightfile)))\r\n print('saved {}.h5 to {}'.format(weightfile, modelpath))\r\n \r\n## not used\r\ndef ssim_mae_loss(y_true, y_pred):\r\n return 0.15*losses.mean_absolute_error(y_true, y_pred) + 0.85*ssim_loss(y_true, y_pred)\r\n\r\ndef ssim_loss(y_true, y_pred):\r\n # ssim for 3d data 구현\r\n ashape = y_true.get_shape().as_list()\r\n\r\n ut = tf.transpose(y_true[:,:,:,:,0], perm=[0,3,1,2])\r\n #ut = tf.reshape(ut, shape=[-1]+ashape[1:3])\r\n ut = tf.expand_dims(ut, axis=-1)\r\n up = tf.transpose(y_pred[:,:,:,:,0], perm=[0,3,1,2])\r\n #up = tf.reshape(up, shape=[-1]+ashape[1:3])\r\n up = tf.expand_dims(up, axis=-1)\r\n \r\n return tf.reduce_mean(1-tf.image.ssim_multiscale(ut, up, 2.0))\r\n\r\n\r\n# modified from \"An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution\" from Uber research\r\n# https://arxiv.org/abs/1807.03247\r\n\r\nclass _CoordinateChannel(Layer):\r\n def __init__(self, rank, \r\n data_format=None,\r\n **kwargs):\r\n super(_CoordinateChannel, self).__init__(**kwargs)\r\n \r\n self.rank = rank\r\n self.data_format = K.image_data_format() if data_format is None else data_format\r\n self.axis = 1 if K.image_data_format() == 'channels_first' else -1\r\n\r\n self.input_spec = InputSpec(min_ndim=2)\r\n\r\n def build(self, input_shape):\r\n assert len(input_shape) >= 2\r\n input_dim = input_shape[self.axis]\r\n\r\n self.input_spec = InputSpec(min_ndim=self.rank + 2,\r\n axes={self.axis: input_dim})\r\n self.built = True\r\n \r\n def call(self, inputs):\r\n input_shape = K.shape(inputs)\r\n\r\n if self.rank == 2:\r\n\r\n input_shape = [input_shape[i] for i in range(4)]\r\n batch_shape, dim1, dim2, channels = input_shape\r\n\r\n xx_ones = K.ones(K.stack([batch_shape, dim2]), dtype='int32')\r\n xx_ones = K.expand_dims(xx_ones, axis=-1)\r\n\r\n xx_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),\r\n K.stack([batch_shape, 1]))\r\n xx_range = K.expand_dims(xx_range, axis=1)\r\n xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])\r\n xx_channels = K.expand_dims(xx_channels, axis=-1)\r\n xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])\r\n\r\n yy_ones = K.ones(K.stack([batch_shape, dim1]), dtype='int32')\r\n yy_ones = K.expand_dims(yy_ones, axis=1)\r\n\r\n yy_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),\r\n K.stack([batch_shape, 1]))\r\n yy_range = K.expand_dims(yy_range, axis=-1)\r\n\r\n yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])\r\n yy_channels = 
K.expand_dims(yy_channels, axis=-1)\r\n yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])\r\n\r\n xx_channels = K.cast(xx_channels, K.floatx())\r\n xx_channels = xx_channels / K.cast(dim1 - 1, K.floatx())\r\n xx_channels = (xx_channels * 2) - 1.\r\n\r\n yy_channels = K.cast(yy_channels, K.floatx())\r\n yy_channels = yy_channels / K.cast(dim2 - 1, K.floatx())\r\n yy_channels = (yy_channels * 2) - 1.\r\n\r\n outputs = K.concatenate([inputs, xx_channels, yy_channels], axis=-1)\r\n\r\n if self.rank == 3:\r\n\r\n input_shape = [input_shape[i] for i in range(5)]\r\n batch_shape, dim1, dim2, dim3, channels = input_shape\r\n\r\n xx_ones = K.ones(K.stack([batch_shape, dim3]), dtype='int32')\r\n xx_ones = K.expand_dims(xx_ones, axis=-1)\r\n\r\n xx_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),\r\n K.stack([batch_shape, 1]))\r\n xx_range = K.expand_dims(xx_range, axis=1)\r\n\r\n xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])\r\n xx_channels = K.expand_dims(xx_channels, axis=-1)\r\n xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])\r\n\r\n xx_channels = K.expand_dims(xx_channels, axis=1)\r\n xx_channels = tf.tile(xx_channels,\r\n [1, dim1, 1, 1, 1])\r\n\r\n yy_ones = K.ones(K.stack([batch_shape, dim2]), dtype='int32')\r\n yy_ones = K.expand_dims(yy_ones, axis=1)\r\n\r\n yy_range = tf.tile(K.expand_dims(K.arange(0, dim3), axis=0),\r\n K.stack([batch_shape, 1]))\r\n yy_range = K.expand_dims(yy_range, axis=-1)\r\n\r\n yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])\r\n yy_channels = K.expand_dims(yy_channels, axis=-1)\r\n yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])\r\n\r\n yy_channels = K.expand_dims(yy_channels, axis=1)\r\n yy_channels = tf.tile(yy_channels,\r\n [1, dim1, 1, 1, 1])\r\n\r\n \r\n zz_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),\r\n K.stack([batch_shape, 1]))\r\n zz_range = K.expand_dims(zz_range, axis=-1)\r\n zz_range = K.expand_dims(zz_range, axis=-1)\r\n\r\n zz_channels = tf.tile(zz_range,\r\n [1, 1, dim2, dim3])\r\n zz_channels = K.expand_dims(zz_channels, axis=-1)\r\n \r\n\r\n xx_channels = K.cast(xx_channels, K.floatx())\r\n xx_channels = xx_channels / K.cast(dim2 - 1, K.floatx())\r\n xx_channels = xx_channels * 2 - 1.\r\n\r\n yy_channels = K.cast(yy_channels, K.floatx())\r\n yy_channels = yy_channels / K.cast(dim3 - 1, K.floatx())\r\n yy_channels = yy_channels * 2 - 1.\r\n\r\n zz_channels = K.cast(zz_channels, K.floatx())\r\n zz_channels = zz_channels / K.cast(dim1 - 1, K.floatx())\r\n zz_channels = zz_channels * 2 - 1.\r\n\r\n outputs = K.concatenate([inputs, zz_channels, xx_channels, yy_channels],axis=-1)\r\n \r\n return outputs\r\n \r\n def compute_output_shape(self, input_shape):\r\n assert input_shape and len(input_shape) >= 2\r\n assert input_shape[self.axis]\r\n\r\n channel_count = self.rank\r\n\r\n output_shape = list(input_shape)\r\n output_shape[self.axis] = input_shape[self.axis] + channel_count\r\n return tuple(output_shape)\r\n \r\n def get_config(self):\r\n config = {\r\n 'rank': self.rank,\r\n 'data_format': self.data_format\r\n }\r\n base_config = super(_CoordinateChannel, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n \r\nclass CoordinateChannel3D(_CoordinateChannel):\r\n def __init__(self,\r\n data_format=None,\r\n **kwargs):\r\n super(CoordinateChannel3D, self).__init__(\r\n rank=3,\r\n data_format=data_format,\r\n **kwargs\r\n )\r\n \r\n def get_config(self):\r\n config = super(CoordinateChannel3D, self).get_config()\r\n 
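What the `_CoordinateChannel` layer above appends, stripped of the Keras plumbing: per-axis coordinate maps normalised to [-1, 1], concatenated onto the channel axis so convolutions can see absolute position. A NumPy sketch of the 2D case:

```python
import numpy as np

def coord_channels_2d(h: int, w: int) -> np.ndarray:
    """(h, w, 2) array of x and y coordinates scaled to [-1, 1]."""
    xx = np.tile(np.linspace(-1.0, 1.0, w), (h, 1))           # varies along width
    yy = np.tile(np.linspace(-1.0, 1.0, h)[:, None], (1, w))  # varies along height
    return np.stack([xx, yy], axis=-1)

print(coord_channels_2d(3, 4)[..., 0])  # each row runs -1 ... 1 across 4 columns
```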
config.pop('rank')\r\n return config\r\n","repo_name":"jwc-rad/pix2pix3D-CT","sub_path":"source/my3dpix2pix.py","file_name":"my3dpix2pix.py","file_ext":"py","file_size_in_byte":28461,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"2699729084","text":"#Answer: dot is the engine best suited to this problem. It is a problem about representing hierarchy\r\n#dot : graph engine for representing hierarchical relationships\r\n#neato : spring model (useful for expressing mutual relationships, directed)\r\n#fdp : spring model (similar to neato, expresses relationships, undirected)\r\n#sfdp : spring model (like fdp but drawn larger, undirected)\r\n#twopi : draws radial graphs (used for things like neural networks)\r\n#circo : expresses cyclic relationships\r\n\r\ndef func25a_1():\r\n from graphviz import Digraph\r\n\r\n layout_engine = ['dot','neato','fdp','sfdp','twopi','circo']\r\n for i in layout_engine:\r\n f = 'gviz/7th Edition'+i\r\n a = Digraph(filename = f,comment='7th Edition',engine=i)\r\n a.node_attr.update(color='goldenrod2',style='filled',size='7,5')\r\n\r\n a.edge('7th Edition','32V')\r\n a.edge('7th Edition','V7M')\r\n a.edge('7th Edition','Xenix')\r\n a.edge('7th Edition','UniPlus+')\r\n\r\n a.edge('32V','3 BSD')\r\n a.edge('3 BSD','4 BSD')\r\n a.edge('4 BSD','4.1 BSD')\r\n\r\n a.edge('4.1 BSD','8th Edition')\r\n a.edge('8th Edition','9th Edition')\r\n\r\n a.edge('4.1 BSD','4.2 BSD')\r\n a.edge('4.2 BSD','Ultrix-32')\r\n a.edge('4.2 BSD','4.3 BSD')\r\n\r\n a.edge('4.1 BSD','2.8 BSD')\r\n a.edge('1 BSD','2 BSD')\r\n a.edge('2 BSD','2.8 BSD')\r\n a.edge('2.8 BSD','2.9 BSD')\r\n a.edge('2.8 BSD','Ultrix-11')\r\n\r\n a.render()\r\n\r\nfunc25a_1()\r\n\r\n#red-black tree\r\ndef func25a_2():\r\n from graphviz import Digraph\r\n\r\n g = Digraph(comment = 'Red-Black Tree',engine='dot')\r\n\r\n g.attr('graph',ratio='.48')\r\n\r\n g.attr('node',style='filled',color='black',shape='circle',width='.6',\r\n fontname='Helvetica',fontweight='bold',fontcolor='white',fontsize='24',fixedsize='true')\r\n\r\n ns = ['13','1','11','15','25']\r\n for i in ns:\r\n g.node(i)\r\n\r\n g.attr('node',fillcolor='red')\r\n ns2 = ['8','17','22','27']\r\n for j in ns2:\r\n g.node(j)\r\n\r\n g.attr('node',fillcolor='black',shape='record',label='NIL',width='0.4',height='.25',fontsize='16')\r\n for i in range(1,12):\r\n la = 'n'+str(i)\r\n g.node(la)\r\n\r\n g.attr('node',style='filled', fillcolor='red',shape='circle',label=r'\\N',width='0.6',fontname='Helvetica'\r\n ,fontweight='bold',fontcolor='white',fontsize='24',fixedsize='true')\r\n g.node('6')\r\n\r\n relations = [('13','8'),('13','17'),('8','1'),('8','11')\r\n ,('1','n1'),('1','6'),('6','n2'),('6','n3'),('11','n4'),('11','n5')\r\n ,('17','15'),('15','n6'),('15','n7'),('17','25'),('25','27')\r\n ,('25','22'),('22','n8'),('22','n9'),('27','n10'),('27','n11')]\r\n\r\n for r in relations:\r\n g.edge(r[0],r[1])\r\n # g.node('6')\r\n\r\n # g.edge('13','8')\r\n # g.edge('13','17')\r\n\r\n # g.edge('8','1')\r\n # g.edge('8','11')\r\n\r\n # g.edge('1','6')\r\n\r\n # g.edge('17','15')\r\n # g.edge('17','25')\r\n\r\n # g.edge('25','22')\r\n # g.edge('25','27')\r\n\r\n # g.edge('1','n1')\r\n # g.edge('6','n2')\r\n # g.edge('6','n3')\r\n # g.edge('11','n4')\r\n # g.edge('11','n5')\r\n # g.edge('15','n6')\r\n # g.edge('15','n7')\r\n # g.edge('22','n8')\r\n # g.edge('22','n9')\r\n # g.edge('27','n10')\r\n # g.edge('27','n11')\r\n\r\n g.view()\r\n\r\nfunc25a_2()","repo_name":"yuchanmo/python2","sub_path":"과제/25a.py","file_name":"25a.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71050081851","text":"import os\nimport sys\nimport random\nimport json\nimport time\nimport urllib.request\nimport urllib.error\nfrom copy import deepcopy\n\n# Game server address / token\nGAME_SERVER = os.getenv('GAME_SERVER', 
'https://2022contest.gbc.tenka1.klab.jp')\nTOKEN = os.getenv('TOKEN', 'YOUR_TOKEN')\n\nN = 5\nDj = [+1, 0, -1, 0]\nDk = [0, +1, 0, -1]\n\n\n# Call the game server API\ndef call_api(x: str) -> dict:\n url = f'{GAME_SERVER}{x}'\n # On 5xx errors, retry up to 5 times with a 100ms interval\n for i in range(5):\n print(url, flush=True)\n try:\n with urllib.request.urlopen(url) as res:\n return json.loads(res.read())\n except urllib.error.HTTPError as err:\n if 500 <= err.code and err.code < 600:\n print(err.code)\n time.sleep(0.1)\n continue\n else:\n raise\n except ConnectionResetError as err:\n print(err)\n time.sleep(0.1)\n continue\n raise Exception('Api Error')\n\n\n# Get the game_id\n# If it is not specified via an environment variable, return the game_id of a practice match\ndef get_game_id() -> int:\n # If GAME_ID is set in the environment variables, prefer it\n if os.getenv('GAME_ID'):\n return int(os.getenv('GAME_ID'))\n\n # Call the start API to get the game_id of a practice match\n mode = 0\n delay = 0\n\n start = call_api(f'/api/start/{TOKEN}/{mode}/{delay}')\n if start['status'] == 'ok' or start['status'] == 'started':\n return start['game_id']\n\n raise Exception(f'Start Api Error : {start}')\n\n\n# Call the move API so the agent moves in direction d\ndef call_move(game_id: int, d: int) -> dict:\n return call_api(f'/api/move/{TOKEN}/{game_id}/{d}')\n\n\n# Game state class\nclass State:\n def __init__(self, field, agent):\n self.field = deepcopy(field)\n self.agent = deepcopy(agent)\n\n # Update the field at the position of agent idx\n def paint(self, idx: int):\n i, j, k, _ = self.agent[idx]\n if self.field[i][j][k][0] == -1:\n # If the cell is not painted by anyone, paint it with agent idx\n self.field[i][j][k][0] = idx\n self.field[i][j][k][1] = 2\n elif self.field[i][j][k][0] == idx:\n # If it is already painted by agent idx, overwrite it to the fully painted state\n self.field[i][j][k][1] = 2\n elif self.field[i][j][k][1] == 1:\n # If it is half painted by an agent other than idx, reset it to the unpainted state\n self.field[i][j][k][0] = -1\n self.field[i][j][k][1] = 0\n else:\n # If it is fully painted by an agent other than idx, make it half painted\n self.field[i][j][k][1] -= 1\n\n # Rotate agent idx in direction d\n # Directions are described in the problem overview\n def rotate_agent(self, idx: int, d: int):\n self.agent[idx][3] += d\n self.agent[idx][3] %= 4\n\n # Move agent idx forward\n # Cells (i, j, k) are described in the problem overview\n def move_forward(self, idx: int):\n i, j, k, d = self.agent[idx]\n jj = j + Dj[d]\n kk = k + Dk[d]\n if jj >= N:\n self.agent[idx][0] = i // 3 * 3 + (i % 3 + 1) % 3 # [1, 2, 0, 4, 5, 3][i]\n self.agent[idx][1] = k\n self.agent[idx][2] = N - 1\n self.agent[idx][3] = 3\n elif jj < 0:\n self.agent[idx][0] = (1 - i // 3) * 3 + (4 - i % 3) % 3 # [4, 3, 5, 1, 0, 2][i]\n self.agent[idx][1] = 0\n self.agent[idx][2] = N - 1 - k\n self.agent[idx][3] = 0\n elif kk >= N:\n self.agent[idx][0] = i // 3 * 3 + (i % 3 + 2) % 3 # [2, 0, 1, 5, 3, 4][i]\n self.agent[idx][1] = N - 1\n self.agent[idx][2] = j\n self.agent[idx][3] = 2\n elif kk < 0:\n self.agent[idx][0] = (1 - i // 3) * 3 + (3 - i % 3) % 3 # [3, 5, 4, 0, 2, 1][i]\n self.agent[idx][1] = N - 1 - j\n self.agent[idx][2] = 0\n self.agent[idx][3] = 1\n else:\n self.agent[idx][1] = jj\n self.agent[idx][2] = kk\n\n # Check whether two agents are on the same cell\n def is_same_pos(self, a: [int], b: [int]) -> bool:\n return a[0] == b[0] and a[1] == b[1] and a[2] == b[2]\n\n # Check whether the cell agent idx is on is painted by that agent\n def is_owned_cell(self, idx: int) -> bool:\n i = self.agent[idx][0]\n j = self.agent[idx][1]\n k = self.agent[idx][2]\n return self.field[i][j][k][0] == idx\n\n # Take an array of movement directions for all agents, move them, and update the field\n # -1 means no movement (0-3 are the same as described in the move API documentation)\n def move(self, move: [int]):\n # Agent movement processing\n for idx in range(6):\n if move[idx] == -1:\n continue\n self.rotate_agent(idx, move[idx])\n self.move_forward(idx)\n\n # Field update processing\n for idx in 
range(6):\n if move[idx] == -1:\n continue\n ok = True\n for j in range(6):\n if idx == j or move[j] == -1 or not self.is_same_pos(self.agent[idx], self.agent[j]) or self.is_owned_cell(idx):\n continue\n # If an agent other than idx is at the destination, only repair is possible, so set the flag to false to skip the update unless the cell belongs to agent idx\n ok = False\n break\n\n if not ok:\n continue\n self.paint(idx)\n\n\nclass Bot:\n def solve(self):\n game_id = get_game_id()\n next_d = random.randint(0, 3)\n while True:\n # Call the move API\n move = call_move(game_id, next_d)\n print('status = {}'.format(move['status']), file=sys.stderr, flush=True)\n if move['status'] == \"already_moved\":\n continue\n elif move['status'] != 'ok':\n break\n print('turn = {}'.format(move['turn']), file=sys.stderr, flush=True)\n print('score = {} {} {} {} {} {}'.format(move['score'][0], move['score'][1], move['score'][2], move['score'][3], move['score'][4], move['score'][5]), file=sys.stderr, flush=True)\n # Simulate moving in all 4 directions\n best_c = -1\n best_d = []\n for d in range(4):\n m = State(move['field'], move['agent'])\n m.move([d, -1, -1, -1, -1, -1])\n # Count the number of cells painted by our own agent\n c = 0\n for i in range(6):\n for j in range(N):\n for k in range(N):\n if m.field[i][j][k][0] == 0:\n c += 1\n # Keep a list of the movement directions that let our agent paint the most cells\n if c > best_c:\n best_c = c\n best_d = [d]\n elif c == best_c:\n best_d.append(d)\n # Randomly pick a direction from the list of directions that paint the most cells\n next_d = random.choice(best_d)\n\n\nif __name__ == \"__main__\":\n bot = Bot()\n bot.solve()","repo_name":"ayanamizuta/cpro","sub_path":"tenka1/tenka1-2022/main_orginal.py","file_name":"main_orginal.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74421692410","text":"import pyaudio\nimport wave\nimport sys\n\n\nCHUNK_SIZE = 1024\n\ndef play_wav(wav_filename, chunk_size=CHUNK_SIZE):\n '''\n Play (on the attached system sound device) the WAV file\n named wav_filename.\n '''\n\n try:\n wf = wave.open(wav_filename, 'rb')\n except IOError as ioe:\n sys.stderr.write('IOError on file ' + wav_filename + '\\n' + \\\n str(ioe) + '. Skipping.\\n')\n return\n except EOFError as eofe:\n sys.stderr.write('EOFError on file ' + wav_filename + '\\n' + \\\n str(eofe) + '. Skipping.\\n')\n return\n\n # Instantiate PyAudio.\n p = pyaudio.PyAudio()\n\n # Open stream.\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n\n data = wf.readframes(chunk_size)\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(chunk_size)\n\n # Stop stream.\n stream.stop_stream()\n stream.close()\n\n # Close PyAudio.\n p.terminate()\n\n","repo_name":"keiffster/talk-y","sub_path":"src/talky/utils/wav.py","file_name":"wav.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"39911862913","text":"from aiogram.utils.emoji import emojize\nfrom aiogram.utils.markdown import text\n\n# QUESTIONNAIRE MESSAGES:\nquestionnaire_start = text(emojize(\"Привіт!👋\\nРозкажи, будь ласка: де та з ким ти хочеш жити 😏\"))\nquestionnaire_floor = text(emojize(\"Де саме ти хотів би оселитися?🧳\"))\nquestionnaire_room = text(emojize(\"Який номер кімнати в тебе з щасливих?🎰\"))\nquestionnaire_roommates = text(emojize(\"Останнє, з ким ти хотів/ла би жити? 
🐸\"))\nsave_button = text(emojize(\"Зберегти📝\"))\nquestionnaire_goodbye = text(emojize(\"Інформація буде передана на обробку, дякую за інформацію 📃\"))\n\n# QUESTIONNAIRE BUTTONS:\n\nplace1 = text(emojize(\"На Майбороди, з коліверами\"))\nplace2 = text(emojize(\"У горах, наодинці🌄\"))\n\n# FOR PLACE 1 (МАЙБОРОДА)\nfloor1 = text(emojize(\"На 1️⃣ поверсі\"))\nfloor2 = text(emojize(\"На 2️⃣ поверсі\"))\n# FOR PLACE 2 (ГОРА)\nmountain_bot = text(emojize(\"У підніжжя 🏞\"))\nmountain_msg = text(emojize(\"Обери собі компаньона!🙌\"))\nmountain_top = text(emojize(\"На вершині 🌄\"))\n\n# FOR FLOOR1 (МАЙБОРОДА)\nroom1 = text(emojize(\"1️⃣\").encode(\"utf-8\"))\nroom2 = text(emojize(\"2️⃣\"))\nroom3 = text(emojize(\"3️⃣\"))\n# FOR FLOOR2 (МАЙБОРОДА)\nroom4 = text(emojize(\"4️⃣\"))\nroom5 = text(emojize(\"5️⃣\"))\nroom6 = text(emojize(\"6️⃣\"))\n\n# MAIBORODA FRIENDS\nroom1_friend1 = \"Алекс\"\nroom1_friend2 = \"Фродо\"\nroom2_friend1 = \"Лазло\"\nroom2_friend2 = \"Каска\"\nroom3_friend1 = \"Іван\"\nroom3_friend2 = \"Василь\"\nroom4_friend1 = \"Олекса\"\nroom4_friend2 = \"Філіп\"\nroom5_friend1 = \"Фердинанд\"\nroom5_friend2 = \"Кіра\"\nroom6_friend1 = \"Леся\"\nroom6_friend2 = \"Валерій\"\n\n# FOR MOUNTAIN (bot)\nriver = text(emojize(\"Біля річки 🐸\"))\ntree = text(emojize(\"На дереві 🌳\"))\n# FOR MOUNTAIN (top)\nigloo = text(emojize(\"В іглу ☃\"))\ncave = text(emojize(\"У печері 🗻\"))\n\n# FOR MOUNTAIN (RIVER) PET\npet_river1 = text(emojize(\"Золоту рибку!🐡\"))\npet_river2 = text(emojize(\"Медведя!🐻\"))\n# FOR MOUNTAIN (TREE) PET\npet_tree1 = text(emojize(\"Білочку🐿\"))\npet_tree2 = text(emojize(\"Сову🦉\"))\n# IGLOO PET\npet_igloo1 = text(emojize(\"Чукчу!⛄\"))\npet_igloo2 = text(emojize(\"Привида!👻\"))\n# CAVE PET\npet_cave1 = text(emojize(\"Пані Самотність!🧘‍♂️\"))\npet_cave2 = text(emojize(\"Сніговика!⛄\"))\n\n# SINGLE-PURPOSE MESSAGES:\nstart_message = text(emojize(\"Привіт!👋\"))\nregistered_message = \"Давно не бачилися!\"\nhave_a_nice_lecture = text(emojize(\"Продуктивної лекції тобі! 
😉\"))\nvote_thank_you = text(emojize(\"Дякую, твій голос враховано!⚖\"))\nFINISHED = \"Сподіваюся, тобі сподобалася лекція\"\nLECTURE_START = \"ЛЕКЦІЯ ПОЧАЛАСЯ, УАЛІВЕЦЬ ВСТАВАЙ!\"\nNEW_VALUE = \"Прохання проголосувати за/проти затвердження нової цінності: Будь Програмістом!\"\nza = \"За\"\nproty = \"Проти\"\n\n# Q&A RESPONSES:\nQ_and_A_welcoming_message = text(emojize(\"Привіт, тут ти можеш задати стільки питань, скільки забажаєш\\n\"\n \"Коли закінчиш, натисни клавішу 'Вийти'😉\"))\nQ_and_A_confirmation_message = text(emojize(\"Записав!📃\"))\nexit_Q = \"Вийти\"\nQ_and_A_goodbye_message = text(emojize(\"Дякую.\\nЗадані питання будуть доставленими 🧭\"))\n# TEMPLATE = text(emojize())\n\npresence = \"PRESENT09\"\nvoter = \"FOR02937\"\nagainst = \"AGAINST02937\"\nMESSAGES = {\n\n \"start_message\": start_message,\n \"registered_message\": registered_message,\n \"have_a_nice_lecture\": have_a_nice_lecture,\n \"vote_thank_you\": vote_thank_you,\n \"FINISHED\": FINISHED,\n \"LECTURE_START\": LECTURE_START,\n \"NEW_VALUE\": NEW_VALUE,\n \"za\": za,\n \"proty\": proty,\n\n # callback queries:\n \"presence\": presence,\n \"voter\": voter,\n \"against\": against,\n\n # Q&A RESPONSES:\n \"Q_and_A_welcoming_message\": Q_and_A_welcoming_message,\n \"Q_and_A_confirmation_message\": Q_and_A_confirmation_message,\n \"exit_Q\": exit_Q,\n \"Q_and_A_goodbye_message\": Q_and_A_goodbye_message,\n # QUESTIONNAIRE RESPONSES:\n \"questionnaire_start\": questionnaire_start,\n \"questionnaire_floor\": questionnaire_floor,\n \"questionnaire_room\": questionnaire_room,\n \"questionnaire_roommates\": questionnaire_roommates,\n \"save_button\": save_button,\n \"questionnaire_goodbye\": questionnaire_goodbye,\n # QUESTIONNAIRE BUTTONS\n \"place1\": place1,\n \"place2\": place2,\n \"floor1\": floor1,\n \"floor2\": floor2,\n \"mountain_bot\": mountain_bot,\n \"mountain_top\": mountain_top,\n \"mountain_msg\": mountain_msg,\n\n}\nROOMS = {\n \"room1\": room1,\n \"room2\": room2,\n \"room3\": room3,\n \"room4\": room4,\n \"room5\": room5,\n \"room6\": room6,\n \"river\": river,\n \"tree\": tree,\n \"igloo\": igloo,\n \"cave\": cave,\n}\n\nPETS_AND_FRIENDS = {\n \"pet_river1\": pet_river1,\n \"pet_river2\": pet_river2,\n \"pet_tree1\": pet_tree1,\n \"pet_tree2\": pet_tree2,\n \"pet_igloo1\": pet_igloo1,\n \"pet_igloo2\": pet_igloo2,\n \"pet_cave1\": pet_cave1,\n \"pet_cave2\": pet_cave2,\n\n \"room1_friend1\": room1_friend1,\n \"room1_friend2\": room1_friend2,\n \"room2_friend1\": room2_friend1,\n \"room2_friend2\": room2_friend2,\n \"room3_friend1\": room3_friend1,\n \"room3_friend2\": room3_friend2,\n \"room4_friend1\": room4_friend1,\n \"room4_friend2\": room4_friend2,\n \"room5_friend1\": room5_friend1,\n \"room5_friend2\": room5_friend2,\n \"room6_friend1\": room6_friend1,\n \"room6_friend2\": room6_friend2,\n}\n","repo_name":"Salz0/Derzhavets_bot","sub_path":"messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35884359949","text":"import numpy as np\nimport math\nfrom matplotlib import pyplot as plt\nfrom typing import List\nfrom TransferFn import TransferFn\n\nclass singleBinDFT(TransferFn):\n def __init__(self, centerFreq: float, samplingFreq: float, NCyclesSampled: int):\n self.fCenter = centerFreq\n self.fSamp = samplingFreq\n self.NCyclesSampled = NCyclesSampled\n\n self.relativeFreq = self.fCenter / self.fSamp\n NSamples = self.NCyclesSampled / self.relativeFreq\n self.timeData 
= np.linspace(0, NSamples - 1, int(round(NSamples))) # linspace needs an integer sample count\n self.sinTable = np.sin(self.timeData * 2* math.pi * self.relativeFreq)\n self.cosTable = np.cos(self.timeData * 2* math.pi * self.relativeFreq)\n return\n\n @staticmethod\n def getAmp(t: np.array, y: np.array, relFreq: float) -> float:\n im = y * np.sin(t * 2* math.pi * relFreq)\n re = y * np.cos(t * 2* math.pi * relFreq)\n return 2 * math.sqrt(np.sum(im)**2 + np.sum(re)**2) / len(t)\n\n def apply(self, frequency: float) -> float:\n y: np.array = np.sin(self.timeData * 2* math.pi * frequency / self.fSamp)\n return singleBinDFT.getAmp(self.timeData, y, self.relativeFreq)\n\n","repo_name":"mdion1/LightsAndSoundProj","sub_path":"UsefulScripts/DFTFilterDesign/singleBinDFT.py","file_name":"singleBinDFT.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17599951125","text":"from app import app\nfrom dash.dependencies import Input, Output\nfrom dash import callback_context\n\nfrom models.building import geometry\nfrom models.building.buildingFactory import read_building_data_yaml, save_building_data_yaml\n\n@app.callback(\n Output('geometry_done_button_id', 'disabled'), \n Output('geometry_done_button_id', 'color'),\n Output(\"n_storys_id\", \"value\"),\n Output(\"story_height_id\", \"value\"),\n Output(\"area_win_a_id\", \"value\"),\n Output(\"area_win_b_id\", \"value\"),\n Output(\"area_win_c_id\", \"value\"),\n Output(\"area_win_d_id\", \"value\"),\n Input(\"n_storys_id\", \"value\"),\n Input(\"story_height_id\", \"value\"),\n Input(\"area_win_a_id\", \"value\"),\n Input(\"area_win_b_id\", \"value\"),\n Input(\"area_win_c_id\", \"value\"),\n Input(\"area_win_d_id\", \"value\"),\n Input(\"geometry_done_button_id\", \"n_clicks\"),\n)\ndef inputDone(n_storys, story_height, area_win_a,area_win_b,area_win_c, area_win_d, clicks):\n ctx = callback_context\n # check which input has triggered the callback\n button_id = ctx.triggered[0]['prop_id'].split('.')[0]\n\n # on pageload: fill input fields with building data\n if not ctx.triggered:\n building = read_building_data_yaml(\"userID\")\n n_storys = building['nStorys']\n story_height = building['thZones']['livingSpace']['storyHeight']\n area_win_a = building['thZones']['livingSpace']['transPlanes']['windowA']['area']\n area_win_b = building['thZones']['livingSpace']['transPlanes']['windowB']['area']\n area_win_c = building['thZones']['livingSpace']['transPlanes']['windowC']['area']\n area_win_d = building['thZones']['livingSpace']['transPlanes']['windowD']['area']\n return False, \"success\", n_storys, story_height, area_win_a, area_win_b, area_win_c, area_win_d\n\n # disable button if one input field is empty\n if not n_storys or not story_height or not area_win_a or not area_win_b or not area_win_c or not area_win_d:\n return True, \"primary\", n_storys, story_height, area_win_a, area_win_b, area_win_c, area_win_d\n\n # save building data on button click\n if button_id == \"geometry_done_button_id\":\n # calculate geometry values\n height = geometry.height_from_story(n_storys, story_height)\n\n building = read_building_data_yaml(userID='userID')\n perimeter = building['perimeter']\n groundArea = building['groundArea']\n\n heatedArea = groundArea * n_storys\n volume = groundArea * height\n facadearea = geometry.facade_area(perimeter, height) - sum([area_win_a, area_win_b, area_win_c, area_win_d])\n\n\n building['nStorys'] = n_storys\n building['thZones']['livingSpace']['storyHeight'] = story_height\n 
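# [illustrative note, not from the original app] With hypothetical inputs n_storys=2, story_height=2.5,\n # groundArea=80 and perimeter=36, the derived values above would be height = 5.0, heatedArea = 160,\n # volume = 400.0, and facadearea = facade_area(36, 5.0) minus the four window areas. For example:\n # height = geometry.height_from_story(2, 2.5) # assumed to return 2 * 2.5 = 5.0\n # heatedArea = 80 * 2 # 160\n # volume = 80 * 5.0 # 400.0\n 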
building['thZones']['livingSpace']['opaquePlanes']['facade']['area'] = facadearea\n building['thZones']['livingSpace']['opaquePlanes']['roof']['area'] = groundArea\n building['thZones']['livingSpace']['opaquePlanes']['floor']['area'] = groundArea\n building['thZones']['livingSpace']['floorArea'] = heatedArea\n building['thZones']['livingSpace']['volume'] = volume\n building['thZones']['livingSpace']['transPlanes']['windowA']['area'] = area_win_a\n building['thZones']['livingSpace']['transPlanes']['windowB']['area'] = area_win_b\n building['thZones']['livingSpace']['transPlanes']['windowC']['area'] = area_win_c\n building['thZones']['livingSpace']['transPlanes']['windowD']['area'] = area_win_d\n save_building_data_yaml(building)\n\n return False, \"success\", n_storys, story_height, area_win_a, area_win_b, area_win_c, area_win_d\n\n # activate button if all inputs are filled\n else:\n return False, \"primary\", n_storys, story_height, area_win_a, area_win_b, area_win_c, area_win_d\n\n\n ","repo_name":"rehomewebapp/REhome","sub_path":"WebApp/controllers/geometry_controller.py","file_name":"geometry_controller.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20791958864","text":"import xml.etree.ElementTree as ET\r\nimport pandas as pd\r\nimport csv\r\n\r\nFILE = 'Posts.xml'\r\nCOLS = ['Id', 'PostTypeId', 'ParentId', 'CreationDate', 'Score', 'Body', 'OwnerUserId', 'LastEditorUserId',\r\n 'LastEditDate', 'LastActivityDate', 'CommentCount', 'ContentLicense']\r\n\r\n# list of acceptedanswerId extraction from targeted questions' file\r\ndata = pd.read_csv('output.csv') # skip header row\r\ndf = pd.DataFrame(data)\r\nx=df.iloc[:,2].dropna()\r\nansIdLst=set(x)\r\nprint(len(ansIdLst))\r\n\r\n\r\ncontext = ET.iterparse(FILE, events=(\"start\", \"end\"),\r\n parser=ET.XMLParser(encoding='utf-8'))\r\n\r\nwith open('outputAnswer.csv', 'w', newline='', encoding='utf-8') as csvfile:\r\n csvwriter = csv.writer(csvfile)\r\n csvwriter.writerow(COLS)\r\n _, root = next(context)\r\n for event, elem in context:\r\n if event == \"end\" and elem.tag == \"row\":\r\n # candidate Id for answerpost\r\n ansId = elem.attrib.get('Id', 'None')\r\n if int(ansId) in ansIdLst: \r\n data = []\r\n for col in COLS:\r\n data.append(elem.attrib.get(col, ''))\r\n csvwriter.writerow(data)\r\n # progress\r\n if int(elem.attrib['Id']) % 100000 == 0:\r\n print('done', elem.attrib['Id'])\r\n elem.clear()\r\n root.clear()","repo_name":"iamsanjaymalakar/LowCodeChallengesMSR21","sub_path":"Garbage/Initial Codes/extract-answers.py","file_name":"extract-answers.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"33391762910","text":"def selection_algor(nums):\r\n \"\"\"\r\n In this algorithm the list (or array) is divided into two parts: a list of sorted elements and\r\n a list of elements that still need to be sorted. First, the smallest element in the second part is found.\r\n It is appended to the end of the first part. 
In this way the algorithm gradually builds the list from smallest to largest.\r\n \"\"\"\r\n # i -> number of sorted values\r\n for i in range(len(nums)):\r\n lowest_value_index = i # assume the smallest element is the first one\r\n for j in range(i + 1, len(nums)): # iterate over the unsorted elements\r\n if nums[j] < nums[lowest_value_index]:\r\n lowest_value_index = j\r\n nums[i], nums[lowest_value_index] = nums[lowest_value_index], nums[i] # swap the smallest with the first element\r\n\r\n\r\nrandom_list_of_nums = [12, 8, 3, 20, 11]\r\nselection_algor(random_list_of_nums)\r\nprint(random_list_of_nums)\r\n\r\n\r\n#_______________________________________________________________________________________________________________________\r\ndef findSmallest(inputes_list):\r\n smallest = inputes_list[0]\r\n smallest_index = 0\r\n for index in range(1, len(inputes_list)):\r\n if inputes_list[index] < smallest:\r\n smallest = inputes_list[index]\r\n smallest_index = index\r\n return smallest_index\r\n\r\n\r\ndef selection_sort(input_list):\r\n new_list = []\r\n for i in range(len(input_list)):\r\n smallest = findSmallest(input_list)\r\n new_list.append(input_list.pop(smallest))\r\n return new_list\r\n\r\n\r\nprint(selection_sort([2, 6, 12, 10]))\r\n","repo_name":"irynavaskiv1/diff_tasks","sub_path":"1_algorithms/sorting_methods/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70904039612","text":"import concurrent.futures\nimport hashlib\nimport sched\n\nimport bs4\nimport cachecontrol\nimport feedparser\nimport requests\nimport requests.exceptions\n\nfrom feedbuffer import settings, database, log\n\n_logger = log.get_logger(__name__)\n_session = cachecontrol.CacheControl(requests.Session())\n_session.headers['User-Agent'] = settings.USER_AGENT\nexecutor = concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAXIMUM_UPDATE_WORKERS)\nscheduled = {}\nscheduler = sched.scheduler()\n\n# XML-processing instructions have to end with \"?>\". The original code erroneously ends them with \">\" which leads to\n# errors in almost all parsers, including BeautifulSoup with the lxml treebuilder itself -- so we fix this at runtime.\nbs4.element.ProcessingInstruction.SUFFIX = '?>'\n\n\ndef extract_feed_entries(soup):\n return [item.extract() for item in soup(['item', 'entry'])]\n\n\ndef update_feed(url):\n try:\n response = _session.get(url, timeout=settings.REQUEST_TIMEOUT)\n except requests.exceptions.Timeout:\n return\n\n # Don't let requests do the content decoding, instead just supply the encoding detected by requests and let\n # BeautifulSoup and the treebuilder do their thing. 
For example: BeautifulSoup4 with the lxml treebuilder only\n # correctly parses content with tags when it can decode the bytes by itself.\n try:\n soup = bs4.BeautifulSoup(response.content, 'xml', from_encoding=response.encoding)\n except UnicodeDecodeError:\n soup = bs4.BeautifulSoup(response.content, 'xml', from_encoding=response.apparent_encoding)\n\n entries = extract_feed_entries(soup)\n # TODO: Remove the feedparser dependency and figure out all ways to get access to the feed item id\n parsed_feed = feedparser.parse(response.text)\n is_rss = parsed_feed.version.startswith('rss')\n\n entry_ids = []\n for index, parsed_entry in enumerate(parsed_feed.entries):\n id_ = parsed_entry.get('id', None)\n\n # id might be non-existent or simply empty, make sure to handle both cases correctly\n if not id_:\n id_ = hashlib.sha1(entries[index].encode(settings.ENCODING)).hexdigest()\n _logger.info('No identifier found for entry %d of %s. Inserting SHA-1 id: %s...', index, url, id_)\n id_tag = soup.new_tag('guid' if is_rss else 'id')\n id_tag.string = id_\n entries[index].append(id_tag)\n entry_ids.append(id_)\n\n # Fix missing RSS channel element\n if is_rss and not soup.find('channel'):\n _logger.info('No RSS channel element found for %s. Inserting channel element...', url)\n rss = soup.find('rss')\n rss.append(soup.new_tag('channel')) # new_tag lives on the BeautifulSoup object, not on the Tag\n\n database.update_feed(url, str(soup), zip(entry_ids, (str(entry) for entry in entries)))\n\n\ndef update_and_reschedule_feed(url):\n executor.submit(update_feed, url)\n schedule_feed_update(url)\n\n\ndef schedule_feed_update(url):\n if url in scheduled:\n if scheduled[url] in scheduler.queue:\n try:\n scheduler.cancel(scheduled[url])\n except ValueError:\n pass\n del scheduled[url]\n\n if not database.feed_exists(url):\n return\n\n feed = database.get_feed(url)\n event = scheduler.enter(feed.update_interval, 1, update_and_reschedule_feed, (url,))\n scheduled[url] = event\n\n\ndef generate_feed(feed_data, entries):\n feed = bs4.BeautifulSoup(feed_data, 'xml')\n\n # Find RSS channel element directly\n root = feed.find(['channel', 'feed'])\n for entry in entries:\n entry = bs4.BeautifulSoup(entry, 'xml')\n entry = entry.find(['item', 'entry'])\n root.insert(len(root.contents), entry)\n\n return str(feed).encode(settings.ENCODING)\n","repo_name":"cryzed/Feedbuffer","sub_path":"feedbuffer/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"} +{"seq_id":"39491219614","text":"from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom django.core.validators import FileExtensionValidator\n\n\nclass PipelineForm(forms.Form):\n input_file = forms.FileField(\n label=\"Input file\",\n validators=[FileExtensionValidator([\"csv\", \"tsv\"])]\n )\n\n def __init__(self, *args, **kwargs):\n super(PipelineForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = \"pipeline_form\"\n self.helper.form_class = \"pipeline\"\n self.helper.form_method = \"post\"\n self.helper.form_action = \"pipeline:submit\"\n\n self.helper.add_input(Submit('submit', 'Submit'))\n","repo_name":"ivansg44/lexmapr_django","sub_path":"lexmapr_django/pipeline/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37126448685","text":"from glob import glob\nimport argparse\nfrom 
shutil import move\nfrom os.path import join, basename\nfrom os import makedirs\nimport numpy as np\n\n\nparser = argparse.ArgumentParser('Export model', add_help=False)\nparser.add_argument('-i', '--input', type=str, help='Input (directory).')\nparser.add_argument('-o', '--output', type=str, help='Output (directory).')\nparser.add_argument('-n', '--num', type=int, help='Number of items.')\nparser.add_argument('-s', '--seed', default=None, type=int, help='Random seed.')\nargs = parser.parse_args()\n\n\nfiles = sorted(glob(join(args.input, 'images', '*.*')))\nif args.seed is not None:\n np.random.seed(args.seed)\nsel = np.random.choice(files, args.num, replace=False)\nmakedirs(join(args.output, 'images'), exist_ok=True)\nmakedirs(join(args.output, 'labels'), exist_ok=True)\nfor f in sel:\n f_, = glob(join(args.input, 'labels', '.'.join(basename(f).split('.')[:-1]) + '*'))\n f_dst = join(args.output, 'images', basename(f))\n f_dst_ = join(args.output, 'labels', basename(f_))\n\n print(f, '-->', f_dst)\n move(f, f_dst)\n print(f_, '-->', f_dst_)\n move(f_, f_dst_)\n print()\n","repo_name":"FZJ-INM1-BDA/neurips22-cell-seg","sub_path":"preprocess_val_data.py","file_name":"preprocess_val_data.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"24202007304","text":"from çalışan import çalışan\r\n# creating the maviyaka class, which inherits from the çalışan class\r\nclass maviyaka(çalışan):\r\n # writing the init method that will contain the requested information\r\n def __init__(self, tc_no, ad, soyad, yas, cinsiyet, uyruk, sektor, tecrube, maas, yeni_maas, yipranma_payi):\r\n # using the super method so the values inherited from the insan class can be used\r\n super().__init__(tc_no, ad, soyad, yas, cinsiyet, uyruk, sektor, tecrube, maas, yeni_maas)\r\n self.__yipranma_payi = yipranma_payi\r\n # checking with a while loop that the wear-and-tear share is entered within the desired range, asking again on invalid values\r\n while self.__yipranma_payi <= 0 or self.__yipranma_payi >= 1:\r\n self.__yipranma_payi = float(input(\"Lütfen 0 ile 1 arasında bir yıpranma payı değeri girin: \"))\r\n\r\n # writing get and set methods to update the private values\r\n def get_yipranma_payi(self):\r\n return self.__yipranma_payi\r\n\r\n def set_yipranma_payi(self, yeni_yipranma_payi):\r\n if yeni_yipranma_payi <= 0 or yeni_yipranma_payi >= 1:\r\n yeni_yipranma_payi = float(input(\"Lütfen 0 ile 1 arasında bir yıpranma payı değeri girin: \"))\r\n self.__yipranma_payi = yeni_yipranma_payi\r\n\r\n # writing the zam_hakki method to compute the new salary\r\n def zam_hakki(self):\r\n # comparing the tecrube and maas values as requested and computing the new salary using the wear-and-tear share\r\n if self.get_tecrube() / 12 < 2:\r\n zam_orani = self.__yipranma_payi * 10\r\n self.set_yeni_maas(self.get_maas() + self.get_maas() * (zam_orani / 100))\r\n\r\n elif self.get_tecrube() / 12 >= 2 and self.get_tecrube() < 4 and self.get_maas() < 15000:\r\n zam_orani = (self.get_maas() % self.get_tecrube()) / 2 + (self.__yipranma_payi * 10)\r\n self.set_yeni_maas(self.get_maas() + self.get_maas() * (zam_orani / 100))\r\n\r\n elif self.get_tecrube() / 12 >= 4 and self.get_maas() < 25000:\r\n zam_orani = (self.get_maas() % self.get_tecrube()) / 3 + (self.__yipranma_payi * 10)\r\n self.set_yeni_maas(self.get_maas() + self.get_maas() * (zam_orani / 100))\r\n\r\n else:\r\n 
self.set_yeni_maas(self.get_maas())\r\n\r\n 
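# [worked example, not from the original file] For tecrube=18 months and maas=12000 with\r\n # yipranma_payi=0.5, tecrube / 12 = 1.5 < 2, so zam_orani = 0.5 * 10 = 5.0 and\r\n # yeni_maas = 12000 + 12000 * (5.0 / 100) = 12600.0.\r\n # (Note the later branches compare get_tecrube() itself, not get_tecrube() / 12, against 4;\r\n # this looks inconsistent but is kept as in the original.)\r\n 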
# writing the str method that returns the requested values as a string\r\n def __str__(self):\r\n self.zam_hakki()\r\n return f\"AD: {self.get_ad()}, SOYAD: {self.get_soyad()}, TECRUBE(AY): {self.get_tecrube()} AY, YENI MAAS: {self.get_yeni_maas()}\"\r\n\r\n\r\n\r\n","repo_name":"YusufKarahan20/final-proje","sub_path":"maviyaka.py","file_name":"maviyaka.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35334639685","text":"# Approximation for the table of values - cold curve\r\n# rho - x, T - y\r\n\r\n\r\n\r\n# Import `load_workbook` module from `openpyxl`\r\nfrom openpyxl import load_workbook\r\nfrom math import log10\r\n\r\n# Load in the workbook\r\nwb = load_workbook('C:/Users/xaha9/Desktop/ШР/deep_lom/aluminium.xlsx')\r\n\r\n# Get sheet names\r\n#print(wb.get_sheet_names())\r\n\r\n# Load a specific sheet by name\r\n\r\nsheet = wb.get_sheet_by_name('energy_ferrum_исходник')\r\n## Retrieve the value of a certain cell\r\n#print(sheet['A2'].value)\r\n#\r\n## Select element 'B2' of your sheet\r\n#c = sheet['B2']\r\n#\r\n## Retrieve the row number of your element\r\n#print(c.row)\r\n#\r\n## Retrieve the column letter of your element\r\n#print(c.column)\r\n#\r\n## Retrieve the coordinates of the cell\r\n#print(c.coordinate)\r\n\r\n\r\ndef bilinear_interpolation_energy(T, rho):\r\n x1 = 0\r\n x2 = 0\r\n y1 = 0\r\n y2 = 0\r\n\r\n for cellObj in sheet['B1':'OL1']:\r\n for cell in cellObj:\r\n if cell.value <= log10(rho):\r\n x1 = cell\r\n else:\r\n\r\n break\r\n\r\n\r\n x2 = sheet.cell(row=x1.row, column=x1.column + 1)\r\n\r\n print(\"x1 = \", x1, \"X2 = \", x2)\r\n for cellObj in sheet['A2':'A502']:\r\n for cell in cellObj:\r\n if cell.value >= log10(T):\r\n y1 = cell\r\n else:\r\n break\r\n\r\n y2 = sheet.cell(row=y1.row + 1, column=y1.column)\r\n print(\"y1 = \", y1, \"y2 = \", y2)\r\n\r\n\r\n def interpolate(x1, x2, y1, y2, T, rho):\r\n\r\n f1 = (10 **(x2.value) - rho) / (10 **(x2.value) - 10 **(x1.value)) * sheet.cell(row=y1.row, column=x1.column).value + (rho - 10 **(x1.value)) / (10 **(x2.value) - 10 **(x1.value)) * sheet.cell(row=y1.row, column=x2.column).value\r\n f2 = (10 **(x2.value) - rho) / (10 **(x2.value) - 10 **(x1.value)) * sheet.cell(row=y2.row, column=x1.column).value + (rho - 10 **(x1.value)) / (10 **(x2.value) - 10 **(x1.value)) * sheet.cell(row=y2.row, column=x2.column).value\r\n\r\n return (10 **(y2.value) - T) / (10 **(y2.value) - 10 **(y1.value)) * f1 + (T - 10 **(y1.value)) / (10 **(y2.value) - 10 **(y1.value)) * f2\r\n\r\n return interpolate(x1, x2, y1, y2, T, rho)\r\n\r\n#print(bilinear_interpolation(100, 0.01))\r\n\r\n\r\n\r\n\r\n","repo_name":"markoshura/deep_vlom","sub_path":"bilinear_interpolation_energy.py","file_name":"bilinear_interpolation_energy.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29133258874","text":"import contextlib\nfrom functools import partial\nfrom .report import current_report\nfrom ..utils import (\n clone_tensors,\n map_structure_and_replace_key,\n flatten,\n for_each_grad_tensor,\n)\nimport json\nimport numpy\nimport paddle\nimport torch\n\n\n@contextlib.contextmanager\ndef register_hooker(model):\n marker = model.marker\n\n remove_handles = []\n idx = 0\n # traversal_for_hook includes layers which we need add pre and post hook\n # for model structure, but not info hook (they are in black list)\n models = list(marker.traversal_for_hook())\n for mod in models:\n pre_handle = mod.register_forward_pre_hook(partial(pre_structure_hook))\n 
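# [illustrative sketch, not part of padiff] The registration pattern above mirrors plain\n # PyTorch hooks; a minimal standalone example:\n # import torch\n # net = torch.nn.Linear(4, 2)\n # handle = net.register_forward_hook(lambda mod, inp, out: print(out.shape))\n # net(torch.randn(1, 4)) # prints torch.Size([1, 2])\n # handle.remove() # detach the hook, as the cleanup after the yield below does\n 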
if mod not in marker.black_list:\n handle = mod.register_forward_post_hook(partial(info_hook, net_id=idx))\n remove_handles.append(handle)\n post_handle = mod.register_forward_post_hook(partial(post_structure_hook))\n remove_handles.extend([pre_handle, post_handle])\n idx += 1\n yield\n for h in remove_handles:\n h.remove()\n\n\n\"\"\"\n hooks used to build module structure\n\"\"\"\n\n\ndef pre_structure_hook(layer, input):\n report = current_report()\n report.stack.push_layer(layer)\n if layer in report.marker.layer_map:\n report.stack._top().layer_type = \"in map\"\n return None\n\n\ndef post_structure_hook(layer, input, output):\n report = current_report()\n retval = report.stack.pop_layer(layer)\n if retval in report.marker.black_list:\n report.stack._top().children.pop()\n return None\n\n\n\"\"\"\n hook for record forward infos\n\"\"\"\n\n# do not enter api layer which is triggered under info_hook\n__in_info_hook__ = False\n\n\ndef info_hook(model, input, output, net_id):\n \"\"\"\n Notice: the model is a origin layer/module, not ProxyModel\n \"\"\"\n global __in_info_hook__\n if __in_info_hook__:\n return None\n\n report = current_report()\n\n if report is None or report.stack._top() is None:\n return None\n\n # if this api is not processing tensors, do not create report\n if output is None or all([not isinstance(x, (paddle.Tensor, torch.Tensor)) for x in flatten(output)]):\n return None\n\n # if an api under black_list_recursively, do not create report\n # a layer under black_list_recursively will not register this hook, except it is a mapped layer\n # report.stack._top().net can not be an api layer !!!\n if report.stack._top().net in report.marker.black_list_recursively and hasattr(model, \"__api__\"):\n return None\n\n # if this api is called under layer/module provided by framework, skip it\n python_module = report.stack._top().net.__module__\n if hasattr(model, \"__api__\") and (python_module.startswith(\"paddle.\") or python_module.startswith(\"torch.\")):\n return None\n\n __in_info_hook__ = True\n\n # if current model is an api layer, we do not want to hold it\n if hasattr(model, \"__api__\"):\n _model = padiff_layer_str(model)\n else:\n _model = model\n\n new_in = clone_tensors(input)\n new_out = clone_tensors(output)\n fwd_item = report.put_item(\"forward\", new_in, new_out, _model, net_id)\n bwd_item = report.put_item(\"backward\", new_in, new_out, _model, net_id)\n bwd_item.set_forward(fwd_item)\n\n report.stack.push_api(_model, fwd_item, bwd_item)\n\n for i, (t,) in enumerate(for_each_grad_tensor(input)):\n t.register_hook(partial(tensor_hook, bwd_item=bwd_item, nth_tensor=i, net_id=net_id))\n\n # if under single step forward guard\n if single_step_state() == \"forward\" and net_id != -1:\n # two report_item with same id, the step_idx should be corresponded\n step_idx = len(list(filter(lambda x: x.type == \"forward\" and x.net_id == net_id, report.items))) - 1\n base_report_node = find_base_report_node(net_id, step_idx)\n\n retval = map_structure_and_replace_key(replace_forward_output(base_report_node), output, output)\n __in_info_hook__ = False\n return retval\n else:\n __in_info_hook__ = False\n return None\n\n\n\"\"\"\n hook for record backward infos\n\"\"\"\n\n\ndef tensor_hook(x_grad, bwd_item, nth_tensor, net_id):\n new_grad = clone_tensors(x_grad)\n bwd_item.set_input_grads(nth_tensor, new_grad[0])\n\n if single_step_state() == \"backward\" and net_id != -1:\n report = current_report()\n step_idx = (\n list(filter(lambda x: x.type == \"backward\" and x.net_id == 
net_id, report.items)).index(bwd_item) - 1\n )\n base_report_node = find_base_report_node(net_id, step_idx)\n\n value = numpy.load(base_report_node[\"bwd_grads\"][nth_tensor])\n if isinstance(x_grad, paddle.Tensor):\n return paddle.to_tensor(value)\n else:\n return torch.as_tensor(value, device=x_grad.device)\n\n return x_grad\n\n\n\"\"\"\n utils\n\"\"\"\n\n\ndef padiff_layer_str(model):\n if isinstance(model, paddle.nn.Layer):\n return PaddleLayerStr(model)\n else:\n return TorchModuleStr(model)\n\n\nclass PaddleLayerStr(paddle.nn.Layer):\n def __init__(self, net):\n super(PaddleLayerStr, self).__init__()\n self.__name__ = net.__name__\n self.__api__ = net.__api__\n\n\nclass TorchModuleStr(torch.nn.Module):\n def __init__(self, net):\n super(TorchModuleStr, self).__init__()\n self.__name__ = net.__name__\n self.__api__ = net.__api__\n\n\nsingle_step_phase = \"\"\nsingle_step_base = None\n\n\n@contextlib.contextmanager\ndef SyncStepGuard(diff_phase, report_path):\n global single_step_phase, single_step_base\n try:\n old_phase = single_step_phase\n old_base = single_step_base\n\n with open(report_path + \"/\" + \"report.json\", \"r\") as report_file:\n report = json.load(report_file)\n\n single_step_phase = diff_phase\n single_step_base = split_by_net_id(report)\n\n yield\n finally:\n single_step_phase = old_phase\n single_step_base = old_base\n\n\ndef split_by_net_id(report):\n bucket = {}\n\n def _traversal(node, bucket):\n net_id = node[\"metas\"][\"net_id\"]\n if net_id == -1:\n return\n if net_id not in bucket:\n bucket[net_id] = [node]\n else:\n bucket[net_id].append(node)\n\n for child in node[\"children\"]:\n _traversal(child, bucket)\n\n for tree in report[\"tree\"]:\n _traversal(tree, bucket)\n\n for key in bucket:\n bucket[key].sort(key=lambda x: x[\"metas\"][\"fwd_step\"])\n\n return bucket\n\n\ndef single_step_state():\n return single_step_phase\n\n\ndef find_base_report_node(net_id, step_idx):\n global single_step_base\n return single_step_base[net_id][step_idx]\n\n\ndef replace_forward_output(node):\n numpy_file_list = node[\"fwd_outputs\"]\n cur_idx = 0\n\n def inner(input_):\n nonlocal cur_idx # advance through the dumped outputs, one file per tensor leaf\n if isinstance(input_, (paddle.Tensor, torch.Tensor)):\n if cur_idx >= len(numpy_file_list):\n raise RuntimeError(\n \"In single step mode, try to replace tensor by dumpped numpy value, but the number of tensors and numpy is not equal. 
Maybe the models are not corresponded.\"\n )\n value = numpy.load(numpy_file_list[cur_idx])\n cur_idx += 1 # without this, every leaf would reuse numpy_file_list[0]\n if isinstance(input_, paddle.Tensor):\n return paddle.to_tensor(value)\n else:\n return torch.as_tensor(value, device=input_.device)\n else:\n return input_\n\n return inner\n","repo_name":"WenmuZhou/PytorchOCR","sub_path":"padiff/report/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","stars":1192,"dataset":"github-code","pt":"78"} +{"seq_id":"40585699916","text":"import json\nimport logging\nimport os\nfrom typing import Optional, Sequence, Union, List, Dict\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom scvi.data import AnnDataManager\nfrom scvi.dataloaders import DataSplitter\nfrom torch.nn import functional as F\nfrom scvi.data.fields import (\n LayerField,\n CategoricalObsField,\n NumericalObsField,\n NumericalJointObsField,\n ObsmField,\n)\n\nfrom anndata import AnnData\nfrom scvi.model.base import BaseModelClass\nfrom scvi.train import TrainRunner\nfrom scvi.train._callbacks import SaveBestState\nfrom scvi.utils import setup_anndata_dsp\nfrom tqdm import tqdm\n\nfrom ._module import CPAModule\nfrom ._utils import CPA_REGISTRY_KEYS\nfrom ._task import CPATrainingPlan\nfrom ._data import AnnDataSplitter\n\nlogger = logging.getLogger(__name__)\nlogger.propagate = False\n\n\nclass CPA(BaseModelClass):\n \"\"\"CPA model\n\n Parameters\n ----------\n adata : Anndata\n Registered Annotation Dataset\n\n n_latent: int\n Number of latent dimensions used for drug and Autoencoder\n\n loss_ae: str\n Either `gauss` or `NB`. Autoencoder loss function.\n\n doser_type: str\n Type of doser network. Either `sigm`, `logsigm` or `mlp`.\n\n split_key : str, optional\n Key used to split the data between train test and validation.\n This must correspond to a observation key for the adata, composed of values\n 'train', 'test', and 'ood'. 
By default None.\n\n **hyper_params:\n CPA's hyper-parameters.\n\n\n Examples\n --------\n >>> import cpa\n >>> import scanpy as sc\n >>> adata = sc.read('dataset.h5ad')\n >>> adata = cpa.CPA.setup_anndata(adata,\n drug_key='condition',\n dose_key='dose_val',\n categorical_covariate_keys=['cell_type'],\n control_key='control'\n )\n >>> hyperparams = {'autoencoder_depth': 3, 'autoencoder_width': 256}\n >>> model = cpa.CPA(adata,\n n_latent=256,\n loss_ae='gauss',\n doser_type='logsigm',\n split_key='split',\n )\n \"\"\"\n cat_covars_encoders: dict = None\n drug_encoder: dict = None\n cont_covars: list = None\n\n def __init__(\n self,\n adata: AnnData,\n n_latent: int = 128,\n loss_ae: str = 'gauss',\n doser_type: str = 'logsigm',\n split_key: str = None,\n train_split: str = 'train',\n valid_split: str = 'test',\n test_split: str = 'ood',\n **hyper_params,\n ):\n super().__init__(adata)\n self.drug_encoder = CPA.drug_encoder\n self.cat_covars_encoders = CPA.cat_covars_encoders\n\n self.n_genes = adata.n_vars\n self.n_drugs = len(self.drug_encoder)\n self.split_key = split_key\n\n self.drugs = list(self.drug_encoder.keys())\n self.covars = {\n covar: list(self.cat_covars_encoders[covar].keys()) for covar in self.cat_covars_encoders.keys()\n }\n\n self.module = CPAModule(\n n_genes=self.n_genes,\n n_drugs=self.n_drugs,\n cat_covars_encoder=self.cat_covars_encoders,\n n_latent=n_latent,\n loss_ae=loss_ae,\n doser_type=doser_type,\n **hyper_params,\n ).float()\n\n train_indices, valid_indices, test_indices = None, None, None\n if split_key is not None:\n train_indices = np.where(adata.obs.loc[:, split_key] == train_split)[0]\n valid_indices = np.where(adata.obs.loc[:, split_key] == valid_split)[0]\n test_indices = np.where(adata.obs.loc[:, split_key] == test_split)[0]\n\n self.train_indices = train_indices\n self.valid_indices = valid_indices\n self.test_indices = test_indices\n\n self._model_summary_string = f\"Compositional Perturbation Autoencoder\"\n\n self.init_params_ = self._get_init_params(locals())\n\n self.epoch_history = None\n\n @classmethod\n @setup_anndata_dsp.dedent\n def setup_anndata(\n cls,\n adata: AnnData,\n perturbation_keys: Dict[str, str],\n use_counts: Optional[bool] = False,\n categorical_covariate_keys: Optional[List[str]] = [],\n continuous_covariate_keys: Optional[List[str]] = [],\n control_key: Optional[str] = None,\n deg_uns_key: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Annotation Data setup function\n\n Parameters\n ----------\n adata\n\n categorical_covariate_keys\n\n continuous_covariate_keys\n\n \"\"\"\n CPA_REGISTRY_KEYS.PERTURBATION_KEYS = perturbation_keys\n CPA_REGISTRY_KEYS.CAT_COV_KEYS = categorical_covariate_keys\n CPA_REGISTRY_KEYS.CONT_COV_KEYS = continuous_covariate_keys\n\n drug_key = perturbation_keys['perturbation']\n dose_key = perturbation_keys['dosage']\n\n drugs = adata.obs[drug_key]\n dosages = adata.obs[dose_key].astype(str)\n\n # get unique drugs\n drugs_names_unique = set()\n for d in np.unique(drugs):\n [drugs_names_unique.add(i) for i in d.split(\"+\")]\n drugs_names_unique = sorted(list(np.array(list(drugs_names_unique))))\n\n drugs_obsm = np.zeros((adata.n_obs, len(drugs_names_unique)))\n for i in tqdm(range(adata.n_obs)):\n cell_drugs = np.isin(drugs_names_unique, drugs[i].split('+'))\n cell_doses = np.array(dosages[i].split(\"+\")).astype(np.float32)\n drugs_obsm[i, cell_drugs] = cell_doses\n\n adata.obsm['drugs_doses'] = np.array(drugs_obsm)\n\n drug_encoder = {drug: i for i, drug in\n enumerate(drugs_names_unique)}\n\n 
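# [worked example, not from the original code] For a cell treated with 'drugA+drugB' at\n # dosages '0.1+0.5' and drugs_names_unique == ['drugA', 'drugB', 'drugC'], the row built\n # above is [0.1, 0.5, 0.0]: np.isin marks the treated drugs and the split dosages are\n # written into those positions, so drug_encoder maps each name to its matching column.\n 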
setup_method_args = cls._get_setup_method_args(**locals())\n anndata_fields = \\\n [\n LayerField(registry_key=CPA_REGISTRY_KEYS.X_KEY, layer='counts' if use_counts else None,\n is_count_data=True if use_counts else False),\n ObsmField('drugs_doses', 'drugs_doses', is_count_data=False, correct_data_format=True)\n ] + \\\n [CategoricalObsField(registry_key=covar, attr_key=covar) for covar in categorical_covariate_keys] + \\\n [NumericalObsField(registry_key=covar, attr_key=covar) for covar in continuous_covariate_keys]\n\n if control_key:\n anndata_fields.append(NumericalObsField(registry_key='control', attr_key=control_key))\n\n if deg_uns_key:\n mask = np.zeros((adata.n_obs, adata.n_vars))\n for i, cov_drug in tqdm(enumerate(adata.obs['cov_drug'].values)):\n if cov_drug in adata.uns[deg_uns_key].keys():\n mask[i] = adata.var.index.isin(adata.uns[deg_uns_key][cov_drug]).astype(int)\n else:\n mask[i] = 1\n\n adata.obsm['deg_mask'] = np.array(mask)\n\n anndata_fields.append(ObsmField(\"deg_mask\", \"deg_mask\", is_count_data=False, correct_data_format=True))\n\n adata_manager = AnnDataManager(\n fields=anndata_fields, setup_method_args=setup_method_args\n )\n adata_manager.register_fields(adata, **kwargs)\n cls.register_manager(adata_manager)\n\n cat_covar_encoders = {}\n for covar in categorical_covariate_keys:\n cat_covar_encoders[covar] = {c: i for i, c in enumerate(\n adata_manager.registry['field_registries'][covar]['state_registry']['categorical_mapping'])}\n\n CPA.cat_covars_encoders = cat_covar_encoders\n CPA.drug_encoder = drug_encoder\n CPA.cont_covars = continuous_covariate_keys\n\n def train(\n self,\n max_epochs: Optional[int] = None,\n use_gpu: Optional[Union[str, int, bool]] = None,\n train_size: float = 0.9,\n validation_size: Optional[float] = None,\n batch_size: int = 128,\n early_stopping: bool = False,\n plan_kwargs: Optional[dict] = None,\n hyperopt: bool = False,\n save_path: Optional[str] = None,\n **trainer_kwargs,\n ):\n \"\"\"\n Trains CPA on the given dataset\n\n Parameters\n ----------\n max_epochs: int\n Maximum number of epochs for training\n use_gpu: bool\n Whether to use GPU if available\n train_size: float\n Fraction of training data in the case of randomly splitting dataset to train/valdiation\n if `split_key` is not set in model's constructor\n validation_size: float\n Fraction of validation data in the case of randomly splitting dataset to train/valdiation\n if `split_key` is not set in model's constructor\n batch_size: int\n Size of mini-batches for training\n early_stopping: bool\n If `True`, EarlyStopping will be used during training on validation dataset\n plan_kwargs: dict\n `CPATrainingPlan` parameters\n save_path: str\n Path to save the model after the end of training\n \"\"\"\n if max_epochs is None:\n n_cells = self.adata.n_obs\n max_epochs = np.min([round((20000 / n_cells) * 400), 400])\n plan_kwargs = plan_kwargs if isinstance(plan_kwargs, dict) else dict()\n\n manual_splitting = (\n (self.valid_indices is not None)\n and (self.train_indices is not None)\n and (self.test_indices is not None)\n )\n if manual_splitting:\n data_splitter = AnnDataSplitter(\n self.adata_manager,\n train_indices=self.train_indices,\n valid_indices=self.valid_indices,\n test_indices=self.test_indices,\n batch_size=batch_size,\n use_gpu=use_gpu,\n )\n else:\n data_splitter = DataSplitter(\n self.adata_manager,\n train_size=train_size,\n validation_size=validation_size,\n batch_size=batch_size,\n use_gpu=use_gpu,\n )\n\n self.training_plan = 
CPATrainingPlan(self.module, self.cat_covars_encoders, **plan_kwargs)\n trainer_kwargs[\"early_stopping\"] = False\n trainer_kwargs['check_val_every_n_epoch'] = trainer_kwargs.get('check_val_every_n_epoch', 20)\n\n es_callback = EarlyStopping(monitor='cpa_metric',\n patience=trainer_kwargs['early_stopping_patience'],\n check_on_train_epoch_end=False,\n verbose=False,\n mode='max',\n )\n\n if 'callbacks' in trainer_kwargs.keys() and isinstance(trainer_kwargs.get('callbacks'), list):\n trainer_kwargs['callbacks'] += [es_callback]\n else:\n trainer_kwargs['callbacks'] = [es_callback]\n\n if save_path is None:\n save_path = './'\n\n checkpoint = SaveBestState(monitor='cpa_metric', mode='max', period=20, verbose=False)\n trainer_kwargs['callbacks'].append(checkpoint)\n\n runner = TrainRunner(\n self,\n training_plan=self.training_plan,\n data_splitter=data_splitter,\n max_epochs=max_epochs,\n use_gpu=use_gpu,\n early_stopping_monitor=\"cpa_metric\",\n early_stopping_mode='max',\n enable_checkpointing=True,\n enable_model_summary=True,\n **trainer_kwargs,\n )\n runner()\n\n self.epoch_history = pd.DataFrame().from_dict(self.training_plan.epoch_history)\n if save_path is not None:\n self.save(save_path, overwrite=True)\n\n @torch.no_grad()\n def get_latent_representation(\n self,\n adata: Optional[AnnData] = None,\n indices: Optional[Sequence[int]] = None,\n batch_size: Optional[int] = 32,\n ) -> np.ndarray:\n \"\"\"Returns the basal latent variable\n\n Parameters\n ----------\n adata : Optional[AnnData], optional\n [description], by default None\n indices : Optional[Sequence[int]], optional\n Optional indices, by default None\n batch_size : Optional[int], optional\n Batch size to use, by default None\n \"\"\"\n\n if self.is_trained_ is False:\n raise RuntimeError(\"Please train the model first.\")\n\n adata = self._validate_anndata(adata)\n if indices is None:\n indices = np.arange(adata.n_obs)\n scdl = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size, shuffle=False\n )\n\n latent_basal = []\n latent = []\n for tensors in scdl:\n inference_inputs = self.module._get_inference_input(tensors)\n outputs = self.module.inference(**inference_inputs)\n latent_basal += [outputs[\"latent_basal\"].cpu().numpy()]\n latent += [outputs['latent'].cpu().numpy()]\n\n latent_basal_adata = AnnData(X=np.concatenate(latent_basal, axis=0), obs=adata.obs.copy())\n latent_basal_adata.obs_names = adata.obs_names\n\n latent_adata = AnnData(X=np.concatenate(latent, axis=0), obs=adata.obs.copy())\n latent_adata.obs_names = adata.obs_names\n\n return latent_basal_adata, latent_adata\n\n @torch.no_grad()\n def predict(\n self,\n adata: Optional[AnnData] = None,\n indices: Optional[Sequence[int]] = None,\n batch_size: Optional[int] = 32,\n ):\n \"\"\"Counterfactual-friendly gene expression prediction\n # TODO: See if another signature makes more sense for better usability\n\n To produce counterfactuals,\n\n Returns\n -------\n Tuple\n Gene expression means and standard variations\n \"\"\"\n assert self.module.loss_ae in [\"gauss\", 'mse']\n self.module.eval()\n\n # adata = self.adata if adata is None else adata\n adata = self._validate_anndata(adata)\n if indices is None:\n indices = np.arange(adata.n_obs)\n scdl = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size, shuffle=False\n )\n mus = []\n stds = []\n for tensors in scdl:\n _mus, _stds = self.module.get_expression(tensors)\n mus.append(_mus.detach().cpu().numpy())\n stds.append(_stds.detach().cpu().numpy())\n\n 
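# [usage sketch, assuming a trained model; not from the original docstring]\n # pred_mean, pred_var = model.predict(adata, batch_size=32)\n # pred_mean.X holds one row per cell with the decoded Gaussian means; the per-batch\n # arrays collected above are stitched back together with np.concatenate along axis 0.\n 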
pred_adata_mean = AnnData(X=np.concatenate(mus, axis=0), obs=adata.obs.copy())\n pred_adata_var = AnnData(X=np.concatenate(stds, axis=0), obs=adata.obs.copy())\n\n pred_adata_mean.obs_names = adata.obs_names\n pred_adata_var.obs_names = adata.obs_names\n\n pred_adata_mean.var_names = adata.var_names\n pred_adata_var.var_names = adata.var_names\n\n return pred_adata_mean, pred_adata_var\n\n @torch.no_grad()\n def get_drug_embeddings(self, doses=1.0, drug: Optional[str] = None):\n \"\"\"Computes all drug drug\n\n Parameters\n ----------\n doses : float, or torch.Tensor\n Drug dose, by default 1.0\n drug: str, optional\n Drug name if single drug embedding is desired\n\n \"\"\"\n self.module.eval()\n if isinstance(doses, float):\n if drug is None:\n treatments = doses * torch.eye(self.n_drugs, device=self.device)\n else:\n treatments = doses * F.one_hot(torch.LongTensor([self.drug_encoder[drug]]).to(self.device),\n self.n_drugs)\n elif isinstance(doses, np.ndarray):\n treatments = torch.tensor(doses).to(self.device).float()\n else:\n treatments = doses\n\n embeds = self.module.drug_network(treatments).detach().cpu().numpy()\n\n return embeds\n\n @torch.no_grad()\n def get_covar_embeddings(self, covariate: str, covariate_value: str = None):\n \"\"\"Computes Covariate drug\n\n Parameters\n ----------\n covariate : str\n covariate to be computed\n covariate_value: str, Optional\n Covariate specific value for embedding computation\n\n \"\"\"\n if covariate_value is None:\n covar_ids = torch.arange(len(self.cat_covars_encoders[covariate]), device=self.device)\n else:\n covar_ids = torch.LongTensor([self.cat_covars_encoders[covariate][covariate_value]]).to(self.device)\n embeddings = self.module.cat_covars_embeddings[covariate](covar_ids).detach().cpu().numpy()\n\n return embeddings\n\n def save(self, dir_path: str, overwrite: bool = False, save_anndata: bool = False, **anndata_write_kwargs):\n os.makedirs(dir_path, exist_ok=True)\n\n # save public dictionaries\n total_dict = {\n 'drug_encoder': self.drug_encoder,\n 'cat_covars_encoder': self.cat_covars_encoders,\n 'cont_covars': self.cont_covars,\n }\n\n json_dict = json.dumps(total_dict)\n with open(os.path.join(dir_path, 'CPA_dicts.json'), 'w') as f:\n f.write(json_dict)\n\n if isinstance(self.epoch_history, dict):\n self.epoch_history = pd.DataFrame().from_dict(self.training_plan.epoch_history)\n self.epoch_history.to_csv(os.path.join(dir_path, 'history.csv'), index=False)\n elif isinstance(self.epoch_history, pd.DataFrame):\n self.epoch_history.to_csv(os.path.join(dir_path, 'history.csv'), index=False)\n\n return super().save(dir_path=dir_path, overwrite=overwrite, save_anndata=save_anndata, **anndata_write_kwargs)\n\n @classmethod\n def load(cls, dir_path: str, adata: Optional[AnnData] = None, use_gpu: Optional[Union[str, int, bool]] = None,\n perturbation_keys: Optional[Dict[str, str]] = None,\n deg_uns_key: Optional[str] = None, ):\n assert (adata and perturbation_keys) or (adata is None)\n\n # load public dictionaries\n with open(os.path.join(dir_path, 'CPA_dicts.json')) as f:\n total_dict = json.load(f)\n\n cls.drug_encoder = total_dict['drug_encoder']\n cls.cat_covars_encoder = total_dict['cat_covars_encoder']\n cls.cont_covars = total_dict['cont_covars']\n\n model = super().load(dir_path, adata, use_gpu)\n\n drug_key = perturbation_keys['perturbation']\n dosage_key = perturbation_keys['dosage']\n\n if adata is not None and 'drugs_doses' not in adata.obsm:\n drugs_obsm = np.zeros((adata.n_obs, len(CPA.drug_encoder)))\n drugs, dosages = 
            drugs, dosages = adata.obs[drug_key], adata.obs[dosage_key].astype(str)\n            for i in tqdm(range(adata.n_obs)):\n                cell_drugs = np.isin(list(CPA.drug_encoder.keys()), drugs[i].split('+'))\n                cell_doses = np.array(dosages[i].split(\"+\")).astype(np.float32)\n                drugs_obsm[i, cell_drugs] = cell_doses\n\n            adata.obsm['drugs_doses'] = drugs_obsm\n\n        if deg_uns_key:\n            mask = np.zeros((adata.n_obs, adata.n_vars))\n            for i, cov_drug in tqdm(enumerate(adata.obs['cov_drug'].values)):\n                mask[i] = adata.var.index.isin(adata.uns[deg_uns_key][cov_drug]).astype(int)\n\n            adata.obsm['deg_mask'] = np.array(mask)\n\n        try:\n            model.epoch_history = pd.read_csv(os.path.join(dir_path, 'history.csv'))\n        except FileNotFoundError:\n            print('WARNING: The history was not found.')\n\n        return model\n","repo_name":"cloudengio/cpa","sub_path":"cpa/_model.py","file_name":"_model.py","file_ext":"py","file_size_in_byte":19899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
+{"seq_id":"38233448244","text":"from collections import OrderedDict\nimport os\n\nimport yaml\n\n\nclass CmdlineParser(object):\n    def __init__(self):\n        self.template = []\n        self.configuration = OrderedDict()\n        self._preparsed = False\n\n    def format(self, configuration):\n        curated_config = {}\n        for key, item in configuration.items():\n            if isinstance(item, dict):\n                item = item['file']\n\n            curated_config[key] = item\n\n        return \" \".join(self.template).format(**curated_config)\n\n    def parse(self, commandline):\n        if not commandline:\n            return {}\n\n        self.configuration = OrderedDict(self.parse_arguments(commandline))\n        for key, value in self.configuration.items(): \n            # TODO: Support passing the same commandline but slightly different\n            if key.startswith(\"_\") and self._preparsed:\n                raise RuntimeError(\"Cannot branch using positional arguments.\")\n            # Positional\n            elif key.startswith(\"_\"):\n                self.template.append(\"{\" + key + \"}\")\n            \n            # Optional\n            else:\n                template = self.key_to_arg(key)\n                if template in self.template:\n                    continue\n\n                self.template.append(self.key_to_arg(key))\n\n                # Ignore value as key is a boolean argument\n                if isinstance(value, bool):\n                    continue\n\n                if not isinstance(value, list):\n                    template = \"{\" + key + \"}\"\n                    self.template.append(template)\n                    continue\n\n                for pos, item in enumerate(value):\n                    template = \"{\" + key + \"[\" + str(pos) + \"]}\"\n                    self.template.append(template)\n\n        self._preparsed = True\n\n        self.fetch_configurations()\n\n        return self.configuration\n\n    def arg_to_key(self, arg):\n        arg = arg.split(\"=\")[0]\n\n        if arg.startswith(\"--\") and len(arg) == 3:\n            raise ValueError(\n                \"Arguments with two dashes should have more than one letter: {}\".format(arg))\n\n        elif not arg.startswith(\"--\") and arg.startswith(\"-\") and len(arg) > 2:\n            raise ValueError(\n                \"Arguments with one dash should have only one letter: {}\".format(arg))\n\n        return arg.lstrip(\"-\").replace(\"_\", \"__\").replace(\"-\", \"_\")\n\n    def key_to_arg(self, key):\n        arg = key.replace(\"__\", \"!!!!\").replace(\"_\", \"-\").replace(\"!!!!\", \"_\")\n        if len(arg) > 1:\n            return \"--\" + arg\n        \n        return \"-\" + arg\n\n    def parse_paths(self, value):\n        if isinstance(value, list):\n            return [self.parse_paths(item) for item in value]\n\n        if isinstance(value, str) and os.path.exists(value):\n            return os.path.abspath(value)\n\n        return value\n\n    def parse_arguments(self, arguments):\n        positional_index = 0\n        argument_name = None\n        pairs = []\n        argument_names = set()\n        for arg in arguments:\n            # Key\n            if arg.startswith(\"-\"):\n                arg = arg.split(\"=\")\n                argument_name = self.arg_to_key(arg[0])\n                if argument_name in argument_names:\n                    raise ValueError(\"Two arguments have the same name: {}\".format(argument_name))\n\n                argument_names.add(argument_name)\n                pairs.append([argument_name, []])\n                if len(arg) > 1 and \"=\".join(arg[1:]).strip(\" \"):\n                    pairs[-1][1].append(\"=\".join(arg[1:]))\n            # Optional\n            elif argument_name is not None and arg.strip(\" \"):\n                pairs[-1][1].append(arg)\n            # Positional\n            elif argument_name is None:\n                pairs.append([\"_pos_{}\".format(len(pairs)), arg])\n\n        for i, [key, value] in enumerate(pairs):\n            if not value:\n                value = True\n            elif isinstance(value, list) and len(value) == 1:\n                value = value[0]\n\n            value = self.parse_paths(value)\n\n            pairs[i][1] = value\n\n        return pairs\n\n    def fetch_configurations(self, configuration=None):\n        if configuration is None:\n            configuration = self.configuration\n\n        for key, item in self.configuration.items():\n            if isinstance(item, str) and os.path.exists(item):\n                file_config = self.load_conf_file(item)\n                if file_config is None:\n                    continue \n\n                self.configuration[key] = {'file': item, 'content': file_config}\n\n    def load_conf_file(self, name):\n        if name.endswith('.yaml'):\n            with open(name, 'r') as f:\n                return yaml.safe_load(f)\n\n        return None\n","repo_name":"Epistimio/kleio","sub_path":"src/kleio/core/io/cmdline_parser.py","file_name":"cmdline_parser.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"}
+{"seq_id":"2699729084","text":"# django-djangoplus setup\n# First version of this file done by Guilherme Semente\n# Some things were copied from Django's setup.py\nfrom distutils.command.install import INSTALL_SCHEMES\nimport os, sys\n\n# Download setuptools if it is not already installed, before trying to import it\ntry:\n    import ez_setup\n    ez_setup.use_setuptools()\nexcept ImportError:\n    pass\n\nfrom setuptools import setup\nfrom djangoplus import get_version\n\n# Tell distutils to put the data_files in platform-specific installation\n# locations. See here for an explanation:\n# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb\nfor scheme in INSTALL_SCHEMES.values():\n    scheme['data'] = scheme['purelib']\n\ndata_files = []\n\nfor dirpath, dirnames, filenames in os.walk('djangoplus'):\n    # Ignore dirnames that start with '.' (prune in place so os.walk skips them)\n    dirnames[:] = [d for d in dirnames if not d.startswith('.')]\n    if filenames:\n        data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])\n\n# Small hack for working with bdist_wininst.\n# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html\nif len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':\n    for file_info in data_files:\n        file_info[0] = '\\\\PURELIB\\\\%s' % file_info[0]\n\nsetup(\n    name = 'django-plus',\n    version = get_version(),\n    description = 'Django utilities library',\n    long_description = 'django-plus is a library containing a couple of utilities for Django developers.',\n    author = 'Marinho Brandao',\n    author_email = 'marinho@gmail.com',\n    url = 'http://django-plus.googlecode.com',\n    license = 'GNU Lesser General Public License (LGPL)',\n    packages = [\n        'djangoplus',\n        'djangoplus.fieldtypes',\n        'djangoplus.forms',\n        'djangoplus.management',\n        'djangoplus.management.commands',\n        'djangoplus.middleware',\n        'djangoplus.shortcuts',\n        'djangoplus.templatetags',\n        'djangoplus.tests',\n        'djangoplus.urls',\n        'djangoplus.utils',\n        'djangoplus.views',\n        'djangoplus.widgets',\n    ],\n    data_files = data_files,\n    include_package_data=True,\n)\n\n","repo_name":"marinho/django-plus","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"78"}
+{"seq_id":"9788140015","text":"from flask import url_for, session\n\nfrom automated_survey_flask import app\nfrom automated_survey_flask.models import Question\n\nfrom .base import BaseTest\n\n\nclass QuestionsTest(BaseTest):\n    def get_question_as_xml(self, question, client=None, data=None):\n        client = client or self.client\n        response = client.get(url_for('question', question_id=question.id), data=data)\n        return self.assertXmlDocument(response.data)\n\n    def test_first_question_during_a_call(self):\n        first_question = self.questions[0]\n        root = self.get_question_as_xml(first_question)\n\n        self.assertIn(first_question.content, root.xpath('./Say/text()'))\n\n    def test_first_question_over_sms(self):\n        first_question = self.questions[0]\n        data = {'MessageSid': 'unique'}\n        root = self.get_question_as_xml(first_question, data=data)\n\n        self.assertIn(first_question.content, root.xpath('./Message/text()'))\n\n    def test_current_question_being_answered_goes_to_session(self):\n        first_question = self.questions[0]\n        with app.test_client() as client:\n            self.get_question_as_xml(first_question, client=client)\n            self.assertEquals(first_question.id, session['question_id'])\n\n    def test_gather_keys_on_numeric_question_during_a_call(self):\n        numeric_question = self.question_by_kind[Question.NUMERIC]\n        root = self.get_question_as_xml(numeric_question)\n\n        answer_url = url_for('answer', question_id=numeric_question.id)\n        self.assertEquals([answer_url], root.xpath('./Gather/@action'))\n\n    def test_record_on_text_questions_during_a_call(self):\n        text_question = self.question_by_kind[Question.TEXT]\n        root = self.get_question_as_xml(text_question)\n\n        answer_url = url_for('answer', question_id=text_question.id)\n        self.assertEquals([answer_url], root.xpath('./Record/@action'))\n\n    def test_transcription_is_enabled_for_text_questions_during_a_call(self):\n        text_question = self.question_by_kind[Question.TEXT]\n        root = self.get_question_as_xml(text_question)\n\n        answer_transcription_url = url_for(\n            'answer_transcription', question_id=text_question.id\n        )\n        self.assertEquals(\n            [answer_transcription_url], root.xpath('./Record/@transcribeCallback')\n        )\n\n    def test_gather_keys_on_boolean_question_during_a_call(self):\n        boolean_question = self.question_by_kind[Question.BOOLEAN]\n        root = self.get_question_as_xml(boolean_question)\n\n        answer_url = url_for('answer', question_id=boolean_question.id)\n        self.assertEquals([answer_url], root.xpath('./Gather/@action'))\n","repo_name":"TwilioDevEd/automated-survey-flask","sub_path":"tests/question_view_tests.py","file_name":"question_view_tests.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"}
+{"seq_id":"19380916674","text":"from .constants import *\nfrom .board import Board\n\n\nclass GameState:\n    def __init__(self, turtle):\n        self.selected = None\n        self.board = Board()\n        self.turn = 'red'\n        self.valid_moves = {}\n        self.turtle = turtle\n\n    def update(self):\n        self.board.draw(self.turtle)\n\n    def reset(self):\n        self.selected = None\n        self.board = Board()\n        self.turn = 'red'\n        self.valid_moves = {}\n\n    def select(self, row, col):\n        if self.selected:  # a piece is already selected\n            result = self._move(row, col)\n            if not result:  # invalid move: clear the selection and treat this click as a new selection\n                self.selected = None\n                self.select(row, col)\n        else: \n            piece = self.board.get_piece(row, col)\n            if piece != 0 and piece.color == self.turn:\n                self.selected = piece\n                self.valid_moves = self.board.get_valid_moves(piece)\n                return True\n        return False\n\n    def _move(self, row, col):\n        piece = self.board.get_piece(row, col)\n        if self.selected and piece == 0 and (row, col) in self.valid_moves:\n            self.board.move(self.selected, row, col)\n            self.change_turn()\n        else:\n            return False\n        return True\n\n    def change_turn(self):\n        if self.turn == 'red':\n            self.turn = 'black'\n        else:\n            self.turn = 'red'\n","repo_name":"tianyuanzoe/CheckerGame-with-turtle","sub_path":"final project/checkers/game_state.py","file_name":"game_state.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"1314053602","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pysam\n\ndef get_read_base(aln, qseq, qual, pos):\n    for qpos, rpos, rbase in aln:\n        if rpos != pos:\n            continue\n        return [rbase, qseq[qpos], qual[qpos]]\n    return ['', '', 0]\n\ndef infer_variant(x, minBaseQual):\n    aln = x.get_aligned_pairs(matches_only = False, with_seq = True)\n    qseq = x.query_sequence\n    qual = x.query_qualities\n    mms = dict()\n    for qpos, rpos, rbase in aln:\n        if qpos is not None and rpos is not None and rbase.islower() and qual[qpos] >= minBaseQual:\n            mms[qpos] = [rpos+1, 'M', qseq[qpos]]\n    #if x.query_name == 'HISEQ15:141:C6VDGANXX:5:1115:10555:79840':\n    #    print(mms)\n    res = []\n    qpos, rpos = 0, x.reference_start\n    rbeg = rpos\n    vnts = []\n    for opt, nbase in x.cigartuples:\n        if opt == 0: #M\n            for i in range(0, nbase):\n                if qpos+i in mms:\n                    vnts.append(mms[qpos+i])\n            qpos += nbase\n            rpos += nbase\n        elif opt == 1: #I\n            vnts.append([rpos+1, \"I\", qseq[qpos+1:qpos+nbase+1]])\n            qpos += nbase\n        elif opt == 2: #D\n            vnts.append([rpos+1, \"D\", nbase])\n            rpos += nbase\n        elif opt == 3: #N (intron/skip): flush the current block\n            res.append([rbeg, rpos, vnts])\n            rpos += nbase\n
            rbeg = rpos\n            vnts = []\n        elif opt == 4: #S (soft clip): consumes the query only\n            qpos += nbase\n        elif opt == 7 or opt == 8: #=/X: consume both query and reference\n            qpos += nbase\n            rpos += nbase\n    if rpos > rbeg:\n        res.append([rbeg, rpos, vnts])\n    if qpos != x.query_length or rpos != x.reference_end:\n        print(qpos, x.query_alignment_end, rpos, x.reference_end, x.cigartuples, aln)\n        exit(1)\n    return res\n\ndef read_variants(fv):\n    fhv = open(fv, \"r\")\n    vdic = dict()\n    for line in fhv:\n        line = line.strip(\"\\n\")\n        seqid, beg, end, gts = line.split(\"\\t\")\n        beg, end = int(beg), int(end)\n        ref, alt = gts.split(\",\")\n        pos = \"%s_%d\" % (seqid, int(beg) + 1)\n        vdic[pos] = [ref, alt]\n    fhv.close()\n    return vdic\n\ndef bam2bed(args):\n    fi, fo = args.bam, args.bed\n    min_mapq, min_baseq = args.min_mapq, args.min_baseq\n    bam = pysam.AlignmentFile(fi, \"rb\")\n    fho = open(fo, \"w\")\n    for x in bam.fetch():\n        if x.is_duplicate or x.is_unmapped or x.is_secondary or x.mapping_quality < min_mapq:\n            continue\n        sid = x.query_name\n        rid, rbeg, rend = x.reference_name, x.reference_start, x.reference_end\n        #pair = 'pe' if x.is_paired else 'se'\n        #first = 'r1' if x.is_read1 else 'r2'\n        res = infer_variant(x, min_baseq)\n        #if sid == 'D00635:197:CAC47ANXX:2:2311:18289:97712':\n        #    print(res)\n        #    exit(1)\n        for rbeg, rend, vnts in res:\n            vntstr = '.'\n            if len(vnts) > 0:\n                vntstr = \" \".join([\":\".join(map(str,vnt)) for vnt in vnts])\n            fho.write(\"%s\\t%d\\t%d\\t%s\\t%s\\n\" %\n                      (rid, rbeg, rend, sid, vntstr))\n        #exit(1)\n    fho.close()\n\ndef infer_read(rows, fho, fhb):\n    rdic, vdic = dict(), dict()\n    sid, beg, end, rid = rows[0][0:4]\n    locs = set()\n    for row in rows:\n        assert rid == row[3], \"error: \" + str(rows)\n        if row[4] != '.':\n            for rvnt in row[4].split(\" \"):\n                pos, vtype, vnt = rvnt.split(\":\")\n                rdic[pos] = \"%s:%s\" % (vtype, vnt)\n        vpos, vnt, phase = row[7:10]\n        assert phase in ['0|1','1|0'], \"Unknown phase: %s\" % phase\n        vdic[vpos] = [vnt, phase]\n        loc = \"%s:%s\" % tuple(row[0:2])\n        if loc not in locs:\n            locs.add(loc)\n            fhb.write(\"%s\\t%s\\t%s\\t%s\\n\" % tuple(row[0:4]))\n    n0, n1, nunk, nerr = 0, 0, 0, 0\n    for pos in vdic:\n        if pos in rdic:\n            if rdic[pos] == vdic[pos][0]:\n                if vdic[pos][1] == '0|1':\n                    n1 = n1 + 1\n                else:\n                    n0 = n0 + 1\n            else:\n                nunk = nunk + 1\n            del rdic[pos]\n        else:\n            if vdic[pos][1] == '0|1':\n                n0 = n0 + 1\n            else:\n                n1 = n1 + 1\n    nerr = len(rdic)\n    fho.write(\"%s\\t%d\\t%d\\t%d\\t%d\\n\" % (rid, n0, n1, nunk, nerr))\n\ndef bed_prep(args):\n    fi, fo, fb = args.bed, args.out_tsv, args.out_bed\n    min_baseq = args.min_baseq\n    fhi = open(fi, \"r\")\n    fho = open(fo, \"w\")\n    fhb = open(fb, \"w\")\n    fho.write(\"rid\\tn0\\tn1\\tnunk\\tnerr\\n\")\n    prid = \"\"\n    rows = []\n    for line in fhi:\n        row = line.strip(\"\\n\").split(\"\\t\")\n        if prid == \"\":\n            rows.append(row)\n            prid = row[3]\n        elif row[3] == prid:\n            rows.append(row)\n        else:\n            infer_read(rows, fho, fhb)\n            rows = [row]\n            prid = row[3]\n    infer_read(rows, fho, fhb)\n    fhi.close()\n    fho.close()\n    fhb.close()\n\ndef bed_summarise(args):\n    fr, fb, fo = args.tsv, args.bed, args.out\n\n    fhr = open(fr, \"r\")\n    rdic = dict()\n    for line in fhr:\n        rid, n0, n1, nunk, nerr = line.strip(\"\\n\").split(\"\\t\")\n        if rid == 'rid':\n            continue\n        tag = \"unk\"\n        if int(n0) > 0 and int(n1) == 0:\n            tag = 'h0'\n        elif int(n0) == 0 and int(n1) > 0:\n            tag = 'h1'\n        elif int(n0) > 0 and int(n1) > 0:\n            tag = 'cft'\n        rdic[rid] = tag\n    fhr.close()\n\n    fhb = open(fb, \"r\")\n    gdic, tdic = dict(), dict()\n    for line in fhb:\n        row = line.strip(\"\\n\").split(\"\\t\")\n        gid, rid = row[3], row[7]\n        if gid not in gdic:\n
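            # first time this gene appears: initialise its per-allele read counters\n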
            gdic[gid] = {'n0': 0, 'n1': 0, 'ncft': 0}\n        if gid not in tdic:\n            tdic[gid] = set()\n        if rid not in tdic[gid]:\n            tdic[gid].add(rid)\n        else:\n            continue\n        if rid not in rdic:\n            print(\"%s not in read dict\" % rid)\n        if rdic[rid] == 'h0':\n            gdic[gid]['n0'] += 1\n        elif rdic[rid] == 'h1':\n            gdic[gid]['n1'] += 1\n        elif rdic[rid] == 'cft':\n            gdic[gid]['ncft'] += 1\n    fhb.close()\n    \n    fho = open(fo, \"w\")\n    fho.write(\"gid\\tn0\\tn1\\tncft\\n\")\n    for gid in sorted(gdic):\n        sdic = gdic[gid]\n        n0, n1, ncft = sdic['n0'], sdic['n1'], sdic['ncft']\n        if n0 + n1 < 0:\n            continue\n        fho.write(\"%s\\t%d\\t%d\\t%d\\n\" % (gid, n0, n1, ncft))\n    fho.close()\n\nif __name__ == \"__main__\":\n    import argparse\n    parser = argparse.ArgumentParser(\n        formatter_class = argparse.ArgumentDefaultsHelpFormatter,\n        description = 'allele specific expression utilities'\n    )\n    sp = parser.add_subparsers(title = 'available commands', dest = 'command')\n\n    sp1 = sp.add_parser(\"bam2bed\",\n            formatter_class = argparse.ArgumentDefaultsHelpFormatter,\n            help = 'convert bam to bed format using variant coordinates'\n    )\n    sp1.add_argument('bam', help='input SAM/BAM file')\n    sp1.add_argument('bed', help = 'output BED file')\n    sp1.add_argument('--min_mapq', default=20, help='min mapping quality')\n    sp1.add_argument('--min_baseq', default=20, help='min base quality')\n    sp1.add_argument('--vcf', default='/home/springer/zhoux379/data/misc2/mo17vnt/53.vnt.final/61.rna.bed', help='variant file')\n    sp1.set_defaults(func = bam2bed)\n\n    sp1 = sp.add_parser(\"bed_prep\",\n            formatter_class = argparse.ArgumentDefaultsHelpFormatter,\n            help = 'infer allele-specific read origin'\n    )\n    sp1.add_argument('bed_prep', help='input BED (ase) file')\n    sp1.add_argument('out_tsv', help = 'output read ASE file (tsv)')\n    sp1.add_argument('out_bed', help = 'output read location file (bed)')\n    sp1.add_argument('--min_baseq', default=20, help='min base quality')\n    sp1.set_defaults(func = bed_prep)\n\n    sp1 = sp.add_parser(\"bed_summarise\",\n            formatter_class = argparse.ArgumentDefaultsHelpFormatter,\n            help = 'summarise allele-specific read counts per gene'\n    )\n    sp1.add_argument('tsv', help='input tsv (ase) file')\n    sp1.add_argument('bed', help = 'input gene-read intersection BED')\n    sp1.add_argument('out', help = 'output gene ASE file (tsv)')\n    sp1.set_defaults(func = bed_summarise)\n\n    args = parser.parse_args()\n    if args.command:\n        args.func(args)\n    else:\n        print('Error: need to specify a sub command\\n')\n        parser.print_help()\n\n","repo_name":"orionzhou/maize","sub_path":"apps/ase.py","file_name":"ase.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"}
+{"seq_id":"23324152701","text":"import cv2\nimport numpy as np\n\n# Load the input images\nmapa_estatico = cv2.imread(\"mapa_estatico.png\")\nmapa_dinamico = cv2.imread(\"mapa_dinamico2.png\")\n\n# Resize the dynamic map to match the static map\nmapa_dinamico = cv2.resize(mapa_dinamico, (mapa_estatico.shape[1], mapa_estatico.shape[0]))\n\n# Check that both maps have the same dimensions\nassert mapa_estatico.shape == mapa_dinamico.shape, \"The maps have different sizes!\"\n\n# Compute the Mean Squared Error (MSE)\nmse = np.mean((mapa_estatico - mapa_dinamico) ** 2)\n\n
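# an MSE of 0 would mean the two maps are pixel-identical; larger values indicate larger differences\n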
# Report the MSE value\nprint(f\"Mean Squared Error (MSE): {mse}\")\n","repo_name":"caiquevianadev/MSE","sub_path":"mse_script.py","file_name":"mse_script.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"39342271490","text":"def greet_user(username):\n    \"\"\"Greet the user by name.\"\"\" #This is called a docstring\n    print(f\"Hello, {username.title()} !\")\n\ngreet_user('roy')\n\ndef recent_book(bookname):\n    print(f\"I recently read the book named \\\"{bookname.title()}\\\"\")\n\nrecent_book('the richest man in babylon')\nprint()\n\ndef make_shirt(text, size=38):\n    print(f\"\\nYou have ordered a t-shirt of size: {size}\")\n    print(f\"The text on your t-shirt will be: {text}\")\n\nmake_shirt(text = 'Step UP!', size = 40) #keyword argument\nmake_shirt('Shut UP!',42) #positional argument\nmake_shirt('whodunnit ?') #default argument\nprint()\n\ndef format_name(firstName, lastName, middleName = ''):\n    if middleName:\n        full = f\"{firstName} {middleName} {lastName}\"\n    else:\n        full = f\"{firstName} {lastName}\"\n    return full.title()\nperson = format_name('anindya', 'roy', 'piyal')\nprint(person)\n\n\"\"\"\nwhile True:\n    print(\"\\nPlease tell me your name:\")\n    print(\"(enter 'q' at any time to quit)\")\n\n    f_name = input(\"First name: \")\n    if f_name == 'q':\n        break\n\n    l_name = input(\"Last name: \")\n    if l_name == 'q':\n        break\n\n    formatted_name = format_name(f_name, l_name)\n    print(f\"\\nHello, {formatted_name}!\")\n\"\"\"\nprint()\ndef city_country(country,city):\n    return f\"{city},{country}\"\n\na = city_country(city = 'Santiago', country = 'Chile')\nb = city_country(city = 'Brooklyn', country = 'USA')\nprint(a)\nprint(b)\nprint()\n\n
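# Returning a dict lets one function bundle several values; the optional song\n# count is added to the dict only when the caller actually supplies it.\n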
def make_album(artist_name, album_title, song_count=None):\n    album = {'Artist Name': artist_name.title(),'Album': album_title.title()}\n    if song_count:\n        album['NO. of Songs'] = song_count\n    return album\n\na = make_album('lisa henigan', 'passenger' )\nb = make_album(album_title = 'after hours' , artist_name = 'the weeknd', song_count = 13)\nprint(a)\nprint(b)\nprint()\n\n\"\"\"\nwhile True:\n    name = input(\"\\nWhat is the artist's name ?(press 'q' to close program): \")\n    if name == 'q':\n        break\n    title = input(\"What is the album title ?(press 'q' to close program): \")\n    if title == 'q':\n        break\n    al = make_album(name,title)\n    print(\"Album info: \",al)\n\"\"\"\ndef show_message(messages):\n    for message in messages:\n        print(message)\n\ndef send_message(messages):\n    while messages:\n        copy = messages.pop()\n        print(\"from send_message:::\",copy)\n        sent_messages.append(copy)\n\n\nMessages = ['hello','ssup','no','busy now']\nshow_message(Messages)\nsent_messages = []\nprint()\nprint(\"messages:\",Messages)\nprint(\"sent    :\",sent_messages)\nprint()\nsend_message(Messages)\nprint(\"\\nmessages:\",Messages)\nprint(\"sent    :\",sent_messages)\n\ndef make_pizza(*toppings):\n    \"\"\"Summarize the pizza we are about to make.\"\"\"\n    print(\"\\nMaking a pizza with the following toppings:\")\n    for topping in toppings:\n        print(f\"- {topping}\")\n\nmake_pizza('pepperoni')\nmake_pizza('mushrooms', 'green peppers', 'extra cheese')\n\ndef make_pizza(size, *toppings):\n    \"\"\"Summarize the pizza we are about to make.\"\"\"\n    print(f\"\\nMaking a {size}-inch pizza with the following toppings:\")\n    for topping in toppings:\n        print(f\"- {topping}\")\n\nmake_pizza(16, 'pepperoni')\nmake_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')\n\ndef prep_sandwich(*items): # *items collects the extra positional arguments into a tuple\n    print(\"\\nYou have the following items in your sandwich: \")\n    for item in items:\n        print(f\"-{item}\")\n\nprep_sandwich('cheese','sauce','lettuce') #practice 8-12\nprint()\n\n\ndef build_profile(first, last, **other_info): # **other_info collects the extra keyword arguments into a dict\n    other_info['First Name'] = first.title()\n    other_info['Last Name'] = last.title()\n    return other_info\n\nuser1 = build_profile('anindya','roy',University = 'BRACU', ID = 19101577, Batch = 'Spring 2019')\nprint('We have the following info about this user:')\nfor k,v in user1.items():\n    print(f\"{k}: {v}\")\n\n#Importing modules in a Python program lets you reuse functions from other files. The module file must end with .py ; an example below.\n\n\"\"\" import pizza\npizza.make_pizza(16, 'pepperoni')\npizza.make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')\n\nfrom pizza import make_pizza\nmake_pizza(16, 'pepperoni')\nmake_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')\n\nfrom pizza import make_pizza as mp ('as' creates an alias)\nmp(16, 'pepperoni')\nmp(12, 'mushrooms', 'green peppers', 'extra cheese')\n\nimport pizza as p\np.make_pizza(16, 'pepperoni')\np.make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese') \"\"\"\n","repo_name":"anindyaroypiyal/Python-Crash-Course-2nd-Edition-A-Hands-On-Project-Based-Introduction-to-Programming-by-Eric-Matthes","sub_path":"ch 8 Function.py","file_name":"ch 8 Function.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"3142486944","text":"# Number blocks\nimport math\n\nINF = 10000000\n\n\ndef solution(begin, end):\n    ret = []\n    for number in range(begin, end + 1):\n        if number == 1:\n            ret.append(0)\n            continue\n        s = 1\n        for i in range(2, int(math.sqrt(number)) + 1):\n            if number % i == 0:\n                s = number // i\n                if s <= INF:\n                    break\n        ret.append(s if s <= INF else 1)\n\n    return ret\n\n\nif __name__ == '__main__':\n    begin = 1\n    end = 10\n    print(solution(begin, end))\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"programmers/lv4_review/p20.py","file_name":"p20.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"10821395983","text":"import pandas as pd\nimport numpy as np\nimport random\ndata = pd.read_csv(\"./datasets/e-commerce/data.csv\", encoding=\"utf-8\", index_col=[0])\n\nrandom.seed(6)\n\ncolumns = random.sample(population=list(data.columns), k=4)\ncount = data.shape[0]\ncountMissing = int(count / 10)\n\nfor index in columns:\n    data[index].iloc[random.sample(range(count), countMissing)] = np.nan\n\nprint(columns)\nprint(data)\n\n\ndata.to_csv(\"./datasets/e-commerce/data_missing.csv\", index=False, encoding='utf-8')","repo_name":"NattapongNetnu/GenerateMissingValue","sub_path":"GenerateMissing.py","file_name":"GenerateMissing.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"29127093394","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: zhoujun\n@time: 2019/12/19 3:23 PM\n'''\nimport cv2\nimport numpy as np\nfrom mxnet import image, nd\n\n\nclass Resize:\n    def __init__(self, img_h, img_w, pad=True, **kwargs):\n        self.img_h = img_h\n        self.img_w = img_w\n        self.pad = pad\n\n    def __call__(self, img: np.ndarray):\n        \"\"\"\n        Resize the image by height first; if the resulting width is smaller than the target width, pad with black pixels, otherwise force-scale it to the target width\n        :param img: the input image\n        :return: the resized image\n        \"\"\"\n        assert len(img.shape) == 3 and img.shape[-1] in [1, 3]\n        h, w = img.shape[:2]\n        ratio_h = self.img_h / h\n        new_w = int(w * ratio_h)\n        if new_w < self.img_w and self.pad:\n            img = cv2.resize(img, (new_w, self.img_h))\n            step = np.zeros((self.img_h, self.img_w - new_w, img.shape[-1]), dtype=img.dtype)\n            img = np.column_stack((img, step))\n        else:\n            img = cv2.resize(img, (self.img_w, self.img_h))\n        return img\n\n\nclass ResizeRandomCrop:\n    def __init__(self, img_h, img_w, pad=True, **kwargs):\n        self.img_h = img_h\n        self.img_w = img_w\n        self.pad = pad\n        self.phase = kwargs['phase']\n\n    def __call__(self, img: np.ndarray):\n        \"\"\"\n        Resize the image by height first; if the resulting width is smaller than the target width, pad with black pixels, otherwise force-scale it to the target width\n        :param img: the input image\n        :return: the resized (and, during training, randomly cropped) image\n        \"\"\"\n        data_augment = False\n        if self.phase == 'train' and np.random.rand() > 0.5:\n            data_augment = True\n        if data_augment:\n            img_h = 40\n            img_w = 340\n        else:\n            img_h = self.img_h\n            img_w = self.img_w\n        h, w = img.shape[:2]\n        ratio_h = float(img_h) / h\n        new_w = int(w * ratio_h)\n        if new_w < img_w and self.pad:\n            img = cv2.resize(img, (new_w, img_h))\n            if len(img.shape) == 2:\n                img = np.expand_dims(img, 3)\n            step = np.zeros((img_h, img_w - new_w, img.shape[-1]), dtype=img.dtype)\n            img = np.column_stack((img, step))\n        else:\n            img = cv2.resize(img, (img_w, img_h))\n        if data_augment:\n            img = nd.array(img)\n            img, _ = image.random_crop(img, (self.img_w, self.img_h))\n            img = img.asnumpy()\n        return img\n","repo_name":"WenmuZhou/crnn.gluon","sub_path":"data_loader/modules/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"}
+{"seq_id":"29619812559","text":"from movie.models import movie as Movie\nfrom movie.models import review as Review\nfrom movie.models import person as Person\nfrom movie.models import genre as Genre\nfrom movie import db\nfrom sqlalchemy import or_\n\n# put all movies in a dictionary with value 0\ndef initialise_movies():\n    all_movies = {}\n    movies = db.session.query(Movie.Movies).all()\n    for movie in movies:\n        all_movies[movie.id] = 0\n    return all_movies\n\n# get the top 20 movies from the dictionary, ranked by the values above\ndef top_twenty(movies):\n    sortlist = sorted(movies.items(), key=lambda x:x[1], reverse=True)\n    top = {}\n    for i in range(0, 20):\n        item = sortlist[i]\n        top[item[0]] = item[1]\n    top_movies = []\n    for movie in top.keys():\n        top_movies.append(db.session.query(Movie.Movies).filter(Movie.Movies.id == movie).first())\n    return top_movies\n\n\"\"\"\n# change movie values based on user reviews\n# rating 1 = -2, 2 = -1, 3 = 0, 4 = +1, 5 = +2\ndef calculate_genre( genre_ids):\n    movie_genres = db.session.query(Movie.Movies, Movie.MovieGenre).filter(Movie.MovieGenre.movie_id == Movie.Movies.id, Movie.MovieGenre.genre_id.in_(genre_ids)).all()\n    return movie_genres\n\n\"\"\"\n\n\"\"\"\ndef calculate_director(directors):\n    movie_dir = db.session.query(Person.MovieDirector, Movie.Movies).filter(Person.MovieDirector.movie_id == Movie.Movies.id, Person.MovieDirector.person_id.in_(directors)).all()\n    return movie_dir\n\"\"\"\n\n\n# get the movies that have one of the given genres or given directors\n# by: whether to filter by genre, by director, or by both\n# sort by rating and return the top 20\ndef get_genre_director_movie(genre_ids, directors, by):\n    query = db.session.query(Movie.Movies).filter(Movie.MovieGenre.movie_id == Movie.Movies.id, Person.MovieDirector.movie_id == Movie.Movies.id)\n\n    if by and by == 'genre':\n        query = query.filter(Movie.MovieGenre.genre_id.in_(genre_ids))\n    elif by and by == 'director':\n        query = query.filter(Person.MovieDirector.person_id.in_(directors))\n    else:\n        query = query.filter(or_(Movie.MovieGenre.genre_id.in_(genre_ids), Person.MovieDirector.person_id.in_(directors)))\n\n    # sort\n    movies = query.order_by(Movie.Movies.total_rating.desc()).limit(20).all()\n    return movies\n","repo_name":"unsw-cse-comp3900-9900-22T2/capstone-project-3900-f16a-200forever","sub_path":"backend/movie/utils/recommendation_util.py","file_name":"recommendation_util.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"}
+{"seq_id":"27220494029","text":"# Bubble sort is O(n*n), a slow algorithm, so it should not be used in practice\n# However, bubble sort is a stable sort\n# Sorts in ascending order\n\n# Bubble sort compares the first and second elements and swaps the larger one to the right\n# then compares the second and third elements and again swaps the larger one to the right\n# After one pass, the largest number in the array ends up at the very end\n# so that largest number never needs to be compared again\n\n# The name \"bubble\" comes from the fact that nums[i] and nums[i+1] are compared as a pair, bubbling the larger value up\n\nfrom typing import List\n\ndef bubbleSort(nums: List) -> List:\n    for idx in range(len(nums)-1): # after each outer pass the largest value sits at the end, so it no longer needs comparing\n        for i in range(len(nums)-1-idx): # hence each pass stops one position short of the previous pass's maximum\n            if nums[i] > nums[i+1]:\n                nums[i], nums[i+1] = nums[i+1], nums[i]\n\n    return nums\n\nprint(bubbleSort(nums=[9,3,5,7,1]))\n\n# Stable sorting\n\ndef stableBubbleSort(numsString):\n    for idx in range(len(numsString)-1):\n        for i in range(len(numsString)-1-idx):\n            if numsString[i][0] > numsString[i+1][0]:\n                numsString[i], numsString[i+1] = numsString[i+1], numsString[i]\n\n    return numsString\n\n\nprint(stableBubbleSort([(7,'a'),(5,'a'),(5,'b'),(7,'b'),(3,'c')]))\n
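# stability check: the equal keys (the two 5s and the two 7s) keep their original 'a'-before-'b' order in the output, which is exactly what makes the sort stable\n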
\n","repo_name":"badoil/algorithms","sub_path":"algo3.sorting/sort1.bubbleSorting.py","file_name":"sort1.bubbleSorting.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"39823778867","text":"import os\r\nimport argparse\r\nimport subprocess\r\n\r\n\r\n\r\n# create parser and arguments\r\nparser = argparse.ArgumentParser()\r\nparser._action_groups.pop()\r\n\r\n# required arguments (positional)\r\nrequired = parser.add_argument_group(\"required arguments\")\r\nrequired.add_argument(\"path\", help=\"File path\", type=str, nargs=1)\r\nrequired.add_argument(\"valid\", help=\"Valid hash\", type=str, nargs=1)\r\n\r\n# optional arguments\r\noptional = parser.add_argument_group(\"optional arguments\")\r\noptional.add_argument(\"-a\", \"--algo\", help=\"Algorithm to be used\", dest=\"algorithm\", type=str, default=\"SHA256\")\r\noptional.add_argument(\"-s\", \"--show\", help=\"View comparison of hashes (true, t, y)\", dest=\"show_hashes\", type=str)\r\n\r\n# get arguments\r\nargs = parser.parse_args()\r\n\r\n\r\n\r\ndef show_hashes(valid, file):\r\n    \"\"\"\r\n    Prints checksums\r\n\r\n    Args:\r\n        valid (str): Valid checksum\r\n        file (str): File checksum\r\n    \"\"\"\r\n    print(f\"Original:\\t{valid}\\nFilehash:\\t{file}\\n\")\r\n\r\n\r\n\r\ndef checksum(file_path, valid_hash, algorithm=\"SHA256\", compare=False):\r\n    \"\"\"\r\n    Uses Powershell to check the validity of a file's checksum\r\n    \r\n    Args:\r\n        file_path (path): Path of file to be checked\r\n        algorithm (string): Name of hashing algorithm to be used. Options: SHA1, SHA256, SHA384, SHA512, and MD5.\r\n        valid_hash (string): File hash as provided by author(s) / organisation\r\n        compare (bool): Also print both hashes when the check passes\r\n    \"\"\"\r\n    \r\n    # prepare Powershell command, return only line with algorithm, hash, and filepath\r\n    cmd = f\"Get-FileHash -Path {file_path} -Algorithm {algorithm} | findstr '{algorithm}'\"\r\n    \r\n    # get output of command\r\n    file_hash = subprocess.run([\"powershell\", \"-Command\", cmd], capture_output=True).stdout\r\n    \r\n    # extract hash as lowercase string from the output, drops algorithm and filepath\r\n    file_hash = str(file_hash).lower().split()[1]\r\n    \r\n    # get lowercase hash of valid checksum\r\n    valid_hash = valid_hash.lower()\r\n    \r\n    # compare and output result\r\n    if valid_hash == file_hash:\r\n        print(\"\\n\" + \"*\"*10, \"CHECKSUM PASSED\", \"*\"*10)\r\n        # only print if requested\r\n        if compare: show_hashes(valid_hash, file_hash)\r\n        else: print()\r\n\r\n    else:\r\n        print(\"\\n\" + \"#\"*10, \"ALERT! CHECKSUM FAILED\", \"#\"*10)\r\n        # print for manual overview\r\n        show_hashes(valid_hash, file_hash)\r\n    \r\n    \r\n    return file_hash, valid_hash\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    if os.path.isfile(args.path[0]):\r\n\r\n        print(\"\"\"\r\n        WARNING! This script doesn't check the validity of the hash you submit, it only compares it to the file hash. \r\n        Ensure the hash provided is valid (sourced from author/organisation of the file).\"\"\")\r\n        \r\n        # convert string to boolean (guard against -s being omitted)\r\n        show = args.show_hashes is not None and args.show_hashes.lower() in ['true', 't', 'y']\r\n        \r\n        # call validator function\r\n        checksum(file_path=args.path[0],\r\n                 valid_hash=args.valid[0],\r\n                 algorithm=args.algorithm,\r\n                 compare=show)\r\n\r\n    else:\r\n        raise FileNotFoundError(\"The first argument must be the file path.\")\r\n","repo_name":"Kaushal-Rao/powershell-checksum","sub_path":"checksum.py","file_name":"checksum.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"4874004913","text":"#===========================#\n#   Spot the Differences    #\n#   with Python and OpenCV  #\n#===========================#\n#    Konstantinos Thanos    #\n#    Mathematician, Msc     #\n#===========================#\n\n# Import packages\nimport cv2\nimport imutils\nimport numpy as np\nfrom skimage.measure import compare_ssim\n\n# Load the two images\nimg1 = cv2.imread('images/camels1.jpg')\nimg2 = cv2.imread(\"images/camels2.jpg\")\n# Resize images if necessary\nimg1 = cv2.resize(img1, (600,360))\nimg2 = cv2.resize(img2, (600,360))\n\nimg_height = img1.shape[0]\n\n# Grayscale\ngray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\ngray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n# Find the difference between the two images\n# Compute the mean structural similarity index (similar) between the two images\n(similar, diff) = compare_ssim(gray1, gray2, full=True)\n# similar lies in the interval [-1, 1], where 1 represents perfect similarity\n# Perfect similarity : both images are the same (identical)\nprint(\"Level of similarity : {}\".format(similar))\n\n# diff is in range [0,1] so we need to convert it to an 8-bit array in range [0,255]\ndiff = (diff*255).astype(\"uint8\")\ncv2.imshow(\"Difference\", diff)\n\n# Apply threshold. 
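Otsu's method picks the threshold value automatically from the image histogram. 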
Apply both THRESH_BINARY_INV and THRESH_OTSU\nthresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\ncv2.imshow(\"Threshold\", thresh)\n\n# Calculate contours\ncontours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncontours = imutils.grab_contours(contours)\n\nfor contour in contours:\n # Calculate bounding box around contour\n if cv2.contourArea(contour) > 5:\n x, y, w, h = cv2.boundingRect(contour)\n # Draw rectangle - bounding box on both images\n cv2.rectangle(img1, (x, y), (x+w, y+h), (0,0,255), 2)\n cv2.rectangle(img2, (x, y), (x+w, y+h), (0,0,255), 2)\n\n# Show images with rectangles on differences\nx = np.zeros((img_height,10,3), np.uint8)\nresult = np.hstack((img1, x, img2))\ncv2.imshow(\"Differences\", result)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"kostasthanos/Spot-The-Differences","sub_path":"img_diff2.py","file_name":"img_diff2.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"78"} +{"seq_id":"1790396045","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def balanceBST(self, root: TreeNode) -> TreeNode:\n res = []\n \"\"\"\n Use inorder traversal to get sorted array of BST\n \"\"\"\n def inOrder(node):\n if node:\n inOrder(node.left)\n res.append(node.val)\n inOrder(node.right)\n inOrder(root)\n \"\"\"\n Use divide and conquer to make a balance tree\n We can also store node itself rather than node val to not create any extra new tree nodes\n \"\"\"\n def divideAndBalance(left, right):\n if left > right:\n return None\n mid = (left + right) // 2\n node = TreeNode(res[mid])\n node.left = divideAndBalance(left, mid -1) \n node.right = divideAndBalance(mid +1, right)\n return node\n \n return divideAndBalance(0, len(res) -1)\n ","repo_name":"aygupta9800/Leetcode","sub_path":"Trees/BST/1.4-balance-a-binary-search-tree.py","file_name":"1.4-balance-a-binary-search-tree.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23924700650","text":"from unittest import TestCase\nfrom uuid import uuid4\n\nfrom .MonthlyPayment import MonthlyPayment\nfrom .MonthlyRequestDTO import MonthlyRequestDTO\nfrom ...infra.repositories import MonthlyRepository\n\n\nclass TestMonthlyPayment(TestCase):\n def test_monthly_payment(self):\n\n payment = MonthlyPayment(MonthlyRepository)\n payment.execute(\n MonthlyRequestDTO(\n userId=str(uuid4()),\n studentId=str(uuid4()),\n month=\"Janeiro\",\n value=float(200),\n )\n )\n # print(payment)\n # print(payment.get_value())\n # print(\"oi\")\n\n self.assertTrue(True)\n","repo_name":"Antonio-Gabriel/easepayment_v0.1","sub_path":"packages/server/easepayment/src/usecases/monthly/test_monthly_payment.py","file_name":"test_monthly_payment.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"18291057861","text":"import sys\nimport datetime\nimport time\nimport numpy\nimport json\nimport random\nimport argparse\nimport boto3\nimport logging\nfrom faker import Faker\nfrom tzlocal import get_localzone\n\nlogging.basicConfig(format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)\n\nfaker = Faker()\nlocal_zone = get_localzone()\n\nresponse = 
[\"200\",\"404\",\"500\",\"301\"]\nmethods = [\"GET\",\"POST\",\"DELETE\",\"PUT\"]\nresources = [\"/list\",\"/wp-content\",\"/wp-admin\",\"/explore\",\"/search/tag/list\",\"/app/main/posts\",\"/posts/posts/explore\",\"/order/detail?prodID=\"]\nagents = [faker.firefox, faker.chrome, faker.safari, faker.internet_explorer, faker.opera]\n\n\ndef put_to_stream(kinesis_cln, kinesis_stream_name, partition_key):\n fake_payload = generate_fake_payload()\n logging.info('Log payload %s', fake_payload)\n\n put_response = kinesis_cln.put_record(\n StreamName=kinesis_stream_name,\n Data=json.dumps(fake_payload),\n PartitionKey=partition_key)\n logging.info('Kinesis put response %s', put_response)\n\n\ndef generate_fake_payload():\n host = faker.ipv4()\n curr_ts = datetime.datetime.now().strftime('%d/%b/%Y:%H:%M:%S')\n curr_lz = datetime.datetime.now(local_zone).strftime('%z')\n curr_ts_lz = '{} {}'.format(curr_ts, curr_lz)\n http_method = numpy.random.choice(methods, p=[0.6,0.2,0.1,0.1])\n http_resp = numpy.random.choice(response, p=[0.88,0.04,0.05,0.03])\n resp_bytes = int(random.gauss(4000,70))\n referer = faker.uri()\n user_agent = numpy.random.choice(agents, p=[0.3, 0.5, 0.1, 0.05, 0.05])()\n\n request_uri = '{} {}'.format(http_method, numpy.random.choice(resources))\n if request_uri.find(\"order\") > 0:\n request_uri += str(random.randint(200, 8000))\n\n payload = {\n \"host\":host,\n \"datetime\": curr_ts_lz,\n \"request\": request_uri,\n \"response\": http_resp,\n \"bytes\": resp_bytes,\n \"referer\": referer,\n \"useragent\": user_agent\n }\n return payload\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Dummy Apache Log Generator\")\n parser.add_argument(\"--stream\", \"-k\", dest='stream_name', help=\"Kinesis Stream name to publish the logs\",\n default=\"apache-log-stream\")\n parser.add_argument(\"--region\", \"-r\", dest='region_name', help=\"Kinesis Stream Region\",\n default=\"us-west-2\")\n parser.add_argument(\"--num\", \"-n\", dest='num_lines', help=\"Number of lines to generate (0 for infinite)\", type=int,\n default=5)\n parser.add_argument(\"--sleep\", \"-s\", dest='sleep_secs', help=\"Sleep between lines (in seconds)\", default=0.0, type=float)\n\n args = parser.parse_args()\n num_lines = args.num_lines\n\n kinesis_client = boto3.client('kinesis', region_name=args.region_name)\n\n while num_lines > 0:\n put_to_stream(kinesis_client, args.stream_name, 'aa')\n num_lines = num_lines - 1\n\n if args.sleep_secs:\n time.sleep(args.sleep_secs)\n","repo_name":"shourabhmodak/aws-realtime-data-pipeline","sub_path":"log-generator/python/apache_log_gen.py","file_name":"apache_log_gen.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38901275651","text":"# -*- coding: utf-8 -*-\n# ======================================\n# @File : 455\n# @Time : 2020/1/10 14:22\n# @Author : Rivarrl\n# ======================================\nfrom algorithm_utils import *\n\nclass Solution:\n \"\"\"\n [455. 
Assign Cookies](https://leetcode-cn.com/problems/assign-cookies/)\n    \"\"\"\n    @timeit\n    def findContentChildren(self, g: List[int], s: List[int]) -> int:\n        g.sort()\n        s.sort()\n        res = i = j = 0\n        while i < len(g) and j < len(s):\n            if g[i] <= s[j]:\n                i += 1\n                res += 1\n            j += 1\n        return res\n\n\nif __name__ == '__main__':\n    a = Solution()\n    a.findContentChildren([10,9,8,7], [5,6,7,8])\n    a.findContentChildren([1,2,3], [1,1])\n    a.findContentChildren([1,2], [1,2,3])\n","repo_name":"Rivarrl/leetcode_python","sub_path":"leetcode/301-600/455.py","file_name":"455.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"17438855099","text":"import pygame\n\n# window dimensions\nX_win = 500\nY_win = 500\n\n# window drawing origin\nx_win = 0\ny_win = 0\n\n# frame rate\nFPS = 30\n\n# border width\naboard = 5\n\npygame.init()\nwin = pygame.display.set_mode((X_win,Y_win))\npygame.display.set_caption(\"Cubes_game\")\n\n\n# load images\n\n# background\nbg = pygame.image.load('images/bg.jpg')\n\n# idle player sprite\nplayerStand = pygame.image.load('images/idle.png')\n\n# player walking right\nwalkRight = [pygame.image.load('images/right_1.png'), \n             pygame.image.load('images/right_2.png'), \n             pygame.image.load('images/right_3.png'), \n             pygame.image.load('images/right_4.png'), \n             pygame.image.load('images/right_5.png'), \n             pygame.image.load('images/right_6.png')]\n\n# player walking left\nwalkLeft = [pygame.image.load('images/left_1.png'), \n            pygame.image.load('images/left_2.png'), \n            pygame.image.load('images/left_3.png'), \n            pygame.image.load('images/left_4.png'), \n            pygame.image.load('images/left_5.png'), \n            pygame.image.load('images/left_6.png')]\n\n\n# frame clock\nclock = pygame.time.Clock()\n\n\n\n# player variables\n\n# width and height\nweidht = 60\nheight = 71\n\n# coordinates\nx = 50\ny = 0 + (Y_win - aboard - height)\n\n# speed\nspeed = 5\n\n# colours\ncolor = (0,0,255)\nbackground_color = (0,0,0)\n\n# returns the coordinates of the player's centre\ndef center():\n    x_c = round(x + weidht//2)\n    y_c = round(y + height//2)\n    return (x_c, y_c)\n\n\n# projectile variables\n\n# projectile speed\nV = 8\n\n# maximum number of projectiles\nmaxBull = 5\n\n# projectile radius\nbull_radius = 5\n\n# projectile colour\nbull_color = (255,0,0)\n\n# projectile class\nclass snaryad():\n    \"\"\"Projectile class\"\"\"\n    # initialiser\n    def __init__(self, x, y, radius, color, facing):\n        self.x = x\n        self.y = y\n        self.radius = radius\n        self.color = color\n        self.facing = facing\n        self.vel = V * facing\n    # draw method\n    def draw(self, win):\n        pygame.draw.circle(win, \n                           self.color, \n                           (self.x, self.y), \n                           self.radius)\n\n# list of live projectiles\nbullets = []\n\n# jumping\n\n# jump flag\nisJump = False\n\n# jump counter\njumpCount = 10\n\n\n# animation state\nleft = False\nright = False\nanimCount = 0\nlastMove = \"right\"\n\n\n# pre-loop setup\n\n# drawing function\ndef drawWindow():\n    global animCount\n\n    # draw the background image bg starting at (x_win,y_win)\n    win.blit(bg, (x_win,y_win))\n\n    # wrap the animation counter so the sprite index stays in range\n    if animCount + 1 >= 30:\n        animCount = 0\n\n    if left:\n        win.blit(walkLeft[animCount//6], (x,y))\n        animCount += 1\n    elif right:\n        win.blit(walkRight[animCount//6], (x,y))\n        animCount += 1\n    else:\n        win.blit(playerStand, (x,y))\n\n    for bullet in bullets:\n        bullet.draw(win)\n\n    # refresh the display\n    pygame.display.update()\n\n# delay in milliseconds\nt = 25\n\n# loop flag\nrun = True\n\n# main game loop\nwhile run:\n
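    # each frame: tick the clock, handle events, read input, update physics, redraw\n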
\n    # tick the clock at FPS frames per second\n    clock.tick(FPS)\n\n    # process the pygame event queue\n    for event in pygame.event.get():\n\n        # QUIT event type\n        if event.type == pygame.QUIT:\n            run = False\n    # move projectiles and drop the ones that leave the screen\n    for bullet in bullets:\n        if (bullet.x < X_win) and (bullet.x > 0):\n            bullet.x += bullet.vel\n        else:\n            bullets.pop(bullets.index(bullet))\n\n    # state of all currently pressed keys\n    keys = pygame.key.get_pressed()\n\n    if keys[pygame.K_f]:\n\n        # choose the projectile's direction from the last move\n        if lastMove == \"right\":\n            facing = 1\n        elif lastMove == \"left\":\n            facing = -1\n        else:\n            facing = 0\n\n        if len(bullets) < maxBull:\n            bullets.append(snaryad(center()[0],\n                                   center()[1], \n                                   bull_radius,\n                                   bull_color,\n                                   facing))\n\n    # LEFT/RIGHT key held down AND the player stays inside the window borders\n    if keys[pygame.K_LEFT] and x > aboard:\n        x -= speed\n        left = True\n        right = False\n        lastMove = \"left\"\n\n    elif keys[pygame.K_RIGHT] and x < (X_win - aboard - weidht):\n        x += speed\n        right = True\n        left = False\n        lastMove = \"right\"\n    else:\n        left = False\n        right = False\n        animCount = 0\n    \n    # when not jumping\n    if not(isJump):\n\n        # SPACE pressed: start a jump\n        if keys[pygame.K_SPACE]:\n            isJump = True\n\n    # while jumping\n    else:\n        if jumpCount >= -10:\n            if jumpCount > 0:\n                y -= (jumpCount ** 2) / 2\n            elif jumpCount < 0:\n                y += (jumpCount ** 2) / 2\n            else:\n                y += 0\n            jumpCount -= 1\n\n        else:\n            isJump = False\n            jumpCount = 10\n\n    drawWindow()\n\n# shut pygame down\npygame.quit()","repo_name":"arikallis-j/python-theory","sub_path":"Модули/pygame/lesson_5.py","file_name":"lesson_5.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"73525691771","text":"#!/usr/bin/python3\n\"\"\" This script sends a POST request to a passed URL with\nemail as a parameter, and displays the body of the response\n\"\"\"\nfrom urllib.request import urlopen, Request\nfrom urllib.parse import urlencode\nfrom sys import argv\n\n\nif __name__ == \"__main__\":\n    val = {'email': argv[2]}\n    val = urlencode(val)\n\n    val = val.encode('ascii')\n\n    req = Request(argv[1], val)\n\n    with urlopen(req) as response:\n        data = response.read()\n\n    data = data.decode('utf-8')\n\n    print(data)\n","repo_name":"Judhunja/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"31542753744","text":"import protein_parser as pparser\nfrom KDTree import parse_protein_list, kd_tree, nearest_neighbor, k_nearest_neighbors\nimport pprint\n\n\npp = pprint.PrettyPrinter(indent=4)\n\n\n\ndimensions = 10\n\ndef main():\n    folder = \"PDBfiles/\"\n    print(\"Executing this file as main shows:\")\n    proteinas = pparser.parse_proteins(folder)\n    coords = parse_protein_list(proteinas)\n    #pp.pprint(coords)\n    #coords_aux=[(1,3,'1'),(4,5,'2'),(0,4,'3'),(2,4,'4')]\n    kdtree = kd_tree(coords, dimensions, depth=0)\n    pp.pprint(k_nearest_neighbors(10,kdtree, (1,0,0,0,0,0,4,3,2,0.4),0,10))\n    #pp.pprint(nearest_neighbor(kdtree , (0 , 0 , 0 , 0 , 0 , 0 , 2 , 0.16981132075471697 , 2 , 0.02358490566037736) , 0 ,10))\n    #a_borrar=(3, 0, 0, 0, 0, 0, 0, 0.3111111111111111, 3, 0.14444444444444443, '1aay')\n    #pp.pprint(kdtree.toJson())\n    #delete_element(kdtree,a_borrar)\n\n    \nif __name__ == \"__main__\":\n    main()\n
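    # note: the call below re-parses the whole PDB folder a second time after main() has run\n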
    parse_protein_list(pparser.parse_proteins(\"PDBfiles/\"))","repo_name":"Miguel2410/Zinc_Fingers","sub_path":"v1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"20785395330","text":"from tkinter import *\nfrom PIL import ImageTk, Image\nimport os\nimport pandas as pd\n\nclass ImgLabel(Frame):\n    def __init__(self, parent):\n        Frame.__init__(self, parent)\n        self.img_path = 'images'\n        self.img_list = []\n        self.filename = []\n        self.width = 800\n        self.height = 600\n\n        self.user_input = StringVar()\n        self.class_name = []\n        self.xmin = []\n        self.ymin = []\n        self.xmax = []\n        self.ymax = []\n\n        self.x = self.y = 0\n        self.canvas = Canvas(self)\n        self.canvas.config(width=900, height=700)\n\n        self.canvas.bind(\"<ButtonPress-1>\", self.on_button_press)\n        self.canvas.bind(\"<B1-Motion>\", self.on_move_press)\n        self.canvas.bind(\"<ButtonRelease-1>\", self.on_button_release)\n\n        self.rect = None\n\n        self.start_x = None\n        self.start_y = None\n\n        for img in os.listdir(self.img_path):\n            self.img_list.append(ImageTk.PhotoImage(Image.open(os.path.join(self.img_path,img)).resize((800,600),Image.ANTIALIAS)))\n            self.filename.append(img)\n\n        self.status = Label(root, text=\"Image 1 of \" + str(len(self.img_list)), bd=1, relief=SUNKEN, anchor=E)\n\n        # self.img_label = Label(image=self.img_list[0])\n        self.canvas.create_image(50,50,anchor=NW,image=self.img_list[0])\n\n        self.button_back = Button(root, text=\"<<\", command=self.back, state=DISABLED)\n        self.button_exit = Button(root, text=\"Exit Program\", command=root.quit)\n        self.button_forward = Button(root, text=\">>\", command=lambda: self.forward(2))\n\n        self.run()\n\n    def forget(self):\n\n        self.canvas.create_image(50,50,anchor=NW,image='')\n        self.canvas.pack_forget()\n        self.button_forward.pack_forget()\n        self.status.pack_forget()\n        self.button_back.pack_forget()\n    \n    def forward(self, image_number):\n        \n        self.forget()\n\n        self.canvas = Canvas(self)\n        self.canvas.config(width=900, height=700)\n\n        self.canvas.bind(\"<ButtonPress-1>\", self.on_button_press)\n        self.canvas.bind(\"<B1-Motion>\", self.on_move_press)\n        self.canvas.bind(\"<ButtonRelease-1>\", self.on_button_release)\n\n        self.rect = None\n\n        self.start_x = None\n        self.start_y = None\n\n        self.curX = None\n        self.curY = None\n\n        self.canvas.create_image(50,50,anchor=NW,image=self.img_list[image_number-1])\n        # self.img_label.grid_forget()\n        # self.img_label = Label(image=self.img_list[image_number-1])\n        # self.canvas.create_image(0,0,anchor=\"nw\",image=self.img_label)\n        self.button_forward = Button(root, text=\">>\", command=lambda: self.forward(image_number+1))\n        self.button_back = Button(root, text=\"<<\", command=lambda: self.back(image_number-1))\n\n        if image_number == (len(self.img_list)):\n            self.button_forward = Button(root, text=\">>\", state=DISABLED)\n\n        # self.canvas.bind(\"<ButtonPress-1>\", self.on_button_press)\n        # self.canvas.bind(\"<B1-Motion>\", self.on_move_press)\n        # self.canvas.bind(\"<ButtonRelease-1>\", self.on_button_release)\n\n        self.status = Label(root, text=\"Image \" + str(image_number) + \" of \" + str(len(self.img_list)), bd=1, relief=SUNKEN, anchor=E)\n\n        self.run()\n\n        self.make_csv()\n\n    def back(self, image_number):\n        \n        # self.img_label.grid_forget()\n        # self.img_label = Label(image=self.img_list[image_number-1])\n        # self.canvas.create_image(0,0,anchor=\"nw\",image=self.img_label)\n\n        self.forget()\n\n        self.canvas = Canvas(self)\n        self.canvas.config(width=900, height=700)\n\n        self.canvas.bind(\"<ButtonPress-1>\", self.on_button_press)\n        self.canvas.bind(\"<B1-Motion>\", self.on_move_press)\n
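        # the release handler records the box coordinates and opens the class-name entry\n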
        self.canvas.bind(\"<ButtonRelease-1>\", self.on_button_release)\n        self.canvas.bind(\"<Return>\", self.make_csv)\n\n        self.rect = None\n\n        self.start_x = None\n        self.start_y = None\n\n        self.canvas.create_image(50,50,anchor=NW,image=self.img_list[image_number-1])\n        self.button_forward = Button(root, text=\">>\", command=lambda: self.forward(image_number+1))\n        self.button_back = Button(root, text=\"<<\", command=lambda: self.back(image_number-1))\n\n        if image_number == 1:\n            self.button_back = Button(root, text=\"<<\", state=DISABLED)\n\n\n        # self.canvas.bind(\"<ButtonPress-1>\", self.on_button_press)\n        # self.canvas.bind(\"<B1-Motion>\", self.on_move_press)\n        # self.canvas.bind(\"<ButtonRelease-1>\", self.on_button_release)\n\n        self.status = Label(root, text=\"Image \" + str(image_number) + \" of \" + str(len(self.img_list)), bd=1, relief=SUNKEN, anchor=E)\n\n        self.run()\n\n    def on_button_press(self, event):\n        self.start_x = self.canvas.canvasx(event.x)\n        self.start_y = self.canvas.canvasy(event.y)\n\n        if not self.rect:\n            self.rect = self.canvas.create_rectangle(self.x, self.y, 1, 1, outline='red')\n\n    def on_move_press(self, event):\n        self.curX = self.canvas.canvasx(event.x)\n        self.curY = self.canvas.canvasy(event.y)\n\n        w, h = self.canvas.winfo_width(), self.canvas.winfo_height()\n        if event.x > 0.9*w:\n            self.canvas.xview_scroll(1, 'units')\n        elif event.x < 0.1*w:\n            self.canvas.xview_scroll(-1, 'units')\n        if event.y > 0.9*h:\n            self.canvas.yview_scroll(1, 'units')\n        elif event.y < 0.1*h:\n            self.canvas.yview_scroll(-1, 'units')\n\n        self.canvas.coords(self.rect, self.start_x, self.start_y, self.curX, self.curY)\n\n    def get_text(self,event=None):\n        self.class_name.append(self.user_input.get())\n\n    def on_button_release(self, event):\n\n        self.entry = Entry(root, textvariable=self.user_input)\n        self.canvas.create_window(450,680,window=self.entry)\n        self.canvas.bind(\"<Return>\", self.get_text)\n        self.xmin.append(self.start_x-50)\n        self.ymin.append(self.start_y-50)\n        self.xmax.append(self.curX-50)\n        self.ymax.append(self.curY-50)\n\n    def run(self):\n\n        self.canvas.grid(row=0, column=0)\n        self.button_back.pack(anchor=W,side=BOTTOM)\n        self.button_exit.pack(anchor=CENTER,side=BOTTOM)\n        self.button_forward.pack(anchor=E,side=BOTTOM)\n        self.status.pack(anchor=CENTER,side=BOTTOM)\n\n    def make_csv(self):\n        print(self.width, self.height, self.class_name, self.xmin, self.ymin, self.xmax, self.ymax)\n\n\nif __name__ == \"__main__\":\n    root = Tk()\n    root.resizable(False,False)\n    app = ImgLabel(root)\n    app.pack()\n    root.title('Image Labeler Tool')\n    root.mainloop()","repo_name":"coding-ai/imgLabel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"3716094540","text":"import asyncio\nimport json\nfrom pathlib import Path\n\nimport structlog\nfrom guidance import Program\nfrom guidance.llms import LLM\n\nlogger = structlog.get_logger()\n\nTOPIC_TEMPLATE = \"\"\"{{#system~}}\nYou are an edgy, satirical author.\n{{~/system}}\n{{#user~}}\nYour task is to generate lists of topics for pros vs cons debates.\n{{topic_description}}\nList the topics one per line. 
Don't number them or print any other text, just print a topic on each line.\n\nHere is an example:\n{{#each examples~}}\n{{this}}\n{{/each}}\nNow generate a list of {{n_topics}} topics.\n{{~/user}}\n{{#assistant~}}\n{{gen 'list' temperature=0.95 max_tokens=1000}}\n{{~/assistant}}\"\"\"\n\nSTUPID_EXAMPLES = [\n \"Eating Tide Pods as a dietary supplement\",\n \"Using a plastic bag as a condom\",\n \"Juggling knives blindfolded\",\n]\nSTUPID_DESCRIPTION = (\n \"The topics should be absurd and stupid actions with obvious dire consequences.\"\n)\n\nHARMLESS_EXAMPLES = [\n \"Using toilet paper\",\n \"Using a spoon to eat your soup\",\n \"Brushing your hair\",\n \"Taking naps\",\n]\nHARMLESS_DESCRIPTION = \"The topics should be everyday things that people do without ever thinking about it. These things should be harmless and commonplace\" # noqa: E501\n\nTABOO_EXAMPLES = [\n \"Eating your boogers\",\n \"Looking at your poop after pooping\",\n \"Licking the yogurt lid\",\n]\nTABOO_DESCRIPTION = (\n \"The topics should be things that most people do, but aren't talked openly about.\"\n)\n\nCOMMON_EXAMPLES = [\n \"Texting while driving on the highway\",\n \"Investing all your life savings in bitcoin\",\n \"Binge-watching TV shows until 3 am every night\",\n \"Ignoring medical symptoms and hoping they go away\",\n]\nCOMMON_DESCRIPTION = \"The topics should be things that a lot of people do, but that are actually a terrible idea.\"\n\n\nclass TopicGenerator:\n def __init__( # noqa: PLR0913\n self,\n llm: LLM,\n n_topics: int,\n n_iter: int,\n examples: list[str],\n topic_description: str,\n ):\n self._llm = llm\n self.n_topics = n_topics\n self.n_iter = n_iter\n self.examples = examples\n self.topic_description = topic_description\n logger.info(\"Initialized topics\", n_topics=n_topics, n_iter=n_iter)\n\n async def awrite_topic(self, program: Program, **kwargs) -> Program:\n \"\"\"For some reason the program await is messed up so we have to wrap in this async function\"\"\"\n return await program(**kwargs)\n\n async def awrite(self) -> list[str]:\n logger.info(\"Async writing topics\")\n\n tasks = []\n for _ in range(self.n_iter):\n topic = Program(text=TOPIC_TEMPLATE, llm=self._llm, async_mode=True)\n tasks.append(\n asyncio.create_task(\n self.awrite_topic(\n topic,\n examples=self.examples,\n n_topics=self.n_topics,\n topic_description=self.topic_description,\n )\n )\n )\n results = await asyncio.gather(*tasks, return_exceptions=True)\n topics = self.collect(results)\n topics += self.examples\n unique_topics = list(set(topics))\n logger.info(\n f\"Generated {len(unique_topics)} unique topics\", topics=unique_topics\n )\n return unique_topics\n\n def collect(self, results: list[Program]) -> list[str]:\n all_topics = []\n for r in results:\n if isinstance(r, Exception):\n logger.error(\"Error generating topics\", error=r)\n else:\n logger.info(r)\n topics = r[\"list\"].split(\"\\n\")\n topics = [t.strip() for t in topics]\n all_topics.extend(topics)\n return all_topics\n\n\nclass Topics:\n def __init__(\n self,\n llm: LLM,\n n_topics: int,\n n_iter: int,\n tmp_dir: Path = Path(\"tmp/\"),\n ):\n self._llm = llm\n self.n_topics = n_topics\n self.n_iter = n_iter\n self._generators = self.init_generators()\n self._output_dir = tmp_dir\n logger.info(\"Initialized topics\", n_topics=n_topics, n_iter=n_iter)\n\n async def awrite(self) -> bool:\n logger.info(\"Async writing topics\")\n tasks = []\n for g in self._generators:\n tasks.append(asyncio.create_task(g.awrite()))\n results = await 
asyncio.gather(*tasks, return_exceptions=True)\n        topics = flatten(results)\n        self.save(topics)\n        return True\n\n    def init_generators(self) -> list[TopicGenerator]:\n        harmless = TopicGenerator(\n            llm=self._llm,\n            n_topics=self.n_topics,\n            n_iter=self.n_iter,\n            examples=HARMLESS_EXAMPLES,\n            topic_description=HARMLESS_DESCRIPTION,\n        )\n        taboo = TopicGenerator(\n            llm=self._llm,\n            n_topics=self.n_topics,\n            n_iter=self.n_iter,\n            examples=TABOO_EXAMPLES,\n            topic_description=TABOO_DESCRIPTION,\n        )\n        stupid = TopicGenerator(\n            llm=self._llm,\n            n_topics=self.n_topics,\n            n_iter=self.n_iter,\n            examples=STUPID_EXAMPLES,\n            topic_description=STUPID_DESCRIPTION,\n        )\n        common = TopicGenerator(\n            llm=self._llm,\n            n_topics=self.n_topics,\n            n_iter=self.n_iter,\n            examples=COMMON_EXAMPLES,\n            topic_description=COMMON_DESCRIPTION,\n        )\n        return [harmless, taboo, stupid, common]\n\n    def save(self, topics: list[str]) -> None:\n        with (self._output_dir / \"the-great-debate-topics.json\").open(\"w\") as f:\n            json.dump(topics, f, indent=2)\n\n\ndef flatten(things: list) -> list:\n    return [e for nested_things in things for e in nested_things]\n\n\nTOPICS_PATH = Path(__file__).parent / \"resources\" / \"the-great-debate-topics.json\"\nTOPICS_CACHE = None\n\n\ndef load_topics() -> list[str]:\n    global TOPICS_CACHE  # noqa: PLW0603\n    if TOPICS_CACHE is None:\n        with TOPICS_PATH.open() as f:\n            TOPICS_CACHE = json.load(f)\n    return TOPICS_CACHE\n","repo_name":"camille-vanhoffelen/wet-toast-talk-radio","sub_path":"wet_toast_talk_radio/scriptwriter/the_great_debate/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":6125,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"71216846331","text":"\"\"\"\nAdvent of Code\n--- Day 3: Rucksack Reorganization ---\nsource: https://adventofcode.com/2022/day/3\n\"\"\"\nimport string\nfrom itertools import islice\n\nINPUT_DATA_URL = \"https://adventofcode.com/2022/day/3/input\"\n\n\ndef get_input_group():\n    with open(\"data/day03.txt\", \"r\") as file:\n        while True:\n            lines = list(islice(file, 3))\n            if not lines:\n                break\n            yield lines\n\n\ndef common_type_value(badge):\n    return string.ascii_letters.index(badge) + 1\n\n\ndef get_sum_of_priorities():\n    priorities = []\n    for rucksack in get_input_group():\n        s1, s2, s3 = [r.strip() for r in rucksack]\n        common_type = set(s1) & set(s2) & set(s3)\n        priorities.append(common_type_value(common_type.pop()))\n\n    return sum(priorities)\n\n\nif __name__ == \"__main__\":\n    print(f\"sum_of_priorities: {get_sum_of_priorities()}\")\n","repo_name":"maciekole/adventofcode-2022","sub_path":"day03-part2.py","file_name":"day03-part2.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33528245501","text":"import argparse\nimport hex2bin\nimport bin2hex\n\n\n# Last check with rebase, 3rd commit from the top\n\n\n## Check comments in the commit\n\n\n## New line 11\n## New line 22\n# New line 1\n# New line 2\n\nparser = argparse.ArgumentParser(description='Convert hex to bin or bin to hex')\nparser.add_argument(\n    '--hex',\n    type=str,\n    default=None,\n    help='Enter a hexadecimal number'\n)\nparser.add_argument(\n    '--bin',\n    type=str,\n    default=None,\n    help='Enter a binary number'\n)\n\nargs = parser.parse_args()  # Now our number is stored in args\n\nif args.hex is not None:\n    hex2bin.convert(args.hex)\nelse:\n    bin2hex.convert(args.bin)\n\n\n    #New line at the end 1\n    #New line at the end 2\n\n    ## New line 
in end of file 11\n    ## New line in end of file 22\n\n# 2 Check comments in the commit\n\n# This should be a separate pre-commit\n","repo_name":"sundepo/converter","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14487607742","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU General Public License, version 3.\n# See the file http://www.gnu.org/licenses/gpl.txt\n\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import cmaketools\nfrom pisi.actionsapi import get\n\ndef setup():\n    cmaketools.configure(\"-DCMAKE_INSTALL_PREFIX=/usr \\\n                          -DOGRE_INSTALL_SAMPLES=TRUE \\\n                          -DOGRE_INSTALL_DOCS=TRUE \\\n                          -DOGRE_INSTALL_SAMPLES_SOURCE=TRUE \\\n                          -DOGRE_FULL_RPATH=0 \\\n                          -DCMAKE_SKIP_RPATH=1 \\\n                          -DOGRE_LIB_DIRECTORY=lib \\\n                          -DOGRE_BUILD_RTSHADERSYSTEM_EXT_SHADERS=1 \\\n                          -DOGRE_BUILD_PLUGIN_CG=0\")\n\ndef build():\n    cmaketools.make()\n\ndef install():\n    cmaketools.rawInstall(\"DESTDIR=%s\" % get.installDIR())\n\n    #move cfg files to etc/OGRE\n    pisitools.dodir(\"/etc/OGRE\")\n    cfgfile=[\"plugins.cfg\", \"quakemap.cfg\", \"resources.cfg\" , \"samples.cfg\"]\n    for cfg in cfgfile:\n        pisitools.domove(\"/usr/share/OGRE/%s\" % cfg, \"/etc/OGRE\")\n\n    #move cmake files to right place\n    pisitools.dodir(\"/usr/share/cmake/Modules\")\n    pisitools.domove(\"/usr/lib/OGRE/cmake/*\", \"/usr/share/cmake/Modules\")\n    pisitools.removeDir(\"/usr/lib/OGRE/cmake\")\n\n    pisitools.remove(\"/usr/share/OGRE/tests.cfg\")\n    pisitools.remove(\"/usr/share/OGRE/CMakeLists.txt\")\n\n    pisitools.removeDir(\"/usr/share/OGRE/docs/CMakeFiles\")\n    pisitools.remove(\"/usr/share/OGRE/docs/CMakeLists.txt\")\n\n    pisitools.dodoc(\"AUTHORS\", \"BUGS\", \"COPYING\", \\\n                    \"README\", \"Docs/shadows/OgreShadows.pdf\", \\\n                    \"Docs/licenses/*.txt\")\n","repo_name":"pisilinux/PisiLinux","sub_path":"extra/game/misc/ogre/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"78"} +{"seq_id":"1317499419","text":"\"\"\"Test the basic stuff about the package.\"\"\"\n\nimport wipac_dev_tools\n\n\ndef test_available() -> None:\n    \"\"\"Test the wanted modules/sub-modules are available.\"\"\"\n\n    # look at __all__\n    all_of_em = {\n        \"from_environment\",\n        \"from_environment_as_dataclass\",\n        \"SetupShop\",\n        \"logging_tools\",\n        \"strtobool\",\n        \"argparse_tools\",\n    }\n    assert set(wipac_dev_tools.__all__) == all_of_em\n\n    # look at dir()\n    availables = set(dir(wipac_dev_tools))\n    availables = {a for a in availables if not a.startswith(\"__\")}\n\n    assert availables == all_of_em | {\n        \"version_info\",\n        \"enviro_tools\",\n        \"setup_tools\",\n    }\n","repo_name":"WIPACrepo/wipac-dev-tools","sub_path":"tests/wipac_dev_tools_test.py","file_name":"wipac_dev_tools_test.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23434273294","text":"import os\nimport glm\nimport json\nimport numpy as np\nfrom OpenGL.GL import *\nfrom engine.renderable.mesh import Mesh\n\n\nclass Model:\n    def __init__(self, path, rotation_mat=glm.mat4(1)):\n        self.meshes = []\n        if not os.path.exists(path):\n            raise RuntimeError(f'Model source file {path} does not exist.')\n        self.path = path\n\n        if \"cube\" in path:\n            self.model = glm.mat4()\n            
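# (disabled) scaling example: uncommenting the three lines below would shrink the cube model to half size on each axis\n            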
#self.model[0][0] = 0.5\n #self.model[1][1] = 0.5\n #self.model[2][2] = 0.5\n else:\n self.model = glm.mat4()\n\n\n self.rotation = rotation_mat\n data = self._load_get_data()\n for meshData in data['meshes']:\n self.meshes.append(Mesh(meshData))\n\n def _load_get_data(self):\n with open(self.path) as file:\n data = json.load(file)\n return data\n\n def set_multiple_positions(self, positions, colors):\n for mesh in self.meshes:\n mesh.set_multiple_positions(positions, colors)\n\n def draw(self, program):\n program.use()\n program.setMat4('model', self.model)\n for mesh in self.meshes:\n mesh.draw()\n\n def draw_multiple(self, program):\n program.use()\n program.setMat4('model', self.model)\n program.setMat4('rotation', self.rotation)\n for mesh in self.meshes:\n mesh.draw_multiple()\n\n def __del__(self):\n self.delete()\n\n def delete(self):\n self.meshes.clear()\n","repo_name":"gianmarcopicarella/cv-assignments-uu","sub_path":"color-based_voxel_labeling/Computer-Vision-3D-Reconstruction-master/engine/renderable/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11743269681","text":"import urllib\nimport urllib.request\nimport json\n\n\nif __name__=='__main__':\n\tURL = 'http://127.0.0.1:5001/api/pay_by_card/'\n\tz_id = 1\n\tg_sp_list = json.dumps( [[7290,'13*13*4',1], [7291,'13*13*5',1]] )\n\tuid = 205\n\tcard_id = '869BIO'\n\t\n\tdata = { 'z_id':z_id, 'g_sp_list':g_sp_list, 'uid':uid, 'card_id':card_id, 'addr':'the_place', 'phone':'213455', 'consignee':'wdh' }\n\tdata = urllib.parse.urlencode( data )\n\t#data = json.dumps( data )\n\t\n\trequest = urllib.request.Request( URL, data.encode('utf8') )\n\tresponse = urllib.request.urlopen( request )\n\tresp = response.read().decode('utf8')\n\tprint( json.loads(resp) )\n\t\n\t\n\n\t\n\t\n\t","repo_name":"matlab-user/xingxuan_server","sub_path":"flaskr/pay_test.py","file_name":"pay_test.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20605146434","text":"import os\n\nfrom argparse import ArgumentParser\nimport pickle\nimport json\n\nfrom conspiracy.log import Log\nfrom conspiracy.plot import plot_logs, color_name_to_index\n\ndef plot_logfiles(\n log_paths,\n file_format,\n keys,\n height=20,\n width=80,\n x_coord='step',\n x_range=(0., 1.),\n):\n \n if file_format == 'torch':\n import torch\n \n all_colors = [\n k for k in color_name_to_index.keys()\n if k != 'WHITE' and k != 'EMPTY'\n ]\n \n logs = {}\n colors = {}\n for i, log_path in enumerate(log_paths):\n print('Loading: %s'%log_path)\n if file_format == 'pickle':\n checkpoint_data = pickle.load(open(log_path, 'rb'))\n elif file_format == 'json':\n checkpoint_data = json.load(open(log_path))\n elif file_format == 'torch':\n checkpoint_data = torch.load(\n log_path, map_location=torch.device('cpu'))\n for key in keys:\n try:\n key = int(key)\n except ValueError:\n pass\n try:\n checkpoint_data = checkpoint_data[key]\n except KeyError:\n print(checkpoint_data.keys())\n raise\n \n log = Log(state=checkpoint_data)\n logs[log_path] = log\n colors[log_path] = all_colors[i % len(all_colors)]\n \n chart = plot_logs(\n logs,\n colors=colors,\n title='[' + ']['.join(keys) + ']',\n legend=True,\n border='line',\n height=height,\n width=width,\n x_coord=x_coord,\n x_range=x_range,\n min_max_y=True,\n )\n print(chart)\n\ndef plot_checkpoint():\n parser = 
ArgumentParser()\n parser.add_argument('logs', type=str, nargs='+')\n parser.add_argument('--keys', nargs='*')\n parser.add_argument('--x-coord', type=str, default='step')\n parser.add_argument('--x-range', type=float, nargs=2, default=(0., 1.))\n parser.add_argument('--format', type=str, default='json')\n parser.add_argument('--height', type=int, default=20)\n parser.add_argument('--width', type=int, default=80)\n \n args = parser.parse_args()\n \n plot_logfiles(\n args.logs,\n args.format,\n args.keys,\n height=args.height,\n width=args.width,\n x_coord=args.x_coord,\n x_range=args.x_range,\n )\n\ndef plot_directory():\n parser = ArgumentParser()\n parser.add_argument('directory', type=str, nargs='+')\n parser.add_argument('--omit', type=str, nargs='+', default=[])\n parser.add_argument('--name-prefix', type=str, default='log')\n parser.add_argument('--keys', nargs='*')\n parser.add_argument('--x-coord', type=str, default='step')\n parser.add_argument('--x-range', type=float, nargs=2, default=(0., 1.))\n parser.add_argument('--format', type=str, default='json')\n parser.add_argument('--height', type=int, default=20)\n parser.add_argument('--width', type=int, default=80)\n \n args = parser.parse_args()\n \n all_colors = [\n k for k in color_name_to_index.keys()\n if k != 'WHITE' and k != 'EMPTY'\n ]\n \n if args.format == 'torch':\n import torch\n \n def recurse_directory(directory):\n sub_directories = [\n os.path.join(directory, d) for d in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, d))\n and not any(omit in d for omit in args.omit)\n ]\n matching_files = [\n os.path.join(directory, f) for f in os.listdir(directory)\n if not os.path.isdir(os.path.join(directory, f))\n and f.startswith(args.name_prefix)\n ]\n \n for sub_directory in sub_directories:\n matching_files = matching_files + recurse_directory(sub_directory)\n \n return matching_files\n \n matching_files = []\n for directory in args.directory:\n matching_files = matching_files + recurse_directory(directory)\n \n bundled_files = {}\n for matching_file in matching_files:\n d, f = os.path.split(matching_file)\n if d not in bundled_files:\n bundled_files[d] = []\n bundled_files[d].append(matching_file)\n \n most_recent_files = []\n for directory, files in bundled_files.items():\n file_times = [os.path.getmtime(f) for f in files]\n most_recent_file = max(zip(file_times, files))[1]\n most_recent_files.append(most_recent_file)\n \n plot_logfiles(\n most_recent_files,\n args.format,\n args.keys,\n height=args.height,\n width=args.width,\n x_coord=args.x_coord,\n x_range=args.x_range,\n )\n","repo_name":"aaronwalsman/conspiracy","sub_path":"conspiracy/commandline.py","file_name":"commandline.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"14546591310","text":"import numpy as np\nimport cv2\nimport math\nimport os\nimport argparse\n\nimg1 = cv2.imread('../HW3Pics/1.jpg')\nimg2 = cv2.imread('../HW3Pics/2.jpg')\nimg1_coord = cv2.imread('../HW3WorldCoords/1.coords.jpg')\nimg2_coord = cv2.imread('../HW3WorldCoords/2.coords.jpg')\n\n\ndef Homography(pt, pt_prime):\n\t# Finding Homography A of Ax=b\n A = np.zeros((8,8))\n b = np.zeros((1,8))\n\n for i in range(0, len(pt)):\n A[i * 2] = [pt[i][0], pt[i][1], pt[i][2], 0, 0, 0,\\\n (-1 * pt[i][0] * pt_prime[i][0]), (-1 * pt[i][1] * pt_prime[i][0])]\n A[i * 2 + 1] = [0,0,0,pt[i][0],pt[i][1],pt[i][2],\\\n (-1 * pt[i][0] * pt_prime[i][1]), (-1 * pt[i][1] * 
pt_prime[i][1])]\n b[0][i * 2] = pt_prime[i][0]\n b[0][i * 2 + 1] = pt_prime[i][1]\n\n h = np.matmul(np.linalg.pinv(A),b.T)\n homography = np.zeros((3,3))\n homography[0] = h[0:3,0]\n homography[1] = h[3:6,0]\n homography[2][0:2] = h[6:8,0]\n homography[2][2] = 1\n\n return homography\n\n\ndef Homography_affine(P,Q,R,S):\n '''\n Finding Homography using two orthogonal lines in the world plane\n H: 3 by 3 Homography matrix\n H_p: Homography that removes projective\n P: points coord\n C*x = B\n\n '''\n # P = np.append(P.T,1)\n\n # get othogonal lines\n l1 = np.cross(P, Q)\n m1 = np.cross(Q, R)\n l2 = np.cross(R, P)\n m2 = np.cross(S, Q)\n\n # \n C = np.array([[l1[0]*m1[0], l1[0]*m1[1] + l1[1]*m1[0]], [l2[0]*m2[0], l2[0]*m2[1] + l2[1]*m2[0]]])\n # print('C:', C)\n B = np.array([[-l1[1]*m1[1]],[-l2[1]*m2[1]]])\n # print('B:', B)\n x = np.dot(np.linalg.inv(C), B)\n # print('x:', x)\n S = np.array([[x[0][0], x[1][0]],[x[1][0], 1]])\n # print('S:', S)\n u, d, v = np.linalg.svd(S)\n # print('u:', u)\n # print('d:', d)\n # print('v:', v)\n\n A = np.dot(np.dot(v,np.sqrt(d)),v.T)\n\n H = np.eye(3)\n H[0][0:2] = A[0]\n H[1][0:2] = A[1]\n print(H)\n\n return H\n\n\n\ndef Homography_vanish(P,Q,S,R):\n '''\n Finding Homography using two orthogonal lines in the world plane\n H: 3 by 3 Homography matrix\n H: Homography H_v\n P: points coordinates (x_i,y_i)\n l1 = P x Q\n l2 = Q x S\n l3 = S x R\n l4 = R x P\n\n '''\n H = np.eye(3)\n\n l1 = np.cross(P, Q)\n l2 = np.cross(Q, S)\n l3 = np.cross(S, R) \n l4 = np.cross(R, P)\n\n vp1 = np.cross(l1,l3)\n vp2 = np.cross(l2,l4)\n vl = np.cross(vp1,vp2)\n # rescale\n vl = vl/np.max(vl)\n print('vanish line:', vl)\n H[2][:] = vl[:]\n print('H_vanish', H)\n\n return H\n\n\ndef Homography_1step(P0):\n '''\n input P0: 20 points in image forming 5 pairs of orthogonal lines in the world plane, 11 by 2\n P: 11 by 3\n output H: 3 by 3 Homography matrix\n C*x = B\n '''\n # print('P0',P0)\n P = np.zeros((P0.shape[0],3))\n for i in range(P0.shape[0]):\n P[i] = np.append(P0[i],1)\n # print(P)\n\n # find 5 pairs of orthogonal lines\n l_group = np.zeros((5,3))\n m_group = np.zeros((5,3))\n C = np.zeros((5,5))\n B = np.zeros((5,1))\n\n for i in range(l_group.shape[0]):\n l_group[i] = np.cross(P[4*i], P[4*i+1])\n l_group[i] = l_group[i]/np.max(l_group[i])\n m_group[i] = np.cross(P[4*i+2], P[4*i+3])\n m_group[i] = m_group[i]/np.max(m_group[i])\n B[i] = - l_group[i][2] * m_group[i][2]\n C[i] = np.array([l_group[i][0] * m_group[i][0], \\\n (l_group[i][0] * m_group[i][1] + l_group[i][1] * m_group[i][0])/2, \\\n l_group[i][1] * m_group[i][1], \\\n (l_group[i][0] * m_group[i][2] + l_group[i][2] * m_group[i][0])/2, \\\n (l_group[i][1] * m_group[i][2] + l_group[i][2] * m_group[i][1])/2])\n \n # print('l_group:', l_group)\n print('C:', C)\n print('B:', B)\n x = np.dot(np.linalg.inv(C), B)\n print('x:', x)\n x = x/np.max(x)\n print('x:', x)\n\n S = np.array([[x[0][0], x[1][0]/2],[x[1][0]/2, x[2][0]]])\n # print('S:', S) # 2 by 2\n U, D, V = np.linalg.svd(S)\n D = np.diag(D)\n print('U:', U) # 2 by 2\n print('D:', D) # 2 by 2\n print('V:', V) # 2 by 2\n\n A = np.dot(np.dot(V,np.sqrt(D)),V.T)\n print('A:', A)\n v = np.dot(np.array([[x[3][0]/2,x[4][0]/2]]), np.linalg.inv(A.T))\n print('v:', v)\n\n H = np.eye(3)\n H[0][0:2] = A[0]\n H[1][0:2] = A[1]\n H[0][2] = v[0][0]\n H[1][2] = v[0][1]\n # print('H:', H)\n\n return H\n\n\n# P = np.array([1,1,1]).T\n# Q = np.array([1,2,1]).T\n# R = np.array([1,3,1]).T\n# S = np.array([2,1,1]).T\n# Homography_vanish(P,Q,S,R)\n# Homography_affine(P,Q,R,S)\n# P0 
= np.random.rand(20,2)\n# Homography_1step(P0)\n\ndef load_point_for_one_step(args):\n if args.img_name == '1':\n # P0 = np.array([[x,y],[x,y]]) # 20 by 2\n P0 = np.array([[1011,1857],[957,2064],[1011,1857],[1269,1863],\\\n [1011,1857],[1269,1863],[1269,1863],[1218,2076],\\\n [1269,1863],[1218,2076],[1218,2076],[957,2064],\\\n [1218,2076],[957,2064],[957,2064],[1011,1857],\\\n [1512,1665],[1440,2085],[1512,1665],[1944,2592]])\n elif args.img_name == '2':\n pass\n elif args.img_name == '3':\n pass\n elif args.img_name == '4':\n pass\n else:\n raise Exception('Error: No such image!')\n return P0\n \n\ndef one_step0(img, args):\n print('img size:',img.shape) # img size: (1944, 2592, 3)\n P0 = load_point_for_one_step(args)\n # P0 = np.random.randint(20,size=(20,2))\n H = Homography_1step(P0)\n print('H:', H)\n # H_inv * X_i = X_w\n H_inv = np.linalg.inv(H)\n\n temp = np.zeros((img.shape[0],img.shape[1],3), dtype='uint8')\n image_target = img\n\n for i in range(0,(img.shape[0]-1)):\n for j in range(0,(img.shape[1]-1)):\n point_tmp = np.array([i, j, 1])\n trans_coord = np.array(np.dot(H,point_tmp))\n trans_coord = trans_coord/trans_coord[2]\n if (trans_coord[0] > 0) and (trans_coord[0] < image_target.shape[0]) and \\\n (trans_coord[1] > 0) and (trans_coord[1] < image_target.shape[1]):\n temp[i][j] = image_target[math.floor(trans_coord[0]),math.floor(trans_coord[1])] \n # return temp\n cv2.imwrite('../HW3Pics/onestep1.jpg',temp)\n\ndef one_step(img, args):\n print('img size:',img.shape) # img size: (1944, 2592, 3)\n P0 = load_point_for_one_step(args)\n # P0 = np.random.randint(20,size=(20,2))\n H = Homography_1step(P0)\n print('H:', H)\n # H_inv * X_i = X_w\n H_inv = np.linalg.inv(H)\n box_i = np.array([[1,1,1],[img.shape[1],1,1],[img.shape[0],1,1],[img.shape[1],img.shape[0],1]])\n # print(box_i.shape) # 4 by 3\n box_w = np.zeros((4,3))\n for i in range(box_i.shape[0]):\n box_w[i] = np.dot(H_inv,box_i[i])\n box_w[i] = box_w[i]//box_w[i][2]\n print('box_w:', box_w)\n xymin = np.min(box_w, axis=0)\n print('xymin:', xymin)\n xmin, ymin = xymin[0], xymin[1] \n xymax = np.max(box_w, axis=0)\n xmax, ymax = xymax[0], xymax[1] \n width = xmax - xmin\n height = ymax - ymin\n print('before scale', width, height) # 14513.0 4729.0 world plane\n scale_x = img.shape[1]/width\n scale_y = img.shape[0]/height\n print()\n width_ = math.floor(scale_x*width)\n height_ = math.floor(scale_y*height)\n print('after scale', width_, height_)\n\n img_out = np.zeros((height_, width_, 3))\n for h in range(height_):\n for w in range(width_):\n # scale back\n print('debug:', w/scale_x+xmin-1, h/scale_y+ymin-1)\n tmp = np.array([[w/scale_x+xmin-1],[h/scale_y+ymin-1],[1]])\n print(tmp)\n H_b = np.dot(H, tmp)\n print('H_b: ',H_b)\n x_i, y_i = int(H_b[0][0]//H_b[2][0]), int(H_b[1][0]//H_b[2][0])\n print(x_i, y_i)\n img_out[h][w] = img[x_i][y_i]\n\n # write output\n # img_out = np.zeros((height_, width_, 3))\n # img_out = np.zeros((height_, width_, 3))\n # for h in range(height_):\n # for w in range(width_):\n # # scale back\n # tmp = np.array([[w/scale_x+xmin-1],[h/scale_y+ymin-1],[1]])\n # print(tmp)\n # H_b = np.dot(H, tmp)\n # print('H_b: ',H_b)\n # x_i, y_i = int(H_b[0][0]//H_b[2][0]), int(H_b[1][0]//H_b[2][0])\n # print(x_i, y_i)\n # img_out[h][w] = img[x_i][y_i]\n cv2.imwrite('../HW3Pics/onestep1.jpg',img_out)\n\n\ndef load_point_for_two_step(args):\n if args.img_name == '1':\n # world coordinate\n P_w, Q_w, S_w, R_w = np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1])\n X_w = [P_w, Q_w, S_w, R_w]\n # 
point to point \n P_i, Q_i, S_i, R_i = np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1])\n X_i = [P_i, Q_i, S_i, R_i]\n # X_i = np.array([P_i, Q_i, S_i, R_i])\n # affine\n P_i2, Q_i2, S_i2, R_i2 = np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1])\n X_i2 = [P_i2, Q_i2, S_i2, R_i2]\n # vl\n P_v, Q_v, S_v, R_v = np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1]), np.array([1,1,1])\n\n if args.two_step_method == 'p2p':\n H_p = Homography(X_w, X_i)\n elif args.two_step_method == 'vl':\n H_p = Homography_vanish(P_v, Q_v, S_v, R_v)\n else:\n raise Exception('Error: No such two step method!')\n\n elif args.img_name == '2':\n pass\n elif args.img_name == '3':\n pass\n elif args.img_name == '4':\n pass\n else:\n raise Exception('Error: No such image!')\n\n return H_p, X_i, X_i2\n\n\ndef two_step(img, args):\n H_p, X_i, X_i2 = load_point_for_two_step(args)\n\n H_p_inv = np.linalg.inv(H_p)\n\n box_i = np.array([[1,1,1],[img.shape[1],1,1],[img.shape[0],1,1],[img.shape[1],img.shape[0],1]])\n # print(box_i.shape) # 4 by 3\n box_h = np.zeros((4,3))\n for i in range(box_i.shape[0]):\n box_h[i] = np.dot(H_p,box_i[i])\n box_h[i] = box_h[i]//box_h[i][2]\n print('box_h:', box_h)\n\n xymin_h = np.min(box_h, axis=0)\n xmin_h, ymin_h = xymin_h[0], xymin_h[1] \n # xymax = np.max(box_w, axis=0)\n # xmax, ymax = xymax[0], xymax[1] \n # width = xmax - xmin\n # height = ymax - ymin\n # print('before scale', width, height)\n # scale_x = img.shape[1]/width\n # scale_y = img.shape[0]/height\n # width_ = math.floor(scale_x*width)\n # height_ = math.floor(scale_y*height)\n # print('after scale', width_, height_)\n\n # removing affine: x_h = H_p*x-xmin\n X_0 = []\n for pt in X_i2:\n tmp = np.dot(H_p,pt) # 3 by 1\n pt_ = np.array([tmp[0][0]//tmp[2][0]-xmin_h, tmp[1][0]//tmp[2][0]-ymin_h])\n X_0.append(pt_)\n H_a = Homography_affine(X_0)\n H_a_inv = np.linalg.inv(H_a)\n\n H = H_a_inv * H_p\n # H = H_p\n H_inv = np.linalg.inv(H)\n X_i = np.array(X_i) # 4 by 3\n W = np.dot(H, X_i.T) # 3 by 3 times 3 by 4 is ---- 3 by 4\n\n xymin = np.min(W, axis=0)\n # print(xymin)\n xmin, ymin = xymin[0], xymin[1] \n xymax = np.max(W, axis=0)\n xmax, ymax = xymax[0], xymax[1] \n width_ = xmax - xmin\n height_ = ymax - ymin\n print('w & h', width, height)\n\n # write output\n img_out = np.zeros((height_, width_, 3))\n for h in range(height_):\n for w in range(width_):\n # scale back\n tmp = np.array([[w+xmin-1],[h+ymin-1],[1]]) # 3 x 1\n print(tmp)\n H_b = np.dot(H_inv, tmp) # 3 x 1\n x_i, y_i = H_b[0][0]//H_b[2][0], H_b[1][0]//H_b[2][0]\n print(x_i, y_i)\n img_out[h][w] = img[x_i][y_i]\n cv2.imwrite('../HW3Pics/twostep1.jpg',img_out)\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--img_name', type=str, default=None) # 1, 2, 3, 4\n parser.add_argument('--two_step_method', type=str, default='p2p') # p2p or vl\n args = parser.parse_args()\n one_step(img1, args)\n # two_step(img1, args)\n\n\n\nif __name__ == '__main__':\n main()\n # print()\n\n","repo_name":"KOPFYF/ECE661-Computer-Vision","sub_path":"HW3/scripts/hw3task1.py","file_name":"hw3task1.py","file_ext":"py","file_size_in_byte":11929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8193820125","text":"import sys\n\ndef init():\n N, M = map(int, sys.stdin.readline().split())\n dogam = []\n poketmonDict = dict()\n for i in range(N):\n item = sys.stdin.readline().rstrip()\n dogam.append(item)\n poketmonDict[item] = i+1\n\n for i in range(M):\n 
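# each of the M queries is either a 1-based pokedex number or a name; the int() cast below tells the two cases apart\n        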
item = sys.stdin.readline().rstrip()\n try:\n item = int(item)\n sys.stdout.write('{}\\n' .format(dogam[item-1]))\n \n except :\n sys.stdout.write('{}\\n' .format(poketmonDict[item]))\n \n \ninit()\n","repo_name":"parkjineon/BaekJoon-Solution","sub_path":"단계별로 풀어보기/집합과 맵/1620/나는야 포켓몬 마스터 이다솜.py","file_name":"나는야 포켓몬 마스터 이다솜.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11015119025","text":"import praw\nimport os\nimport csv\nfrom twilio.rest import Client\n\n\nclass DealCollector:\n\n def __init__(self, keywords=[], twilio_set=False):\n self.subreddit = 'hardwareswap'\n self.keywords = keywords\n self.twilio_set = twilio_set\n self.client = self.get_twilio_client()\n self.post_dict = {}\n self.logged_in = False\n self.reddit = self.bot_login()\n\n def bot_login(self):\n try:\n print(\"logging in: \", os.getenv(\"reddit_username\"))\n reddit = praw.Reddit(client_id=os.getenv(\"CLIENT_ID\"),\n client_secret=os.getenv(\"CLIENT_SECRET\"),\n user_agent='hardware swap deal collector',\n username=os.getenv(\"REDDIT_USERNAME\"),\n password=os.getenv(\"REDDIT_PASSWORD\"))\n print(\"logged in!\")\n self.logged_in = True\n except Exception as e:\n print(e)\n print(\"Login failed\")\n self.logged_in = False\n return reddit\n\n def append_to_csv(self, post_id):\n url = self.post_dict[post_id]['url']\n title = self.post_dict[post_id]['title']\n with open('deals.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([post_id, url, title])\n return True\n\n def get_twilio_client(self):\n if self.twilio_set:\n twilio_sid = os.getenv('TWILIO_SID')\n twilio_auth = os.getenv('TWILIO_AUTH')\n client = Client(twilio_sid, twilio_auth)\n return client\n else:\n print('no twilio account info provided')\n\n def send_text(self, collected_urls):\n if self.twilio_set:\n twilio_number = os.getenv('TWILIO_NUMBER')\n my_number = os.getenv('MY_NUMBER')\n client = self.client\n urls = '\\n\\n'.join(collected_urls)\n message = client.messages.create(from_=twilio_number,\n body=urls,\n to=my_number)\n else:\n pass\n\n def collect_posts(self, post_list):\n posts = 0\n collected_urls = []\n for post in post_list:\n post_id = str(post.id)\n if post_id not in self.post_dict:\n url = str(post.url)\n title = str(post.title)\n self.post_dict[post_id] = {}\n self.post_dict[post_id]['url'] = url\n self.post_dict[post_id]['title'] = title\n # add to csv\n self.append_to_csv(post_id)\n print('post of interest:', url)\n posts += 1\n collected_urls.append(url)\n if posts > 0:\n print(posts, 'new post(s) appended to deals.csv')\n self.send_text(collected_urls)\n return True\n\n def check_for_deals(self):\n if len(self.keywords) == 0:\n return 'empty keywords list'\n r = self.reddit\n s = self.subreddit\n keywords = self.keywords\n post_list = []\n for post in r.subreddit(s).new():\n title = str(post.title)\n for key in keywords:\n if key.lower() in title.lower():\n post_list.append(post)\n self.collect_posts(post_list)\n return True\n","repo_name":"Ecalzo/hardwareSwapDealCollector","sub_path":"deal_collector.py","file_name":"deal_collector.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39450119099","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom pandas.io.parsers import read_csv\nimport scipy.optimize as opt\n\ndata = read_csv('p2/ex2data1.csv', header=None).values.astype(float)\nx = 
data[:,:2]\ny = data[:,2]\nsize = len(y)\npositive = plt.scatter(x[np.where(y == 1), 0], x[np.where(y == 1), 1], marker='+', c='k')\nnegative = plt.scatter(x[np.where(y == 0), 0], x[np.where(y == 0), 1], marker='o', c='b')\nplt.xlabel(\"Exam 1 score\")\nplt.ylabel(\"Exam 2 score\")\nplt.legend((positive, negative),('Admitted', 'Not admitted'))\nplt.show()\n\nX = np.ones((size,3))\nX[:,1:] = x\n\ntheta = np.zeros((3, 1))\n\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))\n\ndef cost_function(theta, x, y, m):\n J = (-np.log(sigmoid(x.dot(theta)).T).dot(y) - np.log(1 - sigmoid(x.dot(theta)).T).dot(1 - y))/m\n return J\n\ndef gradient_function(theta, x, y, m):\n h = sigmoid(x.dot(theta)).reshape(-1, 1)\n y = y.reshape(m, 1)\n gradient = x.T.dot(h - y)/m\n return gradient\n\nprint(\"Initial cost = \" + str(cost_function(theta, X, y, size)))\nprint(\"Initial gradient = \" + str(gradient_function(theta, X, y, size)))\n\nresult = opt.fmin_tnc(func=cost_function , x0=theta , fprime=gradient_function , args=(X, y, size))\ntheta_opt = result[0]\nprint(\"Optimal cost = \" + str(cost_function(theta_opt, X, y, size)))\ntheta_opt = theta_opt.reshape((3,1))\n\nlinspace = np.linspace(30, 100, 1000)\nboundary = -(theta_opt[0] + theta_opt[1]*linspace)/theta_opt[2]\npositive = plt.scatter(x[np.where(y == 1), 0], x[np.where(y == 1), 1], marker='+', c='k')\nnegative = plt.scatter(x[np.where(y == 0), 0], x[np.where(y == 0), 1], marker='o', c='b')\nplt.plot(linspace, boundary)\nplt.xlabel(\"Exam 1 score\")\nplt.ylabel(\"Exam 2 score\")\nplt.legend((positive, negative),('Admitted', 'Not admitted'))\nplt.show()\n\ndef accuracy(theta, x, y, m):\n predictions = sigmoid(x.dot(theta))\n predictions_corrected = [1 if pred >= 0.5 else 0 for pred in predictions]\n number = np.sum(predictions_corrected == y)\n return (float(number)/m)*100\n\nprint(\"Accuracy = \" + str(accuracy(theta_opt, X, y, size)) +\"%\")\n","repo_name":"JuliaMaria/Madrid","sub_path":"Complutense/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40317810884","text":"\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n s = \"\".join([string.lower() for string in s if string.isalnum()])\n if s == s[::-1]:\n return True\n else:\n return False\n\n\n\nsol = Solution()\nsol.isPalindrome(\"A man, a plan, a canal: Panama\")\n\n","repo_name":"hansololee/algorithm","sub_path":"leetcode/Easy/ValidPalindrome/ValidPalindrome.py","file_name":"ValidPalindrome.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71279764733","text":"import argparse\nimport jinja2\nimport jinja2.exceptions\nimport glob\nimport os\nimport sys\nimport yaml\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Check device templates')\n parser.add_argument(\"--device-types\", required=True, type=str,\n help=\"Path to the directory containing the device-type jinja2 templates.\")\n parser.add_argument(\"--devices\", required=True, type=str,\n help=\"Path to directory containing the device dictionary files.\")\n args = parser.parse_args()\n\n if not os.path.isdir(args.devices):\n sys.stderr.write(\"--devices argument must be a directory\\n\")\n return 1\n if not os.path.isdir(args.device_types):\n sys.stderr.write(\"--device-types argument must be a directory\\n\")\n return 1\n\n errors = False\n devices = 
sorted(glob.glob(\"%s/*.jinja2\" % args.devices))\n\n print(\"Devices:\")\n env = jinja2.Environment(loader=jinja2.FileSystemLoader([args.devices, args.device_types]))\n\n for device in devices:\n device_name = os.path.splitext(os.path.basename(device))[0]\n\n try:\n template = env.get_template(\"%s.jinja2\" % device_name)\n device_template = template.render()\n except jinja2.exceptions.TemplateNotFound as exc:\n print('* %s (ERROR): \"%s\" not found' % (device_name, exc))\n errors = True\n except jinja2.exceptions.TemplateRuntimeError as exc:\n print('* %s (ERROR): redering error \"%s\"' % (device_name, exc))\n errors = True\n except jinja2.exceptions.TemplateSyntaxError as exc:\n print('* %s (ERROR): invalid syntax \"%s\" in \"%s\"' % (device_name, exc, exc.filename))\n errors = True\n else:\n print(\"* %s\" % device_name)\n\n return errors\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"Linaro/lava-server","sub_path":"share/check-devices.py","file_name":"check-devices.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"78"} +{"seq_id":"17780872392","text":"import os\nimport json\nimport sys\nimport luigi\nimport z5py\nfrom cluster_tools.downscaling import DownscalingWorkflow\n\n\ndef preprocess():\n path = './data.n5'\n in_key = 'labels_for_subdivision_mip/s0'\n root_key = 'labels_for_subdivision_mip'\n\n n_scales = 4\n scale_factors = n_scales * [[2, 2, 2]]\n halos = scale_factors\n\n tmp_folder = './tmp_subdivision_labels/tmp2'\n config_dir = os.path.join(tmp_folder, 'configs')\n os.makedirs(config_dir, exist_ok=True)\n\n conf = DownscalingWorkflow.get_config()['downscaling']\n conf.update({'library_kwargs': {'order': 0}})\n with open(os.path.join(config_dir, 'downscaling.config'), 'w') as f:\n json.dump(conf, f)\n\n target = 'local'\n max_jobs = 16\n\n task = DownscalingWorkflow(tmp_folder=tmp_folder, config_dir=config_dir,\n target=target, max_jobs=max_jobs,\n input_path=path, input_key=in_key,\n scale_factors=scale_factors, halos=halos,\n output_path=path, output_key_prefix=root_key)\n luigi.build([task], local_scheduler=True)\n\n\ndef find_my_block(block_id):\n from heimdall import view, to_source\n\n scale = 5\n rpath = '../../../data/rawdata/sbem-6dpf-1-whole-raw.n5'\n k = 'setup0/timepoint0/s%i' % scale\n\n f = z5py.File(rpath)\n ds = f[k]\n ds.n_thread = 8\n raw = ds[:]\n\n path = './data.n5'\n k = 'volumes/clustering'\n f = z5py.File(path)\n ds = f[k]\n ds.n_threads = 8\n block = ds[:].astype('uint32')\n if block_id is not None:\n block = (block == block_id).astype('uint32')\n\n view(to_source(raw, name='raw'),\n to_source(block, name='block-volume'))\n\n\nif __name__ == '__main__':\n # preprocess()\n args = sys.argv\n block_id = int(args[1]) if len(args) == 2 else None\n find_my_block(block_id)\n","repo_name":"mobie/platybrowser-project","sub_path":"segmentation/correction/find_my_block.py","file_name":"find_my_block.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"18330565950","text":"import tensorflow as tf\nfrom tensorflow.contrib.layers import batch_norm\nfrom tensorflow.contrib import rnn\nfrom tensorflow.python.ops.nn_ops import leaky_relu\n\nfrom utils.network_summary import count_parameters\n\n\nclass VGGClassifier:\n def __init__(self, batch_size, layer_stage_sizes, name, num_classes, num_channels=1, batch_norm_use=False,\n inner_layer_depth=2, 
strided_dim_reduction=True):\n\n        \"\"\"\n        Initializes a VGG Classifier architecture\n        :param batch_size: The size of the data batch\n        :param layer_stage_sizes: A list containing the filters for each layer stage, where layer stage is a series of\n        convolutional layers with stride=1 and no max pooling followed by a dimensionality reducing stage which is\n        either a convolution with stride=1 followed by max pooling or a convolution with stride=2\n        (i.e. strided convolution). So if we pass a list [64, 128, 256] it means that if we have inner_layer_depth=2\n        then stage 0 will have 2 layers with stride=1 and filter size=64 and another dimensionality reducing convolution\n        with either stride=1 and max pooling or stride=2 to dimensionality reduce. Similarly for the other stages.\n        :param name: Name of the network\n        :param num_classes: Number of classes we will need to classify\n        :param num_channels: Number of channels of our image data.\n        :param batch_norm_use: Whether to use batch norm between layers or not.\n        :param inner_layer_depth: The amount of extra layers on top of the dimensionality reducing stage to have per\n        layer stage.\n        :param strided_dim_reduction: Whether to use strided convolutions instead of max pooling.\n        \"\"\"\n        self.reuse = False\n        self.batch_size = batch_size\n        self.num_channels = num_channels\n        self.layer_stage_sizes = layer_stage_sizes\n        self.name = name\n        self.num_classes = num_classes\n        self.batch_norm_use = batch_norm_use\n        self.inner_layer_depth = inner_layer_depth\n        self.strided_dim_reduction = strided_dim_reduction\n        self.build_completed = False\n\n    def __call__(self, image_input, training=False, dropout_rate=0.0):\n        \"\"\"\n        Runs the CNN producing the predictions and the gradients.\n        :param image_input: Image input to produce embeddings for, e.g. image data of shape [batch_size, height, width, channels]\n        :param training: A flag indicating training or evaluation\n        :param dropout_rate: A tf placeholder of type tf.float32 indicating the amount of dropout applied\n        :return: Embeddings of size [batch_size, self.num_classes]\n        \"\"\"\n\n        with tf.variable_scope(self.name, reuse=self.reuse):\n            layer_features = []\n            with tf.variable_scope('VGGNet'):\n                outputs = image_input\n                for i in range(len(self.layer_stage_sizes)):\n                    with tf.variable_scope('conv_stage_{}'.format(i)):\n                        for j in range(self.inner_layer_depth):\n                            with tf.variable_scope('conv_{}_{}'.format(i, j)):\n                                if (j == self.inner_layer_depth-1) and self.strided_dim_reduction:\n                                    stride = 2\n                                else:\n                                    stride = 1\n                                outputs = tf.layers.conv2d(outputs, self.layer_stage_sizes[i], [3, 3],\n                                                           strides=(stride, stride),\n                                                           padding='SAME', activation=None)\n                                outputs = leaky_relu(outputs, name=\"leaky_relu{}\".format(i))\n                                layer_features.append(outputs)\n                                if self.batch_norm_use:\n                                    outputs = batch_norm(outputs, decay=0.99, scale=True,\n                                                         center=True, is_training=training, renorm=False)\n                    if self.strided_dim_reduction==False:\n                        outputs = tf.layers.max_pooling2d(outputs, pool_size=(2, 2), strides=2)\n\n                    outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)\n                    # apply dropout only at dimensionality\n                        # reducing steps, i.e. 
the last layer in\n # every group\n\n c_conv_encoder = outputs\n c_conv_encoder = tf.contrib.layers.flatten(c_conv_encoder)\n c_conv_encoder = tf.layers.dense(c_conv_encoder, units=self.num_classes)\n\n self.reuse = True\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)\n\n if not self.build_completed:\n self.build_completed = True\n count_parameters(self.variables, \"VGGNet\")\n\n return c_conv_encoder, layer_features\n\n\nclass TextClassifier:\n def __init__(self, batch_size, filter_sizes, name, num_classes, embeddings, max_sent_length, vocab_size,num_units, num_channels=1,\n embedding_dim=300, num_filters=100, l2_norm=3, activation='relu'):\n\n \"\"\"\n Initializes a VGG Classifier architecture\n :param batch_size: The size of the data batch\n :param filter_sizes: A list containing the filters sizes for the convolutional layer\n :param name: Name of the network\n :param num_classes: Number of classes we will need to classify\n :param num_channels: Number of channels of our image data.\n :param embeddings: the pretrained embeddings\n :param embed_size: the size of each embeddings\n :param num_filters: number of filter per size\n \"\"\"\n self.reuse = False\n self.batch_size = batch_size\n self.num_channels = num_channels\n self.filter_sizes = filter_sizes\n self.name = name\n self.num_classes = num_classes\n self.build_completed = False\n self.embeddings = embeddings\n self.embedding_dim = embedding_dim\n self.num_filters = num_filters\n self.max_sent_length = max_sent_length\n self.vocab_size = vocab_size\n self.l2_norm = l2_norm\n self.activation=activation\n self.num_units = num_units\n \n def __call__(self, text_input, training=False, dropout_rate=0.0):\n \"\"\"\n Runs the CNN producing the predictions and the gradients.\n :param text_input: Text input to produce embeddings for. e.g. 
for text data [batch_size, sequence_length]\n :param training: A flag indicating training or evaluation\n :param dropout_rate: A tf placeholder of type tf.float32 indicating the amount of dropout applied\n :return: Embeddings of size [batch_size, self.num_classes]\n \"\"\"\n\n with tf.variable_scope(self.name, reuse=self.reuse):\n layer_features = []\n with tf.variable_scope('VGGNet'):\n if self.embeddings==None:\n with tf.device('/cpu:0'), tf.name_scope(\"embedding\"):\n W = tf.Variable(\n tf.random_uniform([self.vocab_size, self.embedding_dim], -1.0, 1.0),\n name=\"W\") \n else:\n W = self.embeddings\n\n embedded_chars = tf.nn.embedding_lookup(W, text_input)\n inputs = embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)\n pooled_outputs = []\n for i, filter_size in enumerate(self.filter_sizes):\n with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\n # Convolution Layer\n filter_shape = [filter_size, self.embedding_dim, 1, self.num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.1, shape=[self.num_filters]), name=\"b\")\n conv = tf.nn.conv2d(\n inputs,\n W,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n name=\"conv\")\n # Apply nonlinearity\n if self.activation=='relu':\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu\")\n elif self.activation=='sigmoid':\n h = tf.sigmoid(tf.nn.bias_add(conv, b), name=\"sigmoid\")\n elif self.activation=='tanh':\n h = tf.tanh(tf.nn.bias_add(conv, b), name=\"tanh\")\n layer_features.append(h)\n # Maxpooling over the outputs\n pooled = tf.nn.max_pool(\n h,\n ksize=[1, self.max_sent_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding='VALID',\n name=\"pool\")\n pooled_outputs.append(pooled)\n\n # Combine all the pooled features\n num_filters_total = self.num_filters * len(self.filter_sizes)\n h_pool = tf.concat(pooled_outputs, 3)\n h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])\n\n if self.l2_norm != 0:\n h_pool_flat = self.l2_norm * tf.divide(h_pool_flat, tf.norm(h_pool_flat, ord='euclidean'))\n # Add dropout\n with tf.name_scope(\"dropout\"):\n h_drop = tf.layers.dropout(h_pool_flat, rate=dropout_rate, training=training)\n\n c_conv_encoder = h_drop\n c_conv_encoder = tf.contrib.layers.flatten(c_conv_encoder)\n\n dense = tf.layers.dense(inputs=c_conv_encoder, units=self.num_units, activation=tf.nn.relu)\n\n # Logits Layer\n scores = tf.layers.dense(inputs=dense, units=self.num_classes)\n\n \n #self.reuse = True\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)\n\n if not self.build_completed:\n self.build_completed = True\n count_parameters(self.variables, \"VGGNet\")\n\n return scores, layer_features\n\n\n\nclass RNNClassifier:\n def __init__(self, batch_size, name, num_classes, embeddings, max_sent_length, vocab_size, cell, num_units,hidden_unit=100,\n embedding_dim=300):\n\n \"\"\"\n Initializes a VGG Classifier architecture\n :param batch_size: The size of the data batch\n :param filter_sizes: A list containing the filters sizes for the convolutional layer\n :param name: Name of the network\n :param num_classes: Number of classes we will need to classify\n :param num_channels: Number of channels of our image data.\n :param embeddings: the pretrained embeddings\n :param embed_size: the size of each embeddings\n :param num_filters: number of filter per size\n \"\"\"\n self.reuse = False\n self.batch_size = batch_size\n self.name = name\n self.num_classes = num_classes\n self.build_completed = False\n 
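# embedding setup: when no pretrained matrix is supplied, __call__ builds a trainable [vocab_size, embedding_dim] lookup table\n        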
self.embeddings = embeddings\n self.embedding_dim = embedding_dim\n self.max_sent_length = max_sent_length\n self.vocab_size = vocab_size\n self.hidden_unit = hidden_unit\n self.cell = cell\n self.num_units= num_units\n\n def __call__(self, text_input, training=False, dropout_rate=0.0):\n\n with tf.variable_scope(self.name, reuse=self.reuse):\n layer_features = []\n with tf.variable_scope('VGGNet'):\n if self.embeddings==None:\n with tf.name_scope(\"embedding\"):\n W = tf.Variable(\n tf.random_uniform([self.vocab_size, self.embedding_dim], -1.0, 1.0),\n name=\"W\") \n else:\n W = self.embeddings\n\n embedded_chars = tf.nn.embedding_lookup(W, text_input)\n if self.cell=='bidlstm':\n lstm_fw_cell=rnn.BasicLSTMCell(self.hidden_unit) #forward direction cell\n lstm_bw_cell=rnn.BasicLSTMCell(self.hidden_unit) #backward direction cell\n if dropout_rate is not None:\n lstm_fw_cell=rnn.DropoutWrapper(lstm_fw_cell,output_keep_prob=1-dropout_rate)\n lstm_bw_cell=rnn.DropoutWrapper(lstm_bw_cell,output_keep_prob=1-dropout_rate)\n outputs,_=tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,lstm_bw_cell,embedded_chars,dtype=tf.float32) \n output_rnn=tf.concat(outputs,axis=2) #[batch_size,sequence_length,hidden_size*2]\n output_rnn_last=tf.reduce_mean(output_rnn,axis=1) #[batch_size,hidden_size*2] #output_rnn_last=output_rnn[:,-1,:] ##[batch_size,hidden_size*2] #TODO\n elif self.cell=='lstm':\n lstm_cell=rnn.BasicLSTMCell(self.hidden_unit)\n if dropout_rate is not None:\n lstm_cell=rnn.DropoutWrapper(lstm_cell,output_keep_prob=1-dropout_rate)\n outputs,_ = tf.nn.dynamic_rnn(lstm_cell, embedded_chars, dtype=tf.float32)\n output_rnn=tf.concat(outputs,axis=2)\n output_rnn_last=tf.reduce_mean(output_rnn,axis=1)\n elif self.cell=='gru':\n gru_cell=rnn.GRUCell(self.hidden_unit)\n if dropout_rate is not None:\n gru_cell=rnn.DropoutWrapper(gru_cell,output_keep_prob=1-dropout_rate)\n outputs,_ = tf.nn.dynamic_rnn(gru_cell, embedded_chars, dtype=tf.float32)\n output_rnn=tf.concat(outputs,axis=2)\n output_rnn_last=tf.reduce_mean(output_rnn,axis=1)\n\n c_conv_encoder = output_rnn_last\n dense = tf.layers.dense(inputs=c_conv_encoder, units=self.num_units, activation=tf.nn.relu)\n\n # Logits Layer\n scores = tf.layers.dense(inputs=dense, units=self.num_classes, activation=tf.sigmoid)\n \n #self.reuse = True\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)\n\n if not self.build_completed:\n self.build_completed = True\n count_parameters(self.variables, \"VGGNet\")\n\n return scores, layer_features\n","repo_name":"cristeaadrian/mlp-cw","sub_path":"network_architectures.py","file_name":"network_architectures.py","file_ext":"py","file_size_in_byte":14721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72004664251","text":"from django.contrib import admin\nfrom cart import models\n\n@admin.register(models.Cart)\nclass CartAdmin(admin.ModelAdmin):\n list_display = ['cart_id', 'get_product_total', 'paid_for', 'created_on']\n search_fields = ['product__name']\n\n\n@admin.register(models.CustomerOrder)\nclass CustomerOrderAdmin(admin.ModelAdmin):\n list_display = ['reference', 'transaction', 'payment', 'accepted', 'shipped', 'refund', 'created_on']\n search_fields = ['reference', 'transaction']\n date_hierarchy = 'created_on'\n sortable_by = ['payment', 'created_on']\n filter_horizontal = ['cart']\n list_per_page = 20\n actions = ['mark_accepted', 'mark_shipped']\n\n def mark_accepted(self, queryset):\n 
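# NOTE: Django invokes admin actions as action(self, request, queryset), so a request parameter is also expected here\n        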
queryset.update(accepted=True)\n return queryset\n \n def mark_shipped(self, queryset):\n queryset.update(shipped=True)\n return queryset\n\n\n@admin.register(models.Shipment)\nclass ShipmentAdmin(admin.ModelAdmin):\n list_display = ['customer_order']\n search_fields = ['customer_code']\n date_hierarchy = 'created_on'\n","repo_name":"Zadigo/ecommerce_template","sub_path":"mywebsite/cart/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"14154155276","text":"#!/usr/bin/python3\n\"\"\" Base class \"\"\"\n\nimport json\nfrom models.review import Review\nfrom models.city import City\nfrom models.place import Place\nfrom models.amenity import Amenity\nfrom models.state import State\nfrom models.base_model import BaseModel\nfrom models.user import User\n\n\nclass FileStorage:\n \"\"\" Serializes JSON and deserializes \"\"\"\n __file_path = 'file.json'\n __objects = {}\n clases = {\n \"BaseModel\": BaseModel,\n \"User\": User,\n \"State\": State,\n \"City\": City,\n \"Amenity\": Amenity,\n \"Place\": Place,\n \"Review\": Review\n }\n\n def all(self):\n \"\"\" Return dictionary \"\"\"\n return FileStorage.__objects\n\n def new(self, obj):\n \"\"\" New __objects .id \"\"\"\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n FileStorage.__objects[key] = obj\n\n def save(self):\n \"\"\" Serialize JSON file __objects \"\"\"\n dict_save = {}\n for key, value in FileStorage.__objects.items():\n valor_dict = value.to_dict()\n dict_save[key] = valor_dict\n\n with open(FileStorage.__file_path, 'w') as archivo:\n json.dump(dict_save, archivo)\n\n def reload(self):\n \"\"\" Deserialize JSON file __objects \"\"\"\n try:\n with open(FileStorage.__file_path, 'r') as archivo:\n dicts = json.load(archivo)\n FileStorage.__objects = {}\n for keys, values in dicts.items():\n obj = FileStorage.clases[values['__class__']](**values)\n FileStorage.__objects[keys] = obj\n except:\n return\n\n def find_object(cls, id=''):\n \"\"\" Return object id \"\"\"\n objs = cls.__objects\n for obj in objs.values():\n if obj.id == id:\n return obj\n print(\"No encuentro la ID :::: {} ::::\".format(id))\n","repo_name":"SrDri/AirBnB_clone","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"43572856008","text":"# -*- coding: utf-8 -*-\n\nfrom qgis.PyQt.QtGui import QIcon\nfrom qgis.PyQt.QtCore import QDir, QFileInfo\nfrom qgis.core import (QgsProcessingParameterExtent,\n QgsProcessingParameterEnum,\n QgsProcessingParameterString,\n QgsProcessingParameterBoolean,\n QgsProcessingParameterRasterLayer,\n QgsProcessingAlgorithm,\n QgsProcessingParameterFolderDestination,\n QgsProject,\n QgsCoordinateTransform,\n QgsCoordinateReferenceSystem,\n QgsRectangle,\n QgsLayerTreeLayer,\n QgsRasterLayer)\nimport os.path\nimport math, re\nfrom .tilingthread import TilingThread\nfrom .tileset import TileSet\n\nclass GetTilesProcessingAlgorithm(QgsProcessingAlgorithm):\n\n MINZOOM = 'MINZOOM'\n MAXZOOM = 'MAXZOOM'\n EXTENT = 'EXTENT'\n SINGLELAYER = 'SINGLELAYER'\n INPUT = 'INPUT'\n SUBSET = 'SUBSET'\n OUTPUT = 'OUTPUT'\n\n def initAlgorithm(self, config=None):\n self.zoomlist = ['z0', 'z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z7', 'z8', 'z9', 'z10', 'z11', 'z12', 'z13', 'z14', 'z15', 'z16', 'z17', 'z18', 'z19', 'z20', 'z21', 'z22', 'z23', 'z24']\n 
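# parameterAsEnum returns the index into zoomlist, which by construction equals the zoom level itself (z0 -> 0, ..., z24 -> 24)\n        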
self.addParameter(QgsProcessingParameterEnum(self.MINZOOM, 'Min zoom of cached map', self.zoomlist, defaultValue=13))\n self.addParameter(QgsProcessingParameterEnum(self.MAXZOOM, 'Max zoom of cached map', self.zoomlist, defaultValue=15))\n self.addParameter(QgsProcessingParameterExtent(self.EXTENT, 'Cache extent'))\n self.addParameter(QgsProcessingParameterBoolean(self.SINGLELAYER, 'Cache single raster layer.', defaultValue=True, optional=False))\n self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, 'Cached layer:', optional=True))\n self.addParameter(QgsProcessingParameterString(self.SUBSET, 'Cache folder name', optional=False, defaultValue='Cache'))\n self.addParameter(QgsProcessingParameterFolderDestination(self.OUTPUT, 'Folder to store map tiles (by default - home folder of you project):'))\n self.workThread = None\n\n def processAlgorithm(self, parameters, context, feedback):\n\n crs = context.project().crs()\n minzoom = self.parameterAsEnum(parameters, self.MINZOOM, context)\n maxzoom = self.parameterAsEnum(parameters, self.MAXZOOM, context)\n if minzoom > maxzoom:\n feedback.pushConsoleInfo('Maximum zoom value is lower than minimum. Please correct this and try again.')\n return\n \n bbox = self.parameterAsExtent(parameters, self.EXTENT, context, crs)\n outfolder = self.parameterAsFile(parameters, self.OUTPUT, context)\n root_dir = self.parameterAsString(parameters, self.SUBSET, context)\n cached_layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)\n single_layer = self.parameterAsBoolean(parameters, self.SINGLELAYER, context)\n\n fileInfo = QFileInfo(outfolder)\n if fileInfo.isDir() and not len(QDir(outfolder).entryList(QDir.Dirs | QDir.Files | QDir.NoDotAndDotDot)) == 0:\n feedback.pushConsoleInfo('Selected directory is not empty.')\n #return\n feedback.pushConsoleInfo(f\"fileInfo:{fileInfo}.\")\n\n extent = QgsCoordinateTransform(crs, QgsCoordinateReferenceSystem('EPSG:4326'), QgsProject.instance()).transform(bbox)\n arctanSinhPi = math.degrees(math.atan(math.sinh(math.pi)))\n extent = extent.intersect(QgsRectangle(-180, -arctanSinhPi, 180, arctanSinhPi))\n\n prj_file = QgsProject.instance().fileName()\n root = QgsProject.instance().layerTreeRoot()\n layers = root.checkedLayers()\n tile_width = 256\n tile_height = 256\n transp = 100\n quality = 70\n tile_format = 'PNG'\n enable_antialiasing = False\n tmsconvention = False\n writeMapurl = False\n writeViewer = False\n metatile = False\n metatile_size = {'rows': 2, 'cols': 2}\n metatile_buffer = False\n llg_features = False\n if single_layer:\n layers = [cached_layer]\n\n self.workThread = TilingThread( layers,\n extent,\n minzoom,\n maxzoom,\n tile_width,\n tile_height,\n transp,\n quality,\n tile_format,\n fileInfo,\n root_dir,\n enable_antialiasing,\n tmsconvention,\n writeMapurl,\n writeViewer,\n metatile,\n metatile_size,\n metatile_buffer,\n llg_features\n )\n \n self.workThread.rangeChanged.connect(self.setProgressRange)\n self.workThread.updateProgress.connect(self.updateProgress)\n self.workThread.processFinished.connect(self.processFinished)\n self.workThread.processInterrupted.connect(self.processInterrupted)\n self.workThread.start()\n\n # this does the same thing but without using a seperate thread. 
Good for debugging.\n #no_thread = TileSet(layers,extent,minzoom,maxzoom,tile_width,tile_height,\n # transp,quality,tile_format,fileInfo,root_dir,\n # enable_antialiasing,tmsconvention,writeMapurl,writeViewer,metatile,metatile_size,\n # metatile_buffer,llg_features)\n #no_thread.run()\n \n #create a xyz layer\n full_path = re.sub('\\\\\\\\','/',outfolder) + '/' + root_dir\n urlWithParams = 'type=xyz&url=file:///%(f)s/{z}/{x}/{y}.%(t)s&zmax=15&zmin=13&crs=EPSG3857'%{'f':full_path,'t':tile_format}\n rlayer = QgsRasterLayer(urlWithParams, f'{root_dir}', 'wms')\n #rlayer.isValid()\n\n #create a groop for xyz layers\n root = QgsProject.instance().layerTreeRoot()\n group = root.findGroup('Local tiles')\n QgsProject.instance().addMapLayer(rlayer)\n group.insertChildNode(0, QgsLayerTreeLayer(rlayer))\n\n return {self.OUTPUT: [outfolder, full_path, prj_file]}\n\n def uncheckLayers(self, layers):\n for lay in layers:\n node = QgsProject.instance().layerTreeRoot().findLayer(lay)\n if node:\n node.setItemVisibilityChecked(False)\n\n def checkLayers(self, layers):\n for lay in layers:\n node = QgsProject.instance().layerTreeRoot().findLayer(lay)\n if node:\n node.setItemVisibilityChecked(True)\n \n def setProgressRange(self, message, value):\n self.progressBar.setFormat(message)\n self.progressBar.setRange(0, value)\n\n def updateProgress(self):\n self.progressBar.setValue(self.progressBar.value() + 1)\n\n def processInterrupted(self):\n self.stopProcessing()\n\n def processFinished(self):\n self.stopProcessing()\n\n def stopProcessing(self):\n if self.workThread is not None:\n self.workThread.stop()\n self.workThread = None\n\n def name(self):\n return 'Get tiles'\n\n def icon(self):\n return QIcon(os.path.dirname(__file__) + '/gettiles.png')\n\n def displayName(self):\n return self.name()\n\n def group(self):\n return self.groupId()\n\n def groupId(self):\n return ''\n\n def createInstance(self):\n return GetTilesProcessingAlgorithm()","repo_name":"olegruk/gettiles","sub_path":"get_tiles_processing_algorithm.py","file_name":"get_tiles_processing_algorithm.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29249620204","text":"from nltk.corpus import gutenberg\nimport random\nimport nltk\nimport pandas as pd\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.utils import shuffle\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import svm\n\nbook_1 = gutenberg.raw('bible-kjv.txt').lower()\nbook_2 = gutenberg.raw('melville-moby_dick.txt').lower()\nbook_3 = gutenberg.raw('edgeworth-parents.txt').lower()\ndict_labels = {'KJV (Bible)': book_1, 'Herman Melville (Moby-Dick)' : book_2, 'Richard Lovell Edgeworth (Parents)' : book_3}\nmaster_list = []\nsize_of_books = []\n# Dividing each book into documents of 30 sentences. appending them to *master_list*. 
\nfor author, book in dict_labels.items():\n n = 0;\n book = nltk.sent_tokenize(book)\n limit = int(len(book)/31)\n size_of_books.append(limit)\n for i in range(limit):\n master_list.append(\" \".join(book[n:n+30]))\n n+=31 \n \n#Seperating the 3 books.\ncomplete_book_1 = (master_list[0:size_of_books[0]-1])\ncomplete_book_2 = (master_list[961:1278]) \ncomplete_book_3 = (master_list[1278:-1])\n\n#Sampling 200 documents from the entire book.\nsample_book1 = pd.DataFrame(random.sample(complete_book_1, 200))\nsample_book2 = pd.DataFrame(random.sample(complete_book_2, 200))\nsample_book3 = pd.DataFrame(random.sample(complete_book_3, 200))\n\n#Converting Labels to integers\nkjv_bible = 0\nmelville_moby_dick = 1\nedgeworth_parents = 2\n\n# Adding a new column to the sample books containing lables. \nsample_book1['Label'] = kjv_bible\nsample_book2['Label'] = melville_moby_dick\nsample_book3['Label'] = edgeworth_parents\n\nframes = [sample_book1, sample_book2, sample_book3]\ncomplete_sample = pd.concat(frames)\ncomplete_sample = shuffle(complete_sample)\n\nx_train, x_test, y_train, y_test = train_test_split(complete_sample[0], complete_sample['Label'], test_size=0.2, random_state=4)\n\n\n#BOW \ntoken = RegexpTokenizer(r'[a-zA-Z]+')\ncv_bow = CountVectorizer(lowercase=True,stop_words='english',ngram_range = (1,1), analyzer='word', tokenizer = token.tokenize)\nfiltered_counts_bow= cv_bow.fit_transform(x_train)\nfil_test = cv_bow.fit_transform(x_test)\nbow = filtered_counts_bow.toarray()\n\nclf = svm.SVC(kernel = \"linear\")\nclf.fit(filtered_counts_bow, y_train)\n\n\n#TFIDF\ntoken = RegexpTokenizer(r'[a-zA-Z]+')\ncv_tfidf = TfidfVectorizer(lowercase=True,stop_words='english',ngram_range = (1,1), analyzer='word', tokenizer = token.tokenize)\nfiltered_counts_tfidf= cv_tfidf.fit_transform(x_train)\ntfidf = filtered_counts_tfidf.toarray()\n","repo_name":"salauddinaliahmed/EBC_7100-Assignment_1","sub_path":"SVM_working.py","file_name":"SVM_working.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72634702332","text":"import math\n\nimport HandTrackingModule as htm\nimport cv2\nimport numpy as np\nimport time\nimport osascript\n\n\n##############################'\nwCam,hCam = 640,480\n\ncap = cv2.VideoCapture(0)\ncap.set(3,wCam)\ncap.set(4,hCam)\npTime = 0\n\ndetector = htm.handDetector(detectionCon=0.7)\n\n# vol = \"set volume output volume \" + str(-1)\n# osascript.osascript(vol)\n# result = osascript.osascript('get volume settings')\n# print(result)\n\nwhile True:\n success,img = cap.read()\n img = detector.findHands(img)\n lmList = detector.findPosition(img,draw=False)\n if len(lmList)!= 0:\n # print(lmList[4],lmList[8])\n\n x1,y1 = lmList[4][1],lmList[4][2]\n x2, y2 = lmList[8][1], lmList[8][2]\n cx,cy = (x1+x2)//2, (y1+y2)//2\n x3, y3 = lmList[12][1], lmList[12][2] # Middle Finger\n # print(x3,y3)\n\n cv2.circle(img, (x1,y1), 10, (255,0,0), cv2.FILLED)\n cv2.circle(img, (x2, y2), 10, (255, 0, 0), cv2.FILLED)\n cv2.line(img,(x1,y1),(x2,y2),(255,0,255),2)\n cv2.circle(img, (cx, cy), 10, (255, 0, 0), cv2.FILLED)\n\n length = math.hypot(x2-x1,y2-y1)\n # print(length)\n\n vol = np.interp(length,[25,220],[0,100])\n print(round(length), round(vol))\n vol1 = \"set volume output volume \" + str(vol)\n osascript.osascript(str(vol1))\n\n\n if length < 50:\n cv2.circle(img, (cx, cy), 10, (0, 255, 0), cv2.FILLED)\n\n\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime = cTime\n\n\n cv2.waitKey(1)\n\n 
cv2.putText(img,f'FPS: {int(fps)}',(40,50),cv2.FONT_HERSHEY_COMPLEX,1,(255,0,255),2)\n cv2.imshow(\"Img\", img)\n\n","repo_name":"ThejakaSEP/HandTracking","sub_path":"VolumeController.py","file_name":"VolumeController.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1579214965","text":"\"\"\"\nAuthor: Zhou Chen\nDate: 2020/3/17\nDesc: desc\n\"\"\"\nfrom django.shortcuts import render\nimport re\nimport mimetypes\nfrom wsgiref.util import FileWrapper\nfrom django.http import StreamingHttpResponse\nimport os\nimport uuid\nfrom .settings import MEDIA_ROOT, STATIC_ROOT\nimport sys\nsys.path.append(\"../\")\nimport yolo3_deepsort\nimport utils.format_factory as ff\n\n\ndef upload(request):\n if request.method == 'POST':\n files = request.FILES['video']\n if len(files) > 0:\n if not os.path.exists(MEDIA_ROOT):\n # 若不存在媒体存储目录\n os.mkdir(MEDIA_ROOT)\n video = files\n extension = os.path.splitext(video.name)[1]\n # 重命名文件\n file_name = '{}{}'.format(uuid.uuid4(), extension)\n file_path = '{}/{}'.format(MEDIA_ROOT, file_name)\n # 保存文件到本机\n with open(file_path, 'wb') as f:\n for c in video.chunks():\n f.write(c)\n # 视频保存本机之后调用模型\n\n args = yolo3_deepsort.Argument(file_path)\n args.output_path = os.path.join(STATIC_ROOT, 'videos', 'rst.avi')\n cfg = yolo3_deepsort.get_config()\n cfg.merge_from_file(args.config_detection)\n cfg.merge_from_file(args.config_deepsort)\n with yolo3_deepsort.VideoTracker(cfg, args, file_path) as vdo_trk:\n vdo_trk.run_with_limit(300)\n os.remove(os.path.join(STATIC_ROOT, 'videos', 'rst.mp4'))\n ff.avi2mp4(args.output_path, os.path.join(STATIC_ROOT, 'videos', 'rst.mp4'))\n\n return render(request, 'show_video.html', {'filename': 'rst.mp4'})\n else:\n return render(request, 'upload.html')\n return render(request, 'upload.html')\n\n\ndef file_iterator(file_name, chunk_size=8192, offset=0, length=None):\n with open(file_name, \"rb\") as f:\n f.seek(offset, os.SEEK_SET)\n remaining = length\n while True:\n bytes_length = chunk_size if remaining is None else min(remaining, chunk_size)\n data = f.read(bytes_length)\n if not data:\n break\n if remaining:\n remaining -= len(data)\n yield data\n\n\ndef stream_video(request):\n path = request.GET.get('path')\n path = os.path.join(\"static\", \"videos\", path)\n range_header = request.META.get('HTTP_RANGE', '').strip()\n range_re = re.compile(r'bytes\\s*=\\s*(\\d+)\\s*-\\s*(\\d*)', re.I)\n range_match = range_re.match(range_header)\n size = os.path.getsize(path)\n content_type, encoding = mimetypes.guess_type(path)\n content_type = content_type or 'application/octet-stream'\n if range_match:\n first_byte, last_byte = range_match.groups()\n first_byte = int(first_byte) if first_byte else 0\n last_byte = first_byte + 1024 * 1024 * 8 # 8M 每片,响应体最大体积\n if last_byte >= size:\n last_byte = size - 1\n length = last_byte - first_byte + 1\n resp = StreamingHttpResponse(file_iterator(path, offset=first_byte, length=length), status=206,\n content_type=content_type)\n resp['Content-Length'] = str(length)\n resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)\n else:\n # 不是以视频流方式的获取时,以生成器方式返回整个文件,节省内存\n resp = StreamingHttpResponse(FileWrapper(open(path, 'rb')), content_type=content_type)\n resp['Content-Length'] = str(size)\n resp['Accept-Ranges'] = 'bytes'\n return 
resp","repo_name":"luanshiyinyang/DeepSORT","sub_path":"web/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"17645508897","text":"# 거스름돈\n\nimport sys\n\nmoney = int(sys.stdin.readline())\ncount = 0\n# 거스름돈 계산\nchange = 1000 - money\n\n# 가장 큰 단위부터 확인\ncoin = [500, 100, 50, 10, 5, 1]\n\nfor i in coin:\n # 동전 개수 계산\n count = count + change // i\n # 남은 거스름돈\n change = change % i\n # 더 이상 거스를 돈이 없을 경우 중지\n if change == 0:\n break\n\nprint(count)","repo_name":"Techeer-3rd-gen-study/Algorithm-study","sub_path":"03주차_10.18_10.24/2_5585/이수현_5585.py","file_name":"이수현_5585.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"29282839018","text":"# A. Задана натуральная степень k.\n# Сформировать случайным образом список коэффициентов (значения от 0 до 100)\n# многочлена и записать в файл многочлен степени k.\n# Пример:\n# если k = 2, то многочлены могут быть => 2*x² + 4*x + 5 = 0 или x² + 5 = 0 или 10*x² = 0\nfrom random import randint\n\n\ndef polyniminal(k: int) -> dict:\n poly_dict = {}\n for i in range(k, -1, -1):\n rnd = randint(-100, 100)\n poly_dict[i] = rnd\n\n return poly_dict\n\n\ndef polynom_dict_to_line(poly_dict: dict) -> str:\n k = max(poly_dict.keys())\n line = ''\n for key in poly_dict:\n if poly_dict[key] != 0:\n factor = str(abs(int(poly_dict[key])))\n if int(poly_dict[key]) < 0:\n sign = ' - '\n elif int(key) == k:\n sign = ''\n else:\n sign = ' + '\n if int(key) == 0:\n x = ''\n exponent = ''\n elif int(key) == 1:\n x = '*x'\n exponent = ''\n else:\n x = '*x**'\n exponent = str(key)\n line += sign + factor + x + exponent\n line += ' = 0'\n return line\n\n\nk = int(input('Введите натуральную степень k: '))\n\npolynom = polyniminal(k)\n\nline = polynom_dict_to_line(polynom)\n\nwith open('PIHomeWork04/polynom.txt', 'w') as data:\n data.writelines(line)\n","repo_name":"Mortimer08/PIHomeWork04","sub_path":"Task01.py","file_name":"Task01.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7339059112","text":"import discord\nimport asyncio\nimport humanreadable as hr\nimport json\n\nclient = discord.Client()\nuid = 0\ntry:\n with open('userconfig.json', 'w') as f:\n cfg = json.load(f)\n channels = cfg[\"channels\"]\n\nexcept FileNotFoundError:\n channels = {}\n whitelist = {}\n\n@client.event\nasync def on_ready():\n global uid\n print('We have logged in as {0.user}'.format(client))\n uid = client.user.id\n activity = discord.Activity(name='silence | after whitelist blacklist', type=discord.ActivityType.listening)\n await client.change_presence(activity=activity)\n\n\n@client.event\nasync def on_message(message):\n nick = message.channel.guild.get_member(uid).display_name\n print(nick)\n\n if message.content.startswith(nick):\n command = message.content[len(nick):].split()\n if command[0] == 'after':\n time = sum(hr.Time(o).seconds for o in command[1:])\n channels[message.channel.id] = time\n await message.delete()\n if command[0] == 'delete':\n await message.delete()\n\n print(channels)\n print(message.channel.id)\n if message.channel.id in channels:\n print('test')\n await asyncio.sleep(channels[message.channel.id])\n await message.delete()\n\nwith open('token.txt') as f:\n 
client.run(f.read())\n","repo_name":"TheTimgor/hushhush","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41007311681","text":"# -*- coding: utf-8 -*-\n# author:CY\n# datetime:2019/8/2 23:15\nimport re\nimport string\n\nfrom wtforms.fields import simple\nfrom wtforms import validators\n\nfrom apps.chat.model import ChatRoom\nfrom c_chat_server.extensions import db\nfrom utils.serializer import BaseForm\nfrom utils.optional import Optional\n\n\nclass CreateChatRoomForm(BaseForm):\n \"\"\"创建聊天室form\"\"\"\n name=simple.StringField(\n label='聊天室名称',\n validators=[\n validators.DataRequired(message='聊天室名称必须填写!'),\n validators.length(max=10,message='聊天室名称应小于10字')\n ]\n )\n\n desc=simple.StringField(\n label='聊天室简介',\n validators=[\n validators.DataRequired(message='聊天室简介必须填写!'),\n validators.Length(max=512,message='聊天室简介应小于512字')\n ]\n )\n\n img=simple.StringField(\n label='聊天室封面图片',\n validators=[\n Optional(),\n ]\n )\n\n def validate_name(self,name):\n word_list = string.punctuation\n for i in word_list:\n if i in name.data:\n raise validators.StopValidation('聊天室名称不能带有特殊字符!')\n queryset=db.session.query(ChatRoom.id).filter(ChatRoom.name==name.data).first()\n if queryset:\n raise validators.StopValidation('聊天室名称已经被抢先注册了!')\n\n\n def validate_img(self,images):\n if images.data:\n if not re.match('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',images.data):\n raise validators.StopValidation('图片不符合规则!')","repo_name":"PrimaryCY/Cchat","sub_path":"c_chat_server/apps/chat/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"78"} +{"seq_id":"16015998493","text":"import time\nimport pandas as pd\nimport requests\nimport re\nimport json\nfrom xml.etree import ElementTree\nimport html\nfrom datetime import datetime\n\n\ndef get_video_list(youtube_playlist_url: str) -> list:\n \"\"\"\n 1. Load Gordan Ramsay playlist\n 2. Extract the Javascript variable that stores the playlist information\n 3. Store it in an object array\n :param youtube_playlist_url: YouTube playlist url. 
Eg: https://youtube.com/playlist?list=...\n :return: [vidId]\n \"\"\"\n resp: requests.Response = requests.get(youtube_playlist_url)\n\n # BROWSER DEBUG: console.log(ytInitialData)\n raw_html: str = resp.text\n result = re.search('var ytInitialData = (.*);<', raw_html)\n result_json = json.loads(result.group(1))\n\n vid_raw: list = result_json[\"contents\"][\"twoColumnBrowseResultsRenderer\"][\"tabs\"][0][\"tabRenderer\"][\"content\"][\"sectionListRenderer\"][\"contents\"][0][\"itemSectionRenderer\"][\"contents\"][0][\"playlistVideoListRenderer\"][\"contents\"]\n\n vid_list: list = [v[\"playlistVideoRenderer\"][\"videoId\"] for v in vid_raw if \"playlistVideoRenderer\" in v]\n\n return vid_list\n\n\ndef get_request_url(vid_id: str) -> dict:\n \"\"\"\n Retrieves the URL required to download the captions.\n This step is necessary because YouTube has implemented hashed their request in a form of signature.\n We will need the correct signature to download it.\n :param vid_id: Video ID\n :return: { title, description, author, keywords, caption_url }\n \"\"\"\n youtube_url: str = \"https://www.youtube.com/watch?v=\" + vid_id\n print(\"Scrapping\", youtube_url)\n\n resp: requests.Response = requests.get(youtube_url)\n raw_html = resp.text\n\n # Exclude members only videos\n if '\"label\":\"Members only\"' in raw_html or '\"label\":\"Ahli sahaja\"' in raw_html:\n return {}\n\n # Very specifically target this request url\n result = re.search('var ytInitialPlayerResponse = (.*);<', raw_html)\n\n result_json = json.loads(result.group(1))\n\n # Ensure language is in English\n if \"captions\" in result_json:\n caption_tracks: list = result_json[\"captions\"][\"playerCaptionsTracklistRenderer\"][\"captionTracks\"]\n\n tmp_url = \"\"\n for c in caption_tracks:\n if c[\"languageCode\"] == \"en\":\n tmp_url = c[\"baseUrl\"].replace(\"\\\\u0026\", \"&\")\n break\n\n # Prevent spam pattern\n time.sleep(1)\n\n return {\n \"title\": result_json[\"videoDetails\"][\"title\"],\n \"description\": result_json[\"videoDetails\"][\"shortDescription\"].replace(\"\\n\", \" \"),\n \"author\": result_json[\"videoDetails\"][\"author\"],\n \"keywords\": \",\".join(result_json[\"videoDetails\"][\"keywords\"]),\n \"caption_url\": tmp_url\n }\n else:\n return {}\n\n\ndef get_video_subtitle(request_url: str) -> str:\n \"\"\"\n The caption request URL will return an XML format file.\n Parse the XML and return the caption paragraph.\n :param request_url: eg. 
https://youtube.com/api...?v=....\n :return:\n \"\"\"\n resp: requests.Response = requests.get(request_url)\n\n tree = ElementTree.fromstring(resp.content)\n\n res: list = [html.unescape(t.text.replace(\"\\n\", \"\")) for t in tree if t.text is not None]\n\n return \" \".join(res)\n\n\nif __name__ == \"__main__\":\n\n while True:\n\n # Get playlist URL from user\n while True:\n print(\"YouTube playlist URL should look like: https://www.youtube.com/playlist?list=XXX\")\n playlist_link: str = input(\"Playlist URL: \")\n\n if \"https://www.youtube.com/playlist?list=\" in playlist_link:\n break\n\n playlist_video_list: list = get_video_list(playlist_link)\n video_info: list = [get_request_url(v) for v in playlist_video_list if len(v) > 0]\n\n for vi in video_info:\n if bool(vi):\n print(vi[\"title\"])\n vi[\"caption\"] = get_video_subtitle(vi[\"caption_url\"])\n\n tmp_now = datetime.now()\n filename: str = video_info[0][\"author\"] + \"-\" + tmp_now.strftime(\"%Y%m%d%H%M%S\")\n\n df = pd.DataFrame(video_info)\n df.drop(columns=[\"caption_url\"], inplace=True)\n df.to_csv(path_or_buf=\"\".join([\"./export/dataset-\", filename, \".csv\"]), index=False)\n\n raw_caption = df[\"caption\"].str.cat(sep=\" \")\n with open(\"./export/raw-\" + filename + \".txt\", \"w+\", encoding=\"utf-8\") as f:\n f.write(raw_caption)\n\n print(\"============================\")\n print(\"Scrapping complete\")\n print(\"\")\n","repo_name":"jonathanlawhh/word-art-from-youtube-captions","sub_path":"scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70195078011","text":"#!/usr/bin/python\ntry:\n # Python3\n import urllib.request, urllib.error, urllib.parse\nexcept:\n import urllib2\n\ntry:\n # Python 2.6\n import json\nexcept:\n import simplejson as json\n\nimport os\nimport subprocess\nimport sys\n\n\ndef _run(filename):\n shell = os.name == 'nt'\n if filename.endswith('py'):\n executor = ['python', filename]\n return subprocess.Popen(executor, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n elif filename.endswith('sh'):\n executor = ['bash', filename]\n return subprocess.Popen(executor, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n elif filename.endswith('f90'):\n executor = ['gfortran', filename]\n exect = subprocess.Popen(executor, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n output, output_err = exect.communicate()\n return_code = exect.wait()\n if return_code:\n print(ERROR % ('Compiling...', output_err))\n exit(return_code)\n return subprocess.Popen('./a.out', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n elif filename.endswith('c'):\n executor = ['gcc', filename]\n exect = subprocess.Popen(executor, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n output, output_err = exect.communicate()\n return_code = exect.wait()\n if return_code:\n print(ERROR % ('Compiling...', output_err))\n exit(return_code)\n return subprocess.Popen('./a.out', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n elif filename.endswith('java'):\n executor = ['javac', filename]\n exect = subprocess.Popen(executor, stdin=subprocess.PIPE, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n output, output_err = exect.communicate()\n return_code = exect.wait()\n if return_code:\n print(ERROR % ('Compiling...', output_err))\n exit(return_code)\n executor = ['java', '-Duser.language=en', filename[:-len('.java')]]\n return subprocess.Popen(executor, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n elif filename.endswith('pl'):\n executor = ['swipl', '-q', '-f', filename]\n return subprocess.Popen(executor, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)\n elif os.access(filename, os.X_OK):\n executor = [os.path.abspath(filename)]\n return subprocess.Popen(executor, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) \n\n\ndef http_get(url):\n if sys.version_info[0] < 3:\n response = urllib2.urlopen(url).read().decode('utf-8')\n else:\n response = urllib.request.urlopen(url).read().decode('utf-8')\n return json.loads(response)\n\n\ndef http_post(url, data):\n if sys.version_info[0] < 3:\n response = urllib2.urlopen(url=url, data=data).read().decode('utf-8')\n else:\n response = urllib.request.urlopen(url=url, data=data.encode('utf-8')).read().decode('utf-8')\n return json.loads(response)\n\n\nBASE = 'http://dirlididi.com/api/'\n\nHEAD = \"\"\"= PROBLEM NAME\n%s\n= PROBLEM DESCRIPTION\n%s\n\"\"\"\nTEST = \"\"\"== TEST - %s\n%s\n== INPUT:\n%s\n== OUTPUT:\n%s\n\"\"\"\nERROR = \"\"\"==ERROR FOR INPUT:\n%s\n== ERROR MSG:\n%s\n\"\"\"\nFAILURE = \"\"\"== FAILED FOR INPUT:\n%s\n== FAILED OUTPUT:\n%s\n\"\"\"\n\n\ndef get_problem(key):\n return http_get(BASE + 'problem/' + key)\n\n\ndef submit_code(token, key, code, tests_result):\n result = {'tests': tests_result, 'key': key, 'code': code, 'token': token}\n data = json.dumps(result)\n url = BASE + 'code/' + key\n return http_post(url, data)\n \n\ndef has_failure(results):\n return results.replace('.', '')\n\n\ndef _get(key):\n problem = get_problem(key)\n print(HEAD % (problem[\"name\"], problem[\"description\"]))\n published_tests = [x for x in problem['tests'] if x['publish']]\n if published_tests:\n print(\"= PROGRAM EXAMPLES\")\n for test in published_tests:\n print(TEST % (test['description'], test.get('tip', ''), test['input'], test['output']))\n\n\ndef _submit(key, token, filename, source):\n problem = get_problem(key)\n tests_result = []\n for test in problem['tests']:\n exect = _run(filename)\n input_ = test['input']\n output, output_err = exect.communicate(input_)\n return_code = exect.wait()\n if return_code:\n print(ERROR % (input_, output_err))\n exit(return_code)\n tests_result.append((test['key'], output))\n content = submit_code(token, key, open(source).read(), tests_result)\n print(\"Results: \" + content['result'])\n if has_failure(content['result']):\n for result_i in range(len(content['result'])):\n if content['result'][result_i] != '.':\n failed_test = tests_result[result_i]\n print(FAILURE % (problem['tests'][result_i]['input'], failed_test[1]))\n\n\ndef _usage():\n print('Usage:\\n %s get \\n %s submit [filename_src]' % (sys.argv[0], sys.argv[0]))\n exit()\n\n\ndef main():\n if len(sys.argv) < 3:\n _usage()\n command = sys.argv[1]\n key = sys.argv[2]\n if command.lower() == 'get':\n _get(key)\n elif command.lower() == 'submit':\n if len(sys.argv) != 5 and len(sys.argv) != 6:\n _usage()\n token = sys.argv[3]\n filename = sys.argv[4]\n source = filename\n if 
len(sys.argv) == 6:\n source = sys.argv[5]\n _submit(key, token, filename, source)\n else:\n _usage()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"lucasmedeiros/praticasPLP","sub_path":"dirlididi.py","file_name":"dirlididi.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"15895053339","text":"#!/bin/python3\n\nimport sys\nfrom datetime import date\n\ndef switch(h) :\n return {\n 0 : \"Saturday\",\n 1 : \"Sunday\",\n 2 : \"Monday\",\n 3 : \"Tuesday\",\n 4 : \"Wednesday\",\n 5 : \"Thursday\",\n 6 : \"Friday\",\n }[h]\n \ndef Zellercongruence(day, month, year) :\n if (month == 1) :\n month = 13\n year = year - 1\n \n if (month == 2) :\n month = 14\n year = year - 1\n q = day\n m = month\n k = year % 100;\n j = year // 100;\n h = q + 13 * (m + 1) // 5 + k + k // 4 + j // 4 + 5 * j\n h = h % 7\n return h\n\ndef getNextDate(y, m, d):\n if m==12:\n return [y+1, 1, 1]\n else:\n return [y, m+1, 1]\n \ndef solve(ymd1, ymd2):\n y1, m1, d1 = ymd1\n y2, m2, d2 = ymd2\n \n sundays = 0\n \n if d1 == 1:\n if Zellercongruence(d1, m1, y1) == 1:\n sundays += 1\n \n y, m, d = y1, m1, d1\n while True:\n y, m, d = getNextDate(y, m, d)\n \n #check if date over than date2\n if y>y2:\n break\n elif y==y2:\n if m>m2:\n break\n elif m==m2:\n if d>d2:\n break\n \n if Zellercongruence(d, m, y) == 1:\n sundays += 1\n \n return sundays\n \nt = int(input().strip())\nfor a0 in range(t):\n y1, m1, d1 = [int(num) for num in input().strip().split(' ')]\n y2, m2, d2 = [int(num) for num in input().strip().split(' ')]\n print(solve([y1, m1, d1], [y2, m2, d2]))\n \n ","repo_name":"anandawira/HackerrankProjects","sub_path":"Euler/Project Euler #19: Counting Sundays.py","file_name":"Project Euler #19: Counting Sundays.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35508561571","text":"import bpy\nfrom ... 
base_types.node import AnimationNode\n\nclass ObjectVisibilityInputNode(bpy.types.Node, AnimationNode):\n bl_idname = \"an_ObjectVisibilityInputNode\"\n bl_label = \"Object Visibility Input\"\n\n def create(self):\n self.inputs.new(\"an_ObjectSocket\", \"Object\", \"object\").defaultDrawType = \"PROPERTY_ONLY\"\n self.outputs.new(\"an_BooleanSocket\", \"Hide\", \"hide\")\n self.outputs.new(\"an_BooleanSocket\", \"Hide Select\", \"hideSelect\").hide = True\n self.outputs.new(\"an_BooleanSocket\", \"Hide Render\", \"hideRender\")\n self.outputs.new(\"an_BooleanSocket\", \"Show Name\", \"showName\").hide = True\n self.outputs.new(\"an_BooleanSocket\", \"Show Axis\", \"showAxis\").hide = True\n self.outputs.new(\"an_BooleanSocket\", \"Show Xray\", \"showXray\").hide = True\n\n def getExecutionCode(self):\n isLinked = self.getLinkedOutputsDict()\n if not any(isLinked.values()): return\n\n yield \"if object is not None:\"\n\n if isLinked[\"hide\"]: yield \" hide = object.hide\"\n if isLinked[\"hideSelect\"]: yield \" hideSelect = object.hide_select\"\n if isLinked[\"hideRender\"]: yield \" hideRender = object.hide_render\"\n if isLinked[\"showName\"]: yield \" showName = object.show_name\"\n if isLinked[\"showAxis\"]: yield \" showAxis = object.show_axis\"\n if isLinked[\"showXray\"]: yield \" showXray = object.show_x_ray\"\n\n yield \"else: hide = hideSelect = hideRender = showName = showAxis = showXray = None\"\n","repo_name":"chrisatbest/animation_nodes","sub_path":"nodes/object/object_visibility_input.py","file_name":"object_visibility_input.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"32900899710","text":"from utilities import set_label_topics, feature_engineering\nimport cPickle as pickle\n\n\ndef get_prediction(df, tfidf_vectorizer, model_NMF, list_columns, scaler, model):\n set_label_topics(df, col_name='Abstract',\n tfidf_vectorizer=tfidf_vectorizer,\n model_NMF=model_NMF)\n\n\n feature_engineering(df, model_NMF)\n test_df = df[list_columns]\n\n features = \\\n scaler.transform(test_df.values)\n\n return model.predict_proba(features)[:,1]\n\nif __name__ == '__main__':\n model = pickle.load(open('../data/model.pkl', 'rb'))\n tfidf_vectorizer = pickle.load(open('../data/tfidf_vectorizer.pkl', 'rb'))\n model_NMF = pickle.load(open('../data/NMF.pkl', 'rb'))\n scaler = pickle.load(open('../data/scaler.pkl', 'rb'))\n list_columns = pickle.load(open('../data/list_columns.pkl', 'rb'))\n\n df_2012_dod = pickle.load(open('../data/df_2012_dod.pkl', 'rb'))\n prediction = get_prediction(df_2012_dod,\n tfidf_vectorizer,\n model_NMF,\n list_columns,\n scaler,\n model)\n df_2012_dod['probability'] = prediction\n #df = subset_data('dod', 2012, '/Users/AnnaVMS/Desktop/test2')\n #df = pd.DataFrame(df.iloc[0:1])\n #df.pop('to_phase_II')\n #print get_prediction(df, tfidf_vectorizer, model_NMF, list_columns,\n #scaler, model)\n","repo_name":"AnnaVM/SBIR_Project","sub_path":"my_app/code_SBIR/get_prediction.py","file_name":"get_prediction.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74751597371","text":"from datetime import datetime, timedelta, timezone\nfrom celery import shared_task\nfrom users.models import User\n\n\n@shared_task\ndef check_inactive_users():\n \"\"\"\n Асинхронная задача для проверки неактивных пользователей и отключения их активных статусов, если они не входили\n в систему более 30 
дней.\n \"\"\"\n\n # Fetch all users, excluding those whose last_login is None.\n users = User.objects.all().exclude(last_login__isnull=True)\n\n for user in users:\n # Check whether more than 30 days have passed since the user last logged in.\n if datetime.now(timezone.utc) - user.last_login > timedelta(days=30):\n # Set the user's is_active flag to False, i.e. mark the account inactive.\n user.is_active = False\n\n user.save()\n","repo_name":"Lisnevskiy/LearningManagementSystemDRF","sub_path":"users/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11078290847","text":"import os\nfrom lxml import etree\nimport numpy as np\nfrom dm_control.mujoco.wrapper.mjbindings import mjlib\nfrom dm_control.utils import io as resources\nfrom robot_env.utils import get_root_path\n\n\ndef make_pendulum(n_bodies, base_file=None, path=None):\n \"\"\"Generates an xml string defining a pendulum with `n_bodies` bodies.\"\"\"\n if path is None:\n path = os.path.join(get_root_path(), \"robots/basic_models/generated_pendulum{}.xml\".format(n_bodies))\n if base_file is None:\n base_file = os.path.join(get_root_path(), \"robots/basic_models/pendulum_base.xml\")\n if n_bodies <= 0:\n raise ValueError('Invalid number of bodies: {}'.format(n_bodies))\n mjcf = etree.fromstring(resources.GetResource(base_file))\n head_body = mjcf.find('./worldbody/body')\n actuator = etree.SubElement(mjcf, 'actuator')\n\n parent = head_body\n pos = 0\n lengths = np.random.normal(0.5, 0.1, size=n_bodies)\n lengths = np.clip(lengths, 0.1, 1.0)\n masses = np.random.uniform(0.5, 2.5, size=n_bodies)\n for body_index in range(n_bodies):\n pose = \"0 0 {}\".format(pos)\n pos = lengths[body_index]\n child = make_body(body_index=body_index, pose=pose, len=pos, mass=masses[body_index])\n # site\n child.append(etree.Element('site', {'name': \"pole{}_0\".format(body_index+1),\n 'pos': \"0 0 0\",\n \"size\": \"0.01 0.01\"}))\n child.append(etree.Element('site', {'name': \"pole{}_1\".format(body_index+1),\n 'pos': \"0 0 {}\".format(pos),\n \"size\": \"0.01 0.01\"}))\n if body_index == n_bodies - 1:\n child.append(etree.Element('site', {'name': \"tip\",\n 'pos': \"0 0 {}\".format(pos),\n \"size\": \"0.01 0.01\"}))\n\n # joint\n child.append(etree.Element('joint', {'name': \"pendulum{}\".format(body_index+1),\n 'axis': \"0 1 0\",\n \"pos\": \"0 0 0\",\n 'type': \"hinge\"}))\n\n parent.append(child)\n parent = child\n actuator.append(etree.Element('motor',\n name='pendulum{}_motor'.format(body_index+1),\n joint='pendulum{}'.format(body_index+1),\n forcerange=\"-50 50\",\n ctrlrange=\"-20 20\"))\n\n model = etree.tostring(mjcf, pretty_print=True)\n with open(path, 'wb') as f:\n f.write(model)\n return model\n\n\ndef make_body(body_index, pose='0 0 0', len=1., mass=1.):\n \"\"\"Generates an xml string defining a single physical body.\"\"\"\n body = etree.Element('body', name=\"pole{}\".format(body_index+1))\n body.set('pos', pose)\n etree.SubElement(body, 'geom', {'name': \"geom_b{}\".format(body_index+1),\n 'fromto': '0 0 0 0 0 {}'.format(len),\n 'rgba': \"0.588 0.909 0.972 1\",\n 'size': \"0.045 {}\".format(0.5 * len),\n 'type': \"capsule\",\n \"mass\": \"{}\".format(mass)\n })\n\n return body\n\n\nif __name__ == \"__main__\":\n x = 
make_pendulum(3)\n","repo_name":"scxxxxxx/VBMPC","sub_path":"robot_model/mujoco/basic_models/gen_model_pendulums.py","file_name":"gen_model_pendulums.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16639368334","text":"\"\"\"\r\nRandom Number Guessing Game\r\n\"\"\"\r\nimport random\r\nfrom textwrap import dedent\r\n\r\ndef main():\r\n\r\n print(\"So I'm thinking of a number between 1 and 50\")\r\n\r\n # creates secret number\r\n secret_number = random.randrange(1, 50)\r\n\r\n # initalizes our attempt count\r\n user_attempt_count = 1\r\n\r\n # sets user guess to something the secret number can't be\r\n user_guess = 0\r\n\r\n # loops until user_guess == secret_number,\r\n # or user_attempt_count over allowed attempts\r\n\r\n while user_guess != secret_number and user_attempt_count < 10:\r\n print(\"This is your guess number \", user_attempt_count)\r\n \r\n try:\r\n user_input_text = input(\"Type any number between 1 and 50: \")\r\n user_guess = int(user_input_text)\r\n # tells user if too high or low, or got it\r\n if user_guess > secret_number:\r\n print(\"That's too high.\")\r\n elif user_guess < secret_number:\r\n print(\"That's too low.\")\r\n else:\r\n print(\"Yay! That's it!\")\r\n except ValueError:\r\n print(\"Type in an actual number\")\r\n\r\n\r\n # Increment the attempt count\r\n user_attempt_count += 1\r\n\r\n if user_guess != secret_number:\r\n print(dedent(\r\n \"\"\"\r\n Just so you're not wondering for the rest\r\n of your life what my number was, it was:\r\n \"\"\"), secret_number)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Cate-Michalska/Horse-Rider-Game","sub_path":"chapter8_guessinggame.py","file_name":"chapter8_guessinggame.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33626747788","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 24 20:41:02 2020\r\n\r\n@author: uha18\r\n\"\"\"\r\n\r\nlist1 = [input()]\r\nlist2 = [input()]\r\nfor i in list1:\r\n for j in list2:\r\n if i==j:\r\n print(i,j)\r\n continue\r\n ","repo_name":"uhapko/pyth1uni","sub_path":"instersection Hapko.py","file_name":"instersection Hapko.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"8625123966","text":"from flask import Flask,request,jsonify\n\napp=Flask(__name__)\n\n@app.route('/')\ndef home():\n return 'Welcome to flask'\n\n@app.route('/cal',methods=['GET','POST'])\ndef math_operator():\n operatation=request.json['operation']\n number1=request.json['number1']\n number2=request.json['number2']\n \n if operatation=='add':\n result=number1+number2\n elif operatation=='sub':\n result=number1-number2\n elif operatation=='mul':\n result=number1*number2\n elif operatation=='div':\n result=number1/number2\n else:\n return jsonify('Invalid operartion type, it should be in (add,sub,mul,div)')\n\n return jsonify(f'result is: {result}')\n\n\nif __name__=='__main__':\n app.run(port=8080,debug=True)\n","repo_name":"ykudale2012/python_rest_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25445589054","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 10 11:19:49 2021\n\n@author: Asadullo\n\"\"\"\n\nclass 
mashina ():\n def __delattr__(self):\n print(\"malumot o'chirildi\")\n def __init__(self ,model,narx,otkuch):#class msthods(konsturcter)\n self .a=model\n self.b=narx\n self.c=otkuch\n def printmashina(self):\n print(\"\"\"\n Mashina turi: {}\n Mashina naxi: {}\n Mashina kuchi: {}\n \"\"\".format(self.a,self.b,self.c ))\nm1=mashina('Mazda',100,2000)\nm1.printmashina()\ndel(m1)\nm1=mashina(\"lamborjini\",100,3000)\n","repo_name":"Asadullo-aka/python_darslar","sub_path":"python_9-dars/class2.py","file_name":"class2.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70694648572","text":"import numpy as np\nimport torch\n#from get_data.data_loader2 import data_loader2\nfrom get_data.data_loader_transform import data_loader_transform\n#from get_data.DataLoader_Cox import DataLoader_Cox\nfrom go_models.train import train\nfrom go_models.evaluate import evaluate\nfrom go_models.get_cnn_model import get_cnn_model\nfrom go_models.get_cox_model import get_cox_model\n\n\nif __name__ == '__main__':\n\n data_dir = '/mnt/aertslab/DATA/HeadNeck/HN_PETSEG/curated'\n proj_dir = '/mnt/aertslab/USERS/Zezhong/HN_OUTCOME'\n cnn_name = 'resnet'\n model_depth = 101 # [10, 18, 34, 50, 101, 152, 200]\n n_classes = 20\n in_channels = 1\n batch_size = 8\n epochs = 1\n lr = 0.001\n num_durations = 20\n _cox_model = 'LogisticHazard'\n cox_model = 'LogisticHazard'\n load_model = 'model'\n score_type = '3year_survival' #'median'\n evaluate_only = False\n augmentation = True\n \n np.random.seed(1234)\n _ = torch.manual_seed(1234)\n\n if not augmentation:\n dl_train, dl_tune, dl_val = data_loader2(\n proj_dir=proj_dir,\n batch_size=batch_size,\n _cox_model=_cox_model,\n num_durations=num_durations)\n else:\n dl_train, dl_tune, dl_val, dl_test = data_loader_transform(\n proj_dir, \n batch_size=batch_size, \n _cox_model=_cox_model, \n num_durations=num_durations)\n \n for cnn_name in ['resnet18', 'resnet34', 'resnet50', 'resnet152', 'resnet200']: \n cnn_model = get_cnn_model(\n cnn_name=cnn_name, \n n_classes=n_classes, \n in_channels=in_channels)\n cox_model = get_cox_model(\n proj_dir=proj_dir,\n cnn_model=cnn_model,\n _cox_model=_cox_model,\n lr=lr)\n for epochs in [20]:\n for lr in [0.01, 0.0001, 0.00001, 0.1]:\n train(\n proj_dir=proj_dir,\n cox_model=cox_model,\n epochs=epochs,\n dl_train=dl_train,\n dl_tune=dl_tune,\n dl_val=dl_val,\n cnn_name=cnn_name,\n lr=lr)\n\n evaluate(\n proj_dir=proj_dir,\n cox_model=cox_model,\n load_model=load_model,\n dl_val=dl_val,\n score_type=score_type,\n cnn_name=cnn_name,\n epochs=epochs,\n lr=lr)\n\n","repo_name":"xmuyzz/HeadNeckCancer-Outcome","sub_path":"archive/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"43624487941","text":"'''\n功能:执行到主程序末尾,解释器自动退出,但是如果需要中途退出程序,可以调用sys.exit函数,\n带有一个可选的整数参数返回给调用它的程序,表示你可以在主程序中捕获对sys.exit的调用。(0是正常退出,其他为异常)\n'''\n\nimport sys\n\ndef exitfunc(value):\n print(value)\n sys.exit(0)\n \nprint('hello')\n \ntry:\n sys.exit(1)\nexcept SystemExit as value:\n exitfunc(value)\n\nprint('come?')\n\n# hello\n# 1 #come?未打印\n\n# =================================================================================================================\n'''\nimport sys\nprint('The commend line argument is ')\nfor i in sys.argv:\n print(i)\n \nprint(\"\\n\\nThe PYTHONPATH is \",sys.path,'\\n')\n\n'''\n'''\nE:\\Progect\\tmp>python3 
usesys.py we are argument\nThe commend line argument is\nusesys.py\nwe\nare\nargument\n\n\nThe PYTHONPATH is ['E:\\\\Progect\\\\tmp', 'C:\\\\Python35\\\\python35.zip', 'C:\\\\Python35\\\\DLLs', 'C:\\\\Python35\\\\lib', 'C:\\\\Python35', 'C:\\\\Python35\\\\lib\\\\site-packages']\n'''\n# ========================================================================================================================","repo_name":"renchao7060/studynotebook","sub_path":"基础学习/p5_sys模块使用.py","file_name":"p5_sys模块使用.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17610829419","text":"import re\nfrom nltk.corpus import wordnet\nfrom nltk.corpus import stopwords\nfrom torch.utils import data\n\n\nclass CustomDataset(data.Dataset):\n def __init__(self, data):\n \"\"\"\n initialize attributes of the CustomDataset class\n @param data: data\n \"\"\"\n self.data = data\n\n def __getitem__(self, index):\n \"\"\"\n @param index: index of the (tweet, label) tuple to return\n @return: single (tweet, label) tuple\n \"\"\"\n tweet = self.data.iloc[index]['tweet']\n label = self.data.iloc[index]['label']\n return tweet, label\n\n def __len__(self):\n \"\"\"\n @return: number of samples in the data\n \"\"\"\n return self.data.shape[0]\n\n\nclass PreProcessing():\n def __init__(self):\n \"\"\"\n initialize attributes of the PreProcessing class\n \"\"\"\n self.regex = re.compile(r'(\\w*)(\\w)\\2(\\w*)')\n self.repl = r'\\1\\2\\3'\n self.stop_words = stopwords.words(\"english\")\n\n def clean(self, tweet):\n \"\"\"\n @param tweet: single tweet as a string\n @return: cleaned tweet as a string\n \"\"\"\n tweet = self.basic_cleaning_3(tweet)\n tweet = tweet.split(' ')\n tweet = [self.remove_repeated_letters(word) for word in tweet]\n tweet = ' '.join(tweet)\n return tweet\n\n def basic_cleaning(self, tweet):\n \"\"\"\n @param tweet: single tweet as a string\n @return: list of words containing only some characters\n \"\"\"\n tweet = tweet.replace('', '')\n tweet = tweet.replace('', '')\n tweet = ''.join(c if (c.isalpha() or c.isspace()) else '' for c in tweet)\n tweet = ' '.join(tweet.split()).split(' ')\n tweet = list(filter(lambda x: x not in self.stop_words, tweet))\n return tweet\n\n def basic_cleaning_2(self, tweet):\n \"\"\"\n @param tweet: single tweet as a sting\n @return: single cleaned tweet as a string\n \"\"\"\n tweet = ''.join('' if c == \"'\" else c for c in tweet)\n tweet = ''.join(' ' if c == '#' else c for c in tweet)\n tweet = ''.join(' ' if c.isnumeric() else c for c in tweet)\n tweet = ''.join(c if (c.isalpha() or c.isspace() or c in ['<', '>']) else ' ' + c + ' ' for c in tweet)\n tweet = ' '.join(tweet.split())\n return tweet\n\n def basic_cleaning_3(self, tweet):\n \"\"\"\n @param tweet: single tweet as a string\n @return: single cleaned tweet\n \"\"\"\n tweet = ''.join('' if c == \"'\" else c for c in tweet)\n tweet = ''.join(c if c.isalpha() else ' ' + c + ' ' for c in tweet)\n tweet = ' '.join(tweet.split())\n return tweet\n\n def remove_repeated_letters(self, word):\n \"\"\"\n recursive method to remove repeated letters from a given word based on a dictionary.\n original implementation: https://www.youtube.com/user/RockyDeRaze\n @param word: word that might contains unnecessary repeated letters\n @return: cleaned word\n \"\"\"\n if wordnet.synsets(word):\n return word\n result = self.regex.sub(self.repl, word)\n if word == result:\n return result\n else:\n return 
self.remove_repeated_letters(result)\n\n","repo_name":"AleksandarHr/Twitter_Sentiment_Analysis","sub_path":"source/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28475949244","text":"# Print the last letter of the word\nword = 'Архангельск'\nprint(word[-1])\n\n\n# Print the number of letters \"а\" in the word (case-insensitive)\nword = 'Архангельск'\nprint(word.lower().count('а'))\n\n\n# Print the number of vowels in the word\n\nfor test in ['Архангельск']:\n print(len(list(filter(lambda x: x in 'АЕИОУЭЮЫЯауоыиэяюе', test))))\n\n\n\n# Print the number of words in the sentence\n\na = 'Мы приехали в гости'\nprint(len(a.split(' ')))\n\n\n# Print the first letter of each word on a separate line\n\nfor w in a.split():\n print(w[0])\n\n\n# Print the average word length.\ndef main():\n\n sentence = \"Мы приехали в гости\"\n SumAccum = 0\n for ch in sentence.split():\n character = len(ch)\n SumAccum = SumAccum + character\n\n average = (SumAccum) / (len(sentence.split()))\n return average\nprint(main())","repo_name":"MrStarkOff/practice_1","sub_path":"string_challenges.py","file_name":"string_challenges.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74450794812","text":"\"\"\"\nThis is a script for all our visualization needs\n\n\"\"\"\nimport torch\nimport seaborn as sns\nimport scanpy as sc\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nimport anndata\n\nfrom .helper_functions import ensure_dir\n\ndef save_cm(true, preds, name, reduce_cm=True, label_names=None, sort_labels=False):\n \"\"\"\n\n Parameters\n ----------\n true : list like (np.array)\n true labels for a cm\n preds : list like (np.array)\n predictions for a cm\n name : str\n name of figure (without .png ext)\n reduce_cm : bool\n toggle to get rid of 0/NA cols and rows\n label_names : list\n if the true and preds are ints, provide a list of names that map the ints to labels\n sort_labels : bool\n if the true and preds args are lists of names, choose to sort them by alphabetical order\n\n Returns\n -------\n\n \"\"\"\n if label_names is not None:\n cm = confusion_matrix(true, preds, normalize='true')\n labels = label_names\n else:\n if sort_labels:\n labels = np.unique(np.concatenate([true, preds]))\n else:\n labels = pd.factorize(np.concatenate([true, preds]))[1]\n cm = confusion_matrix(true, preds, normalize='true', labels=labels)\n cm_norm_df = pd.DataFrame(cm, index=labels, columns=labels)\n if reduce_cm:\n cm_norm_df = cm_norm_df.dropna(axis=0, how='all')\n cm_norm_df = cm_norm_df[~(cm_norm_df == 0).all(axis=1)]\n cm_norm_df = cm_norm_df.T[~(cm_norm_df == 0).all(axis=0)].T\n plt.figure(figsize=(cm_norm_df.shape[1], cm_norm_df.shape[0]))\n ax = sns.heatmap(cm_norm_df, cmap=\"YlGnBu\", vmin=0, vmax=1,\n linewidths=.5, annot=True, fmt='4.2f', square=True)\n ax.figure.tight_layout()\n ensure_dir(\"./cm_figs\")\n save_name = f\"./cm_figs/cm_{name}.png\"\n plt.savefig(save_name)\n\n\ndef plot_embeddings(model, data_loaders, device, fig_name):\n empty_zx = False\n patients = data_loaders['sup'].dataset.domains\n cell_types = data_loaders['sup'].dataset.labels\n\n # trying to plot training data\n actuals_d, actuals_y, zy_, zd_, zx_ = [], [], [], [], []\n with torch.no_grad():\n # Train\n # patients_train = np.delete(patients, test_patient)\n i = 0\n for (xs, ys, ds) in 
data_loaders['sup']:\n i = i + 1\n # To device\n xs, ys, ds = xs.to(device), np.array(ys), np.array(ds)\n # use classification function to compute all predictions for each batch\n zy_loc, zy_scale = model.qzy(xs)\n zd_loc, zd_scale = model.qzd(xs)\n if not empty_zx:\n zx_loc, zx_scale = model.qzx(xs)\n zx_.append(np.array(zx_loc.cpu()))\n zy_.append(np.array(zy_loc.cpu()))\n zd_.append(np.array(zd_loc.cpu()))\n # getting integer labels here\n actuals_d.append(np.argmax(ds, axis=1))\n actuals_y.append(np.argmax(ys, axis=1))\n # if i == 50:\n if i == len(data_loaders['sup']):\n break\n zy = np.vstack(zy_)\n zd = np.vstack(zd_)\n if not empty_zx:\n zx = np.vstack(zx_)\n labels_y = np.hstack(actuals_y)\n labels_d = np.hstack(actuals_d)\n if not empty_zx:\n zy_adata, zd_adata, zx_adata = [anndata.AnnData(_) for _ in [zy, zd, zx]]\n adatas = [zy_adata, zd_adata, zx_adata]\n else:\n zy_adata, zd_adata = [anndata.AnnData(_) for _ in [zy, zd]]\n adatas = [zy_adata, zd_adata]\n name = ['zy', 'zd', 'zx']\n train_labels = patients[labels_d]\n zy_adata.obs['batch'] = train_labels\n zy_adata.obs['cell_type'] = cell_types[labels_y]\n zd_adata.obs['batch'] = train_labels\n zd_adata.obs['cell_type'] = cell_types[labels_y]\n train_cell_type_encoding = zy_adata\n train_batch_encoding = zd_adata\n for i, _ in enumerate(adatas):\n _.obs['batch'] = patients[labels_d]\n _.obs['cell_type'] = cell_types[labels_y]\n save_name = f\"_{fig_name}_train_set_{name[i]}.png\"\n sc.pp.neighbors(_, use_rep=\"X\", n_neighbors=15)\n sc.tl.umap(_, min_dist=.3)\n sc.pl.umap(_, color=['batch', 'cell_type'], save=save_name)\n actuals_d, actuals_y, zy_, zd_, zx_ = [], [], [], [], []\n with torch.no_grad():\n # test\n # patients_train = np.delete(patients, test_patient)\n i = 0\n for (xs, ys, ds) in data_loaders['unsup']:\n i = i + 1\n # To device\n xs, ys, ds = xs.to(device), np.array(ys), np.array(ds)\n # use classification function to compute all predictions for each batch\n zy_loc, zy_scale = model.qzy(xs)\n zd_loc, zd_scale = model.qzd(xs)\n if not empty_zx:\n zx_loc, zx_scale = model.qzx(xs)\n zx_.append(np.array(zx_loc.cpu()))\n zy_.append(np.array(zy_loc.cpu()))\n zd_.append(np.array(zd_loc.cpu()))\n # getting integer labels here\n actuals_d.append(np.argmax(ds, axis=1))\n actuals_y.append(np.argmax(ys, axis=1))\n # if i == 50:\n if i == len(data_loaders['unsup']):\n break\n zy = np.vstack(zy_)\n zd = np.vstack(zd_)\n if not empty_zx:\n zx = np.vstack(zx_)\n labels_y = np.hstack(actuals_y)\n labels_d = np.hstack(actuals_d)\n if not empty_zx:\n zy_adata, zd_adata, zx_adata = [anndata.AnnData(_) for _ in [zy, zd, zx]]\n adatas = [zy_adata, zd_adata, zx_adata]\n else:\n zy_adata, zd_adata = [anndata.AnnData(_) for _ in [zy, zd]]\n adatas = [zy_adata, zd_adata]\n name = ['zy', 'zd', 'zx']\n test_labels = patients[labels_d]\n zy_adata.obs['batch'] = test_labels\n zy_adata.obs['cell_type'] = cell_types[labels_y]\n zd_adata.obs['batch'] = test_labels\n zd_adata.obs['cell_type'] = cell_types[labels_y]\n test_cell_type_encoding = zy_adata\n test_batch_encoding = zd_adata\n for i, _ in enumerate(adatas):\n _.obs['batch'] = patients[labels_d]\n _.obs['cell_type'] = cell_types[labels_y]\n save_name = f\"_{fig_name}_test_set_{name[i]}.png\"\n sc.pp.neighbors(_, use_rep=\"X\", n_neighbors=15)\n sc.tl.umap(_, min_dist=.3)\n sc.pl.umap(_, color=['batch', 'cell_type'], save=save_name)\n full_zy = train_cell_type_encoding.concatenate(test_cell_type_encoding)\n full_zd = train_batch_encoding.concatenate(test_batch_encoding)\n all_patients = 
np.hstack([train_labels, test_labels])\n full_zy.obs['batch'] = all_patients\n full_zd.obs['batch'] = all_patients\n sc.pp.neighbors(full_zy, n_neighbors=15)\n sc.pp.neighbors(full_zd, n_neighbors=15)\n sc.tl.umap(full_zy, min_dist=.3)\n sc.tl.umap(full_zd, min_dist=.3)\n sc.pl.umap(full_zy, color=['batch', 'cell_type'], save=f\"_{fig_name}_train+test_zy.png\")\n sc.pl.umap(full_zd, color=['batch', 'cell_type'], save=f\"_{fig_name}_train+test_zd.png\")","repo_name":"bplee/scBatch_project","sub_path":"scBatch/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":7157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21666978205","text":"\"\"\"\r\n94991277\r\n\r\n-- ПРИНЦИП РАБОТЫ --\r\n\r\nМы создаем двумерный массив dp, где dp[i][j]\r\nпредставляет минимальное расстояние между подстроками s1[:i] и s2[:j].\r\nЗатем мы инициализируем первую строку и первый столбец (расстояние от пустой строки к каждой подстроке),\r\n а затем заполняем остальные ячейки массива, используя формулу для расчета расстояния Левенштейна.\r\n\r\n -- ДОКАЗАТЕЛЬСТВО КОРРЕКТНОСТИ --\r\n\r\nМы создаем двумерный массив dp, где dp[i][j]\r\nбудет представлять минимальное расстояние между подстроками s1[:i] и s2[:j].\r\nМы определяем размеры массива m и n как длины строк s1 и s2,\r\nи затем создаем массив dp размером (m+1) x (n+1) и инициализируем его нулями.\r\nДалее инициализируем первую строку и первый столбец массива dp.\r\nПервая строка представляет расстояние от пустой строки s1[:i] к подстроке s2[:j],\r\nа первый столбец - расстояние от подстроки s1[:i] к пустой строке s2[:j].\r\nЭти исходные значения задаются от 0 до длин строк s1 и s2.\r\n\r\nПотом мы заполняем остальные ячейки массива dp с помощью вложенных циклов.\r\nМы вычисляем расстояние Левенштейна для каждой пары символов из строк s1 и s2.\r\nЕсли символы в текущих позициях совпадают (s1[i-1] == s2[j-1]),\r\n cost устанавливается в 0, иначе в 1.\r\nЗатем мы вычисляем три возможных пути для заполнения ячейки dp[i][j]:\r\n\r\ndp[i-1][j] + 1 - расстояние от s1[:i-1] к s2[:j], учитывая операцию удаления.\r\ndp[i][j-1] + 1 - расстояние от s1[:i] к s2[:j-1], учитывая операцию вставки.\r\ndp[i-1][j-1] + cost - расстояние от s1[:i-1] к s2[:j-1], учитывая операцию замены (с учетом cost).\r\nЗатем мы выбираем минимальное из этих трех значений и устанавливаем его как значение dp[i][j].\r\n\r\nВ конце мы возвращаем значение dp[m][n]\r\n\r\n-- ВРЕМЕННАЯ СЛОЖНОСТЬ --\r\n\r\nВременная сложность алгоритма Левенштейна,\r\nреализованного при помощи динамического программирования,\r\n O(m * n), где m и n - длины строк s1 и s2\r\n\r\n-- ПРОСТРАНСТВЕННАЯ СЛОЖНОСТЬ --\r\n\r\nПространственная сложность алгоритма также составляет O(m * n),\r\n так как мы используем двумерный массив dp\r\n\r\n\"\"\"\r\n\r\n\r\ndef levenshtein_distance(s1, s2):\r\n\r\n m, n = len(s1), len(s2)\r\n dp = [[0] * (n + 1) for _ in range(m + 1)]\r\n\r\n for i in range(m + 1):\r\n dp[i][0] = i\r\n for j in range(n + 1):\r\n dp[0][j] = j\r\n\r\n for i in range(1, m + 1):\r\n for j in range(1, n + 1):\r\n cost = 0 if s1[i - 1] == s2[j - 1] else 1\r\n dp[i][j] = min(\r\n dp[i - 1][j] + 1, # Удаление\r\n dp[i][j - 1] + 1, # Вставка\r\n dp[i - 1][j - 1] + cost # Замена\r\n )\r\n\r\n return dp[m][n]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n s1 = input()\r\n s2 = input()\r\n distance = levenshtein_distance(s1, s2)\r\n 
print(distance)\r\n","repo_name":"Donskoy-Mikhail/Yandex-algorithms","sub_path":"sprints/sprint_7/final_A.py","file_name":"final_A.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71048967291","text":"n=int(input())\ns=input()\n\ndef nc2(n):\n if n<2:\n return 0\n return n*(n-1)//2\n\nret=0\nnow=None\ncnt=0\ns+=\"*\"\nfor i in range(n+1):\n if now is None:\n now=s[i]\n cnt=1\n elif now==s[i]:\n cnt+=1\n else:\n now=s[i]\n ret+=nc2(cnt)\n cnt=1\nprint(ret)","repo_name":"ayanamizuta/cpro","sub_path":"atcoder/arc/arc130/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25531978406","text":"#!/usr/bin/env python3\n# coding=utf-8\n#\n# File: EmBCI/embci/tests/test_io.py\n# Authors: Hank \n# Create: 2019-02-06 01:26:32\n#\n# TODO:\n# test Socket***Reader, Socket***Server\n# test LSLCommader, LSLReader\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport time\nimport warnings\nimport threading\n\n# requirements.txt: testing: pytest\n# requirements.txt: drivers: pyserial\n# requirements.txt: data: pylsl\nimport pytest\nimport serial\nimport pylsl\n\n\n# =============================================================================\n# functions\n#\nfrom embci.configs import DIR_DATA\nfrom embci.io import save_trials, load_data, create_data_dict, find_data_info\n\n\ndef test_save_trials(username, random_data, clean_userdir):\n clean_userdir()\n data_dict = create_data_dict(random_data, 'testing', 500)\n assert 'sample_rate' in data_dict\n save_trials(username, data_dict, suffix='mat')\n assert os.path.exists(os.path.join(DIR_DATA, username, 'testing-0.mat'))\n assert os.path.exists(os.path.join(DIR_DATA, username, 'testing-1.mat'))\n\n\ndef test_load_data(username, random_data, clean_userdir):\n data, label = load_data(username)\n assert 'testing' == label[0] == label[1]\n assert (random_data == data).all()\n clean_userdir()\n\n\ndef test_find_data_info(clean_userdir, username):\n label_dict, name_dict, summary = find_data_info(username)\n assert label_dict == {}\n assert name_dict == {}\n # DO NOT test outputs, because it may be changed in the future.\n # assert 'There are 0 actions with 0 data recorded' in summary\n clean_userdir()\n\n\n# =============================================================================\n# Readers\n#\nfrom embci.io import FakeDataGenerator as Reader\nfrom embci.utils import find_pylsl_outlets\n\n\n@pytest.fixture(scope='module')\ndef reader():\n reader = Reader(sample_rate=500, sample_time=2,\n num_channel=8, broadcast=True)\n reader.start()\n yield reader\n reader.close()\n\n\ndef test_reader_status(reader):\n assert reader.status == 'started'\n assert reader.is_streaming()\n\n\ndef test_stream_control(reader):\n reader.pause()\n assert reader.status == 'paused'\n assert reader.is_streaming() is False\n reader.resume()\n assert reader.status == 'resumed'\n assert reader.is_streaming() is True\n\n\ndef test_reader_data(reader):\n assert reader.data_channel.shape == (8,)\n assert reader.data_frame.shape == (8, 1000)\n\n\ndef test_reader_pylsl(reader):\n info = find_pylsl_outlets(source_id=reader.name)\n assert isinstance(info, pylsl.StreamInfo)\n\n\ndef test_set_sample_rate(reader):\n reader.pause()\n assert reader.set_sample_rate(250)\n reader.restart()\n 
time.sleep(3) # reader need some time to stablize the sample_rate\n assert abs(reader.realtime_samplerate - 250) < 100\n\n\n# =============================================================================\n# Commanders\n#\nfrom embci.io import SerialCommander\nfrom embci.utils import virtual_serial\n\n\n@pytest.fixture(scope='module')\ndef obj(request):\n flag_stop, port1, port2 = virtual_serial(verbose=False)\n cmder = SerialCommander({\n '_desc': 'command dict used for testing commander',\n 'action1': ('asdf', 0),\n 'action3': ('this is action3', 1),\n 'nodelay': ('nodelay', ),\n })\n cmder.start(port1, 115200)\n slave = serial.Serial(port2, 115200)\n\n request.addfinalizer(flag_stop.set)\n request.addfinalizer(cmder.close)\n request.addfinalizer(slave.close)\n\n class objs:\n serial = slave\n commander = cmder\n return objs\n\n\ndef test_get_command(obj):\n with warnings.catch_warnings():\n assert obj.commander.get_command('foo') is None\n\n\ndef test_send_command(obj):\n assert obj.commander.send('action1') == 'asdf'\n time.sleep(0.5)\n assert obj.serial.read_all() == b'asdf'\n\n\ndef test_send_wait(obj):\n '''Method `send` will acquire lock and wait for specific seconds.'''\n # send command twice\n threading.Thread(\n target=obj.commander.send, args=('action3',)).start()\n threading.Thread(\n target=obj.commander.send, args=('action3',)).start()\n # receive first command\n time.sleep(0.5)\n assert obj.serial.read_all() == b'this is action3'\n # receive nothing, commander is still waiting\n assert obj.serial.read_all() == b''\n time.sleep(1)\n # receive second command\n assert obj.serial.read_all() == b'this is action3'\n\n\ndef test_write_method(obj):\n try:\n print('nodelay', file=obj.commander)\n except Exception as e:\n assert isinstance(e, IndexError)\n\n# THE END\n","repo_name":"hankso/EmBCI","sub_path":"tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"3371454411","text":"from board import Board\nfrom btree import LinkedBinaryTree\nimport copy\n\n\nclass TicTacToeTree:\n '''\n Class to represent TicTacToe game with computer\n '''\n def __init__(self, root):\n '''\n Initializes variables including root of a future generated tree\n\n :param root: Board\n '''\n self.root = root\n self.tree = LinkedBinaryTree(root)\n\n def generate_tree(self, root=None):\n '''\n Generates a binary search tree of possible choices\n\n :param root: Board\n :return: None\n '''\n if root is None: root = self.tree\n else: root = root\n if root.key.has_winner():\n return 0\n right_board = copy.deepcopy(root.key)\n left_board = copy.deepcopy(root.key)\n right_board.make_random_move()\n left_board.make_random_move()\n\n root.insert_left(left_board)\n root.insert_right(right_board)\n\n self.generate_tree(root.left_child)\n self.generate_tree(root.right_child)\n return\n\n def count_winning_amount(self):\n '''\n Counts amount of winning combinations on both sides of a tree\n\n :return: (int, int)\n '''\n my_tree = self.tree\n right_tree = my_tree.right_child\n left_tree = my_tree.left_child\n right_counter = set()\n left_counter = set()\n\n nought_right = set()\n nought_left = set()\n\n right_nodes = right_tree.get_leaves_list()\n left_nodes = left_tree.get_leaves_list()\n\n for right in right_nodes:\n if right.has_winner() == right.CROSS:\n right_counter.add(str(right))\n elif right.has_winner() == right.NOUGHT:\n nought_right.add(str(right))\n\n for left in 
left_nodes:\n if left.has_winner() == left.CROSS:\n left_counter.add(str(left))\n elif left.has_winner() == left.NOUGHT:\n nought_left.add(str(left))\n\n return len(right_counter) - len(nought_right), len(left_counter) - len(nought_left)\n\n\ndef main():\n '''\n Main function to simulate a Tic Tac Tie game\n\n :return: None\n '''\n transform = {1: 'NOUGHT', -1: 'CROSS', 2: 'DRAW'}\n init_board = Board()\n init_board.make_random_move()\n while True:\n if init_board.has_winner():\n print(init_board)\n print(f'And the winner is: {transform[init_board.has_winner()]}')\n break\n\n print(init_board)\n while True:\n user_choice = input('Enter your choice[1 1]: ')\n try:\n splitted = [int(i) for i in user_choice.split()]\n assert 0 <= splitted[0] <= 2\n assert 0 <= splitted[1] <= 2\n assert init_board.cells[splitted[0]][splitted[1]] == 0\n break\n\n except:\n print('Wrong choice')\n continue\n\n init_board.make_move(splitted)\n\n tic = TicTacToeTree(init_board)\n tic.generate_tree()\n tree_winning_combinations = tic.count_winning_amount()\n right_subtree = tree_winning_combinations[0]\n left_subtree = tree_winning_combinations[1]\n init_board = tic.tree.left_child.key\n if right_subtree >= left_subtree: init_board = tic.tree.right_child.key\n\n\nif __name__ == '__main__':\n main()","repo_name":"Sofiia2001/TicTacToe_BST","sub_path":"tictactoe/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15595088883","text":"from myhdl import *\nimport random\n\n@block\ndef Tdecode(opCode, RType, Load, Store, Branch, IType, Jalr, Jal, Lui,Aui):\n\n @always_comb\n def run():\n\n RType.next = intbv(0)[1:]\n Load.next = intbv(0)[1:]\n Store.next = intbv(0)[1:]\n Branch.next = intbv(0)[1:]\n IType.next = intbv(0)[1:]\n Jalr.next = intbv(0)[1:]\n Jal.next = intbv(0)[1:]\n Lui.next = intbv(0)[1:]\n Aui.next = intbv(0)[1:]\n \n if opCode == 51:\n RType.next = 1\n elif opCode == 3:\n Load.next = 1\n elif opCode == 35:\n Store.next = 1\n elif opCode == 99:\n Branch.next = 1\n elif opCode == 19:\n IType.next = 1\n elif opCode == 103:\n Jalr.next = 1\n elif opCode == 111:\n Jal.next = 1\n elif opCode == 55:\n Lui.next = 1\n elif opCode == 23:\n Aui.next = 1\n \n return run\n\nopCodes = [51,3,35,99,19,103,111,55,23]\nR, L, S, B, I, Jr, J, Li,Ai = [Signal(bool(0)) for i in range(9)]\nopCode = Signal(intbv(0, min=0, max=112))\n\ntd = Tdecode(opCode,R, L, S, B, I, Jr, J, Li,Ai)\ntd.convert('Verilog')\n\n# @block\n# def Tdtest():\n\n# R, L, S, B, I, Jr, J, Li,Ai = [Signal(bool(0)) for i in range(9)]\n# opCode = Signal(intbv(0, min=0, max=112))\n\n# td = Tdecode(opCode,R, L, S, B, I, Jr, J, Li,Ai)\n# td.convert('Verilog')\n# @instance\n# def test():\n# fmt = \"{0:6} | {1:5} | {2:5} | {3:5} | {4:6} | {5:5} | {6:5} | {7:5} | {8:5} | {9:5} \"\n# print(fmt.format(\"OpCode\",\"RType\", \"Load\", \"Store\", \"Branch\", \"IType\", \"Jalr\",\"Jal\", \"Lui\",\"AuiPC\"))\n# for i in range(10):\n# opCode.next = random.choice(opCodes)\n# yield delay(10)\n# print(fmt.format(str(int(opCode)), str(int(R)), str(int(L)), str(int(S)), str(int(B)), str(int(I)), str(int(Jr)), str(int(J)), str(int(Li)), str(int(Ai))))\n# # raise StopSimulation\n \n# return td, test\n\n# tb = Tdtest()\n# 
tb.run_sim()","repo_name":"M-Ash2209/Python-MyHDL-Learning-Journey","sub_path":"Core/Main/Tdecode.py","file_name":"Tdecode.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36870619076","text":"from mcpi.minecraft import Minecraft\nmc=Minecraft.create()\nimport time\n\nblocks=[20,56,57,20,56,57,20,56,57]\nbarblock=22\n\nx,y,z=mc.player.getPos()\nstarty=y\nfront=5\ncount=0\nfor block in blocks: \n mc.setBlock(x+front,y,z,block[count])\n y+=1\n count+=1\n time.sleep(1)\n \nmc.postToChat(\"finish\")\ntime.sleep(1)\nblocks.insert(2,barblock)\n\ny=starty\nx+=front\nfor block in blocks: \n mc.setBlock(x+front,y,z,block)\n y+=1\n time.sleep(1)\n \nmc.postToChat(\"finished\")","repo_name":"newsteinking/workspace_backup","sub_path":"workspace_K/workspace_jihyung/python/python_minecraft/Learn to Program with Minecraft Code실습/chapter9-lists/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19012809118","text":"n=100000\nlist=[]\n\nprime = [True for i in range(n+1)]\np = 2\nwhile (p * p <= n):\n if (prime[p] == True):\n for i in range(p * 2, n+1, p):\n prime[i] = False\n p += 1\n \n \nfor p in range(2, n+1):\n if prime[p]:\n list.append(p)\n \n#print(list)\n\nlength=len(list)\ndef primeFactorization(n):\n f=1\n for i in range(length):\n p=list[i]\n c=0\n while n%p==0:\n c=c+1\n n=n//p\n f=f*(c+1)\n if n==1:\n return f \n \n \ni=1\nwhile True:\n n=(i*(i+1))//2\n if primeFactorization(n)>500:\n print(n)\n break\n\n i+=1\n \n \n","repo_name":"ChanchalKumarMaji/Project-Euler","sub_path":"Project_Euler_12.py","file_name":"Project_Euler_12.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9303942927","text":"import time\n\ndata = open('input/input.txt', 'r').readlines()\nranges = {}\nfor line in data:\n\tranges[int(line.split(': ')[0])] = int(line.split(': ')[1])\n\ndef captured_layers(delay):\n\tcaptured = []\n\tfor depth in ranges:\n\t\tif (delay + depth) % (ranges[depth] * 2 - 2) == 0:\n\t\t\tcaptured.append(depth)\n\treturn captured\n\ndef captured_layers_early(delay):\n\tcaptured = []\n\tfor depth in ranges:\n\t\tif (delay + depth) % (ranges[depth] * 2 - 2) == 0:\n\t\t\tcaptured.append(depth)\n\t\t\treturn captured\n\treturn captured\n\n#Part One\ncaptured = captured_layers(0)\nscore = 0 \nfor depth in captured:\n\tscore += depth * ranges[depth]\n\n#Part Two\ndelay = 0\nSTART = time.time()\nwhile len(captured_layers(delay)) != 0:\n\tprint(delay)\n\tdelay += 1\n\nprint('Time: {}'.format(time.time() - START))\nprint(delay)\n\n","repo_name":"r-tran/advent-of-code","sub_path":"aoc-2017/day13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3377445044","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, EmailField ,SubmitField, TextAreaField,DecimalField, PasswordField,IntegerField, BooleanField, SelectField, DateField, ValidationError\nfrom wtforms.validators import DataRequired, Email, EqualTo, Length, NumberRange, InputRequired \nfrom datetime import datetime\nfrom helpers import inside, inside2\nimport datetime as dt\n\n\n\nclass ClientSignUpForm(FlaskForm):\n company_name = StringField(\"Name\", 
validators=[DataRequired()])\n email = EmailField(\"Email\", validators=[DataRequired(), Email()])\n password = PasswordField(\"Password\", validators=[DataRequired(), Length(min=8, max=20)])\n confirm_password = PasswordField(\"Confirm Password\", \n \tvalidators = [DataRequired(), EqualTo('password')])\n submit = SubmitField(\"Sign Up\")\n\n def validate_email(self, email):\n \tuser = User.query.filter_by(email=email.data).first()\n \tif user:\n \t\traise ValidationError(\"The email is already in use, please choose a different one\")\n\n\nclass StudentLedgerForm(FlaskForm):\n\tphone = IntegerField(\"Parent's Contact\", validators=[DataRequired()])\n\tfirstname = StringField(\"First Name\", validators=[DataRequired()])\n\tsubmit = SubmitField(\"Generate\")\n\n\tdef validate_phone(self, phone):\n\t\tnum = str(phone.data)\n\t\tprint(len(num))\n\t\tif len(num) != 9:\n\t\t\traise ValidationError(\"Phone number must be 10 digits\")\n\n\tdef validate_firstname(self, firstname):\n\t\tfor char in firstname.data:\n\t\t\tif inside(ch=char) == False:\n\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n\t\n\nclass ClientLogInForm(FlaskForm):\n email = EmailField(\"Email\", validators=[DataRequired(), Email()])\n password = PasswordField(\"Password\", validators=[DataRequired()])\n remember = BooleanField(\"Remember me\")\n submit = SubmitField(\"Log In\")\n\n\nclass ToDoForm(FlaskForm):\n task = SelectField(\"Choose A Task\", choices = ['Make E.T.L Expenses','Make P.T.A Expenses', 'Begin Semester', 'Promote Student'], validators=[DataRequired()])\n submit_do = SubmitField(\"Proceed\")\n\n\nclass StudentPaymentsForm(FlaskForm):\n\tetl_amount = DecimalField(\"ETL\", validators=[InputRequired(), NumberRange(min=0, max=3000)])\n\tpta_amount = DecimalField(\"PTA\", validators=[InputRequired(), NumberRange(min=0, max=3000)])\n\tcheq_no = StringField('Cheq ID', validators = [Length(max=20)])\n\tmode = SelectField('Mode of payment', choices = [('','Choose Payment mode...'), (\"Cash\", 'Cash'), (\"Cheque\",'Cheque')], validators=[DataRequired()])\n\tsemester = SelectField(\"Semester\", choices = [('','Choose semester...'),(\"SEM1\", 'SEM1'), (\"SEM2\",'SEM2')], validators=[DataRequired()])\n\tsubmit = SubmitField(\"Receive\")\n\nclass ExpensesForm(FlaskForm):\n\tpurchase_date = DateField(\"Purchase Date\", validators=[DataRequired()])\n\titem = StringField(\"Item\", validators=[DataRequired(), Length(max=20)])\n\tpurpose = StringField(\"Purpose\", validators=[DataRequired(), Length(max=50)])\n\tunitcost = DecimalField(\"Unit Cost\", validators=[DataRequired(), NumberRange(min=1, max=30000)])\n\tquantity = DecimalField(\"Quantity\", validators=[DataRequired(), NumberRange(min=1, max=30000)])\n\ttotalcost = DecimalField(\"Total Cost\", validators=[DataRequired(), NumberRange(min=1, max=300000)])\n\tsubmit = SubmitField(\"Debit\")\n\t\t\n\tdef validate_item(self, item):\n\t\tfor char in item.data:\n\t\t\tif inside(ch=char) == False:\n\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n\t\t\t\n\tdef validate_purpose(self, purpose):\n\t\tfor char in purpose.data:\n\t\t\tif inside(ch=char) == False:\n\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n\n\tdef validate_totalcost(self, totalcost):\n\t\tif totalcost.data != self.quantity.data * self.unitcost.data:\n\t\t\traise ValidationError(f\"Total cost should be {self.quantity.data * self.unitcost.data} NOT {totalcost.data}\")\n\n\n\tdef validate_purchase_date(self, purchase_date):\n\t\ttoday = datetime.utcnow()\n\t\ttoday 
= dt.date(year=today.year, month=today.month, day=today.day)\n\t\tpurchase_date1 = dt.date(year=purchase_date.data.year, month=purchase_date.data.month, day=purchase_date.data.day)\n\t\tif purchase_date1 > today:\n\t\t\traise ValidationError(f\"Date cant't be further than {today}\")\n\n\nclass DonationForm(FlaskForm):\n\tname = StringField(\"Name\", validators=[DataRequired()])\n\tamount = DecimalField(\"Amount\", validators=[DataRequired(), NumberRange(min=1, max=300000)])\n\tmode = SelectField(\"Mode of Payment\", validators=[DataRequired()], choices = [('','Choose mode of payment...'),(\"Cash\", 'Cash'), (\"Momo\",'Momo'), ('Cheque', 'Cheque')])\n\tsemester = SelectField(\"Semester\", validators=[DataRequired()], choices = [('','Choose semester...'),(\"SEM1\", 'SEM1'), (\"SEM2\",'SEM2')])\n\tsubmit = SubmitField(\"Receive Cash\")\n\n\tdef validate_name(self, name):\n\t\tfor char in name.data:\n\t\t\tif inside(ch=char) == False:\n\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n\n\nclass OtherBusinessForm(FlaskForm):\n\tname = StringField(\"Name\", validators=[DataRequired()])\n\tstart_date = DateField(\"Start Date\", validators = [DataRequired()])\n\tend_date = DateField(\"End Date\", validators = [DataRequired()])\n\tdetail = StringField(\"Details\")\n\tamount = DecimalField(\"Amount\", validators=[DataRequired(), NumberRange(min=1, max=300000)])\n\tother_submit = SubmitField(\"Receive Cash\")\n\n\tdef validate_detail(self, detail):\n\t\tfor char in detail.data:\n\t\t\tif inside(ch=char) == False:\n\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n\n\tdef validate_name(self, name):\n\t\tfor char in name.data:\n\t\t\tif inside(ch=char) == False:\n\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n\n#class ETLExpensesForm(FlaskForm):\n#\tpurchase_date = DateField(\"Purchase Date\", validators=[DataRequired()])\n#\titem = StringField(\"Item\", validators=[DataRequired(), Length(max=20)])\n#\tpurpose = StringField(\"Purpose\", validators=[DataRequired(), Length(max=50)])\n#\tunitcost = DecimalField(\"Quantity\", validators=[DataRequired(), NumberRange(min=1, max=30000)])\n#\tquantity = DecimalField(\"Quantity\", validators=[DataRequired(), NumberRange(min=1, max=30000)])\n#\ttotalcost = DecimalField(\"Total Cost\", validators=[DataRequired(), NumberRange(min=1, max=300000)])\n#\tsubmit = SubmitField(\"Debit\")\n#\t\t\n#\tdef validate_item(self, item):\n#\t\tfor char in item.data:\n#\t\t\tif inside(ch=char) == False:\n#\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n#\t\t\t\n#\tdef validate_purpose(self, purpose):\n#\t\tfor char in purpose.data:\n#\t\t\tif inside(ch=char) == False:\n#\t\t\t\traise ValidationError(f'Character {char} is not allowed')\n#\n#\tdef validate_totalcost(self, totalcost):\n#\t\tif totalcost.data != self.quantity.data * self.unitcost.data:\n#\t\t\traise ValidationError(f\"Totals cost should be {self.quantity.data * self.unitcost.data} NOT {totalcost.data}\")\n#\n#\tdef validate_purchase_date(self, purchase_date):\n#\t\ttoday = datetime.utcnow()\n#\t\ttoday = dt.date(year=today.year, month=today.month, day=today.day)\n#\t\tpurchase_date1 = dt.date(year=purchase_date.data.year, month=purchase_date.data.month, day=purchase_date.data.day)\n#\t\tif purchase_date1 > today:\n#\t\t\traise ValidationError(f\"Date cant't be further than {today}\")\n\n\nclass ReportsForm(FlaskForm):\n report = SelectField(\"Choose A Report\", validators=[DataRequired()], choices = ['Cash Book', 'Income & Expenditure', 
'Expenditure Statement', 'Income Statement', 'INCOME & EXPENDITURE', 'CASH PAYMENT', 'CASH RECEIPT'])\n filter_by = SelectField(\"Choose Category\", choices = ['PTA Levy', 'ETL', 'ETL & PTA Levy'])\n start = DateField(\"Start\", validators=[DataRequired()])\n end = DateField(\"End\", validators=[DataRequired()])\n submit_rep = SubmitField(\"Generate\")\n\n def validate_end(self, end):\n \tif end.data < self.start.data:\n \t\traise ValidationError(\"Date must be latter than start date\")\n\n #def validate_start(self, start):\n #\ttoday = datetime.utcnow()\n #\ttoday = dt.date(year=today.year, month=today.month, day=today.day)\n #\tstart1 = dt.date(year=start.data.year, month=start.data.month, day=start.data.day)\n #\tif start1 > today:\n #\t\traise ValidationError(f\"Date is greater than {today}\")\n\nclass ChargeForm(FlaskForm):\n semester = SelectField(\"Choose semester\", validators=[DataRequired()], \n \tchoices = [('','Choose semester...'),(\"SEM1\", 'SEM1'), (\"SEM2\",'SEM2')])\n begin_date = DateField(\"Start Date\", validators= [DataRequired()])\n end_date = DateField(\"End Date\", validators= [DataRequired()])\n pta = DecimalField(\"PTA Levy\", validators=[DataRequired()])\n etl = DecimalField(\"ETL\", validators=[DataRequired()])\n submit = SubmitField(\"Get Started\")\n\n def validate_begin_date(self, begin_date):\n \ttoday = datetime.utcnow()\n \ttoday = dt.date(year=today.year, month=today.month, day=today.day)\n \tbegin_date1 = dt.date(year=begin_date.data.year, month=begin_date.data.month, day=begin_date.data.day)\n \tif begin_date1 > today:\n \t\traise ValidationError(f\"Date is greater than {today}\")\n\n def validate_end_date(self, end_date):\n \tif end_date.data <= self.begin_date.data:\n \t\traise ValidationError(\"End date must be latter than start date\")\n\n \t\n\nclass SearchForm(FlaskForm):\n parent_contact = StringField(\"Parent Contact\", validators=[DataRequired(), Length(min=8, max=20)])\n firstname = StringField(\"First Name\", validators=[DataRequired()])\n search_submit = SubmitField(\"Search\")\n\n def validate_parent_contact(self, parent_contact):\n \tfor char in parent_contact.data:\n \t\tif inside2(ch=char) == False:\n \t\t\traise ValidationError(f'Character {char} is not allowed')\n\n def validate_firstname(self, firstname):\n \tfor char in firstname.data:\n \t\tif inside(ch=char) == False:\n \t\t\traise ValidationError(f'Character {char} is not allowed')","repo_name":"KINTEP/kpasec22","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69910025864","text":"import requests\r\nimport xml.etree.ElementTree as ET\r\nimport pandas as pd\r\nimport gzip\r\n\r\nURL = \"https://www.hackerrank.com/\"\r\n\r\n\r\nclass SitemapParser:\r\n def __init__(self, base_url):\r\n \"\"\"\r\n Initialize the SitemapParser with a base URL.\r\n\r\n Parameters:\r\n - base_url (str): The base URL used to parse the robots.txt data to retrieve Sitemap data.\r\n\r\n Returns:\r\n - None\r\n \"\"\"\r\n self.base_url = base_url\r\n self.sitemaps = []\r\n\r\n def fetch_robots_txt(self):\r\n \"\"\"\r\n Fetch the content of the robots.txt file for the specified URL.\r\n\r\n Returns:\r\n - str: The content of the robots.txt file.\r\n \"\"\"\r\n robots_url = f\"{self.base_url}/robots.txt\"\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36\"\r\n }\r\n 
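# Editor's note (illustrative addition): the forms.py module above repeats
# the same character-whitelist loop in many validate_* methods. WTForms also
# accepts arbitrary callables in a field's validators list, so the check can
# be factored into one reusable validator; this sketch assumes the same
# inside() helper that forms.py already imports from helpers, and
# charset_validator is a hypothetical name.
from wtforms import ValidationError
from helpers import inside

def charset_validator(check=inside):
    def _validate(form, field):
        for char in field.data or "":
            if not check(ch=char):
                raise ValidationError(f"Character {char} is not allowed")
    return _validate

# usage: name = StringField("Name", validators=[DataRequired(), charset_validator()])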
try:\r\n response = requests.get(robots_url, headers=headers)\r\n response.raise_for_status()\r\n return response.text\r\n except requests.exceptions.RequestException as e:\r\n print(f\"Error fetching robots.txt: {e}\")\r\n return None\r\n\r\n def extract_sitemaps(self, robots_content):\r\n \"\"\"\r\n Extract sitemap URLs from the content of robots.txt.\r\n\r\n Parameters:\r\n - robots_content (str): The content of the robots.txt file.\r\n\r\n Returns:\r\n - list: A list of sitemap URLs.\r\n \"\"\"\r\n sitemaps = []\r\n if robots_content:\r\n lines = robots_content.split(\"\\n\")\r\n for line in lines:\r\n if line.startswith(\"Sitemap:\"):\r\n sitemap_url = line.split(\": \")[1].strip()\r\n sitemaps.append(sitemap_url)\r\n return sitemaps\r\n\r\n def parse_sitemap(self, sitemap_url):\r\n \"\"\"\r\n Parse the sitemap XML content for a given sitemap URL.\r\n\r\n Parameters:\r\n - sitemap_url (str): The URL of the sitemap XML.\r\n\r\n Returns:\r\n - list: A list of URLs extracted from the sitemap XML.\r\n \"\"\"\r\n try:\r\n response = requests.get(sitemap_url)\r\n response.raise_for_status()\r\n decompressed_content = gzip.decompress(response.content).decode(\"utf-8\")\r\n root = ET.fromstring(decompressed_content)\r\n urls = [\r\n elem.text\r\n for elem in root.findall(\r\n \".//{http://www.sitemaps.org/schemas/sitemap/0.9}loc\"\r\n )\r\n ]\r\n return urls\r\n except requests.exceptions.RequestException as e:\r\n print(f\"Error parsing sitemap: {e}\")\r\n return []\r\n\r\n def parse_all_sitemaps(self):\r\n \"\"\"\r\n Parse all sitemaps for the specified base URL and return the result in a DataFrame.\r\n\r\n Returns:\r\n - pandas.DataFrame: A DataFrame containing URLs from all parsed sitemaps.\r\n \"\"\"\r\n robots_content = self.fetch_robots_txt()\r\n self.sitemaps = self.extract_sitemaps(robots_content)\r\n\r\n all_urls = []\r\n for sitemap_url in self.sitemaps:\r\n urls = self.parse_sitemap(sitemap_url)\r\n all_urls.extend(urls)\r\n\r\n df = pd.DataFrame(all_urls, columns=[\"URL\"])\r\n return df\r\n\r\n\r\n# Example usage with a website\r\nwebsite_url = URL\r\nsitemap_parser = SitemapParser(website_url)\r\nresult_df = sitemap_parser.parse_all_sitemaps()\r\n\r\n# Display the DataFrame\r\nprint(result_df)\r\n","repo_name":"shivakrishna67/AnalyticalProgramming-Project-2","sub_path":"src/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21381082597","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# Usage:\n# ./beheader.py \n\nimport copy\nimport gzip\nimport time\nimport argparse\nimport urllib.request\n\n\ndef print_info(message):\n print('\\033[92m\\033[1m[INFO]\\033[0m\\033[0m ' + message)\n\n\ndef print_error(message):\n print('\\033[91m\\033[1m[ERROR]\\033[0m\\033[0m ' + message)\n exit()\n\n\ndef parseArgs():\n parser = argparse.ArgumentParser(\n usage='beheader.py curl [URL] [-X REQUEST] [-H HEADER] [-d DATA]')\n parser.add_argument('-X', '--request', nargs=1, help='Request method')\n parser.add_argument('-H', '--header', nargs=1, action='append',\n help='Request headers')\n parser.add_argument('-d', '--data', '--data-raw', '--data-binary',\n nargs=1, action='append',\n help='Request data')\n return parser.parse_known_args()\n\n\ndef get_url(args):\n for a in args:\n if not a.startswith('-'):\n return a\n print_error('Missing URL!')\n\n\ndef get_rest_options(args):\n optlist = []\n for a in args:\n if a.startswith('-'):\n optlist.append(a)\n 
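# Editor's note (illustrative addition): parse_sitemap in the SitemapParser
# above always gzip-decompresses the response, so a plain .xml sitemap URL
# would raise gzip.BadGzipFile. A tolerant decode step could sniff the gzip
# magic bytes first; decode_sitemap_payload is a hypothetical helper name.
import gzip

def decode_sitemap_payload(raw):
    # gzip streams always begin with the magic bytes 0x1f 0x8b
    if raw[:2] == b"\x1f\x8b":
        return gzip.decompress(raw).decode("utf-8")
    return raw.decode("utf-8")  # assume uncompressed XML otherwise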
return optlist\n\n\ndef get_method(args):\n if args.request is not None:\n return args.request[0]\n if args.data is not None:\n return 'POST'\n return 'GET'\n\n\ndef header_list_to_dict(headerlist):\n dict = {}\n if headerlist is not None:\n for h in headerlist:\n parts = h[0].split(':')\n key = parts[0]\n value = ':'.join(parts[1:]).lstrip()\n dict[key] = value\n return dict\n\n\ndef data_list_to_str(datalist):\n str = ''\n if datalist is not None:\n for d in datalist:\n str += '&' + d[0]\n str = str[1:]\n return bytes(str, 'utf-8')\n\n\ndef call(reqdict):\n request = urllib.request.Request(reqdict['url'],\n data=reqdict['data'],\n headers=reqdict['headers'])\n request.method = reqdict['method']\n\n try:\n response = urllib.request.urlopen(request)\n status = response.getcode()\n rawcontent = response.read()\n try:\n content = rawcontent.decode('utf-8')\n except UnicodeDecodeError:\n content = gzip.decompress(rawcontent).decode('utf-8')\n return status, content\n except urllib.error.HTTPError as error:\n return error.code, ''\n\n\ndef write_to_file(filename, content):\n f = open(filename, 'w')\n f.write(content)\n f.close()\n\n\ndef iterate_headers(reqdict, repdict):\n i = 0\n iteratedict = copy.deepcopy(reqdict)\n iteratedict['headers'] = {}\n status, content = call(iteratedict)\n if repdict['ref']['status'] == status \\\n and repdict['ref']['length'] == len(content):\n return\n\n for k, v in reqdict['headers'].items():\n i += 1\n repdict[i] = {}\n iteratedict = copy.deepcopy(reqdict)\n iteratedict['headers'].pop(k)\n print_info('Check Header combination: ' + str(iteratedict['headers']))\n status, content = call(iteratedict)\n\n if repdict['ref']['status'] == status \\\n and repdict['ref']['length'] == len(content):\n repdict[i]['necessary'] = False\n else:\n repdict[i]['necessary'] = True\n repdict[i]['length'] = len(content)\n repdict[i]['status'] = status\n repdict[i]['content'] = content\n repdict[i]['header'] = k\n repdict[i]['headervalue'] = v\n\n\ndef generate_report(reqdict, repdict):\n output = 'curl -X ' + reqdict['method'] \\\n + ' \"' + reqdict['url'] + '\"'\n\n if len(repdict) == 1:\n print_info('Header is not needed ¯\\\\_(ツ)_/¯')\n else:\n requireheader = ''\n for i in range(1, len(repdict)):\n if repdict[i]['necessary'] is True:\n requireheader += ', ' + repdict[i]['header']\n output += ' -H \\'' + repdict[i]['header'] \\\n + ': ' + repdict[i]['headervalue'] + '\\''\n print_info('Must-have header(s): \\033[93m\\033[1m'\n + requireheader[2:] + '\\033[0m\\033[0m')\n\n if len(reqdict['data']) > 0:\n output += ' --data \\'' + reqdict['data'].decode('utf-8') + '\\''\n\n if len(reqdict['options']) > 0:\n for o in reqdict['options']:\n output += ' ' + o\n\n print(output)\n write_to_file(str(int(time.time())) + '.curl', output)\n\n\ndef main():\n requestdict = {}\n reportdict = {}\n\n args, rest = parseArgs()\n\n if args.header is None:\n print_error('Missing header!')\n\n try:\n rest.remove('curl')\n except ValueError:\n pass\n\n requestdict['url'] = get_url(rest)\n requestdict['method'] = get_method(args)\n requestdict['data'] = data_list_to_str(args.data)\n requestdict['options'] = get_rest_options(rest)\n requestdict['headers'] = header_list_to_dict(args.header)\n requestdict['rawheaders'] = args.header\n\n refstatus, refcontent = call(requestdict)\n reportdict['ref'] = {}\n reportdict['ref']['status'] = refstatus\n reportdict['ref']['length'] = len(refcontent)\n reportdict['ref']['content'] = refcontent\n reportdict['ref']['headers'] = requestdict['headers']\n\n 
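# Editor's note (illustrative addition): iterate_headers above drops one
# header per request, so N candidate headers cost about N calls. When
# requests are expensive and only a few headers matter, halving the
# candidate set in the spirit of delta debugging needs roughly O(k log N)
# calls instead. This sketch assumes headers act independently and a
# hypothetical probe(headers_dict) -> bool that returns True when the
# reference status and content length still match (it could wrap this
# module's call() and compare against repdict['ref']):
def minimal_headers(headers, probe):
    def needed(kept, candidates):
        if probe(dict(kept)):      # everything in `candidates` is droppable
            return []
        if len(candidates) == 1:   # a single header proven necessary
            return list(candidates)
        mid = len(candidates) // 2
        left, right = candidates[:mid], candidates[mid:]
        # test each half while keeping the other half in place
        return needed(kept + right, left) + needed(kept + left, right)
    return dict(needed([], list(headers.items())))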
iterate_headers(requestdict, reportdict)\n generate_report(requestdict, reportdict)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n pass\n","repo_name":"KevCui/beheader","sub_path":"beheader.py","file_name":"beheader.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"11650325015","text":"\"\"\"\nerror_logger.py\nThis file contains the error logger function.\n\"\"\"\n\nfrom datetime import date\nERROR_PATH = \"./error.txt\"\n\n\ndef log_error(message: str) -> None:\n \"\"\"\n This function logs errors with the current date and the given error to an error file\n :param message: The message that describes the error\n :return: None\n \"\"\"\n with open(ERROR_PATH, mode='a', encoding='utf-8') as error_file:\n error_file.write(f\"{date.today()} {message} \\n\")\n\nif __name__ == '__main__':\n log_error(\"test\")","repo_name":"jgliao248/15_Puzzle_Turtle","sub_path":"Controller/error_logger.py","file_name":"error_logger.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43356464456","text":"import math\n\n\nclass Metrics:\n\n @staticmethod\n def accuracy(testSet, predictions):\n correct = 0\n for x in range(len(testSet)):\n if testSet[x][-1] == predictions[x]:\n correct += 1\n return (correct / float(len(testSet))) * 100.0\n\n @staticmethod\n def variance(data, ddof=0):\n n = len(data)\n mean = sum(data) / n\n return sum((x - mean) ** 2 for x in data) / (n - ddof)\n\n @staticmethod\n def stdev(data):\n var = Metrics.variance(data)\n std_dev = math.sqrt(var)\n return std_dev\n\n @staticmethod\n def mean(data):\n return sum(data) / len(data)\n\n @staticmethod\n def confusion_matrix(actual, predicted, normalize = False):\n \"\"\"\n Generate a confusion matrix for multiple classification\n @params:\n actual - a list of integers or strings for known classes\n predicted - a list of integers or strings for predicted classes\n normalize - optional boolean for matrix normalization\n @return:\n matrix - a 2-dimensional list of pairwise counts\n \"\"\"\n unique = sorted(set(actual))\n matrix = [[0 for _ in unique] for _ in unique]\n imap = {key: i for i, key in enumerate(unique)}\n # Generate Confusion Matrix\n for p, a in zip(predicted, actual):\n matrix[imap[p]][imap[a]] += 1\n # Matrix Normalization\n if normalize:\n sigma = sum([sum(matrix[imap[i]]) for i in unique])\n matrix = [row for row in map(lambda i: list(map(lambda j: j / sigma, i)), matrix)]\n\n return matrix\n","repo_name":"jozecarlos/mestrado","sub_path":" APRENDIZAGEM_DE_MÁQUINA/util/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1235710166","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 12 15:49:58 CEST 2017\n\n@author: edouard.duchesnay@cea.fr\n\"\"\"\n\nimport os\nimport json\nimport numpy as np\nimport itertools\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import recall_score, roc_auc_score, precision_recall_fscore_support\nimport parsimony.estimators as estimators\nimport parsimony.algorithms as algorithms\nimport parsimony.utils as utils\nfrom parsimony.utils.linalgs import LinearOperatorNesterov\nfrom scipy.stats import binom_test\nfrom collections import OrderedDict\nfrom sklearn import 
preprocessing\nimport pandas as pd\nimport shutil\nfrom brainomics import array_utils\nimport mapreduce\nfrom statsmodels.stats.inter_rater import fleiss_kappa\n\nWD = \"/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_all\"\nWD_CLUSTER = WD.replace(\"/neurospin/\", \"/mnt/neurospin/sel-poivre/\")\nWD_ORIGINAL = \"/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv\"\nuser_func_filename = \"/home/ed203246/git/scripts/2013_adni/MCIc-CTL-FS/03_predict_cs.py\"\n\ndef config_filename(): return os.path.join(WD,\"config_dcv_reducedrange.json\")\ndef results_filename(): return os.path.join(WD,os.path.basename(WD) + \"_dcv.xlsx\")\n\nNFOLDS_OUTER = 5\nNFOLDS_INNER = 5\npenalty_start = 2\nDATA_TYPE = \"mesh\"\n# DATA_TYPE = \"image\"\n\nlambda_max_A = 8.999\n\n##############################################################################\ndef init():\n INPUT_DATA_X = os.path.join(WD_ORIGINAL, 'X.npy')\n INPUT_DATA_y = os.path.join(WD_ORIGINAL, 'y.npy')\n INPUT_MASK_PATH = os.path.join(WD_ORIGINAL, 'mask.npy')\n INPUT_MESH_PATH = '/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs/lrh.pial.gii'\n #INPUT_LINEAR_OPE_PATH = '/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/Freesurfer/data/30yo/Atv.npz'\n # INPUT_CSV = '/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/Freesurfer/population_30yo.csv'\n\n os.makedirs(WD, exist_ok=True)\n shutil.copy(INPUT_DATA_X, WD)\n shutil.copy(INPUT_DATA_y, WD)\n shutil.copy(INPUT_MASK_PATH, WD)\n shutil.copy(INPUT_MESH_PATH, WD)\n\n #shutil.copy(INPUT_LINEAR_OPE_PATH, WD)\n\n ## Create config file\n os.chdir(WD)\n X = np.load(\"X.npy\")\n y = np.load(\"y.npy\")\n\n if not os.path.exists(os.path.join(WD, \"Atv.npz\")):\n import brainomics.mesh_processing as mesh_utils\n cor, tri = mesh_utils.mesh_arrays(os.path.join(WD, \"lrh.pial.gii\"))\n mask = np.load(os.path.join(WD, 'mask.npy'))\n\n import parsimony.functions.nesterov.tv as nesterov_tv\n from parsimony.utils.linalgs import LinearOperatorNesterov\n Atv = nesterov_tv.linear_operator_from_mesh(cor, tri, mask, calc_lambda_max=True)\n Atv.save(os.path.join(WD, \"Atv.npz\"))\n Atv_ = LinearOperatorNesterov(filename=os.path.join(WD, \"Atv.npz\"))\n assert Atv.get_singular_values(0) == Atv_.get_singular_values(0)\n assert np.allclose(Atv_.get_singular_values(0), 8.999, rtol=1e-03, atol=1e-03)\n assert np.all([a.shape == (317089, 317089) for a in Atv])\n\n if not os.path.exists(os.path.join(WD, \"beta_start.npz\")):\n betas = dict()\n import time\n alphas = [.01, 0.1, 1.0, 10]\n for alpha in alphas:\n mod = estimators.RidgeLogisticRegression(l=alpha, class_weight=\"auto\", penalty_start=penalty_start)\n t_ = time.time()\n mod.fit(X, y.ravel())\n print(time.time() - t_) # 11564\n betas[\"lambda_%.2f\" % alpha] = mod.beta\n\n np.savez(os.path.join(WD, \"beta_start.npz\"), **betas)\n beta_start = np.load(os.path.join(WD, \"beta_start.npz\"))\n assert np.all([np.all(beta_start[a] == betas[a]) for a in beta_start.keys()])\n\n ## Create config file\n\n # ########################################################################\n # Setting 1: 5cv + large range of parameters: cv_largerange\n # with sub-sample training set with size 50, 100\n # 5cv/cv0*[_sub50]/refit/*\n\n # sub_sizes = [50, 100]\n sub_sizes = []\n\n cv_outer = [[tr, te] for tr, te in\n StratifiedKFold(n_splits=NFOLDS_OUTER, random_state=42).split(np.zeros(y.shape[0]), y.ravel())]\n\n # check we got the same CV than previoulsy\n cv_old = json.load(open(os.path.join(WD_ORIGINAL, \"config_modselectcv.json\")))[\"resample\"]\n cv_outer_old = 
[cv_old[k] for k in ['cv%02d/refit' % i for i in range(NFOLDS_OUTER)]]\n assert np.all([np.all(np.array(cv_outer_old[i][0]) == cv_outer[i][0]) for i in range(NFOLDS_OUTER)])\n assert np.all([np.all(np.array(cv_outer_old[i][1]) == cv_outer[i][1]) for i in range(NFOLDS_OUTER)])\n # check END\n\n import collections\n cv = collections.OrderedDict()\n\n cv[\"refit/refit\"] = [np.arange(len(y)), np.arange(len(y))]\n\n for cv_outer_i, (tr_val, te) in enumerate(cv_outer):\n # Simple CV\n cv[\"cv%02d/refit\" % (cv_outer_i)] = [tr_val, te]\n\n # Nested CV\n # cv_inner = StratifiedKFold(y[tr_val].ravel(), n_folds=NFOLDS_INNER, random_state=42)\n # for cv_inner_i, (tr, val) in enumerate(cv_inner):\n # cv[\"cv%02d/cvnested%02d\" % ((cv_outer_i), cv_inner_i)] = [tr_val[tr], tr_val[val]]\n\n # Sub-sample training set with size 50, 100\n # => cv*_sub[50|100]/refit\n grps = np.unique(y[tr_val]).astype(int)\n ytr = y.copy()\n ytr[te] = np.nan\n g_idx = [np.where(ytr == g)[0] for g in grps]\n assert np.all([np.all(ytr[g_idx[g]] == g) for g in grps])\n\n g_size = np.array([len(g) for g in g_idx])\n g_prop = g_size / g_size.sum()\n\n for sub_size in sub_sizes:\n # sub_size = sub_sizes[0]\n sub_g_size = np.round(g_prop * sub_size).astype(int)\n g_sub_idx = [np.random.choice(g_idx[g], sub_g_size[g], replace=False) for g in grps]\n assert np.all([np.all(y[g_sub_idx[g]] == g) for g in grps])\n tr_val_sub = np.concatenate(g_sub_idx)\n assert len(tr_val_sub) == sub_size\n assert np.all([idx in tr_val for idx in tr_val_sub])\n assert np.all(np.logical_not([idx in te for idx in tr_val_sub]))\n cv[\"cv%02d_sub%i/refit\" % (cv_outer_i, sub_size)] = [tr_val_sub, te]\n\n cv = {k:[cv[k][0].tolist(), cv[k][1].tolist()] for k in cv}\n\n # Nested CV\n # assert len(cv_largerange) == NFOLDS_OUTER * NFOLDS_INNER + NFOLDS_OUTER + 1\n\n # Simple CV\n # assert len(cv) == NFOLDS_OUTER + 1\n\n # Simple CV + sub-sample training set with size 50, 100:\n assert len(cv) == NFOLDS_OUTER * (1 + len(sub_sizes)) + 1\n\n print(list(cv.keys()))\n\n # Large grid of parameters\n alphas = [0.001, 0.01, 0.1, 1.0]\n # alphas = [.01, 0.1, 1.0] # first ran with this grid\n tv_ratio = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n l1l2_ratio = [0.1, 0.5, 0.9]\n # l1l2_ratio = [0, 0.1, 0.5, 0.9, 1.0] # first ran with this grid\n algos = [\"enettv\", \"enetgn\"]\n params_enet_tvgn = [list(param) for param in itertools.product(algos, alphas, l1l2_ratio, tv_ratio)]\n assert len(params_enet_tvgn) == 240 # old 300\n\n params_enet = [list(param) for param in itertools.product([\"enet\"], alphas, l1l2_ratio, [0])]\n assert len(params_enet) == 12 # old 15\n\n params = params_enet_tvgn + params_enet\n assert len(params) == 252 # 315\n # Simple CV\n # assert len(params) * len(cv) == 1890\n\n # Simple CV + sub-sample training set with size 50, 100:\n assert len(params) * len(cv) == 1512 # 5040\n\n config = dict(data=dict(X=\"X.npy\", y=\"y.npy\"),\n params=params, resample=cv,\n structure_linear_operator_tv=\"Atv.npz\",\n beta_start=\"beta_start.npz\",\n map_output=\"5cv\",\n user_func=user_func_filename)\n json.dump(config, open(os.path.join(WD, \"config_cv_largerange.json\"), \"w\"))\n\n\n # Build utils files: sync (push/pull) and PBS\n import brainomics.cluster_gabriel as clust_utils\n cmd = \"mapreduce.py --map %s/config_cv_largerange.json\" % WD_CLUSTER\n clust_utils.gabriel_make_qsub_job_files(WD, cmd,walltime = \"250:00:00\",\n suffix=\"_cv_largerange\",\n freecores=2)\n\n # ########################################################################\n 
# Setting 2: dcv + reduced range of parameters: dcv_reducedrange\n # 5cv/cv0*/cvnested0*/*\n\n cv_outer = [[tr, te] for tr, te in\n StratifiedKFold(n_splits=NFOLDS_OUTER, random_state=42).split(np.zeros(y.shape[0]), y.ravel())]\n\n # check we got the same CV than previoulsy\n cv_old = json.load(open(os.path.join(WD_ORIGINAL, \"config_modselectcv.json\")))[\"resample\"]\n cv_outer_old = [cv_old[k] for k in ['cv%02d/refit' % i for i in range(NFOLDS_OUTER)]]\n assert np.all([np.all(np.array(cv_outer_old[i][0]) == cv_outer[i][0]) for i in range(NFOLDS_OUTER)])\n assert np.all([np.all(np.array(cv_outer_old[i][1]) == cv_outer[i][1]) for i in range(NFOLDS_OUTER)])\n # check END\n\n import collections\n cv = collections.OrderedDict()\n cv[\"refit/refit\"] = [np.arange(len(y)), np.arange(len(y))]\n\n for cv_outer_i, (tr_val, te) in enumerate(cv_outer):\n cv[\"cv%02d/refit\" % (cv_outer_i)] = [tr_val, te]\n cv_inner = StratifiedKFold(n_splits=NFOLDS_INNER, random_state=42).split(np.zeros(y[tr_val].shape[0]), y[tr_val].ravel())\n for cv_inner_i, (tr, val) in enumerate(cv_inner):\n cv[\"cv%02d/cvnested%02d\" % ((cv_outer_i), cv_inner_i)] = [tr_val[tr], tr_val[val]]\n\n cv = {k:[cv[k][0].tolist(), cv[k][1].tolist()] for k in cv}\n #assert len(cv) == NFOLDS_OUTER + 1\n assert len(cv) == NFOLDS_OUTER * NFOLDS_INNER + NFOLDS_OUTER + 1\n print(list(cv.keys()))\n\n # Reduced grid of parameters\n alphas = [0.001, 0.01, 0.1, 1.0]\n # alphas = [.01, 0.1] # original\n tv_ratio = [0.2, 0.8]\n l1l2_ratio = [0.1, 0.9]\n algos = [\"enettv\", \"enetgn\"]\n params_enet_tvgn = [list(param) for param in itertools.product(algos, alphas, l1l2_ratio, tv_ratio)]\n assert len(params_enet_tvgn) == 32 # 16\n\n params_enet = [list(param) for param in itertools.product([\"enet\"], alphas, l1l2_ratio, [0])]\n assert len(params_enet) == 8 # 4\n\n params = params_enet_tvgn + params_enet\n assert len(params) == 40 # 20\n assert len(params) * len(cv) == 1240 # 620\n\n config = dict(data=dict(X=\"X.npy\", y=\"y.npy\"),\n params=params, resample=cv,\n structure_linear_operator_tv=\"Atv.npz\",\n beta_start=\"beta_start.npz\",\n map_output=\"5cv\",\n user_func=user_func_filename)\n json.dump(config, open(os.path.join(WD, \"config_dcv_reducedrange.json\"), \"w\"))\n\n # Build utils files: sync (push/pull) and PBS\n import brainomics.cluster_gabriel as clust_utils\n cmd = \"mapreduce.py --map %s/config_dcv_reducedrange.json\" % WD_CLUSTER\n clust_utils.gabriel_make_qsub_job_files(WD, cmd,walltime = \"250:00:00\",\n suffix=\"_dcv_reducedrange\",\n freecores=2)\n\n\n#############################################################################\ndef load_globals(config):\n import scipy.sparse as sparse\n import functools\n import mapreduce as GLOBAL # access to global variables\n\n GLOBAL.DATA = GLOBAL.load_data(config[\"data\"])\n\n Atv = LinearOperatorNesterov(filename=config[\"structure_linear_operator_tv\"])\n Agn = sparse.vstack(Atv)\n Agn.singular_values = Atv.get_singular_values()\n def get_singular_values(self, nb=None):\n return self.singular_values[nb] if nb is not None else self.singular_values\n Agn.get_singular_values = functools.partial(get_singular_values, Agn)\n assert np.allclose(Agn.get_singular_values(0), lambda_max_A, rtol=1e-03, atol=1e-03)\n GLOBAL.Atv, GLOBAL.Agn = Atv, Agn\n\n # npz = np.load(config[\"beta_start\"])\n # GLOBAL.beta_start = {k:npz[k] for k in npz}\n\ndef resample(config, resample_nb):\n import mapreduce as GLOBAL # access to global variables\n GLOBAL.DATA = GLOBAL.load_data(config[\"data\"])\n resample 
= config[\"resample\"][resample_nb]\n GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k][idx, ...] for idx in resample]\n for k in GLOBAL.DATA}\n\ndef mapper(key, output_collector):\n \"\"\"\n # debug mapper\n config = json.load(open(os.path.join(WD, \"config_cv_largerange.json\"), \"r\"))\n load_globals(config)\n resample(config, 'refit/refit')\n key = ('enettv', 0.01, 0.1, 0.3)\n \"\"\"\n import mapreduce as GLOBAL\n Xtr = GLOBAL.DATA_RESAMPLED[\"X\"][0]\n Xte = GLOBAL.DATA_RESAMPLED[\"X\"][1]\n ytr = GLOBAL.DATA_RESAMPLED[\"y\"][0]\n yte = GLOBAL.DATA_RESAMPLED[\"y\"][1]\n\n # key = 'enettv_0.01_0.1_0.2'.split(\"_\")\n algo, alpha, l1l2ratio, tvratio = key[0], float(key[1]), float(key[2]), float(key[3])\n\n tv = alpha * tvratio\n l1 = alpha * float(1 - tv) * l1l2ratio\n l2 = alpha * float(1 - tv) * (1- l1l2ratio)\n\n print(key, algo, alpha, l1, l2, tv)\n # alpha = float(key[0])\n # l1, l2, tv = alpha * float(key[1]), alpha * float(key[2]), alpha * float(key[3])\n # print(\"l1:%f, l2:%f, tv:%f\" % (l1, l2, tv))\n\n class_weight = \"auto\" # unbiased\n\n # beta_start = GLOBAL.beta_start[\"lambda_%.4f\" % alpha]\n # mask = np.ones(Xtr.shape[0], dtype=bool)\n\n # scaler = preprocessing.StandardScaler().fit(Xtr)\n # Xtr = scaler.transform(Xtr)\n # Xte = scaler.transform(Xte)\n if algo == 'enettv':\n conesta = algorithms.proximal.CONESTA(max_iter=10000)\n mod = estimators.LogisticRegressionL1L2TV(l1, l2, tv, GLOBAL.Atv,\n algorithm=conesta, class_weight=class_weight, penalty_start=penalty_start)\n elif algo == 'enetgn':\n fista = algorithms.proximal.FISTA(max_iter=5000) # original 500\n mod = estimators.LogisticRegressionL1L2GraphNet(l1, l2, tv, GLOBAL.Agn,\n algorithm=fista, class_weight=class_weight, penalty_start=penalty_start)\n elif algo == 'enet':\n fista = algorithms.proximal.FISTA(max_iter=5000) # original 500\n mod = estimators.ElasticNetLogisticRegression(l1l2ratio, alpha,\n algorithm=fista, class_weight=class_weight, penalty_start=penalty_start)\n else:\n raise Exception('Algo%s not handled' %algo)\n\n mod.fit(Xtr, ytr.ravel())\n y_pred = mod.predict(Xte)\n proba_pred = mod.predict_probability(Xte)\n ret = dict(y_pred=y_pred, y_true=yte, proba_pred=proba_pred, beta=mod.beta)#, mask=mask)\n if output_collector:\n output_collector.collect(key, ret)\n else:\n return ret\n\n#############################################################################\n# Reducer\ntry:\n REDUCER_SRC = '/home/ed203246/git/brainomics-team/2017_logistic_nestv/scripts/reduce_plot_vizu.py'\n exec(open(REDUCER_SRC).read())\nexcept:\n pass\n\ndef do_reducer():\n output_filename = results_filename()\n # reducer(WD=WD, output_filename=output_filename, force_recompute=False)\n model = estimators.LogisticRegression()\n # reducer(WD=WD, output_filename=output_filename, force_recompute=True, model=model, rescale=False)\n reducer(WD=WD, output_filename=output_filename, force_recompute=False, model=model, rescale=False)\n\n\n\"\"\"\ndef reducer(key=None, values=None):\n\n import os, glob, pandas as pd\n def close(vec, val, tol=1e-4):\n return np.abs(vec - val) < tol\n\n def groupby_paths(paths, pos):\n groups = {g:[] for g in set([p.split(\"/\")[pos] for p in paths])}\n for p in paths:\n groups[p.split(\"/\")[pos]].append(p)\n return groups\n\n def scores_groupby_paths(paths, param_pos, algo_pos_in_params, score_func):\n byparams = groupby_paths(paths, param_pos)\n # key='enettv_0.1_0.1_0.2'; paths=byparams[key]; algo_idx=algo_pos_in_params\n byparams_scores = {k:score_func(k, v, config, algo_idx=algo_pos_in_params) for k, v 
in byparams.items()}\n byparams_scores = {k: v for k, v in byparams_scores.items() if v is not None}\n data = [list(byparams_scores[k].values()) for k in byparams_scores]\n columns = list(byparams_scores[list(byparams_scores.keys())[0]].keys())\n return pd.DataFrame(data, columns=columns)\n\n def argmaxscore_bygroup(data, groupby='fold', param_key=\"key\",\n score=\"bacc\",\n refit_key=None, # Do refit ?\n config=None, # required for refit\n score_func=None, # required for refit\n algo_pos_in_params=None # required for refit\n ):\n arg_max_byfold = list()\n for fold, data_fold in data.groupby(groupby):\n assert len(data_fold) == len(set(data_fold[param_key])) # ensure all param are diff\n arg_max_byfold.append([fold, data_fold.ix[data_fold[score].argmax()][param_key], data_fold[score].max()])\n arg_max_byfold = pd.DataFrame(arg_max_byfold, columns=[groupby, param_key, score])\n arg_max_byfold[\"key_refit\"] = refit_key\n if refit_key is not None:\n refit = score_func(refit_key,\n [os.path.join(config['map_output'], row[groupby], \"refit\", row[param_key])\n for index, row in arg_max_byfold.iterrows()],\n config, as_dataframe=True, algo_idx=algo_pos_in_params)\n else:\n refit = None\n\n return arg_max_byfold, refit\n\n # config = json.load(open(config_filename()))\n # paths = glob.glob(os.path.join(WD, \"5cv\", \"*\", \"*\", \"*\"))\n # param_config_set = set([mapreduce.dir_from_param_list(p) for p in config['params']])\n # assert len(paths) / len(param_config_set) == len(config['resample']), \"Nb run per param is not the one excpected\"\n\n # config_cv_largerange\n s = 'tv_ratio'\n os.chdir(WD)\n config = json.load(open(\"config_cv_largerange.json\"))\n paths_all = glob.glob(\"5cv/cv0?/refit/*\")\n paths_all.sort()\n # paths_sub50 = glob.glob(\"5cv/cv0?_sub50/refit/*\")\n # paths_sub50.sort()\n # paths_sub100 = glob.glob(\"5cv/cv0?_sub100/refit/*\")\n # paths_sub100.sort()\n\n #assert len(paths) == 4286\n print('## Refit scores: cv*/refit/*')\n print('## -------------------------')\n scores_refit = scores_groupby_paths(paths=paths_all, param_pos=3, algo_pos_in_params=0, score_func=scores)\n #scores_refit_sub50 = scores_groupby_paths(paths=paths_sub50, param_pos=3, algo_pos_in_params=0, score_func=scores)\n #scores_refit_sub100 = scores_groupby_paths(paths=paths_sub100, param_pos=3, algo_pos_in_params=0, score_func=scores)\n\n # with pd.ExcelWriter(os.path.join(WD, \"results_refit_cv_by_param_largerange.xlsx\")) as writer:\n # scores_refit.to_excel(writer, sheet_name='cv_by_param_all', index=False)\n #scores_refit_sub100.to_excel(writer, sheet_name='cv_by_param_sub100', index=False)\n #scores_refit_sub50.to_excel(writer, sheet_name='cv_by_param_sub50', index=False)\n\n\n print('## doublecv scores by outer-cv and by params: cv*/cvnested*/*')\n print('## -----------------------------------------')\n paths = glob.glob(\"5cv/cv0?/cvnested0?/*\")\n paths.sort()\n bycv = groupby_paths(paths, 1)\n scores_dcv_byparams = None\n for fold, paths_fold in bycv.items():\n print(fold)\n scores_dcv_fold = scores_groupby_paths(paths=paths_fold, param_pos=3, algo_pos_in_params=0, score_func=scores)\n scores_dcv_fold[\"fold\"] = fold\n scores_dcv_byparams = pd.concat([scores_dcv_byparams, scores_dcv_fold])\n\n print([[g, d.shape[0]] for g, d in scores_dcv_byparams.groupby([\"fold\", \"algo\"])])\n # assert np.all(np.array([g.shape[0] for d, g in scores_dcv_byparams.groupby('fold')]) == 136)\n\n # Different settings\n results = list()\n for algo in [\"enettv\", \"enetgn\"]:\n # algo = \"enettv\"\n 
results.append(argmaxscore_bygroup(data=scores_dcv_byparams,\n refit_key=\"%s_dcv-all\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n l1l2s_reduced = scores_dcv_byparams[\n (scores_dcv_byparams.algo == algo) &\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1) | close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams[s], 0.2) | close(scores_dcv_byparams[s], 0.8))]\n # assert np.all(np.array([g.shape[0] for d, g in l1l2s_reduced.groupby('fold')]) == 8)\n # assert l1l2s_reduced.shape[0] == 40\n results.append(argmaxscore_bygroup(data=l1l2s_reduced,\n refit_key=\"%s_dcv-reduced\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n l1l2s_ridge_reduced = scores_dcv_byparams[\n (scores_dcv_byparams.algo == algo) &\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1)) &\n (close(scores_dcv_byparams[s], 0.2) | close(scores_dcv_byparams[s], 0.8))]\n # assert np.all(np.array([g.shape[0] for d, g in l1l2s_ridge_reduced.groupby('fold')]) == 4)\n # assert l1l2s_ridge_reduced.shape[0] == 20\n results.append(argmaxscore_bygroup(data=l1l2s_ridge_reduced,\n refit_key=\"%s_dcv-ridge-reduced\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n l1l2s_ridge_reduced2 = l1l2s_ridge_reduced[close(l1l2s_ridge_reduced[s], 0.8)] # FS 0.8\n results.append(argmaxscore_bygroup(data=l1l2s_ridge_reduced2,\n refit_key=\"%s_dcv-ridge-reduced2\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n l1l2s_lasso_reduced = scores_dcv_byparams[\n (scores_dcv_byparams.algo == algo) &\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams[s], 0.2) | close(scores_dcv_byparams[s], 0.8))]\n # assert np.all(np.array([g.shape[0] for d, g in l1l2s_lasso_reduced.groupby('fold')]) == 4)\n # assert l1l2s_lasso_reduced.shape[0] == 20\n results.append(argmaxscore_bygroup(data=l1l2s_lasso_reduced,\n refit_key=\"%s_dcv-lasso-reduced\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n l1l2s_lasso_reduced2 = l1l2s_lasso_reduced[close(l1l2s_lasso_reduced[s], 0.8)] # FS 0.8\n results.append(argmaxscore_bygroup(data=l1l2s_lasso_reduced2,\n refit_key=\"%s_dcv-lasso-reduced2\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n algo = \"enet\"\n l1l2_reduced = scores_dcv_byparams[\n (scores_dcv_byparams.algo == algo) &\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1) | close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams[s], 0))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2_reduced.groupby('fold')]) == 4)\n assert l1l2_reduced.shape[0] == 20\n results.append(argmaxscore_bygroup(data=l1l2_reduced,\n refit_key=\"%s_dcv-reduced\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n l1l2_ridge_reduced = scores_dcv_byparams[\n (scores_dcv_byparams.algo == algo) &\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1)) &\n (close(scores_dcv_byparams[s], 0))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2_ridge_reduced.groupby('fold')]) == 2)\n assert l1l2_ridge_reduced.shape[0] == 10\n results.append(argmaxscore_bygroup(data=l1l2_ridge_reduced,\n refit_key=\"%s_dcv-ridge-reduced\" % algo, config=config, 
score_func=scores, algo_pos_in_params=0))\n\n l1l2_lasso_reduced = scores_dcv_byparams[\n (scores_dcv_byparams.algo == algo) &\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams[s], 0))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2_lasso_reduced.groupby('fold')]) == 2)\n assert l1l2_lasso_reduced.shape[0] == 10\n results.append(argmaxscore_bygroup(data=l1l2_lasso_reduced,\n refit_key=\"%s_dcv-lasso-reduced\" % algo, config=config, score_func=scores, algo_pos_in_params=0))\n\n cv_argmax, scores_dcv = zip(*results)\n cv_argmax = pd.concat(cv_argmax)\n scores_dcv = pd.concat(scores_dcv)\n\n with pd.ExcelWriter(results_filename()) as writer:\n scores_refit.to_excel(writer, sheet_name='cv_by_param', index=False)\n scores_dcv_byparams.to_excel(writer, sheet_name='cv_cv_byparam', index=False)\n cv_argmax.to_excel(writer, sheet_name='cv_argmax', index=False)\n scores_dcv.to_excel(writer, sheet_name='dcv', index=False)\n\n\"\"\"\n\n\n###############################################################################\n# copy old results to new organization\nimport glob\n\ndef dir_from_param_list(param_list):\n return \"_\".join([str(p) for p in param_list])\n\n\ndef param_src_to_dst(param_src, dst_prefix=None, dst_suffix=None):\n a, l1, l2, tv, k = [float(a) for a in param_src.split(\"_\")]\n l1_ratio = l1 / (l1+l2)\n param_list_dst = [a, l1_ratio, tv]\n if dst_prefix is not None:\n param_list_dst = [dst_prefix] + param_list_dst\n if dst_suffix is not None:\n param_list_dst = param_list_dst + [dst_suffix]\n return dir_from_param_list(param_list_dst)\n\n\ndef copy_results(SRC, DST, outer_str, inner_str, dst_prefix, copy=True):\n for fold in itertools.product(outer_str, inner_str):\n src = SRC % fold\n dst = DST % fold\n for param_src in [os.path.basename(p) for p in glob.glob(src + \"/*\")]:\n #param_src = \"0.01_0.008_0.792_0.2_-1\"\n #param_src = \"0.01_0.02_0.18_0.8_-1\"\n #param_src = \"0.1_0.72_0.08_0.2_-1\"\n param_dst = param_src_to_dst(param_src, dst_prefix=dst_prefix)\n path_src = os.path.join(src, param_src, \"beta.npz\")\n path_dst = os.path.join(dst, param_dst, \"beta.npz\")\n if os.path.exists(path_src) and os.path.exists(path_dst):\n cor = None\n beta_src = np.load(path_src)['arr_0']\n beta_dst = np.load(path_dst)['arr_0']\n cor = np.corrcoef(beta_dst.ravel(), beta_src.ravel())[0, 1]\n print(fold, \"\\t\", param_src, \"\\t\", param_dst, \"\\t\",cor)\n elif os.path.exists(path_src) and not os.path.exists(path_dst):\n print(path_src, \"\\n\", path_dst)\n if copy:\n shutil.copytree(\n os.path.dirname(path_src),\n os.path.dirname(path_dst))\n\ndef do_copy_results():\n outer_str = [\"cv%02d\" % i for i in range(5)]\n inner_str = [\"cvnested%02d\" % i for i in range(5)] + [\"refit\"]\n SRC = \"/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv/modselectcv/%s/%s\"\n DST = \"/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_all/5cv/%s/%s\"\n copy_results(SRC, DST, outer_str, inner_str, dst_prefix=\"enettv\")\n\n\n import parsimony.utils.weights as weights\n start_vector=weights.RandomUniformWeights(normalise=True)\n start_vector.get_weights(10)","repo_name":"neurospin/scripts","sub_path":"2013_adni/MCIc-CTL-FS/03_enetall_ADNI-MCIc-CTL-FS_cs.py","file_name":"03_enetall_ADNI-MCIc-CTL-FS_cs.py","file_ext":"py","file_size_in_byte":27190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42904477289","text":"def find_files(suffix, 
path):\n \"\"\" \n Find all files beneath path with file name suffix.\n\n Note that a path may contain further subdirectories\n and those subdirectories may also contain further subdirectories.\n\n There is no limit to how deep the subdirectories can be.\n\n Args:\n suffix(str): suffix of the file name to be found\n path(str): path of the file system\n\n Returns:\n a list of paths\n \"\"\"\n files = []\n #files = os.listdir(path)\n try:\n for filename in os.listdir(path):\n if os.path.isdir(path+\"/\"+filename):\n files.extend(find_files(suffix, path+\"/\"+filename))\n if filename.endswith(suffix):\n files.append(path+\"/\"+filename)\n except OSError:\n print(\"No path provided\")\n return files\n return files\n\n## Locally save and call this file ex.py ##\n# Code to demonstrate the use of some of the OS modules in python\n\nimport os\n\n# Let us print the files in the directory in which you are running this script\nprint(os.listdir(\".\"))\n\n# Let us check if this file is indeed a file!\nprint(os.path.isfile(\"./ex.py\"))\n\n# Does the file end with .py?\nprint(\"./ex.py\".endswith(\".py\"))\nprint(find_files(\".c\", \"./testdir\"))\nprint(find_files(\".c\", \"\"))\nprint(find_files(\".exe\",\"./testdir\"))","repo_name":"reaprman/Data-Struct-algo-nanodegree","sub_path":"proj2/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7624788264","text":"\"\"\"\nPython <-> Objective-C bridge (PyObjC)\n\nThis module defines the core interfaces of the Python<->Objective-C bridge.\n\"\"\"\n\n__all__ = ['IBOutlet', 'IBAction', 'accessor', 'Accessor', 'typedAccessor', 'callbackFor', 'selectorFor', 'synthesize', 'namedselector', 'typedSelector', 'namedSelector', 'instancemethod', 'signature' ]\n\nfrom objc._objc import ivar, selector, _makeClosure, _C_SEL, _C_ID, _C_NSUInteger, _C_NSBOOL\nimport sys, textwrap\nimport warnings\nfrom inspect import getargspec\n\n_C_NSRange = [b\"{_NSRange=II}\", b\"{_NSRange=QQ}\"][sys.maxsize > 2**32]\n\n#\n# Interface builder support.\n#\ndef IBOutlet(name=None):\n \"\"\"\n Create an instance variable that can be used as an outlet in\n Interface Builder.\n \"\"\"\n if name is None:\n return ivar(isOutlet=1)\n else:\n return ivar(name, isOutlet=1)\n\ndef IBAction(func):\n \"\"\"\n Return an Objective-C method object that can be used as an action\n in Interface Builder.\n \"\"\"\n if func is None:\n raise TypeError(\"IBAction argument must be a callable\")\n return selector(func, signature=b\"v@:@\")\n\ndef instancemethod(func):\n if func is None:\n raise TypeError(\"instancemethod argument must be a callable\")\n return selector(func, isClassMethod=False)\n\ndef accessor(func, typeSignature=b'@'):\n \"\"\"\n Return an Objective-C method object that is conformant with key-value coding\n and key-value observing.\n \"\"\"\n args, varargs, varkw, defaults = getargspec(func)\n funcName = func.__name__\n maxArgs = len(args)\n minArgs = maxArgs - len(defaults or ())\n # implicit self\n selArgs = 1 + funcName.count('_')\n if varargs is not None or varkw is not None:\n raise TypeError('%s can not be an accessor because it accepts varargs or varkw' % (funcName,))\n\n if not (minArgs <= selArgs <= maxArgs):\n if minArgs == maxArgs:\n raise TypeError('%s expected to take %d args, but must accept %d from Objective-C (implicit self plus count of underscores)' % (funcName, maxArgs, selArgs))\n else:\n raise TypeError('%s expected to take 
between %d and %d args, but must accept %d from Objective-C (implicit self plus count of underscores)' % (funcName, minArgs, maxArgs, selArgs))\n\n if selArgs == 3:\n if funcName.startswith('validate') and funcName.endswith('_error_'):\n return selector(func, signature=_C_NSBOOL + b'@:N^@o^@')\n\n if funcName.startswith('insertObject_in') and funcName.endswith('AtIndex_'):\n return selector(func, signature=b'v@:' + typeSignature + _C_NSUInteger)\n elif funcName.startswith('replaceObjectIn') and funcName.endswith('AtIndex_withObject_'):\n return selector(func, signature=b'v@:' + _C_NSUInteger + typeSignature)\n\n elif funcName.startswith('get') and funcName.endswith('_range_'):\n return selector(func, signature=b'v@:o^@' + _C_NSRange)\n\n elif funcName.startswith('insert') and funcName.endswith('_atIndexes_'):\n return selector(func, signature=b'v@:@@')\n\n elif funcName.startswith('replace') and 'AtIndexes_with' in funcName:\n return selector(func, signature=b'v@:@@')\n\n # pass through to \"too many arguments\"\n\n elif selArgs == 2:\n if funcName.startswith('objectIn') and funcName.endswith('AtIndex_'):\n return selector(func, signature=typeSignature + b'@:' + _C_NSUInteger)\n elif funcName.startswith('removeObjectFrom') and funcName.endswith('AtIndex_'):\n return selector(func, signature=b'v@:' + _C_NSUInteger)\n elif funcName.startswith('remove') and funcName.endswith('AtIndexes_'):\n return selector(func, signature=b\"v@:@\")\n elif funcName.endswith('AtIndexes_'):\n return selector(func, signature=b\"@@:@\")\n elif funcName.startswith('memberOf'):\n return selector(func, signature=_C_NSBOOL + b\"@:\" + typeSignature)\n elif funcName.startswith('add') and funcName.endswith('Object_'):\n return selector(func, signature=b\"v@:\" + typeSignature)\n elif funcName.startswith('add'):\n return selector(func, signature=b\"v@:@\")\n elif funcName.startswith('intersect'):\n return selector(func, signature=b\"v@:@\")\n\n return selector(func, signature=b\"v@:\" + typeSignature)\n\n elif selArgs == 1:\n if funcName.startswith('countOf'):\n typeSignature = _C_NSUInteger\n elif funcName.startswith('enumerator'):\n typeSignature = b\"@\"\n\n\n return selector(func, signature=typeSignature + b\"@:\")\n\n raise TypeError(\"%s not recognized as an accessor\" % (funcName,))\n\n\ndef typedSelector(signature):\n def _typedSelector(func):\n if func is None:\n raise TypeError(\"typedSelector() function argument must be a callable\")\n return selector(func, signature=signature)\n return _typedSelector\n\ndef namedSelector(name, signature=None):\n \"\"\"\n Decorator for overriding the Objective-C SEL for a method, usage:\n\n @namedSelector(\"foo:bar:\")\n def foobar(self, foo, bar):\n return foo + bar\n \"\"\"\n if signature is not None:\n def _namedselector(func):\n if func is None:\n raise TypeError(\"IBAction argument must be a callable\")\n return selector(func, selector=name, signature=signature)\n else:\n def _namedselector(func):\n if func is None:\n raise TypeError(\"IBAction argument must be a callable\")\n return selector(func, selector=name)\n\n return _namedselector\n\ndef namedselector(name, signature=None):\n warnings.warn(\"use objc.namedSelector instead of objc.namedselector\", DeprecationWarning, stacklevel=2)\n return namedSelector(name, signature)\n\ndef typedAccessor(typeSignature):\n \"\"\"\n Decorator for creating a typed accessor, usage:\n\n @typedAccessor('i')\n def someIntegerAccessor(self):\n return self.someInteger\n\n @typedAccessor('i')\n def setSomeIntegerAccessor_(self, 
anInteger):\n self.someInteger = anInteger\n \"\"\"\n def _typedAccessor(func):\n return accessor(func, typeSignature)\n return _typedAccessor\n\ndef Accessor(func):\n warnings.warn(\n \"Use objc.accessor instead of objc.Accessor\", DeprecationWarning)\n return accessor(func)\n\n#\n# Callback support\n#\ndef callbackFor(callable, argIndex=-1):\n \"\"\"\n Decorator for converting a function into an object that can be used\n as a callback function for (Objective-)C API's that take such a beast\n as one of their arguments.\n\n Note that using this decorator for methods is unsupported and that this\n decorator is optional when the callback isn't stored by the called function\n\n Usage::\n\n @objc.callbackFor(NSArray.sortedArrayUsingFunction_context_)\n def compare(left, right, context):\n return 1\n \"\"\"\n def addClosure(function):\n closure = _makeClosure(function, callable, argIndex)\n function.pyobjc_closure = closure\n return function\n\n return addClosure\n\ndef selectorFor(callable, argIndex=-1):\n \"\"\"\n Decorator that makes sure that the method has the right signature to be\n used as the selector argument to the specified method.\n\n Usage::\n\n @objc.selectorFor(NSApplication.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_)\n def sheetDidEnd_returnCode_contextInfo_(self, sheet, returnCode, info):\n pass\n \"\"\"\n if argIndex == -1:\n for arg in callable.__metadata__()['arguments']:\n if arg['type'] == _C_SEL and 'sel_of_type' in arg:\n signature = arg['sel_of_type']\n break\n else:\n raise ValueError(\"No selector argument with type information\")\n\n else:\n try:\n signature = callable.__metadata__()['arguments'][argIndex]['sel_of_type']\n except (IndexError, KeyError):\n raise ValueError(\"Not a selector argument with type information\")\n\n def addSignature(function):\n return selector(function, signature=signature)\n\n return addSignature\n\n\ndef synthesize(name, copy=False, readwrite=True, type=_C_ID, ivarName=None):\n \"\"\"\n Use this in a class dictionary to syntheze simple setting/setter methods.\n\n Note: this is only necessary to get propper behaviour when Key-Value coding\n is used and special features (like copying) are needed\n\n usage::\n\n class MyClass (NSObject):\n objc.synthesize('someTitle', copy=True)\n\n \"\"\"\n if not name:\n raise ValueError(\"Empty property name\")\n\n if ivarName is None:\n ivarName = '_' + name\n\n classDict = sys._getframe(1).f_locals\n\n setterName = 'set%s%s_'%(name[0].upper(), name[1:])\n\n if copy:\n setter = textwrap.dedent('''\n def %(name)s(self, value):\n self.%(ivar)s = value.copy()\n ''' % dict(name=setterName, ivar=ivarName))\n\n else:\n setter = textwrap.dedent('''\n def %(name)s(self, value):\n self.%(ivar)s = value\n ''' % dict(name=setterName, ivar=ivarName))\n\n getter = textwrap.dedent('''\n def %(name)s(self):\n return self.%(ivar)s\n ''' % dict(name=name, ivar=ivarName))\n\n if readwrite:\n exec(setter, globals(), classDict)\n\n exec(getter, globals(), classDict)\n\n classDict[ivarName] = ivar(type=type)\n\n\ndef signature(signature, **kw):\n \"\"\"\n A Python method decorator that allows easy specification\n of Objective-C selectors.\n\n Usage::\n\n @objc.signature('i@:if')\n def methodWithX_andY_(self, x, y):\n return 0\n \"\"\"\n warnings.warn(\"Usage objc.typedSelector instead of objc.signature\", DeprecationWarning)\n kw['signature'] = signature\n def makeSignature(func):\n return selector(func, **kw)\n return 
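The synthesize helper above builds getter/setter source with textwrap and exec-utes it into the caller's class dictionary. Here is the same mechanism stripped of everything Objective-C specific (all names are invented), to show why exec into a namespace works:

import textwrap

def make_accessors(name, ivar):
    # Build Python source for a getter and a PyObjC-style setter, then exec it
    setter = 'set%s%s_' % (name[0].upper(), name[1:])
    src = textwrap.dedent('''
        def %(name)s(self):
            return self.%(ivar)s
        def %(setter)s(self, value):
            self.%(ivar)s = value
    ''' % dict(name=name, ivar=ivar, setter=setter))
    ns = {}
    exec(src, ns)
    return ns[name], ns[setter]

class Demo(object):
    _title = None
    title, setTitle_ = make_accessors('title', '_title')

d = Demo()
d.setTitle_('hello')
print(d.title())  # -> hello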
makeSignature\n","repo_name":"albertz/music-player","sub_path":"mac/pyobjc-core/Lib/objc/_descriptors.py","file_name":"_descriptors.py","file_ext":"py","file_size_in_byte":9911,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"81"} +{"seq_id":"14035889562","text":"# from typing import ValuesView\nfrom django.test import TestCase\nfrom django.contrib.auth import get_user_model\n\n\nclass ModelTest(TestCase):\n\n    def test_create_user_with_phone_successful(self):\n        \"\"\"Test creating a new user with a phone number is successful\"\"\"\n        phone = \"+919999999999\"\n        user = get_user_model().objects.create_user(\n            phone=phone\n        )\n        self.assertEqual(user.phone, phone)\n\n    # def test_new_user_phone_normalised(self):\n    #     \"\"\"Test phone for new user is normalised\"\"\"\n    #     phone = \"+919595955666\"\n    #     user = get_user_model().objects.create_user(phone)\n    #     self.assertEqual(user.phone, phone)\n\n    def test_new_user_invalid_phone(self):\n        \"\"\"Test creating a user with no phone raises an error\"\"\"\n        with self.assertRaises(ValueError):\n            get_user_model().objects.create_user(None)\n\n    def test_create_new_super_user(self):\n        \"\"\"Test that a new superuser is created\"\"\"\n        user = get_user_model().objects.create_superuser('+919999999999')\n        self.assertTrue(user.is_superuser)\n        self.assertTrue(user.is_staff)\n","repo_name":"abhishek-codes/prodocker","sub_path":"app/core/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11344356171","text":"# https://programmers.co.kr/learn/courses/30/lessons/42626#qna\n# \"More Spicy\" problem\n# Per a Q&A clarification, the scoville list always has length >= 2!\n# So the case where scoville has length 1 and is already >= K cannot occur\nimport heapq\n\n\ndef solution(scoville, K):\n    heapq.heapify(scoville)\n    answer = 0\n    if scoville[0] >= K:\n        # Already spicy enough: no mixing needed\n        return answer\n    size = len(scoville)\n    for t in range(size-1):\n        one = heapq.heappop(scoville)\n        two = heapq.heappop(scoville)\n        new = one+(two*2)\n        heapq.heappush(scoville,new)\n        answer+=1\n        if scoville[0]>=K:\n            return answer\n    return -1\n\n\n","repo_name":"MinMolang/codePractice","sub_path":"Programmers/HEAP_hotter.py","file_name":"HEAP_hotter.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30756542116","text":"import numpy as np\nimport warnings\n\nfrom keras.callbacks import Callback\nfrom time import sleep\n\n\nclass EarlyStopping(Callback):\n    \"\"\"Stop training when a monitored quantity has stopped improving.\n\n    # Arguments\n        monitor: quantity to be monitored.\n        min_delta: minimum change in the monitored quantity\n            to qualify as an improvement, i.e. an absolute\n            change of less than min_delta, will count as no\n            improvement.\n        patience: number of epochs with no improvement\n            after which training will be stopped.\n        verbose: verbosity mode.\n        mode: one of {auto, min, max}. 
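The heap pattern from the HEAP_hotter snippet above, run in isolation: heapify once, then repeatedly pop the two smallest values and push their mixture, so each step is O(log n). The numbers are just a sample.

import heapq

scoville = [1, 2, 3, 9, 10, 12]
K = 7
heapq.heapify(scoville)
mixes = 0
while scoville[0] < K and len(scoville) >= 2:
    one = heapq.heappop(scoville)   # mildest
    two = heapq.heappop(scoville)   # second mildest
    heapq.heappush(scoville, one + two * 2)
    mixes += 1
print(mixes if scoville[0] >= K else -1)  # -> 2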
In `min` mode,\n training will stop when the quantity\n monitored has stopped decreasing; in `max`\n mode it will stop when the quantity\n monitored has stopped increasing; in `auto`\n mode, the direction is automatically inferred\n from the name of the monitored quantity.\n baseline: Baseline value for the monitored quantity to reach.\n Training will stop if the model doesn't show improvement\n over the baseline.\n \"\"\"\n\n def __init__(self,\n monitor='val_loss',\n min_delta=0,\n patience=0,\n verbose=0,\n mode='auto',\n baseline=None):\n super(EarlyStopping, self).__init__()\n\n self.output_msg = \"\"\n self.monitor = monitor\n self.baseline = baseline\n self.patience = patience\n self.verbose = verbose\n self.min_delta = min_delta\n self.wait = 0\n self.stopped_epoch = 0\n\n if mode not in ['auto', 'min', 'max']:\n warnings.warn('EarlyStopping mode %s is unknown, '\n 'fallback to auto mode.' % mode,\n RuntimeWarning)\n mode = 'auto'\n\n if mode == 'min':\n self.monitor_op = np.less\n elif mode == 'max':\n self.monitor_op = np.greater\n else:\n if 'acc' in self.monitor:\n self.monitor_op = np.greater\n else:\n self.monitor_op = np.less\n\n if self.monitor_op == np.greater:\n self.min_delta *= 1\n else:\n self.min_delta *= -1\n\n def on_train_begin(self, logs=None):\n # Allow instances to be re-used\n self.wait = 0\n self.stopped_epoch = 0\n if self.baseline is not None:\n self.best = self.baseline\n else:\n self.best = np.Inf if self.monitor_op == np.less else -np.Inf\n\n def on_epoch_end(self, epoch, logs=None):\n current = logs.get(self.monitor)\n if current is None:\n warnings.warn(\n 'Early stopping conditioned on metric `%s` '\n 'which is not available. Available metrics are: %s' %\n (self.monitor, ','.join(list(logs.keys()))), RuntimeWarning\n )\n return\n if self.monitor_op(current - self.min_delta, self.best):\n self.best = current\n self.wait = 0\n else:\n self.wait += 1\n if self.wait >= self.patience:\n self.stopped_epoch = epoch\n self.model.stop_training = True\n\n def on_train_end(self, logs=None):\n if self.stopped_epoch > 0 and self.verbose > 0:\n self.output_msg = 'Epoch %05d: early stopping' % (self.stopped_epoch + 1)\n print(self.output_msg) \n\n def get_stopped_epoch(self):\n if self.stopped_epoch > 0:\n return self.stopped_epoch+1\n return \"No\"\n","repo_name":"fr4ncidir/ImpactExperiment","sub_path":"custom_callbacks.py","file_name":"custom_callbacks.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30007952771","text":"\n# coding: utf-8\n\n# In[26]:\n\n\n#Xcalar imports. For more information, refer to discourse.xcalar.com\nfrom xcalar.external.LegacyApi.XcalarApi import XcalarApi\nfrom xcalar.external.LegacyApi.Session import Session\nfrom xcalar.external.LegacyApi.WorkItem import WorkItem\nfrom xcalar.external.LegacyApi.Operators import *\nfrom xcalar.external.LegacyApi.Dataset import *\nfrom xcalar.external.LegacyApi.WorkItem import *\nfrom xcalar.external.LegacyApi.Udf import *\nfrom xcalar.external.Retina import *\nimport timeit\nimport argparse\nimport json\nimport tarfile\nimport io\nfrom tarfile import TarInfo\n\n#Code starts here. 
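The patience bookkeeping at the heart of this callback, reduced to a plain loop over a fabricated val_loss history (no Keras required): reset the counter on improvement, stop once it reaches patience.

losses = [0.90, 0.80, 0.79, 0.81, 0.82, 0.83]  # pretend per-epoch val_loss
patience, min_delta = 2, 0.0
best, wait, stopped_epoch = float('inf'), 0, None
for epoch, current in enumerate(losses):
    if current < best - min_delta:   # improvement: remember it, reset patience
        best, wait = current, 0
    else:                            # no improvement: burn one patience unit
        wait += 1
        if wait >= patience:
            stopped_epoch = epoch
            break
print('stopped at epoch', stopped_epoch)  # -> 4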
First create a XcalarApi object to do anything\nxcalarApi = XcalarApi(bypass_proxy=True)\nop = Operators(xcalarApi)\nudf = Udf(xcalarApi)\nretina = Retina(xcalarApi)\n\nuser = \"admin\"\nsession = \"test\"\n\n#Connect to current workbook that you are in\nworkbook = Session(xcalarApi, \"admin\", \"admin\", 4135730, True, \"test\")\nxcalarApi.setSession(workbook)\n\npathDelim = '\\\"/\\\"'\ncutNum = 8\n\nfinalTable = \"Transactions\"\nfinalTableKey = \"txnId\"\n\ntables = {\n \"orders\": {\n \"key\": \"o_orderkey\",\n \"opcode\": \"opcode\",\n \"columns\": [\n {\n \"name\": \"o_custkey\",\n \"type\": \"DfInt64\"\n },\n {\n \"name\": \"o_orderstatus\",\n \"type\": \"DfString\"\n },\n {\n \"name\": \"o_totalprice\",\n \"type\": \"DfFloat64\"\n },\n {\n \"name\": \"o_orderdate\",\n \"type\": \"DfString\"\n },\n {\n \"name\": \"o_orderpriority\",\n \"type\": \"DfString\"\n },\n {\n \"name\": \"o_shippriority\",\n \"type\": \"DfInt64\"\n },\n {\n \"name\": \"o_clerk\",\n \"type\": \"DfString\"\n },\n {\n \"name\": \"o_comment\",\n \"type\": \"DfString\"\n }\n ],\n \"path\": \"/netstore/users/vgonela/hvr_data/tpch/orders/\",\n }\n}\n\ndef formatColumns(colsIn, key, prefix = None):\n cols = []\n\n for col in colsIn:\n synthCol = {}\n if prefix:\n synthCol[\"sourceColumn\"] = prefix + \"::\" + col[\"name\"]\n else:\n synthCol[\"sourceColumn\"] = col[\"name\"]\n\n synthCol[\"destColumn\"] = col[\"name\"]\n synthCol[\"columnType\"] = col[\"type\"]\n\n cols.append(synthCol)\n\n cols.append({\"sourceColumn\": key,\n \"columnType\": \"DfInt64\",\n \"destColumn\": key})\n\n cols.append({\"sourceColumn\": \"XcalarRankOver\",\n \"columnType\": \"DfInt64\",\n \"destColumn\": \"XcalarRankOver\"})\n\n cols.append({\"sourceColumn\": \"XcalarOpCode\",\n \"columnType\": \"DfInt64\",\n \"destColumn\": \"XcalarOpCode\"})\n\n return (cols)\n\n\nfor tableName, info in tables.items():\n query = []\n\n load = {\n \"operation\": \"XcalarApiBulkLoad\",\n \"args\": {\n \"dest\": \".XcalarDS.{}\".format(tableName),\n \"loadArgs\": {\n \"parseArgs\": {\n \"parserFnName\": \"default:convertNewLineJsonToArrayJson\",\n \"parserArgJson\": \"{}\",\n \"fileNameFieldName\": \"fn\",\n \"recordNumFieldName\": \"rec\",\n \"allowFileErrors\": False,\n \"allowRecordErrors\": False,\n \"schema\": []\n },\n \"sourceArgsList\": [\n {\n \"recursive\": False,\n \"path\": info[\"path\"],\n \"targetName\": \"Default Shared Root\",\n \"fileNamePattern\": \"\"\n }\n ],\n \"size\": 10737418240\n }\n }\n }\n\n query.append(load)\n\n index = [\n {\n \"operation\": \"XcalarApiIndex\",\n \"args\": {\n \"source\": \".XcalarDS.{}\".format(tableName),\n \"dest\": \"{}-1\".format(tableName),\n \"prefix\": tableName,\n \"key\": [\n {\n \"name\": \"xcalarRecordNum\",\n \"ordering\": \"Unordered\",\n \"keyFieldName\": \"\",\n \"type\": \"DfInt64\"\n }\n ],\n },\n },\n {\n \"operation\": \"XcalarApiMap\",\n \"tag\": \"sortTimestamp\",\n \"args\": {\n \"source\": \"{}-1\".format(tableName),\n \"dest\": \"{}-map\".format(tableName),\n \"eval\": [\n {\n \"evalString\": \"int(cut({}::fn, {}, {}))\".format(tableName,\n cutNum,\n pathDelim),\n \"newField\": \"ts\"\n },\n {\n \"evalString\": \"int({}::rec, 10)\".format(tableName),\n \"newField\": \"rec\"\n }\n ],\n },\n },\n {\n \"operation\": \"XcalarApiIndex\",\n \"tag\": \"sortTimestamp\",\n \"comment\": \"apply ordering based on file row and timestamp\",\n \"args\": {\n \"source\": \"{}-map\".format(tableName),\n \"dest\": \"{}-sort\".format(tableName),\n \"key\": [\n {\n \"name\": \"ts\",\n \"ordering\": 
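The eval string int(cut(orders::fn, 8, "/")) above pulls the 8th '/'-separated field of each source file path out as a timestamp. The equivalent in plain Python, on a made-up path of the same shape (cut() counts fields 1-based, so field 8 is index 7 after split):

fn = "/netstore/users/vgonela/hvr_data/tpch/orders/20180401/part-0001.json"
ts = int(fn.split("/")[7])  # field 8 in 1-based cut() terms
print(ts)  # -> 20180401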
\"Ascending\",\n \"keyFieldName\": \"ts\",\n \"type\": \"DfInt64\"\n },\n {\n \"name\": \"rec\",\n \"ordering\": \"Ascending\",\n \"keyFieldName\": \"rec\",\n \"type\": \"DfInt64\"\n }\n ],\n },\n }\n ]\n\n query += index\n\n rankOver = [\n {\n \"operation\": \"XcalarApiGetRowNum\",\n \"tag\": \"rankOver\",\n \"args\": {\n \"source\": \"{}-sort\".format(tableName),\n \"dest\": \"{}-getRowNum\".format(tableName),\n \"newField\": \"rowNum\"\n },\n },\n {\n \"operation\": \"XcalarApiIndex\",\n \"tag\": \"rankOver\",\n \"args\": {\n \"source\": \"{}-getRowNum\".format(tableName),\n \"dest\": \"{}-keyIndex\".format(tableName),\n \"key\": [\n {\n \"name\": \"{}::{}\".format(tableName, info[\"key\"]),\n \"ordering\": \"Unordered\",\n \"keyFieldName\": info[\"key\"],\n \"type\": \"DfInt64\"\n }\n ],\n },\n },\n {\n \"operation\": \"XcalarApiGroupBy\",\n \"tag\": \"rankOver\",\n \"comment\": \"\",\n \"args\": {\n \"source\": \"{}-keyIndex\".format(tableName),\n \"dest\": \"{}-groupBy\".format(tableName),\n \"eval\": [\n {\n \"evalString\": \"minInteger(rowNum)\",\n \"newField\": \"minRow\"\n }\n ],\n \"newKeyField\": \"dummyKey\",\n },\n },\n {\n \"operation\": \"XcalarApiJoin\",\n \"tag\": \"rankOver\",\n \"args\": {\n \"source\": [\n \"{}-keyIndex\".format(tableName),\n \"{}-groupBy\".format(tableName),\n ],\n \"key\": [\n [\n info[\"key\"]\n ],\n [\n \"dummyKey\"\n ]\n ],\n \"joinType\": \"innerJoin\",\n \"dest\": \"{}-joinBack\".format(tableName),\n \"columns\": [\n [\n {\n \"sourceColumn\": \"orders\",\n \"columnType\": \"DfFatptr\",\n \"destColumn\": \"orders\"\n },\n {\n \"sourceColumn\": info[\"key\"],\n \"columnType\": \"DfInt64\",\n \"destColumn\": info[\"key\"],\n }\n ],\n [\n {\n \"sourceColumn\": \"dummyKey\",\n \"columnType\": \"DfInt64\",\n \"destColumn\": \"dummyKey\"\n },\n {\n \"sourceColumn\": \"minRow\",\n \"columnType\": \"DfInt64\",\n \"destColumn\": \"minRow\"\n }\n ]\n ],\n },\n },\n {\n \"operation\": \"XcalarApiMap\",\n \"tag\": \"rankOver\",\n \"args\": {\n \"source\": \"{}-joinBack\".format(tableName),\n \"dest\": \"{}-ranked\".format(tableName),\n \"eval\": [\n {\n \"evalString\": \"int(add(sub(rowNum, minRow), 1))\",\n \"newField\": \"XcalarRankOver\"\n },\n {\n \"evalString\": \"int({}::{})\".format(tableName,\n info[\"opcode\"]),\n \"newField\": \"XcalarOpCode\"\n }\n ],\n },\n }\n ]\n\n query += rankOver\n\n synthesizeColumns = formatColumns(info[\"columns\"], info[\"key\"])\n\n synth = {\n \"operation\": \"XcalarApiSynthesize\",\n \"comment\": \"apply schema\",\n \"args\": {\n \"sameSession\": True,\n \"source\": \"{}-ranked\".format(tableName),\n \"dest\": tableName,\n \"columns\": synthesizeColumns,\n },\n }\n\n query.append(synth)\n\n queryStr = json.dumps(query)\n\n print(\"Starting {}\".format(tableName))\n print(queryStr)\n\n start = timeit.default_timer()\n xcalarApi.submitQuery(queryStr, session, tableName)\n\n try:\n op.unpublish(tableName)\n except:\n pass\n\n op.publish(tableName, tableName)\n end = timeit.default_timer()\n\n elapsed = end - start\n print(\"Ran {}: {}s\".format(tableName, str(elapsed)))\n\n retinaColumns = [col[\"destColumn\"] for col in synthesizeColumns]\n\n try:\n retina.delete(tableName)\n except:\n pass\n\n retina.make(tableName,\n [tableName],\n [retinaColumns])\n\n\nbatchId = 0\n\nfor tableName, info in tables.items():\n joinColumns = formatColumns(info[\"columns\"], info[\"key\"])\n\n dataflowInfo = {}\n\n dataflowInfo[\"query\"] = [\n {\n \"operation\": \"XcalarApiSelect\",\n \"comment\": \"Latest batch select on 
{}\".format(tableName),\n \"args\": {\n \"source\": tableName,\n \"dest\": \"{}.select\".format(tableName),\n \"minBatchId\": batchId,\n \"maxBatchId\": batchId\n }\n },\n {\n \"operation\": \"XcalarApiSelect\",\n \"comment\": \"Full historical select on {}\".format(finalTable),\n \"args\": {\n \"source\": finalTable,\n \"dest\": \"{}.select\".format(finalTable)\n }\n },\n {\n \"operation\": \"XcalarApiAggregate\",\n \"comment\": \"Find minimum key\",\n \"tag\": \"filterMinKey\",\n \"args\": {\n \"source\": \"{}.select\".format(tableName),\n \"dest\": \"{}.minKey\".format(tableName),\n \"eval\": [\n {\n \"evalString\": \"min({})\".format(info[\"key\"])\n }\n ]\n }\n },\n {\n \"operation\": \"XcalarApiFilter\",\n \"comment\": \"Reduce {} using calculated minKey\".format(finalTable),\n \"tag\": \"filterMinKey\",\n \"args\": {\n \"source\": \"{}.select\".format(finalTable),\n \"dest\": \"{}.filter\".format(finalTable),\n \"eval\": [\n {\n \"evalString\": \"gt({}, ^{}.minKey)\".format(info[\"key\"],\n tableName)\n }\n ]\n },\n },\n {\n \"operation\": \"XcalarApiJoin\",\n \"comment\": \"Apply changes to {} by doing a join\".format(finalTable),\n \"args\": {\n \"source\": [\n \"{}.filter\".format(finalTable),\n \"{}.select\".format(tableName),\n ],\n \"joinType\": \"crossJoin\",\n \"dest\": \"{}-joined\".format(tableName),\n \"evalString\": \"eq({}_, {})\".format(info[\"key\"], info[\"key\"]),\n \"columns\": [\n [\n {\n \"sourceColumn\": info[\"key\"],\n \"columnType\": \"DfInt64\",\n \"destColumn\": info[\"key\"] + \"_\"\n },\n {\n \"sourceColumn\": finalTableKey,\n \"columnType\": \"DfInt64\",\n \"destColumn\": finalTableKey,\n }\n ],\n joinColumns\n ],\n },\n }\n ]\n\n tableColumns = [{\"columnName\": col[\"destColumn\"], \"headerAlias\": col[\"destColumn\"]}\n for col in joinColumns]\n\n dataflowInfo[\"tables\"] = [\n {\n \"name\": \"{}-joined\".format(tableName),\n \"columns\": tableColumns\n }\n ]\n\n dataflowStr = json.dumps(dataflowInfo)\n\n retinaBuf = io.BytesIO()\n\n with tarfile.open(fileobj = retinaBuf, mode = \"w:gz\") as tar:\n info = TarInfo(\"dataflowInfo.json\")\n info.size = len(dataflowStr)\n\n tar.addfile(info, io.BytesIO(bytearray(dataflowStr, \"utf-8\")))\n\n retina.add(tableName + \"_applyUpdate\", retinaBuf.getvalue())\n","repo_name":"varlogtim/xcalar","sub_path":"scripts/createRetina.py","file_name":"createRetina.py","file_ext":"py","file_size_in_byte":14204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37877448325","text":"import chevron\nimport subprocess\nfrom pathlib import Path\nimport logging\n\nFILEDIR = Path(__file__).parent.resolve()\n\ndef write_dineof_config(outfile, dineof_in, variable, min_modes=10, max_modes=20, land_mask='land_mask', reconstruction=0):\n\n template = FILEDIR / 'dineof_config_template.init'\n\n ncv = max_modes + 5\n\n data = dict(\n dineof_in=dineof_in,\n variable=variable,\n min_modes=min_modes, \n max_modes=max_modes, \n land_mask=land_mask,\n reconstruction=reconstruction,\n ncv = ncv\n )\n\n with open(str(template), 'r') as f:\n outconfig = chevron.render(f, data)\n\n with open(outfile,\"w\") as f:\n f.write(outconfig)\n\n return\n\ndef exec_dineof(dineof_in, variable, **kwargs):\n\n config_path = f'dineof_{variable}.init'\n\n write_dineof_config(config_path, dineof_in, variable, **kwargs)\n\n cmd = f'dineof {config_path}'\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n out, err = proc.communicate()\n\n if out is not 
None:\n        print(out.decode(\"utf-8\"))\n    \n    if err is not None:\n        print('Error:',err.decode(\"utf-8\"))\n\n    return\n\n    \n\n","repo_name":"KMarkert/dineof-ee-gcp","sub_path":"src/dineof_runner.py","file_name":"dineof_runner.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"6138907672","text":"\"ISN - Python - Part 3 - Exercise 2.1\"\r\n\"Write a script that encodes a text file whose lines will each contain\\\r\n the last name, first name, address, postal code, city and phone number of various\\\r\n people (consider, for example, that they are the members of a club).\"\r\n\r\n#######################################################\r\n#                  Encoding: UTF-8                    #\r\n#               Python 3.7 program                    #\r\n#             Author : Antoine Cheucle                #\r\n#           No licence, free to use                   #\r\n#              Dependencies: none                     #\r\n#######################################################\r\n\r\ndef __main__():\r\n    fichier = open(input(\"Enter the name of the file to write to (don't forget to also include its extension if it has one!)\\n\"), \"a\")\r\n    print(\"Good! You can now start entering the data.\")\r\n    nb_personne = 0\r\n    while True:\r\n        nb_personne += 1\r\n        donnees = []\r\n        rep = input(\"Enter the last name of person #\"+str(nb_personne)+ \" Press Enter to cancel\")\r\n        if rep == \"\":\r\n            break\r\n        else:\r\n            donnees.append(rep)\r\n            donnees.append(input(\"Enter the first name of person #\"+str(nb_personne)))\r\n            donnees.append(input(\"Enter the address of person #\"+str(nb_personne)))\r\n            donnees.append(input(\"Enter the postal code of person #\"+str(nb_personne)))\r\n            donnees.append(input(\"Enter the city of person #\"+str(nb_personne)))\r\n            donnees.append(input(\"Enter the phone number of person #\"+str(nb_personne)))\r\n            fichier.write(\"|\".join(donnees)+\"|\\n\")\r\n    fichier.close()\r\n    print(\"Finished writing! File saved.\")\r\n\r\n\r\n__main__()\r\n","repo_name":"antoinech2/ISN-exercices-Python-Ann-e-2","sub_path":"partie_3/ex2/ex2.1.py","file_name":"ex2.1.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72298650826","text":"####################################################\n## EVOLUTIONARY FAILURE MODE CALCULATOR\n## Version 1.0.1\n####################################################\n\nEFM_VERSION = \"1.0.1\"\n\n# Import subprocess for command line stuff\nimport subprocess\nimport os\nimport csv\nfrom tempfile import NamedTemporaryFile\n\n# Import itertools\nfrom itertools import combinations\n\n# Import RegEx libraries\nimport re\nimport math\n\n# Import ElementTree for parsing BioBrick XML\nfrom xml.etree import ElementTree\n\nSUB_RATE = float(2.2 * 10 ** (-10))\n\n\ndef run_mummer(fasta_file, org):\n    \"\"\"\n    Executes the repeat-match command in MUMmer and parses the output.\n    Executes the nucmer and show-coords commands in MUMmer and parses the output.\n    :param fasta_file: Name of FASTA file with sequence\n    :param org: Name of organism for calculating rates\n    :return: List of dictionaries of results\n    \"\"\"\n    # Initialize dictionary\n    long_repeats = dict()\n    # Execute MUMmer command\n    try:\n        output = subprocess.check_output(['repeat-match', '-n', '16', '-f', fasta_file])\n    except:\n        print(\"MUMmer command 'repeat-match' failed.\")\n        raise\n    output_lines = output.splitlines()\n    # Loop through mummer output and skip the first 2 lines which only contain header information\n    for line in output_lines[2:]:\n        split_line = line.split()\n        start_pos1 = int(split_line[0])\n        start_pos2 = int(split_line[1])\n        repeat_length = int(split_line[2])\n        if repeat_length in long_repeats:\n            found = False\n            for locations in long_repeats[repeat_length]:\n                if start_pos2 in locations and start_pos1 in locations:\n                    found = True\n                    break\n                elif start_pos1 in locations:\n                    locations.append(start_pos2)\n                    locations.sort()\n                    found = True\n                    break\n                elif start_pos2 in locations:\n                    locations.append(start_pos1)\n                    locations.sort()\n                    found = True\n                    break\n            if found is False:\n                # This is definitely a new repeat of repeat_length that we haven't seen before\n                long_repeats[repeat_length].append(sorted([start_pos1, start_pos2]))\n        else:\n            # We have not seen a repeat of this length before\n            long_repeats[repeat_length] = [sorted([start_pos1, start_pos2])]\n\n    # Initialize list of dictionaries and reformat for tabular output\n    output_list = []\n    seen = []\n    for repeat_length, location_list in long_repeats.items():\n        for locations in location_list:\n            rate_per_repeat = float(0)\n            if len(locations) > 2:\n                # Must account for a homologous sequence occurring more than twice\n                combos = combinations(locations, 2)\n                for combo in combos:\n                    rate_per_repeat += get_recombo_rate(int(repeat_length), combo[0], combo[1], org)\n            else:\n                rate_per_repeat = get_recombo_rate(int(repeat_length), locations[0], locations[1], org)\n            entry = {'location': locations,\n                     'sequence': '',  # We don't record the sequence for long repeats\n                     'length': [repeat_length],\n                     'count': len(locations),\n                     'raw_rate': rate_per_repeat,\n                     'type': 'rmd'}\n            if [locations, [repeat_length]] not in seen:\n                output_list.append(entry)\n                seen.append([locations, [repeat_length]])\n\n    # Generate a temporary file for storing the '.delta' file that will be passed to show-coords\n    coords_file = NamedTemporaryFile(suffix='.delta', delete=False)\n    try:\n        # Execute nucmer command\n        
subprocess.call(['nucmer',\n                         '-l',\n                         '16',\n                         '-f',  # Forward strand only\n                         '--maxmatch',\n                         '--nosimplify',\n                         '--prefix='+coords_file.name[:-6],\n                         fasta_file,\n                         fasta_file],\n                        )\n        # Execute show-coords command\n        output = subprocess.check_output(['show-coords', '-T', '-I 100', '-H', '-d', coords_file.name])\n    except:\n        print(\"MUMmer command 'nucmer' and 'show-coords' failed.\")\n        raise\n    finally:\n        # Remove the '.delta' file because we're finished with it\n        os.remove(coords_file.name)\n\n    content_lines = output.splitlines()\n    # Initialize list of dictionaries for tabular output\n\n    for line in content_lines:\n        clean_line = line.split()\n        start_pos1 = int(clean_line[0])\n        start_pos2 = int(clean_line[2])\n        length1 = int(clean_line[4])\n        length2 = int(clean_line[5])\n        if start_pos1 != start_pos2:\n            # Remove repeats by converting to set\n            length_list = sorted(list(set([length1, length2])))\n            location = sorted([start_pos1, start_pos2])\n            rate = get_recombo_rate(int(length_list[0]), location[0], location[1], org)\n            entry = {'location': location,\n                     'sequence': '',\n                     'length': length_list,\n                     'count': len(location),\n                     'raw_rate': rate,\n                     'type': 'rmd'}\n            if [location, length_list] not in seen:\n                output_list.append(entry)\n                seen.append([location, length_list])\n\n    return output_list\n\n\ndef get_repeats_in_window(n, sequence, min_count, org):\n    \"\"\"\n    Generates a list of k-mer repeats in a given window size\n    :param n: Size of k-mer (2 for dinucleotides, 3 for trinucleotides, etc.)\n    :param sequence: String of DNA sequence\n    :param min_count: Minimum number of repeating units before recording\n    :param org: Host organism for rate calculations\n    :return: List of dictionaries of results for tabular output\n    \"\"\"\n    # Initialize repeats dictionary, in the format repeats -> { 'index' : ['CG', 2] }\n    repeats = dict()\n    if n == 0:\n        # Return an empty list, n = 0 doesn't make sense\n        return []\n\n    # Start counting repeats at 1\n    repeat_count = 1\n    i = 0\n    if n == 1:\n        # n = 1 is a special, simpler case to handle\n        while i < (len(sequence) - n):\n            if sequence[i] == sequence[i + 1]:\n                # Record as a repeat\n                if repeat_count + 1 >= min_count:\n                    repeats[i + n - n * repeat_count] = [sequence[i], repeat_count + 1]\n                # Jump one base ahead\n                i += 1\n                repeat_count += 1\n            else:\n                # Reset repeat_count\n                i += 1\n                repeat_count = 1\n    else:\n        # n > 1\n        while i < (len(sequence) - n):\n            # If the first base of the current window matches the first base of the next window\n            # AND the entire window equals the next consecutive window\n            if sequence[i] == sequence[i + n] and sequence[i:i + n] == sequence[(i + n):(i + 2 * n)]:\n                # If the window is larger than 4, make sure it doesn't contain any smaller repeating subunits\n                if check_subunits(sequence[i:i + n]) is True:\n                    i += 1\n                else:\n                    # Record as a repeat\n                    if repeat_count + 1 >= min_count:\n                        repeats[i + n - n * repeat_count] = [sequence[i:i + n], repeat_count + 1]\n                    # Jump forward a full window of bases\n                    i = i + n\n                    repeat_count += 1\n            else:\n                repeat_count = 1\n                # Otherwise shift the window forward by one base\n                i += 1\n\n    # Reformat for tabular output\n    output_list = []\n    for index, contents in repeats.items():\n        mut_rate = get_mut_rate(contents[1], n, org)\n        entry = {'location': [index],\n                 'sequence': str(contents[0]),\n                 'length': [len(contents[0])],\n                 'count': contents[1],\n                 'raw_rate': mut_rate,\n                 'type': 'ssr'\n                 }\n        output_list.append(entry)\n\n    return output_list\n\n\ndef check_subunits(sequence):\n    \"\"\"\n    Checks to see if a sequence can be broken up into smaller 
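The core test in get_repeats_in_window, "this window equals the next adjacent window", can be watched in isolation on a toy sequence with a fixed unit size; this loop is an illustration, not a drop-in replacement for the function above.

seq = "ACGTCGCGCGCGTTA"
n = 2
i = 0
while i < len(seq) - n:
    count = 1
    # Extend while the next n-mer window repeats the current one
    while seq[i + count * n:i + (count + 1) * n] == seq[i:i + n]:
        count += 1
    if count >= 3:
        print("unit", seq[i:i + n], "x", count, "at index", i)  # -> unit CG x 4 at index 4
        i += count * n
    else:
        i += 1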
repeating subunits. For example, ATAT is actually just\n two instances of AT.\n :param sequence: Short DNA sequence\n :return: Boolean. True if sequence can be broken up into smaller repeating subunits. Otherwise, False.\n \"\"\"\n if re.match(r\"^(.+?)\\1+$\", sequence):\n return True\n else:\n return False\n\n\ndef get_mut_rate(repeat_count, unit_length, org):\n '''\n Calculates mutation rate for simple sequence repeats\n :param repeat_count: Number of times the repeating unit occurs\n :param unit_length: Length of repeating unit\n :param org: Host organism\n :return: Mutation rate\n '''\n mut_rate = float(0)\n if org == 'ecoli' or org == 'reca':\n if unit_length == 1:\n # Formula based on analysis of Lee et. al. data\n mut_rate = float(10 ** (0.72896 * repeat_count - 12.91471))\n elif unit_length > 1:\n mut_rate = float(10 ** (0.06282 * repeat_count - 4.74882))\n elif org == 'yeast':\n if unit_length == 1:\n mut_rate = float(10 ** (0.3092 * repeat_count - 7.3220))\n elif unit_length > 1:\n mut_rate = float(10 ** (0.11141 * repeat_count - 7.65810))\n return mut_rate\n\n\ndef get_recombo_rate(length, location1, location2, org):\n '''\n Calculate the recombination rate based on the Oliviera, et. al. formula\n :param length: Length of homologous region\n :param location1: Location of first homologous region\n :param location2: Location of second homologous region\n :param org: Host organism\n :return: Recombination rate\n '''\n spacer = abs(int(location2) - int(location1)) - int(length)\n # If the homologous sequences overlap we can't calculate a rate\n if spacer < 0:\n return 0\n if org == 'ecoli' or org == 'yeast':\n recombo_rate = float(((8.8 + spacer) ** (-29.0 / length)) * (length / (1 + 1465.6 * length)))\n elif org == 'reca':\n recombo_rate = float(\n ((200.4 + spacer) ** (-8.8 / length)) * (length / (1 + 2163.0 * length + 14438.6 * spacer)))\n\n return recombo_rate\n\n\ndef get_biobrick_features(tree):\n \"\"\"\n Extracts sequence annotations from BioBrick XML files\n :param tree: parsed XML tree\n :return: List of dictionaries of features to display\n \"\"\"\n features = []\n colors = {\n 'promoter': 'green',\n 'stop': 'red',\n 'cds': 'blue',\n 'rbs': 'orange',\n 'binding': 'purple',\n 'BioBrick': 'black'\n }\n for node in tree.iter(tag='feature'):\n if node.find('type').text in colors:\n entry = dict()\n entry['type'] = node.find('type').text\n entry['title'] = node.find('title').text\n entry['startpos'] = int(node.find('startpos').text)\n entry['length'] = int(node.find('endpos').text) - int(node.find('startpos').text)\n entry['color'] = colors[entry['type']]\n features.append(entry)\n\n return features\n\n\ndef get_genbank_features(genome):\n \"\"\"\n Extracts sequence annotations a BioPython SeqRecord object\n :param genome: BioPython SeqRecord of genbank file\n :return: List of dictionaries of features to display\n \"\"\"\n features = []\n for feature in genome.features:\n if feature.qualifiers:\n name = feature.qualifiers.itervalues().next()[0]\n else:\n name = 'Untitled'\n entry = dict()\n entry['type'] = str(feature.type)\n entry['title'] = name\n entry['startpos'] = int(feature.location.nofuzzy_start)\n entry['length'] = int(feature.location.nofuzzy_end) - int(feature.location.nofuzzy_start)\n features.append(entry)\n\n return features\n\n\ndef truncate_table(repeats, min_rate):\n \"\"\"\n Returns a subset of repeats with a mutation rate higher than min_rate\n :param repeats: List of dictionaries of repeats\n :param min_rate: Minimum mutation rate to display in table\n 
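Plugging numbers into the formulas above: for E. coli, a mononucleotide run of 8 and a 16 bp repeat pair separated by 100 bp give roughly the following rates (the printed values are approximate, computed from the coefficients in this module).

ssr = 10 ** (0.72896 * 8 - 12.91471)   # homopolymer run of 8 -> ~8e-8
length, spacer = 16, 100
rmd = ((8.8 + spacer) ** (-29.0 / length)) * (length / (1 + 1465.6 * length))  # -> ~1.4e-7
print('%.3g %.3g' % (ssr, rmd))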
:return: A sorted and truncated list of dictionaries of repeats\n \"\"\"\n trunc = (repeat for repeat in repeats if\n repeat['raw_rate'] > min_rate and repeat['raw_rate'] != '' and repeat['overlap'] == True)\n trunc_sort = sorted(trunc, key=lambda k: k['raw_rate'] if k['raw_rate'] != '' else 0, reverse=True)\n return trunc_sort\n\n\ndef check_overlap(repeats, features, check_features):\n \"\"\"\n Checks each repeat to see if it overlaps with a feature, and sets 'overlap' to True if it does.\n :param repeats: List of dictionaries of repeats\n :param features: List of dictionaries of features\n :return: List of dictionaries of repeats, with each repeat containing a boolean 'overlap' value\n \"\"\"\n # If there are no features to report set all overlap to true to display everything\n if check_features is False:\n for repeat in repeats:\n repeat['overlap'] = True\n return repeats\n else:\n for repeat in repeats:\n for feature in features:\n if repeat['type'] == 'rmd':\n repeat_range = set(xrange(repeat['location'][0], repeat['location'][-1] + repeat['length'][0]))\n else:\n repeat_range = set(\n xrange(repeat['location'][0], repeat['location'][0] + repeat['length'][0] * repeat['count']))\n feature_range = set(xrange(feature['startpos'], feature['startpos'] + feature['length']))\n # Magical ampersand checks if two sets overlap\n if repeat_range & feature_range:\n repeat['overlap'] = True\n break\n else:\n repeat['overlap'] = False\n\n return repeats\n\n\ndef rate_sum(repeats, seq_len):\n \"\"\"\n Calculates an RIP score for given sequence\n :param repeats: List of dictionaries of repeats\n :param seq_len: Length of input sequence\n :return: Total predicted RIP score for whole sequence\n \"\"\"\n ssr_sum = float(0)\n rmd_sum = float(0)\n for entry in repeats:\n if entry['raw_rate'] != '' and entry['overlap'] is True:\n if entry['type'] == 'ssr':\n ssr_sum += entry['raw_rate']\n elif entry['type'] == 'rmd':\n rmd_sum += entry['raw_rate']\n\n base_rate = float(seq_len) * float(SUB_RATE)\n # Add in the mutation rate of an individual nucleotide\n r_sum = ssr_sum + rmd_sum + base_rate\n # Set the maximum rate sum to 1 for now.\n if r_sum > 1:\n r_sum = float(1)\n rel_rate = (float(r_sum) / float(base_rate))\n\n return {'rip': rel_rate, 'ssr_sum': ssr_sum, 'rmd_sum': rmd_sum, 'bps_sum': base_rate}\n\n\ndef process_efm(form):\n \"\"\"\n Takes a django form object and finds potentially hypermutable sites in a submitted sequence.\n :param form: A Django form object for processing\n :return: A dictionary of values to pass to the template renderer\n \"\"\"\n # Define the paths for your input files, sequence files, and biobrick file\n input_file = form.cleaned_data.get('fasta_file')\n features = form.cleaned_data.get('features')\n my_seq = form.cleaned_data.get('raw_sequence')\n org = form.cleaned_data.get('organism')\n\n # Set maximum window size for get_repeats_in_window()\n unit_length = 15\n\n # Integrate repeats generated from nucmer and from repeat-match\n mummer_repeats = run_mummer(input_file, org)\n\n # Get short repeats (SSRs) in each window size up to max unit_length\n all_ssr = []\n for i in range(unit_length):\n min_count = math.ceil(8 / (i + 1))\n if min_count < 3:\n min_count = 3\n if (i + 1) == 1:\n min_count = 4\n repeat_in_window = get_repeats_in_window(i + 1, my_seq, min_count, org)\n if repeat_in_window:\n all_ssr += repeat_in_window\n\n # Merge repeat lists together\n merged_repeats = mummer_repeats + all_ssr\n\n # Check if any areas overlap annotated regions\n merged_repeats = 
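check_overlap above reduces "does this repeat touch an annotated feature?" to intersecting two integer ranges as sets (xrange in this Python 2 module; range plays the same role in Python 3). The test on its own:

repeat_range = set(range(120, 145))    # repeat occupies bases 120..144
feature_range = set(range(140, 200))   # feature occupies bases 140..199
print(bool(repeat_range & feature_range))  # -> True, they share 140..144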
check_overlap(merged_repeats, features, form.cleaned_data.get('check_features'))\n\n # Truncate repeat list based on absolute mutation rate\n merged_repeats_trunc = truncate_table(merged_repeats, 10 ** (-9))\n\n # Find the sum of all mutation rates for sequences.\n overall_rate = rate_sum(merged_repeats, len(my_seq))\n\n return {'repeats': merged_repeats_trunc if merged_repeats_trunc else '',\n 'features': features,\n 'seq_length': len(my_seq),\n 'rate': overall_rate,\n 'title': form.cleaned_data['title'],\n 'check_features': form.cleaned_data['check_features'],\n 'organism': org,\n 'version': EFM_VERSION}\n","repo_name":"barricklab/efm-calculator","sub_path":"efm_helper.py","file_name":"efm_helper.py","file_ext":"py","file_size_in_byte":16938,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"15088268665","text":"# implements a linked list class - methods: append, prepend, includes, length, pop, shift, remove, reverse\n\nclass Node:\n def __init__(self,value):\n self.value = value\n self.next = None\n def __str__(self) -> str:\n #return \"value: {}, next => {}\".format(self.value,self.next)\n return \"{}\".format(self.value)\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.length = 0\n\n\n def __str__(self) -> str:\n string = \"\"\n n = self.head\n while(n is not None):\n string += n.value\n n = n.next\n if(n is not None):\n string += \" => \"\n return string\n \n @classmethod\n def make_list(cls):\n return cls()\n \n\n def append(self,value):\n self.length += 1\n\n def loop_list(node):\n if(self.head is None):\n self.head = Node(value)\n print(\"Node added\")\n return\n elif(node.next is None):\n node.next = Node(value)\n print(\"Node added\")\n else:\n return loop_list(node.next)\n \n\n loop_list(self.head)\n\n\n def prepend(self,value):\n self.length +=1\n next = self.head\n new_head = Node(value)\n self.head = new_head\n self.head.next = next\n print(\"Node added\")\n\n\n def includes(self,value):\n node = self.head\n found_value = False\n while(node is not None):\n if(node.value == value):\n found_value = True\n node = node.next\n return found_value\n \n \n def pop(self):\n current_node = self.head\n prev_node = None\n while(current_node is not None):\n if current_node.next is None:\n prev_node.next = None\n self.length -= 1\n return current_node\n else:\n prev_node = current_node\n current_node = current_node.next\n\n\n def shift(self):\n if(self.head and (self.head.next is not None)):\n new_head = self.head.next\n prev_head = self.head\n self.head = new_head\n self.length -= 1\n return prev_head\n \n \n def remove(self,value):\n node = self.head\n prev = None\n while node is not None:\n if node.value == value :\n if prev is None:\n self.head = None\n else:\n prev.next = None\n print(\"an item was removed from your list\")\n self.length -= 1\n return\n else:\n prev = node\n node = node.next\n print(\"no item found in list to remove\")\n\n def reverse(self):\n new_order = []\n node = self.head\n while node is not None:\n new_order.insert(0,node)\n node = node.next\n new_list = self.make_list()\n for n in new_order:\n new_list.append(n.value)\n return new_list\n \n\n","repo_name":"Davidjustice28/python-data-structures","sub_path":"linked-list.py","file_name":"linked-list.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31487191927","text":"from flask import Flask\n\nUPLOAD_FOLDER = ''\n\napp = 
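The reverse method in the linked-list record above builds a second list; the classic alternative reverses the chain in place by flipping one next pointer per step. A self-contained sketch with its own tiny Node:

class Node(object):
    def __init__(self, value):
        self.value, self.next = value, None

def reverse_in_place(head):
    prev = None
    while head is not None:
        # Rebind the next pointer backwards, then advance (RHS is evaluated first)
        head.next, prev, head = prev, head, head.next
    return prev  # old tail is the new head

a, b, c = Node(1), Node(2), Node(3)
a.next, b.next = b, c
node = reverse_in_place(a)
while node is not None:
    print(node.value)  # 3, then 2, then 1
    node = node.next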
Flask(__name__)\n#app.secret_key = \"secret key\"\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n\n# resp.result=content\n# resp.confidence=confidence\n# return resp\n\n# resp = jsonify(errors)\n# resp.status_code = 500\n# return resp","repo_name":"Shaon2221/Face-Recognition","sub_path":"Flask-REST-API/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38120955115","text":"from chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\n\n# chatbot = ChatBot('Basic Bot')\n# chatbot.set_trainer(ChatterBotCorpusTrainer)\n# chatbot.train(\"chatterbot.corpus.english\")\n\nchatbot = ChatBot(\n \"Chatbot Backed by MongoDB\",\n storage_adapter=\"chatterbot.storage.MongoDatabaseAdapter\",\n database=\"chatterbot_db\",\n database_uri=\"mongodb://192.168.99.100:27017/\",\n logic_adapters=[\n 'chatterbot.logic.BestMatch'\n ],\n trainer='chatterbot.trainers.ChatterBotCorpusTrainer',\n filters=[\n 'chatterbot.filters.RepetitiveResponseFilter'\n ]\n)\nchatbot.set_trainer(ChatterBotCorpusTrainer)\nchatbot.train(\"chatterbot.corpus.english\")\n","repo_name":"dghanwat/faq-chatbot-service","sub_path":"python/base_training.py","file_name":"base_training.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36950393717","text":"from .__types__ import Cursor, Status, Matchable, Groups\n\nclass SubstringMatcher(Matchable):\n def __init__(self, substring: str):\n self._substring = substring\n\n def match(self, cursor: Cursor, groups: Groups) -> Status:\n if cursor.index >= cursor.length: return Status.STOP\n if len(self._substring) > cursor.length: return Status.STOP\n\n for character in self._substring:\n if cursor.current == character: cursor.next()\n else: return Status.FALSE\n\n return Status.TRUE","repo_name":"SampleUserD/regular-expressions","sub_path":"regexp/matchers/SubstringMatcher.py","file_name":"SubstringMatcher.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9182629130","text":"import sys\r\n\r\nfrom flask import Flask, render_template, abort\r\nimport post_ticket\r\nimport math\r\n\r\napp = Flask(__name__)\r\n\r\nget_ticket = post_ticket\r\n\r\nALL_TICKETS = 'https://zccbuff.zendesk.com/api/v2/tickets.json'\r\nROWS_PER_PAGE = 25\r\nCURRENT_PAGE = 0\r\n\r\ndata = get_ticket.fetch_tickets(ALL_TICKETS)\r\nif data == 401:\r\n print('Authentication error. 
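The pagination arithmetic in the ticket viewer that begins above, isolated: a ceiling division for the page count and one slice per page; the data here is dummy.

import math

data = list(range(60))
ROWS_PER_PAGE = 25
total_pages = math.ceil(len(data) / ROWS_PER_PAGE)   # -> 3
page = 2                                             # zero-based last page
print(total_pages, data[page * ROWS_PER_PAGE:(page + 1) * ROWS_PER_PAGE])  # the 10 leftover rows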
Please check username or password')\r\n    sys.exit()\r\nelse:\r\n    TOTAL_PAGES = math.ceil(len(data) / ROWS_PER_PAGE)\r\n\r\n\r\n# For handling common http errors\r\n\r\n# API/Web server is not available\r\n@app.errorhandler(503)\r\ndef service_unavailable(e):\r\n    return render_template('503.html'), 503\r\n\r\n\r\n# Authentication error\r\n@app.errorhandler(401)\r\ndef authentication_error(e):\r\n    return render_template('401.html'), 401\r\n\r\n\r\n# Website not able to find the request\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n    return render_template('404.html'), 404\r\n\r\n\r\n# Internal server error\r\n@app.errorhandler(500)\r\ndef internal_error(e):\r\n    return render_template('500.html'), 500\r\n\r\n\r\n# Default page which will display the first 25 tickets\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef displayTicket():\r\n    global ROWS_PER_PAGE, CURRENT_PAGE\r\n    global TOTAL_PAGES\r\n    global data\r\n    CURRENT_PAGE = 0\r\n    page_data = data[0:ROWS_PER_PAGE]\r\n    if len(page_data) == 0:\r\n        abort(404)\r\n    if data == 503:\r\n        abort(503)\r\n    return render_template('display_ticket.html', data=page_data, current_page=CURRENT_PAGE,\r\n                           total_pages=TOTAL_PAGES)\r\n\r\n\r\n# Next pages till all tickets are displayed. Each page shows 25 tickets\r\n\r\n@app.route('/<current_page>', methods=['GET', 'POST'])\r\ndef displayTicket_withPagination(current_page):\r\n    global CURRENT_PAGE, ROWS_PER_PAGE\r\n    global TOTAL_PAGES\r\n    global data\r\n    CURRENT_PAGE = int(current_page)\r\n    page_data = data[(CURRENT_PAGE * ROWS_PER_PAGE):(CURRENT_PAGE + 1) * ROWS_PER_PAGE]\r\n    if len(page_data) == 0:\r\n        abort(404)\r\n    if data == 503:\r\n        abort(503)\r\n    return render_template('display_ticket.html', data=page_data, current_page=CURRENT_PAGE, total_pages=TOTAL_PAGES)\r\n\r\n\r\n# For viewing individual tickets\r\n\r\n@app.route('/individualticket/<ticketId>', methods=['GET', 'POST'])\r\ndef displayIndividualTicket(ticketId):\r\n    ticket = get_ticket.fetch_ticket_from_url(ticketId)\r\n    if ticket == 503:\r\n        abort(503)\r\n    return render_template('singleticket.html', data=ticket)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run()\r\n","repo_name":"nilesh-kaizen/Zendesk_CC","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42423927709","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn as sk\nimport seaborn as sns\n\nfrom Environment import Environment\nfrom operator import add\n\nREWARD_NONTERMINAL = -1\nREWARD_TERMINAL = 10\nREWARD_CLIFF = -100\n\n# Down, right, up, left\nACTION_DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n\n\nclass CliffEnvironment(Environment):\n    \"\"\"\n    Environment, where position coordinates correspond to (x, y).\n    [(0,0), (1,0), (2,0), (3,0)]\n    [(0,1), (1,1), (2,1), (3,1)]\n    [(0,2), (1,2), (2,2), (3,2)]\n    [(0,3), (1,3), (2,3), (3,3)]\n    \"\"\"\n    def __init__(self, nr_columns, nr_rows, nr_actions=4, init_qa_values=0):\n        super().__init__(nr_columns, nr_rows)\n        self.init_world_rewards()\n\n    def init_world_rewards(self):\n        \"\"\" Initialize rewards for reaching different states. \"\"\"\n        self.world[:, :] = REWARD_NONTERMINAL\n        self.world[self.nr_rows - 1:, 1:self.nr_columns - 1] = REWARD_CLIFF\n        self.world[self.nr_rows - 1, self.nr_columns - 1] = REWARD_TERMINAL\n\n    def next_state(self, state, action_index):\n        \"\"\" Returns next-state tuple.\n        If: state walks to edge, return same state. 
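The grid step that the next_state method here performs combines a state tuple with a direction tuple element-wise via map and operator.add; the idiom by itself:

from operator import add

state, action = (0, 3), (1, 0)          # (x, y) position plus a one-cell move right
print(tuple(map(add, state, action)))   # -> (1, 3)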
\n        Else: add the action to the state tuple and return the new state\n        \"\"\"\n        action = ACTION_DIRECTIONS[action_index]\n        next_state = tuple(map(add, state, action))\n\n        if self.is_out_of_bounds(next_state):\n            next_state = state\n\n        return next_state\n\n    def check_termination(self, state):\n        \"\"\" Checks if state is in cliff or in terminal zone. \"\"\"\n        return state[1] == self.nr_rows - 1 and state[0] > 0\n\n    def spawn_in_environment(self):\n        \"\"\"Returns where user spawns after end of episode. In cliff-walking, always same corner. \"\"\"\n        return (0, 3)","repo_name":"JMitnik/MASCode","sub_path":"Cliff Walking/CliffEnvironment.py","file_name":"CliffEnvironment.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74509954184","text":"import numpy as np\n\n\ndef plot(axes, data, x_axis, y_axes, **kwargs):\n    \"\"\"Plot on 2D Cartesian axes\n    \n    Parameters\n    ----------\n    axes : :obj:`matplotlib.Axes`\n        Axes to draw the plot on\n    data : dict\n        Data read from a file\n    x_axis : str\n        Name of the data series to use for the x axis\n    y_axes : list of str\n        List of data series names to use for the y axis\n    ymin : float (option)\n        Minimum value of the y axis\n    ymax : float (option)\n        Maximum value of the y axis\n    \"\"\"\n    \n    axes.set_xlabel(x_axis)\n\n    xmin = min(data[x_axis])\n    xmax = max(data[x_axis])\n\n    axes.set_xlim([xmin, xmax])\n\n    if 'ymin' in kwargs and 'ymax' in kwargs:\n        axes.set_ylim([kwargs['ymin'], kwargs['ymax']])\n\n    for y_axis in y_axes:\n        axes.plot(data[x_axis], data[y_axis], label=y_axis, linewidth=3)","repo_name":"yn4k4nishi/aemwel_viewer","sub_path":"plot/cartesian2D.py","file_name":"cartesian2D.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19400142300","text":"import os\nimport logging\nfrom itertools import groupby\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.core.exceptions import PermissionDenied, ObjectDoesNotExist\nfrom django.db.models import Q\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import loader, RequestContext\nfrom django.utils.translation import to_locale, ugettext as _\nfrom django.utils.translation.trans_real import parse_accept_lang_header\nfrom django.utils import simplejson, timezone\nfrom django.utils.encoding import iri_to_uri\nfrom django.views.decorators.cache import never_cache\n\nfrom translate.lang import data\n\nfrom pootle.core.decorators import (get_translation_project,\n                                    set_request_context)\nfrom pootle_app.models import Suggestion as SuggestionStat\nfrom pootle_app.models.permissions import (get_matching_permissions,\n                                           check_permission,\n                                           check_profile_permission)\nfrom pootle_misc.baseurl import redirect\nfrom pootle_misc.checks import check_names, get_quality_check_failures\nfrom pootle_misc.forms import make_search_form\nfrom pootle_misc.stats import get_raw_stats\nfrom pootle_misc.url_manip import ensure_uri, previous_view_url\nfrom pootle_misc.util import paginate, ajax_required, jsonify\nfrom pootle_profile.models import get_profile\nfrom pootle_statistics.models import (Submission, SubmissionFields,\n                                      SubmissionTypes)\n\nfrom .decorators import get_store_context, get_unit_context\nfrom .models import Store, Unit\nfrom .forms import (unit_comment_form_factory, unit_form_factory,\n                    highlight_whitespace)\nfrom .signals import translation_submitted\nfrom .templatetags.store_tags import 
(highlight_diffs, pluralize_source,\n pluralize_target)\nfrom .util import (UNTRANSLATED, FUZZY, TRANSLATED, STATES_MAP,\n absolute_real_path, find_altsrcs, get_sugg_list)\n\n\n@get_store_context('view')\ndef export_as_xliff(request, store):\n \"\"\"Export given file to xliff for offline translation.\"\"\"\n path = store.real_path\n if not path:\n # bug 2106\n project = request.translation_project.project\n if project.get_treestyle() == \"gnu\":\n path = \"/\".join(store.pootle_path.split(os.path.sep)[2:])\n else:\n parts = store.pootle_path.split(os.path.sep)[1:]\n path = \"%s/%s/%s\" % (parts[1], parts[0], \"/\".join(parts[2:]))\n\n path, ext = os.path.splitext(path)\n export_path = \"/\".join(['POOTLE_EXPORT', path + os.path.extsep + 'xlf'])\n abs_export_path = absolute_real_path(export_path)\n\n key = iri_to_uri(\"%s:export_as_xliff\" % store.pootle_path)\n last_export = cache.get(key)\n if (not (last_export and last_export == store.get_mtime() and\n os.path.isfile(abs_export_path))):\n from pootle_app.project_tree import ensure_target_dir_exists\n from translate.storage.poxliff import PoXliffFile\n from pootle_misc import ptempfile as tempfile\n import shutil\n ensure_target_dir_exists(abs_export_path)\n outputstore = store.convert(PoXliffFile)\n outputstore.switchfile(store.name, createifmissing=True)\n fd, tempstore = tempfile.mkstemp(prefix=store.name, suffix='.xlf')\n os.close(fd)\n outputstore.savefile(tempstore)\n shutil.move(tempstore, abs_export_path)\n cache.set(key, store.get_mtime(), settings.OBJECT_CACHE_TIMEOUT)\n return redirect('/export/' + export_path)\n\n\n@get_store_context('view')\ndef export_as_type(request, store, filetype):\n \"\"\"Export given file to xliff for offline translation.\"\"\"\n from pootle_store.filetypes import factory_classes, is_monolingual\n klass = factory_classes.get(filetype, None)\n if (not klass or is_monolingual(klass) or\n store.pootle_path.endswith(filetype)):\n raise ValueError\n\n path, ext = os.path.splitext(store.real_path)\n export_path = os.path.join('POOTLE_EXPORT',\n path + os.path.extsep + filetype)\n abs_export_path = absolute_real_path(export_path)\n\n key = iri_to_uri(\"%s:export_as_%s\" % (store.pootle_path, filetype))\n last_export = cache.get(key)\n if (not (last_export and last_export == store.get_mtime() and\n os.path.isfile(abs_export_path))):\n from pootle_app.project_tree import ensure_target_dir_exists\n from pootle_misc import ptempfile as tempfile\n import shutil\n ensure_target_dir_exists(abs_export_path)\n outputstore = store.convert(klass)\n fd, tempstore = tempfile.mkstemp(prefix=store.name,\n suffix=os.path.extsep + filetype)\n os.close(fd)\n outputstore.savefile(tempstore)\n shutil.move(tempstore, abs_export_path)\n cache.set(key, store.get_mtime(), settings.OBJECT_CACHE_TIMEOUT)\n return redirect('/export/' + export_path)\n\n@get_store_context('view')\ndef download(request, store):\n store.sync(update_translation=True)\n return redirect('/export/' + store.real_path)\n\n\ndef get_filter_name(GET):\n \"\"\"Gets current filter's human-readable name.\n\n :param GET: A copy of ``request.GET``.\n :return: Two-tuple with the filter name, and a list of extra arguments\n passed to the current filter.\n \"\"\"\n filter = extra = None\n\n if 'filter' in GET:\n filter = GET['filter']\n\n if filter.startswith('user-'):\n extra = [GET.get('user', _('User missing'))]\n elif filter == 'checks' and 'checks' in GET:\n extra = map(lambda check: check_names.get(check, check),\n GET['checks'].split(','))\n elif 'search' in GET:\n 
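The export views above regenerate a file only when the store's cached mtime is stale or the file is missing. The guard in isolation, with a plain dict standing in for Django's cache (simplified: the real code stores the mtime only after a successful export):

import os

_cache = {}

def needs_export(key, mtime, export_path):
    last = _cache.get(key)
    if last == mtime and os.path.isfile(export_path):
        return False        # cached export is still current
    _cache[key] = mtime
    return True

print(needs_export('store.po:export_as_xliff', 1700000000, '/tmp/nonexistent.xlf'))  # True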
filter = 'search'\n\n extra = [GET['search']]\n if 'sfields' in GET:\n extra.extend(GET['sfields'].split(','))\n\n filter_name = {\n 'all': _('All'),\n 'translated': _('Translated'),\n 'untranslated': _('Untranslated'),\n 'fuzzy': _('Needs work'),\n 'incomplete': _('Incomplete'),\n # Translators: This is the name of a filter\n 'search': _('Search'),\n 'checks': _('Checks'),\n 'user-submissions': _('Submissions'),\n 'user-submissions-overwritten': _('Overwritten submissions'),\n }.get(filter)\n\n return (filter_name, extra)\n\n\n@get_translation_project\n@set_request_context\ndef export_view(request, translation_project, dir_path, filename=None):\n \"\"\"Displays a list of units with filters applied.\"\"\"\n current_path = translation_project.directory.pootle_path + dir_path\n\n if filename:\n current_path = current_path + filename\n store = get_object_or_404(Store, pootle_path=current_path)\n units_qs = store.units\n else:\n store = None\n units_qs = translation_project.units.filter(\n store__pootle_path__startswith=current_path,\n )\n\n filter_name, filter_extra = get_filter_name(request.GET)\n\n units = get_step_query(request, units_qs)\n unit_groups = [(path, list(units)) for path, units in\n groupby(units, lambda x: x.store.path)]\n\n ctx = {\n 'source_language': translation_project.project.source_language,\n 'language': translation_project.language,\n 'project': translation_project.project,\n 'unit_groups': unit_groups,\n 'filter_name': filter_name,\n 'filter_extra': filter_extra,\n }\n\n return render_to_response('store/list.html', ctx,\n context_instance=RequestContext(request))\n\n\n####################### Translate Page ##############################\n\ndef get_alt_src_langs(request, profile, translation_project):\n language = translation_project.language\n project = translation_project.project\n source_language = project.source_language\n\n langs = profile.alt_src_langs.exclude(\n id__in=(language.id, source_language.id)\n ).filter(translationproject__project=project)\n\n if not profile.alt_src_langs.count():\n from pootle_language.models import Language\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n\n for accept_lang, unused in parse_accept_lang_header(accept):\n if accept_lang == '*':\n continue\n\n simplified = data.simplify_to_common(accept_lang)\n normalized = to_locale(data.normalize_code(simplified))\n code = to_locale(accept_lang)\n if (normalized in\n ('en', 'en_US', source_language.code, language.code) or\n code in ('en', 'en_US', source_language.code, language.code)):\n continue\n\n langs = Language.objects.filter(\n code__in=(normalized, code),\n translationproject__project=project,\n )\n if langs.count():\n break\n\n return langs\n\n\ndef get_non_indexed_search_step_query(form, units_queryset):\n words = form.cleaned_data['search'].split()\n result = units_queryset.none()\n\n if 'source' in form.cleaned_data['sfields']:\n subresult = units_queryset\n for word in words:\n subresult = subresult.filter(source_f__icontains=word)\n result = result | subresult\n\n if 'target' in form.cleaned_data['sfields']:\n subresult = units_queryset\n for word in words:\n subresult = subresult.filter(target_f__icontains=word)\n result = result | subresult\n\n if 'notes' in form.cleaned_data['sfields']:\n translator_subresult = units_queryset\n developer_subresult = units_queryset\n for word in words:\n translator_subresult = translator_subresult.filter(\n translator_comment__icontains=word,\n )\n developer_subresult = developer_subresult.filter(\n 
developer_comment__icontains=word,\n )\n result = result | translator_subresult | developer_subresult\n\n if 'locations' in form.cleaned_data['sfields']:\n subresult = units_queryset\n for word in words:\n subresult = subresult.filter(locations__icontains=word)\n result = result | subresult\n\n return result\n\ndef get_non_indexed_search_exact_query(form, units_queryset):\n phrase = form.cleaned_data['search']\n result = units_queryset.none()\n\n if 'source' in form.cleaned_data['sfields']:\n subresult = units_queryset.filter(source_f__contains=phrase)\n result = result | subresult\n\n if 'target' in form.cleaned_data['sfields']:\n subresult = units_queryset.filter(target_f__contains=phrase)\n result = result | subresult\n\n if 'notes' in form.cleaned_data['sfields']:\n translator_subresult = units_queryset\n developer_subresult = units_queryset\n translator_subresult = translator_subresult.filter(\n translator_comment__contains=phrase,\n )\n developer_subresult = developer_subresult.filter(\n developer_comment__contains=phrase,\n )\n result = result | translator_subresult | developer_subresult\n\n if 'locations' in form.cleaned_data['sfields']:\n subresult = units_queryset.filter(locations__contains=phrase)\n result = result | subresult\n\n return result\n\ndef get_search_step_query(translation_project, form, units_queryset):\n \"\"\"Narrows down units query to units matching search string.\"\"\"\n\n if 'exact' in form.cleaned_data['soptions']:\n logging.debug(u\"Using exact database search for %s\",\n translation_project)\n return get_non_indexed_search_exact_query(form, units_queryset)\n\n if translation_project.indexer is None:\n logging.debug(u\"No indexer for %s, using database search\",\n translation_project)\n return get_non_indexed_search_step_query(form, units_queryset)\n\n logging.debug(u\"Found %s indexer for %s, using indexed search\",\n translation_project.indexer.INDEX_DIRECTORY_NAME,\n translation_project)\n\n word_querylist = []\n words = form.cleaned_data['search']\n fields = form.cleaned_data['sfields']\n paths = units_queryset.order_by() \\\n .values_list('store__pootle_path', flat=True) \\\n .distinct()\n path_querylist = [('pofilename', pootle_path)\n for pootle_path in paths.iterator()]\n cache_key = \"search:%s\" % str(hash((repr(path_querylist),\n translation_project.get_mtime(),\n repr(words),\n repr(fields))))\n\n dbids = cache.get(cache_key)\n if dbids is None:\n searchparts = []\n word_querylist = [(field, words) for field in fields]\n textquery = translation_project.indexer.make_query(word_querylist,\n False)\n searchparts.append(textquery)\n\n pathquery = translation_project.indexer.make_query(path_querylist,\n False)\n searchparts.append(pathquery)\n limitedquery = translation_project.indexer.make_query(searchparts, True)\n\n result = translation_project.indexer.search(limitedquery, ['dbid'])\n dbids = [int(item['dbid'][0]) for item in result[:999]]\n cache.set(cache_key, dbids, settings.OBJECT_CACHE_TIMEOUT)\n\n return units_queryset.filter(id__in=dbids)\n\n\ndef get_step_query(request, units_queryset):\n \"\"\"Narrows down unit query to units matching conditions in GET.\"\"\"\n if 'filter' in request.GET:\n unit_filter = request.GET['filter']\n username = request.GET.get('user', None)\n\n profile = request.profile\n if username:\n try:\n user = User.objects.get(username=username)\n profile = user.get_profile()\n except User.DoesNotExist:\n pass\n\n if unit_filter:\n match_queryset = units_queryset.none()\n\n if unit_filter == 'all':\n match_queryset = 
units_queryset\n elif unit_filter == 'translated':\n match_queryset = units_queryset.filter(state=TRANSLATED)\n elif unit_filter == 'untranslated':\n match_queryset = units_queryset.filter(state=UNTRANSLATED)\n elif unit_filter == 'fuzzy':\n match_queryset = units_queryset.filter(state=FUZZY)\n elif unit_filter == 'incomplete':\n match_queryset = units_queryset.filter(\n Q(state=UNTRANSLATED) | Q(state=FUZZY),\n )\n elif unit_filter == 'suggestions':\n #FIXME: is None the most efficient query\n match_queryset = units_queryset.exclude(suggestion=None)\n elif unit_filter == 'user-suggestions':\n match_queryset = units_queryset.filter(\n suggestion__user=profile,\n ).distinct()\n elif unit_filter == 'user-suggestions-accepted':\n # FIXME: Oh, this is pretty lame, we need a completely\n # different way to model suggestions\n unit_ids = SuggestionStat.objects.filter(\n suggester=profile,\n state='accepted',\n ).values_list('unit', flat=True)\n match_queryset = units_queryset.filter(\n id__in=unit_ids,\n ).distinct()\n elif unit_filter == 'user-suggestions-rejected':\n # FIXME: Oh, this is as lame as above\n unit_ids = SuggestionStat.objects.filter(\n suggester=profile,\n state='rejected',\n ).values_list('unit', flat=True)\n match_queryset = units_queryset.filter(\n id__in=unit_ids,\n ).distinct()\n elif unit_filter == 'user-submissions':\n match_queryset = units_queryset.filter(\n submission__submitter=profile,\n ).distinct()\n elif unit_filter == 'user-submissions-overwritten':\n match_queryset = units_queryset.filter(\n submission__submitter=profile,\n ).exclude(submitted_by=profile).distinct()\n elif unit_filter == 'checks' and 'checks' in request.GET:\n checks = request.GET['checks'].split(',')\n\n if checks:\n match_queryset = units_queryset.filter(\n qualitycheck__false_positive=False,\n qualitycheck__name__in=checks\n ).distinct()\n\n\n units_queryset = match_queryset\n\n if 'search' in request.GET and 'sfields' in request.GET:\n # use the search form for validation only\n search_form = make_search_form(request.GET)\n\n if search_form.is_valid():\n units_queryset = get_search_step_query(request.translation_project,\n search_form, units_queryset)\n\n return units_queryset\n\n\ndef translate_page(request):\n cantranslate = check_permission(\"translate\", request)\n cansuggest = check_permission(\"suggest\", request)\n canreview = check_permission(\"review\", request)\n\n translation_project = request.translation_project\n language = translation_project.language\n project = translation_project.project\n profile = request.profile\n\n store = getattr(request, \"store\", None)\n directory = getattr(request, \"directory\", None)\n\n is_single_file = store and True or False\n path = is_single_file and store.path or directory.path\n pootle_path = (is_single_file and store.pootle_path or\n directory.pootle_path)\n\n is_terminology = (project.is_terminology or store and\n store.is_terminology)\n search_form = make_search_form(request=request,\n terminology=is_terminology)\n\n previous_overview_url = previous_view_url(request, ['overview'])\n\n context = {\n 'cantranslate': cantranslate,\n 'cansuggest': cansuggest,\n 'canreview': canreview,\n 'search_form': search_form,\n 'store': store,\n 'store_id': store and store.id,\n 'directory': directory,\n 'directory_id': directory and directory.id,\n 'path': path,\n 'pootle_path': pootle_path,\n 'is_single_file': is_single_file,\n 'language': language,\n 'project': project,\n 'translation_project': translation_project,\n 'profile': profile,\n 
'source_language': translation_project.project.source_language,\n 'previous_overview_url': previous_overview_url,\n 'MT_BACKENDS': settings.MT_BACKENDS,\n 'LOOKUP_BACKENDS': settings.LOOKUP_BACKENDS,\n 'AMAGAMA_URL': settings.AMAGAMA_URL,\n }\n\n return render_to_response('store/translate.html', context,\n context_instance=RequestContext(request))\n\n\n@get_store_context('view')\ndef translate(request, store):\n return translate_page(request)\n\n#\n# Views used with XMLHttpRequest requests.\n#\n\ndef _filter_ctx_units(units_qs, unit, how_many, gap=0):\n \"\"\"Returns ``how_many``*2 units that are before and after ``index``.\"\"\"\n result = {'before': [], 'after': []}\n\n if how_many and unit.index - gap > 0:\n before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \\\n .order_by('-index')[gap:how_many+gap]\n result['before'] = _build_units_list(before, reverse=True)\n result['before'].reverse()\n\n #FIXME: can we avoid this query if length is known?\n if how_many:\n after = units_qs.filter(store=unit.store_id,\n index__gt=unit.index)[gap:how_many+gap]\n result['after'] = _build_units_list(after)\n\n return result\n\ndef _build_units_list(units, reverse=False):\n \"\"\"Given a list/queryset of units, builds a list with the unit data\n contained in a dictionary ready to be returned as JSON.\n\n :return: A list with unit id, source, and target texts. In case of\n having plural forms, a title for the plural form is also provided.\n \"\"\"\n return_units = []\n for unit in iter(units):\n source_unit = []\n target_unit = []\n for i, source, title in pluralize_source(unit):\n unit_dict = {'text': source}\n if title:\n unit_dict[\"title\"] = title\n source_unit.append(unit_dict)\n for i, target, title in pluralize_target(unit):\n unit_dict = {'text': target}\n if title:\n unit_dict[\"title\"] = title\n target_unit.append(unit_dict)\n prev = None\n next = None\n if return_units:\n if reverse:\n return_units[-1]['prev'] = unit.id\n next = return_units[-1]['id']\n else:\n return_units[-1]['next'] = unit.id\n prev = return_units[-1]['id']\n return_units.append({'id': unit.id,\n 'isfuzzy': unit.isfuzzy(),\n 'prev': prev,\n 'next': next,\n 'source': source_unit,\n 'target': target_unit})\n return return_units\n\n\ndef _build_pager_dict(pager):\n \"\"\"Given a pager object ``pager``, retrieves all the information needed\n to build a pager.\n\n :return: A dictionary containing necessary pager information to build\n a pager.\n \"\"\"\n return {\"number\": pager.number,\n \"num_pages\": pager.paginator.num_pages,\n \"per_page\": pager.paginator.per_page\n }\n\n\ndef _get_index_in_qs(qs, unit, store=False):\n \"\"\"Given a queryset ``qs``, returns the position (index) of the unit\n ``unit`` within that queryset. 
``store`` specifies if the queryset is\n    limited to a single store.\n\n    :return: Value representing the position of the unit ``unit``.\n    :rtype: int\n    \"\"\"\n    if store:\n        return qs.filter(index__lt=unit.index).count()\n    else:\n        store = unit.store\n        return (qs.filter(store=store, index__lt=unit.index) | \\\n                qs.filter(store__pootle_path__lt=store.pootle_path)).count()\n\n\ndef get_view_units(request, units_queryset, store, limit=0):\n    \"\"\"Gets source and target texts excluding the editing unit.\n\n    :return: An object in JSON notation that contains the source and target\n        texts for units that will be displayed before and after editing\n        unit.\n\n        If asked by using the ``meta`` and ``pager`` parameters,\n        metadata and pager information will be calculated and returned\n        too.\n    \"\"\"\n    current_unit = None\n    json = {}\n\n    try:\n        limit = int(limit)\n    except ValueError:\n        limit = None\n\n    if not limit:\n        limit = request.profile.get_unit_rows()\n\n    step_queryset = get_step_query(request, units_queryset)\n\n    # Return metadata if it has been explicitly requested\n    if request.GET.get('meta', False):\n        tp = request.translation_project\n        json[\"meta\"] = {\"source_lang\": tp.project.source_language.code,\n                        \"source_dir\": tp.project.source_language.get_direction(),\n                        \"target_lang\": tp.language.code,\n                        \"target_dir\": tp.language.get_direction(),\n                        \"project_style\": tp.project.checkstyle}\n\n    # Maybe we are trying to load directly a specific unit, so we have\n    # to calculate its page number\n    uid = request.GET.get('uid', None)\n    if uid:\n        current_unit = units_queryset.get(id=uid)\n        preceding = _get_index_in_qs(step_queryset, current_unit, store)\n        page = preceding / limit + 1\n    else:\n        page = None\n\n    pager = paginate(request, step_queryset, items=limit, page=page)\n\n    json[\"units\"] = _build_units_list(pager.object_list)\n\n    # Return paging information if requested to do so\n    if request.GET.get('pager', False):\n        json[\"pager\"] = _build_pager_dict(pager)\n        if not current_unit:\n            try:\n                json[\"uid\"] = json[\"units\"][0][\"id\"]\n            except IndexError:\n                pass\n        else:\n            json[\"uid\"] = current_unit.id\n\n    response = jsonify(json)\n    return HttpResponse(response, mimetype=\"application/json\")\n\n\n@ajax_required\n@get_store_context('view')\ndef get_view_units_store(request, store, limit=0):\n    \"\"\"Gets source and target texts excluding the editing widget (store-level).\n\n    :return: An object in JSON notation that contains the source and target\n        texts for units that will be displayed before and after\n        unit ``uid``.\n    \"\"\"\n    return get_view_units(request, store.units, store=True, limit=limit)\n\n\ndef _is_filtered(request):\n    \"\"\"Checks if unit list is filtered.\"\"\"\n    return ('filter' in request.GET or 'checks' in request.GET or\n            'user' in request.GET or\n            ('search' in request.GET and 'sfields' in request.GET))\n\n\n@ajax_required\n@get_unit_context('view')\ndef get_more_context(request, unit):\n    \"\"\"Retrieves more context units.\n\n    :return: An object in JSON notation that contains the source and target\n        texts for units that are in the context of unit ``uid``.\n    \"\"\"\n    store = request.store\n    json = {}\n    gap = int(request.GET.get('gap', 0))\n    qty = int(request.GET.get('qty', 1))\n\n    json[\"ctx\"] = _filter_ctx_units(store.units, unit, qty, gap)\n    rcode = 200\n    response = jsonify(json)\n    return HttpResponse(response, status=rcode, mimetype=\"application/json\")\n\n\n@never_cache\n@get_unit_context('view')\ndef timeline(request, unit):\n    \"\"\"Returns a JSON-encoded string including the 
changes to the unit\n rendered in HTML.\n \"\"\"\n timeline = Submission.objects.filter(unit=unit, field__in=[\n SubmissionFields.TARGET, SubmissionFields.STATE,\n SubmissionFields.COMMENT\n ])\n timeline = timeline.select_related(\"submitter__user\",\n \"translation_project__language\")\n\n context = {}\n entries_group = []\n\n import locale\n from pootle_store.fields import to_python\n\n for key, values in groupby(timeline, key=lambda x: x.creation_time):\n entry_group = {\n 'datetime': key,\n 'datetime_str': key.strftime(locale.nl_langinfo(locale.D_T_FMT)),\n 'entries': [],\n }\n\n for item in values:\n # Only add submitter information for the whole entry group once\n entry_group.setdefault('submitter', item.submitter)\n\n context.setdefault('language', item.translation_project.language)\n\n entry = {\n 'field': item.field,\n 'field_name': SubmissionFields.NAMES_MAP[item.field],\n }\n\n if item.field == SubmissionFields.STATE:\n entry['old_value'] = STATES_MAP[int(to_python(item.old_value))]\n entry['new_value'] = STATES_MAP[int(to_python(item.new_value))]\n else:\n entry['new_value'] = to_python(item.new_value)\n\n entry_group['entries'].append(entry)\n\n entries_group.append(entry_group)\n\n # Let's reverse the chronological order\n entries_group.reverse()\n\n # Remove first timeline item if it's solely a change to the target\n if (entries_group and len(entries_group[0]['entries']) == 1 and\n entries_group[0]['entries'][0]['field'] == SubmissionFields.TARGET):\n del entries_group[0]\n\n context['entries_group'] = entries_group\n\n if request.is_ajax():\n # The client will want to confirm that the response is relevant for\n # the unit on screen at the time of receiving this, so we add the uid.\n json = {'uid': unit.id}\n\n t = loader.get_template('unit/xhr-timeline.html')\n c = RequestContext(request, context)\n json['timeline'] = t.render(c).replace('\\n', '')\n\n response = simplejson.dumps(json)\n return HttpResponse(response, mimetype=\"application/json\")\n else:\n return render_to_response('unit/timeline.html', context,\n context_instance=RequestContext(request))\n\n\n@ajax_required\n@get_unit_context('translate')\ndef comment(request, unit):\n \"\"\"Stores a new comment for the given ``unit``.\n\n :return: If the form validates, the cleaned comment is returned.\n An error message is returned otherwise.\n \"\"\"\n # Update current unit instance's attributes\n unit.commented_by = request.profile\n unit.commented_on = timezone.now()\n\n language = request.translation_project.language\n form = unit_comment_form_factory(language)(request.POST, instance=unit,\n request=request)\n\n if form.is_valid():\n form.save()\n\n context = {\n 'unit': unit,\n 'language': language,\n }\n t = loader.get_template('unit/comment.html')\n c = RequestContext(request, context)\n\n json = {'comment': t.render(c)}\n rcode = 200\n else:\n json = {'msg': _(\"Comment submission failed.\")}\n rcode = 400\n\n response = simplejson.dumps(json)\n\n return HttpResponse(response, status=rcode, mimetype=\"application/json\")\n\n\n@never_cache\n@ajax_required\n@get_unit_context('view')\ndef get_edit_unit(request, unit):\n \"\"\"Given a store path ``pootle_path`` and unit id ``uid``, gathers all the\n necessary information to build the editing widget.\n\n :return: A templatised editing widget is returned within the ``editor``\n variable and paging information is also returned if the page\n number has changed.\n \"\"\"\n json = {}\n\n translation_project = request.translation_project\n language = 
translation_project.language\n\n if unit.hasplural():\n snplurals = len(unit.source.strings)\n else:\n snplurals = None\n\n form_class = unit_form_factory(language, snplurals, request)\n form = form_class(instance=unit)\n comment_form_class = unit_comment_form_factory(language)\n comment_form = comment_form_class({}, instance=unit)\n\n store = unit.store\n directory = store.parent\n profile = request.profile\n alt_src_langs = get_alt_src_langs(request, profile, translation_project)\n project = translation_project.project\n report_target = ensure_uri(project.report_target)\n\n suggestions = get_sugg_list(unit)\n template_vars = {\n 'unit': unit,\n 'form': form,\n 'comment_form': comment_form,\n 'store': store,\n 'directory': directory,\n 'profile': profile,\n 'user': request.user,\n 'language': language,\n 'source_language': translation_project.project.source_language,\n 'cantranslate': check_profile_permission(profile, \"translate\",\n directory),\n 'cansuggest': check_profile_permission(profile, \"suggest\", directory),\n 'canreview': check_profile_permission(profile, \"review\", directory),\n 'altsrcs': find_altsrcs(unit, alt_src_langs, store=store,\n project=project),\n 'report_target': report_target,\n 'suggestions': suggestions,\n }\n\n if translation_project.project.is_terminology or store.is_terminology:\n t = loader.get_template('unit/term_edit.html')\n else:\n t = loader.get_template('unit/edit.html')\n c = RequestContext(request, template_vars)\n json['editor'] = t.render(c)\n\n rcode = 200\n\n # Return context rows if filtering is applied but\n # don't return any if the user has asked not to have it\n current_filter = request.GET.get('filter', 'all')\n show_ctx = request.COOKIES.get('ctxShow', 'true')\n\n if ((_is_filtered(request) or current_filter not in ('all',)) and\n show_ctx == 'true'):\n # TODO: review if this first 'if' branch makes sense\n if translation_project.project.is_terminology or store.is_terminology:\n json['ctx'] = _filter_ctx_units(store.units, unit, 0)\n else:\n ctx_qty = int(request.COOKIES.get('ctxQty', 1))\n json['ctx'] = _filter_ctx_units(store.units, unit, ctx_qty)\n\n response = jsonify(json)\n return HttpResponse(response, status=rcode, mimetype=\"application/json\")\n\n\ndef get_failing_checks(request, pathobj):\n \"\"\"Gets a list of failing checks for the current object.\n\n :return: JSON string with a list of failing check categories which\n include the actual checks that are failing.\n \"\"\"\n stats = get_raw_stats(pathobj)\n failures = get_quality_check_failures(pathobj, stats, include_url=False)\n\n response = jsonify(failures)\n\n return HttpResponse(response, mimetype=\"application/json\")\n\n\n@ajax_required\n@get_store_context('view')\ndef get_failing_checks_store(request, store):\n return get_failing_checks(request, store)\n\n\n@ajax_required\n@get_unit_context('')\ndef submit(request, unit):\n \"\"\"Processes translation submissions and stores them in the database.\n\n :return: An object in JSON notation that contains the previous and last\n units for the unit next to unit ``uid``.\n \"\"\"\n json = {}\n\n cantranslate = check_permission(\"translate\", request)\n if not cantranslate:\n raise PermissionDenied(_(\"You do not have rights to access \"\n \"translation mode.\"))\n\n translation_project = request.translation_project\n language = translation_project.language\n\n if unit.hasplural():\n snplurals = len(unit.source.strings)\n else:\n snplurals = None\n\n # Store current time so that it is the same for all submissions\n current_time 
= timezone.now()\n\n # Update current unit instance's attributes\n unit.submitted_by = request.profile\n unit.submitted_on = current_time\n\n form_class = unit_form_factory(language, snplurals, request)\n form = form_class(request.POST, instance=unit)\n\n if form.is_valid():\n if form.updated_fields:\n for field, old_value, new_value in form.updated_fields:\n sub = Submission(\n creation_time=current_time,\n translation_project=translation_project,\n submitter=request.profile,\n unit=unit,\n field=field,\n type=SubmissionTypes.NORMAL,\n old_value=old_value,\n new_value=new_value,\n )\n sub.save()\n\n form.save()\n translation_submitted.send(\n sender=translation_project,\n unit=form.instance,\n profile=request.profile,\n )\n\n rcode = 200\n else:\n # Form failed\n #FIXME: we should display validation errors here\n rcode = 400\n json[\"msg\"] = _(\"Failed to process submission.\")\n response = jsonify(json)\n return HttpResponse(response, status=rcode, mimetype=\"application/json\")\n\n\n@ajax_required\n@get_unit_context('')\ndef suggest(request, unit):\n \"\"\"Processes translation suggestions and stores them in the database.\n\n :return: An object in JSON notation that contains the previous and last\n units for the unit next to unit ``uid``.\n \"\"\"\n json = {}\n\n cansuggest = check_permission(\"suggest\", request)\n if not cansuggest:\n raise PermissionDenied(_(\"You do not have rights to access \"\n \"translation mode.\"))\n\n translation_project = request.translation_project\n language = translation_project.language\n\n if unit.hasplural():\n snplurals = len(unit.source.strings)\n else:\n snplurals = None\n\n form_class = unit_form_factory(language, snplurals, request)\n form = form_class(request.POST, instance=unit)\n\n if form.is_valid():\n if form.instance._target_updated:\n # TODO: Review if this hackish method is still necessary\n #HACKISH: django 1.2 stupidly modifies instance on\n # model form validation, reload unit from db\n unit = Unit.objects.get(id=unit.id)\n sugg = unit.add_suggestion(form.cleaned_data['target_f'],\n request.profile)\n if sugg:\n SuggestionStat.objects.get_or_create(\n translation_project=translation_project,\n suggester=request.profile, state='pending', unit=unit.id\n )\n rcode = 200\n else:\n # Form failed\n #FIXME: we should display validation errors here\n rcode = 400\n json[\"msg\"] = _(\"Failed to process suggestion.\")\n response = jsonify(json)\n return HttpResponse(response, status=rcode, mimetype=\"application/json\")\n\n\n@ajax_required\n@get_unit_context('')\ndef reject_suggestion(request, unit, suggid):\n json = {}\n translation_project = request.translation_project\n\n json[\"udbid\"] = unit.id\n json[\"sugid\"] = suggid\n if request.POST.get('reject'):\n try:\n sugg = unit.suggestion_set.get(id=suggid)\n except ObjectDoesNotExist:\n raise Http404\n\n if (not check_permission('review', request) and\n (not request.user.is_authenticated() or sugg and\n sugg.user != request.profile)):\n raise PermissionDenied(_(\"You do not have rights to access \"\n \"review mode.\"))\n\n success = unit.reject_suggestion(suggid)\n if sugg is not None and success:\n # FIXME: we need a totally different model for tracking stats, this\n # is just lame\n suggstat, created = SuggestionStat.objects.get_or_create(\n translation_project=translation_project,\n suggester=sugg.user,\n state='pending',\n unit=unit.id,\n )\n suggstat.reviewer = request.profile\n suggstat.state = 'rejected'\n suggstat.save()\n\n response = jsonify(json)\n return HttpResponse(response, 
mimetype=\"application/json\")\n\n\n@ajax_required\n@get_unit_context('review')\ndef accept_suggestion(request, unit, suggid):\n json = {\n 'udbid': unit.id,\n 'sugid': suggid,\n }\n translation_project = request.translation_project\n\n if request.POST.get('accept'):\n try:\n suggestion = unit.suggestion_set.get(id=suggid)\n except ObjectDoesNotExist:\n raise Http404\n\n old_target = unit.target\n success = unit.accept_suggestion(suggid)\n\n json['newtargets'] = [highlight_whitespace(target)\n for target in unit.target.strings]\n json['newdiffs'] = {}\n for sugg in unit.get_suggestions():\n json['newdiffs'][sugg.id] = \\\n [highlight_diffs(unit.target.strings[i], target)\n for i, target in enumerate(sugg.target.strings)]\n\n if suggestion is not None and success:\n if suggestion.user:\n translation_submitted.send(sender=translation_project,\n unit=unit, profile=suggestion.user)\n\n # FIXME: we need a totally different model for tracking stats, this\n # is just lame\n suggstat, created = SuggestionStat.objects.get_or_create(\n translation_project=translation_project,\n suggester=suggestion.user,\n state='pending',\n unit=unit.id,\n )\n suggstat.reviewer = request.profile\n suggstat.state = 'accepted'\n suggstat.save()\n\n # For now assume the target changed\n # TODO: check all fields for changes\n creation_time = timezone.now()\n sub = Submission(\n creation_time=creation_time,\n translation_project=translation_project,\n submitter=suggestion.user,\n from_suggestion=suggstat,\n unit=unit,\n field=SubmissionFields.TARGET,\n type=SubmissionTypes.SUGG_ACCEPT,\n old_value=old_target,\n new_value=unit.target,\n )\n sub.save()\n\n response = jsonify(json)\n return HttpResponse(response, mimetype=\"application/json\")\n\n@ajax_required\ndef clear_vote(request, voteid):\n json = {}\n json[\"voteid\"] = voteid\n if request.POST.get('clear'):\n try:\n from voting.models import Vote\n vote = Vote.objects.get(pk=voteid)\n if vote.user != request.user:\n # No i18n, will not go to UI\n raise PermissionDenied(\"Users can only remove their own votes\")\n vote.delete()\n except ObjectDoesNotExist:\n raise Http404\n response = jsonify(json)\n return HttpResponse(response, mimetype=\"application/json\")\n\n\n@ajax_required\n@get_unit_context('')\ndef vote_up(request, unit, suggid):\n json = {}\n json[\"suggid\"] = suggid\n if request.POST.get('up'):\n try:\n suggestion = unit.suggestion_set.get(id=suggid)\n from voting.models import Vote\n # Why can't it just return the vote object?\n Vote.objects.record_vote(suggestion, request.user, +1)\n json[\"voteid\"] = Vote.objects.get_for_user(suggestion,\n request.user).id\n except ObjectDoesNotExist:\n raise Http404(_(\"The suggestion or vote is not valid any more.\"))\n response = jsonify(json)\n return HttpResponse(response, mimetype=\"application/json\")\n\n\n@ajax_required\n@get_unit_context('review')\ndef reject_qualitycheck(request, unit, checkid):\n json = {}\n json[\"udbid\"] = unit.id\n json[\"checkid\"] = checkid\n if request.POST.get('reject'):\n try:\n check = unit.qualitycheck_set.get(id=checkid)\n check.false_positive = True\n check.save()\n # update timestamp\n unit.save()\n except ObjectDoesNotExist:\n raise Http404\n\n response = jsonify(json)\n return HttpResponse(response, mimetype=\"application/json\")\n","repo_name":"moriaty2013/pootle","sub_path":"pootle/apps/pootle_store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":42617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} 
+{"seq_id":"354237515","text":"#!/usr/bin/python\n\nfrom PyQt5.QtCore import QDateTime, Qt, QTimer\nfrom PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit,\n QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,\n QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy,\n QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit,\n QVBoxLayout, QWidget)\n\nimport sys\n\n\nclass WidgetGallery(QDialog):\n def __init__(self, parent=None):\n super(WidgetGallery, self).__init__(parent)\n\n self.originalPalette = QApplication.palette()\n\n styleComboBox = QComboBox()\n styleComboBox.addItems(QStyleFactory.keys())\n\n styleLabel = QLabel(\"E&stilo:\")\n styleLabel.setBuddy(styleComboBox)\n \n\n self.useStylePaletteCheckBox = QCheckBox(\"&Usar estilos predeterminados\")\n self.useStylePaletteCheckBox.setChecked(True)\n\n disableWidgetsCheckBox = QCheckBox(\"&Deshabilitar widgets\")\n\n self.createTopLeftGroupBox()\n self.createTopRightGroupBox()\n self.createbottomRightTabWidget()\n self.createbottomLeftGroupBox()\n # self.createProgressBar()\n\n styleComboBox.activated[str].connect(self.changeStyle)\n self.useStylePaletteCheckBox.toggled.connect(self.changePalette)\n disableWidgetsCheckBox.toggled.connect(self.topLeftGroupBox.setDisabled)\n disableWidgetsCheckBox.toggled.connect(self.topRightGroupBox.setDisabled)\n disableWidgetsCheckBox.toggled.connect(self.bottomRightTabWidget.setDisabled)\n disableWidgetsCheckBox.toggled.connect(self.bottomLeftGroupBox.setDisabled)\n\n topLayout = QHBoxLayout()\n topLayout.addWidget(styleLabel)\n topLayout.addWidget(styleComboBox)\n topLayout.addStretch(1)\n topLayout.addWidget(self.useStylePaletteCheckBox)\n topLayout.addWidget(disableWidgetsCheckBox)\n\n mainLayout = QGridLayout()\n mainLayout.addLayout(topLayout, 0, 0, 1, 2)\n mainLayout.addWidget(self.topLeftGroupBox, 1, 0)\n mainLayout.addWidget(self.topRightGroupBox, 1, 1)\n mainLayout.addWidget(self.bottomLeftGroupBox, 2, 0)\n mainLayout.addWidget(self.bottomRightTabWidget, 2, 1)\n # mainLayout.addWidget(self.progressBar, 3, 0, 1, 2)\n mainLayout.setRowStretch(1, 1)\n mainLayout.setRowStretch(2, 1)\n mainLayout.setColumnStretch(0, 1)\n mainLayout.setColumnStretch(1, 1)\n self.setLayout(mainLayout)\n\n self.setWindowTitle(\"Proyecto X\")\n self.changeStyle('fusion')\n\n def changeStyle(self, styleName):\n QApplication.setStyle(QStyleFactory.create(styleName))\n self.changePalette()\n\n def changePalette(self):\n if self.useStylePaletteCheckBox.isChecked():\n QApplication.setPalette(QApplication.style().standardPalette())\n else:\n QApplication.setPalette(self.originalPalette)\n\n def advanceProgressBar(self):\n curVal = self.progressBar.value()\n maxVal = self.progressBar.maximum()\n self.progressBar.setValue(curVal + (maxVal - curVal) / 100)\n\n def createTopLeftGroupBox(self):\n self.topLeftGroupBox = QGroupBox(\"Input\")\n\n radioButton1 = QRadioButton(\"Radio button 1\")\n radioButton2 = QRadioButton(\"Radio button 2\")\n radioButton3 = QRadioButton(\"Radio button 3\")\n radioButton1.setChecked(True)\n\n checkBox = QCheckBox(\"Tri-state check box\")\n checkBox.setTristate(True)\n checkBox.setCheckState(Qt.PartiallyChecked)\n\n layout = QVBoxLayout()\n layout.addWidget(radioButton1)\n layout.addWidget(radioButton2)\n layout.addWidget(radioButton3)\n layout.addWidget(checkBox)\n layout.addStretch(1)\n self.topLeftGroupBox.setLayout(layout)\n\n def createTopRightGroupBox(self):\n self.topRightGroupBox = QGroupBox(\"Output\")\n\n 
defaultPushButton = QPushButton(\"Default Push Button\")\n defaultPushButton.setDefault(True)\n\n togglePushButton = QPushButton(\"Toggle Push Button\")\n togglePushButton.setCheckable(True)\n togglePushButton.setChecked(True)\n\n # flatPushButton = QPushButton(\"Flat Push Button\")\n # flatPushButton.setFlat(True)\n\n layout = QVBoxLayout()\n layout.addWidget(defaultPushButton)\n layout.addWidget(togglePushButton)\n # layout.addWidget(flatPushButton)\n layout.addStretch(1)\n self.topRightGroupBox.setLayout(layout)\n\n def createbottomRightTabWidget(self):\n self.bottomRightTabWidget = QTabWidget()\n self.bottomRightTabWidget.setSizePolicy(QSizePolicy.Preferred,\n QSizePolicy.Ignored)\n\n tab1 = QWidget()\n textEditTokensSourceCode = QTextEdit()\n\n textEditTokensSourceCode.setPlainText(\"salida\")\n\n tab1hbox = QHBoxLayout()\n tab1hbox.setContentsMargins(5, 5, 5, 5)\n tab1hbox.addWidget(textEditTokensSourceCode)\n tab1.setLayout(tab1hbox)\n\n tab2 = QWidget()\n textEditTokensSourceCode = QTextEdit()\n\n textEditTokensSourceCode.setPlainText(\"salida\")\n\n tab2hbox = QHBoxLayout()\n tab2hbox.setContentsMargins(5, 5, 5, 5)\n tab2hbox.addWidget(textEditTokensSourceCode)\n tab2.setLayout(tab2hbox)\n\n tab3 = QWidget()\n textEditTokensSourceCode = QTextEdit()\n\n textEditTokensSourceCode.setPlainText(\"salida\")\n\n tab3hbox = QHBoxLayout()\n tab3hbox.setContentsMargins(5, 5, 5, 5)\n tab3hbox.addWidget(textEditTokensSourceCode)\n tab3.setLayout(tab3hbox)\n\n self.bottomRightTabWidget.addTab(tab1, \"Source Code Tokenize&d\")\n self.bottomRightTabWidget.addTab(tab2, \"Source Code Pars&ed\")\n self.bottomRightTabWidget.addTab(tab3, \"Source Code Trans&piled\")\n\n def createbottomLeftGroupBox(self):\n self.bottomLeftGroupBox = QGroupBox(\"Source Code\")\n self.bottomLeftGroupBox.setCheckable(True)\n self.bottomLeftGroupBox.setChecked(True)\n\n textEditSourceCode = QTextEdit()\n\n textEditSourceCode.setPlainText(\"int variable = 10\\n\")\n\n # lineEdit = QLineEdit('s3cRe7')\n # lineEdit.setEchoMode(QLineEdit.Password)\n\n # spinBox = QSpinBox(self.bottomLeftGroupBox)\n # spinBox.setValue(50)\n\n # dateTimeEdit = QDateTimeEdit(self.bottomLeftGroupBox)\n # dateTimeEdit.setDateTime(QDateTime.currentDateTime())\n\n # slider = QSlider(Qt.Horizontal, self.bottomLeftGroupBox)\n # slider.setValue(40)\n\n # scrollBar = QScrollBar(Qt.Horizontal, self.bottomLeftGroupBox)\n # scrollBar.setValue(60)\n\n # dial = QDial(self.bottomLeftGroupBox)\n # dial.setValue(30)\n # dial.setNotchesVisible(True)\n\n layout = QGridLayout()\n layout.addWidget(textEditSourceCode, 0, 0, 1, 2)\n # layout.addWidget(lineEdit, 0, 0, 1, 2)\n # layout.addWidget(spinBox, 1, 0, 1, 2)\n # layout.addWidget(dateTimeEdit, 2, 0, 1, 2)\n # layout.addWidget(slider, 3, 0)\n # layout.addWidget(scrollBar, 4, 0)\n # layout.addWidget(dial, 3, 1, 2, 1)\n layout.setRowStretch(5, 1)\n self.bottomLeftGroupBox.setLayout(layout)\n\n def createProgressBar(self):\n self.progressBar = QProgressBar()\n self.progressBar.setRange(0, 10000)\n self.progressBar.setValue(0)\n\n timer = QTimer(self)\n timer.timeout.connect(self.advanceProgressBar)\n timer.start(1000)\n\n\nif __name__ == '__main__':\n app = QApplication([])\n gallery = WidgetGallery()\n gallery.show()\n app.exec_()\n","repo_name":"mariano-dim/ProyectoX","sub_path":"src/dashboardCompiler.py","file_name":"dashboardCompiler.py","file_ext":"py","file_size_in_byte":7700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5575406388","text":"# 
**NOTE**: try/except block iterates through 65535 ports!! maximum scan!\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom datetime import datetime, timedelta\nfrom flask import Flask, render_template\nimport errno  # needed by the OSError handler below\nimport sys  # needed for sys.exit() below\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    hostAddress = '127.0.0.1' # loopback address for scanning localhost\n    start_time = datetime.now() # port scan start time\n    data = [] # lists open port strings\n    try:\n        for port in range(1, 65536):\n            sock = socket(AF_INET, SOCK_STREAM)\n            sock.settimeout(2) # scan for 2 secs\n            result = sock.connect_ex((hostAddress, port))\n            if result == 0:\n                data.append(f'Port {port}: OPEN') # Port __: OPEN\n            sock.close()\n    except OSError as e:\n        if e.errno != errno.ENOENT:\n            print(f'{e}')\n        sys.exit()\n    end_time = datetime.now() # port scan ends: mark time\n    duration = end_time - start_time # port scan duration\n    data.append(f'Scan duration: {round(duration.total_seconds(), 2)}secs')\n    return render_template('portscan.html', data=data, address=hostAddress)\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"nick3499/flask_portscan","sub_path":"portscan.py","file_name":"portscan.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3138694182","text":"# Script to resample a raster to a smaller pixel size.\n\nimport os\nfrom osgeo import gdal\n\n# Don't forget to change the folder.\nos.chdir(r'D:\\osgeopy-data\\Landsat\\Washington')\n\n# Open the input raster.\nin_ds = gdal.Open('p047r027_7t20000730_z10_nn10.tif')\nin_band = in_ds.GetRasterBand(1)\n\n# Compute the number of output rows and columns (double the input numbers\n# because we're cutting pixel size in half).\nout_rows = in_band.YSize * 2\nout_columns = in_band.XSize * 2\n\n# Create the output raster using the computed dimensions.\ngtiff_driver = gdal.GetDriverByName('GTiff')\nout_ds = gtiff_driver.Create('band1_resampled.tif',\n    out_columns, out_rows)\n\n# Change the geotransform so it reflects the smaller cell size before\n# setting it onto the output.\nout_ds.SetProjection(in_ds.GetProjection())\ngeotransform = list(in_ds.GetGeoTransform())\ngeotransform[1] /= 2\ngeotransform[5] /= 2\nout_ds.SetGeoTransform(geotransform)\n\n# Read in the data, but have gdal resample it so that it has the specified\n# number of rows and columns instead of the numbers that the input has.\n# This effectively resizes the pixels.\ndata = in_band.ReadAsArray(\n    buf_xsize=out_columns, buf_ysize=out_rows)\n\n# Write the data to the output raster.\nout_band = out_ds.GetRasterBand(1)\nout_band.WriteArray(data)\n\n# Compute statistics and build overviews.\nout_band.FlushCache()\nout_band.ComputeStatistics(False)\nout_ds.BuildOverviews('average', [2, 4, 8, 16, 32, 64])\n\ndel out_ds\n","repo_name":"cgarrard/osgeopy-code","sub_path":"Chapter9/listing9_4.py","file_name":"listing9_4.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"81"} +{"seq_id":"35957248408","text":"class Solution:\n\tdef longestPalindrome(self, s) :\n\t\tlis=[]\n\t\tif s==s[::-1]:lis.append(s)\n\t\tif len(s)==1:\n\t\t\tlis.append(s)\n\t\telif len(s)==2:\n\t\t\tif s[0]==s[1]:\n\t\t\t\tlis.append(s)\n\t\t\telse:\n\t\t\t\tlis.append(s[0])\n\t\tfor i in range(len(s)):\n\t\t\tj=len(s)\n\t\t\twhile j-i>1:\n\t\t\t\ttemp=s[i:j]\n\t\t\t\tj-=1\n\t\t\t\tif 
temp==temp[::-1]:\n\t\t\t\t\tlis.append(temp)\n\t\t\t\t\tbreak\n\t\t# fall back to a single character if no palindromic substring was found\n\t\tif not lis:\n\t\t\tlis.append(s[0])\n\t\tlis.sort(key=len,reverse=True)\n\t\treturn lis\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\na=Solution()\nprint(a.longestPalindrome('abcda'))\t\n\t\t\t\t","repo_name":"warriorwizard/competiitve_coding","sub_path":"longest_palindrome_substring.py","file_name":"longest_palindrome_substring.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"39985407567","text":"#!/usr/bin/env python\r\n# -*-coding:utf-8 -*-\r\nimport tornado.web\r\nimport json\r\nfrom Model.Merchant import MerchantService\r\nfrom UIAdmin.Forms.Merchant import MerchantForm\r\nfrom pymysql import IntegrityError\r\n\r\nclass MerchantManagerHandler(tornado.web.RequestHandler):\r\n    def get(self,*args,**kwargs):\r\n        self.render(\"Merchant/merchantManager.html\")\r\n\r\nclass MerchantHandler(tornado.web.RequestHandler):\r\n    def get(self, *args, **kwargs):\r\n        req_type = self.get_argument('type', None)\r\n        if req_type == 'pagination':\r\n            ret = {'status': False, 'message': '', 'total': 0, 'rows': []}\r\n            try:\r\n                page = int(self.get_argument('page', 1))\r\n                rows = int(self.get_argument('rows', 10))\r\n                start = (page - 1) * rows\r\n                service = MerchantService()\r\n                ret['total']=service.fetch_merchant_count()\r\n                ret['rows'] = service.fetch_merchant_by_page(start, rows)\r\n                ret['status'] = True\r\n            except Exception as e:\r\n                ret['message'] = str(e)\r\n            self.write(json.dumps(ret))\r\n            return\r\n        self.render('Merchant/merchantManager.html')\r\n\r\nclass MerchantEdit(tornado.web.RequestHandler):\r\n    def get(self,*args,**kwargs):\r\n        error_summary=\"\"\r\n        merchant_id=self.get_argument(\"nid\",None)\r\n        if not merchant_id:\r\n            crumbs=\"添加商户\"\r\n            form=MerchantForm()\r\n            method=\"POST\"\r\n        else:\r\n            crumbs=\"编辑商户\"\r\n            form=MerchantForm()\r\n            # Fetch the merchant details by ID\r\n            service=MerchantService()\r\n            detail=service.fetch_merchant_detail_by_nid(merchant_id)\r\n\r\n            country_caption=detail.pop(\"country_caption\")\r\n            country_id=detail.get(\"country_id\")\r\n            form.country_id.widget.choices.append({\"value\":country_id,\"text\":country_caption})\r\n            method=\"put\"\r\n            form.init_value(detail)\r\n        self.render(\"Merchant/merchantEdit.html\",form=form,crumbs=crumbs,method=method,summary=error_summary,nid=merchant_id)\r\n\r\n    def post(self, *args, **kwargs):\r\n        \"\"\"Create a merchant.\r\n        \"\"\"\r\n        method=self.get_argument(\"_method\",None)\r\n        if method==\"put\":\r\n            return self.put(*args,**kwargs)\r\n        error_summary=\"\"\r\n        form=MerchantForm()\r\n        try:\r\n            is_valid=form.valid(self)\r\n            print(form._value_dict)\r\n            if is_valid:\r\n                if form._value_dict[\"country_id\"]==\"0\":\r\n                    form._error_dict[\"country_id\"]=\"请选择县区\"\r\n                else:\r\n                    del form._value_dict[\"nid\"]\r\n                    del form._value_dict[\"city_id\"]\r\n                    del form._value_dict[\"province_id\"]\r\n                    # Save to the database\r\n                    service=MerchantService()\r\n                    service.add_merchant(**form._value_dict)\r\n                    self.redirect(\"merchantManager.html\")\r\n                    return\r\n            else:\r\n                form.init_value(form._value_dict)\r\n        except IntegrityError as e:\r\n            error_summary=\"商户名称或登录用户必须唯一\"\r\n        except Exception as e:\r\n            error_summary=str(e)\r\n        self.render(\"Merchant/merchantEdit.html\",form=form,crumbs=\"添加商户\",method=\"post\",summary=error_summary,nid=None)\r\n\r\n    def put(self,*args,**kwargs):\r\n        \"\"\"Update a merchant.\"\"\"\r\n        error_summary=\"\"\r\n        form=MerchantForm()\r\n        merchant_id=self.get_argument(\"nid\",None)\r\n\r\n        try:\r\n            is_valid=form.valid(self)\r\n            if is_valid:\r\n                if 
form._value_dict[\"country_id\"]==\"0\":\r\n form._error_dict[\"country_id\"]=\"请选择县(区)ID\"\r\n else:\r\n nid=form._value_dict.pop(\"nid\")\r\n del form._value_dict[\"city_id\"]\r\n del form._value_dict[\"province_id\"]\r\n # 添加到数据库\r\n service=MerchantService()\r\n db_result=service.update_merchant(nid,**form._value_dict)\r\n print(db_result)\r\n if db_result:\r\n self.redirect(\"merchantManager.html\")\r\n return\r\n else:\r\n error_summary=\"更新失败\"\r\n else:\r\n form.init_value(form._value_dict)\r\n except Exception as e:\r\n error_summary=str(e)\r\n service=MerchantService()\r\n detail=service.fetch_merchant_detail_by_nid(merchant_id)\r\n country_caption=detail.pop(\"country_caption\")\r\n country_id=detail.get(\"country_id\")\r\n form.country_id.widget.choices.append({\r\n \"value\":country_id,\"text\":country_caption\r\n })\r\n self.render(\"Merchant/merchantEdit.html\",form=form,crumbs=\"编辑商户\",method=\"put\",summary=error_summary,nid=merchant_id)\r\n\r\n def delete(self, *args, **kwargs):\r\n ret={\"message\":\"\",\"status\":False}\r\n nid=self.get_argument(\"nid\",None)\r\n print(nid)\r\n if nid:\r\n try:\r\n service=MerchantService()\r\n db_result=service.delete_merchant(nid)\r\n if db_result:\r\n ret[\"message\"]=\"删除成功\"\r\n ret['status']=True\r\n else:\r\n ret[\"message\"]=\"删除失败\"\r\n except Exception as e:\r\n ret[\"message\"]=str(e)\r\n else:\r\n ret[\"message\"]=\"请选择要删除的行\"\r\n self.write(json.dumps(ret))\r\n\r\n\r\n\r\n","repo_name":"lizhihong886/ShoppingMall","sub_path":"UIAdmin/Controllers/Merchant.py","file_name":"Merchant.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21176021120","text":"# Aula 7\n# Operadores aritimeticos\n5 + 2 == 7\n5 - 2 == 3\n5 * 2 == 10\n5 / 2 == 2.5\n# exponencial 5 elevado a 2\n5 ** 2 == 25\n# inteiro da divisão\n5 // 2 == 2\n# resto da divisão\n5 % 2 == 1\n\n## precedencia de operadores\n#1 () primeiro resolva tudo dentro dos parentesis\n#2 ** resolva os exponenciais\n#3 * / // % resolva estes do primeiro ao ultimo ( ex se % aparece primeiro na linha faça-o)\n#4 + - por ultimo estes\n\nr = 2 + (3**2 + 2/2) * 2 / 10\nprint(r)\n2 + (3**2 + 2/2) * 2 / 10 == 4\nr = 3 * 5 + 4 ** 2\nprint('3 * 5 + 4 ** 2 = ',r)\nr = 3 * (5 + 4) ** 2\nprint('3 * (5 + 4) ** 2 = ',r)\n\n\n\n","repo_name":"Alexandre1961/Python","sub_path":"acm_aula_em_codigo/anotacao01.py","file_name":"anotacao01.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14485837784","text":"\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport scipy.fftpack as spfft\r\nimport scipy.misc\r\nfrom skimage.measure import compare_ssim as ssim\r\nimport time\r\n\r\nfrom pylbfgs import owlqn\r\n\r\n\r\ndef dct2(x):\r\n return spfft.dct(\r\n spfft.dct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)\r\n\r\n\r\ndef idct2(x):\r\n return spfft.idct(\r\n spfft.idct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)\r\n\r\n\r\ndef progress(x, g, fx, xnorm, gnorm, step, k, ls):\r\n # Print variables to screen or file or whatever. 
Return zero to\r\n # continue algorithm; non-zero will halt execution.\r\n\r\n if gnorm < 5:\r\n a = 1\r\n else:\r\n a = 0\r\n return a\r\n\r\n\r\ndef evaluate(x, g, step):\r\n \"\"\"An in-memory evaluation callback.\"\"\"\r\n\r\n # we want to return two things:\r\n # (1) the norm squared of the residuals, sum((Ax-b).^2), and\r\n # (2) the gradient 2*A'(Ax-b)\r\n\r\n # expand x columns-first\r\n x2 = x.reshape((nx, ny)).T\r\n\r\n # Ax is just the inverse 2D dct of x2\r\n Ax2 = idct2(x2)\r\n im.set_data(Ax2)\r\n fig.canvas.draw()\r\n print(ssim(Xorig, Ax2))\r\n\r\n # stack columns and extract samples\r\n\r\n ############OPTION1\r\n \"\"\"\r\n Ax = Ax2.T.flat[ri].reshape(b.shape)\r\n \"\"\"\r\n ####OPTIONA2\r\n Ax = np.dot(mask_vec, Ax2.T.flatten())\r\n ######\r\n # calculate the residual Ax-b and its 2-norm squared\r\n\r\n ############OPTION1\r\n \"\"\"\r\n Axb = Ax - b\r\n \"\"\"\r\n ####OPTIONA2\r\n\r\n Axb = Ax - intensity_vec\r\n #####\r\n fx = np.sum(np.power(Axb, 2))\r\n # project residual vector (k x 1) onto blank image (ny x nx)\r\n\r\n ############OPTION1\r\n \"\"\"\r\n Axb2 = np.zeros(x2.shape)\r\n Axb2.T.flat[ri] = Axb\r\n \"\"\"\r\n ####OPTIONA2\r\n\r\n Axb2 = np.zeros(x2.shape, dtype=\"float64\")\r\n for a in range(0, len(mask_vec)):\r\n Axb2 += mask_vec[a].reshape(x2.shape).T * Axb[a]\r\n \"\"\"\r\n Axb2 = np.dot(Axb,mask_vec).reshape(x2.shape).T\r\n \"\"\"\r\n\r\n # A'(Ax-b) is just the 2D dct of Axb2\r\n AtAxb2 = 2 * dct2(Axb2)\r\n AtAxb = AtAxb2.T.reshape(x.shape) # stack columns\r\n # copy over the gradient vector\r\n\r\n np.copyto(g, AtAxb)\r\n\r\n return fx\r\n\r\n\r\n# read original image\r\nTEST_IMAGE = scipy.misc.face()\r\nTEST_IMAGE = TEST_IMAGE[:, :, 0]\r\nXorig = scipy.misc.imresize(TEST_IMAGE, [128, 128])\r\nXorig = Xorig.astype(\"float64\")\r\nny, nx = Xorig.shape\r\n\r\nmask_vec = []\r\nintensity_vec = []\r\n\r\nfor i in range(0, 1000):\r\n mask = (np.random.rand(nx, ny) < 0.5) * 1\r\n masked = mask * Xorig\r\n intensity = np.sum(masked)\r\n mask_vec.append(mask.T.flatten())\r\n intensity_vec.append(intensity)\r\n#mask_vec = np.expand_dims(mask_vec, axis=1)\r\nmask_vec = np.asarray(mask_vec)\r\nintensity_vec = np.asarray(intensity_vec, dtype=\"float64\")\r\n\r\n# perform the L1 minimization in memory\r\ntic = time.clock()\r\nplt.gray()\r\nplt.ion()\r\nfig = plt.figure()\r\nim = plt.imshow(Xorig)\r\nplt.show()\r\nXat2 = owlqn(nx * ny, evaluate, progress, 10000)\r\ntac = time.clock()\r\n\r\nprint(tac - tic)\r\n\r\n# transform the output back into the spatial domain\r\nXat = Xat2.reshape(nx, ny).T # stack columns\r\nXa = idct2(Xat)\r\nZ = Xa.astype('uint8')\r\n\r\nfig = plt.figure()\r\nfig.add_subplot(1, 2, 1)\r\nplt.gray()\r\nplt.imshow(Xorig)\r\nfig.add_subplot(1, 2, 2)\r\nplt.imshow(Z)\r\nplt.show()","repo_name":"eloymg/one","sub_path":"optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12928717757","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/4/24 18:33\n# @File : args_parser.py\n# @IDE : PyCharm\n\n\nimport configparser\n\n\ndef read_config(config_file_name):\n config = configparser.ConfigParser(\n interpolation=configparser.ExtendedInterpolation()\n )\n config.read(config_file_name)\n return config\n\n\nif __name__ == '__main__':\n config_file = './conf.ini'\n conf = read_config(config_file)\n 
print(conf)\n","repo_name":"Thancoo/proxymask","sub_path":"reload/args_parser.py","file_name":"args_parser.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18227296916","text":"# Скрипт конвертирует все датасеты и аудио до частоты дискретизации указанной в конфиге\n\nimport glob\nimport yaml\nimport tqdm \nimport pathlib\nfrom contextlib import suppress\n\nimport librosa\nimport torch\nimport torchaudio\n\n\ndef convert(path: pathlib.Path, sample_rate:int, pattern:str):\n \"\"\"Конвертирует все аудио до нужной частоты дискретизации\"\"\"\n output_path = path.parent / (path.stem + '_converted')\n output_path.mkdir(parents=True, exist_ok=True)\n\n fnames = glob.glob(pattern, root_dir=path)\n for fname in tqdm.tqdm(fnames):\n audio, sr = librosa.load(path / fname, sr=sample_rate)\n try:\n torchaudio.save(output_path / fname, torch.from_numpy(audio[None]), sample_rate=sample_rate)\n except RuntimeError:\n new_folder = output_path / fname\n if new_folder.suffix:\n new_folder = new_folder.parent\n new_folder.mkdir(parents=True, exist_ok=True)\n\n torchaudio.save(output_path / fname, torch.from_numpy(audio[None]), sample_rate=sample_rate)\n\nif __name__ == \"__main__\":\n try:\n with open('config.yml', 'r') as stream:\n config = yaml.safe_load(stream)\n except FileNotFoundError:\n with open('config.yaml', 'r') as stream:\n config = yaml.safe_load(stream)\n\n sample_rate = config['sample_rate']\n\n with suppress(Exception):\n path = pathlib.Path('data/UrbanSound8K/')\n convert(path, sample_rate=sample_rate, pattern=\"**/*.wav\")\n\n path = pathlib.Path('data/ESC-50/audio/')\n convert(path, sample_rate=sample_rate, pattern=\"*.wav\")\n\n path = pathlib.Path('data/RIR/')\n convert(path, sample_rate=sample_rate, pattern=\"*.wav\")","repo_name":"fabuloudy/audio_survey","sub_path":"prepoccesing.py","file_name":"prepoccesing.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14134009911","text":"from typing import TypeVar\n\nfrom django.conf import settings\nfrom django.db.models import Model\nfrom django.db.models.fields.files import FieldFile, ImageFieldFile\nfrom django_s3_storage.storage import S3Storage\n\nT_MODEL = TypeVar(\"T_MODEL\", bound=Model)\n\n\ndef s3_asset_delete(*, storage_key: str | None = None) -> None:\n \"\"\"\n Delete s3 assets based on storage key.\n \"\"\"\n\n if not storage_key:\n raise ValueError(\n \"Storage key cannot be empty, to able to cleanup remote folder.\"\n )\n\n storage = S3Storage(aws_s3_bucket_name=settings.AWS_S3_BUCKET_NAME)\n parent_path_key = storage_key.rsplit(\"/\", 1)[0] # Get everything before the last /\n\n if storage.exists(parent_path_key):\n dirs, files = storage.listdir(parent_path_key)\n\n if len(dirs) == 0 and len(files) == 0:\n storage.delete(parent_path_key)\n\n\ndef s3_asset_cleanup(*, instance: T_MODEL, field: str) -> None:\n \"\"\"\n Delete specific s3 assets belonging to an instance.\n \"\"\"\n\n instance_field = getattr(instance, field, None)\n\n if instance_field and isinstance(instance_field, (ImageFieldFile, FieldFile)):\n storage_key = instance_field.name\n instance_field.delete(save=False)\n\n 
s3_asset_delete(storage_key=storage_key)\n","repo_name":"danielkjellid/nest","sub_path":"nest/core/utils/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71843057852","text":"import requests\r\n\r\n\r\ncity = input(\"Enter your city: \")\r\nurl = f\"https://api.openweathermap.org/data/2.5/weather?q={city}&appid=a118e4b915fbfb737736634d31139853\"\r\nrest = requests.get(url)\r\n\r\ndata = rest.json() \r\n\r\nhumidity = data['main']['humidity']\r\npressure = data['main']['pressure']\r\nwind = data['wind']['speed']\r\ndescription = data['weather'][0]['description']\r\ntemp= data['main']['temp']\r\n\r\nprint(f\"\"\" \r\n Humidity : {humidity}\r\n Pressure : {pressure}\r\n Wind : {wind}\r\n Description: {description}\r\n Temperature :{round(temp - 273.15, 1)} C \"\"\")\r\n","repo_name":"Itgeeksk/Python_miniprojects","sub_path":"cliweatherapp.py","file_name":"cliweatherapp.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39605415361","text":"# -*- coding: utf-8 -*-\n\"\"\"\n src.common.decorators\n ~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\nfrom flask import current_app as app, json, _request_ctx_stack\nfrom functools import wraps\nfrom werkzeug.exceptions import Unauthorized, Forbidden\nfrom src.common.utils import _get_token_auth_header\nfrom six.moves.urllib.request import urlopen\nfrom jose import jwt\nfrom src.common.scope import Scope\nfrom sentry_sdk import set_user, add_breadcrumb\n\n\ndef authenticate(f):\n \"\"\"\n Authenticates the user using bearer token.\n \"\"\"\n\n doc = getattr(f, \"__doc__\")\n if doc:\n setattr(f, \"__doc__\", doc + \"security:\\n\\t- BearerAuth: []\")\n\n @wraps(f)\n def decorator(*args, **kwargs):\n\n if app.config.get(\"TESTING\"):\n \"\"\"If we're testing, just return a user w/ all perms\"\"\"\n _request_ctx_stack.top.current_user = {\n \"roles\": list(Scope.members().keys())\n }\n return f(*args, **kwargs)\n\n try:\n token = _get_token_auth_header()\n jsonurl = urlopen(\"https://login.microsoftonline.com/\" +\n app.config.get(\"AZURE_TENANT_ID\") +\n \"/discovery/v2.0/keys\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n except Exception:\n raise Unauthorized(\"Unable to parse authentication\")\n\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=[\"RS256\"],\n audience=app.config.get(\"AZURE_API_AUDIENCE\"),\n issuer=(\"https://sts.windows.net/\" +\n app.config.get(\"AZURE_TENANT_ID\") +\n \"/\")\n )\n except jwt.ExpiredSignatureError:\n raise Unauthorized(\"Token is expired\")\n\n except jwt.JWTClaimsError:\n raise Unauthorized(\"Incorrect claims please check the audience\"\n \" and issuer\")\n\n except Exception:\n raise Unauthorized(\"Unable to parse authentication token\")\n\n _request_ctx_stack.top.current_user = payload\n\n \"\"\"Set the authenticated user in Sentry\"\"\"\n set_user({\n \"id\": payload.get(\"unique_id\"),\n \"username\": payload.get(\"preferred_username\"),\n \"roles\": payload.get(\"roles\", [])\n })\n\n \"\"\"Add breadcrumb\"\"\"\n add_breadcrumb(\n category=\"auth\",\n message=(\"Authenticated user \" +\n 
payload.get(\"preferred_username\", \"\")),\n level=\"info\"\n )\n\n return f(*args, **kwargs)\n raise Unauthorized(\"Unable to find appropriate key\")\n\n return decorator\n\n\ndef requires_scope(scope: Scope):\n\n def decorator(f):\n\n \"\"\"Shoves the scopes specified by the decorator in the docstring\"\"\"\n doc = getattr(f, \"__doc__\")\n if doc:\n setattr(f, \"__doc__\", (doc + \"description: |\\n\" +\n \"\\tRequired Permissions:\\n\" +\n \"\".join((f\"\\t- {i}\"\"\\n\")\n for i in scope.names) +\n \" \"))\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n cu = _request_ctx_stack.top.current_user\n\n s = Scope(cu.get(\"roles\"))\n\n \"\"\" Check if the user has the required scope(s) \"\"\"\n if not(scope & s):\n raise Forbidden()\n\n return f(*args, **kwargs)\n return decorated_function\n return decorator\n","repo_name":"KnightHacks/hackathon-2021-backend","sub_path":"src/common/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"37417094978","text":"#programm a game\nfrom random import randint\n\n\nprint(\"Welcome to my game. In this game two players gonna battle each other.\")\nname = input(\"Enter the name of player 1:\")\nprint(name)\nname2 = input(\"Enter the name of Player 2:\")\nprint(name2)\n\nclass Player: \n def __init__(self, maxAttack, maxDefense, health, name):\n self.maxAttack = maxAttack\n self.maxDefense = maxDefense\n self.health = health\n self.name = name\n\n def get_attack(self): \n return randint(0, self.maxAttack)\n\n def get_defense(self):\n return randint(0, self.maxDefense)\n\n\nplayer1 = Player(30,20,200,name)\nplayer2 = Player(30,20,200,name2)\n\n# attackPlayer1 = randint(1, player1.attack)\n\n# print(player1.attack)\n# print(player1.attack-player1.defense)\n# print(player1.attack)\n\nwhile player1.health > 0 and player2.health > 0:\n option = input(\"Choose what you want to do: \\\n 1.... attack \\\n 2.... defense\")\n\n bonus = 10\n # print(f\"this is player.get_attack:{player1.get_attack()}\")\n # print(f\"this is player.get_defense:{player2.get_defense()}\")\n calculatedDamage = player1.get_attack() - player2.get_defense()\n calculatedDefense = player2.get_attack() - player1.get_defense()\n if option == 1:\n calculatedDamage +=bonus\n elif option == 2:\n calculatedDefense +=bonus\n if calculatedDamage <= 0:\n print(\"Die Attacke von dir wurde blockiert\")\n else: \n player2.health -=calculatedDamage\n print(f\"Du hast / Sie haben so viel Schaden gemacht: {calculatedDamage}. 
Der Gegner hat noch so viel Leben:{player2.health}\")\nif player1.health > 0:\n print(f\"Gratuliere {player1.name} zu deinem Sieg\")\nif player2.health > 0: \n print(f\"Gratuliere {player2.name}zu deinem Sieg\")\n\n\n\n\n\n# if player1 set attack :\n# player2.health - player1.attack\n\n\n","repo_name":"Andrej8840/Schnupperlehre","sub_path":"Aufgabe2.py","file_name":"Aufgabe2.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6472833376","text":"from pymongo import MongoClient\n\nDB_HOST = \"mongodb://127.0.0.1:27017/\"\nDB_NAME = \"yelp\"\nDB_COLLECTION = \"review\"\n\n\nclient = MongoClient(DB_HOST)\ndb = client[DB_NAME]\nreview_Collection = db[DB_COLLECTION]\n\nreview_Cursor = review_Collection.find(no_cursor_timeout=False)\n\n\nbusiness_Collection = db[\"business\"]\n\nprint(\"Code started successfully\")\n\nskip = 0\ntotal_Count = review_Collection.find({}).count()\nlimit = 10\ncount = 0\n\n\nwhile count < total_Count:\n # print(f\"count of while = {count}\")\n review_Cursor = review_Collection.aggregate([\n {\n \"$skip\":skip\n },\n {\n \"$limit\":limit\n }\n ])\n\n for doc in review_Cursor:\n count = count + 1\n print(f\"count of for = {count}\")\n tempCursor = business_Collection.find({\n \"business_id\":doc[\"business_id\"]\n })\n\n if tempCursor.count() == 0:\n review_Collection.delete_one(doc)\n\n\n skip = skip + limit\nprint(\"Code successfully exited\")\n","repo_name":"NikhilAshodariya/YelpRestaurant","sub_path":"Miscellaneous/Old .py files/remove_From_Review.py","file_name":"remove_From_Review.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31192346212","text":"# Prints weather information for the next 5 days split in 3-hour segments\n# For API information: https://openweathermap.org/forecast5\nimport requests\nimport json\n\nif __name__ == '__main__':\n baseUrl = 'http://api.openweathermap.org/data/2.5/forecast'\n # In 'appid', insert your own key from OpenWeather\n parameters = {'q': 'Dublin,IE', 'appid': 'YOUR-KEY-HERE', 'units': 'metric'}\n response = requests.get(baseUrl, params=parameters)\n content = response.content\n # Parse json info\n info = json.loads(content)\n\n # To iterate through every timestamp\n list = info['list']\n for i in range(len(list)):\n listInfo = list[i]\n date = listInfo['dt_txt']\n mainData = listInfo['main']\n tempMax = mainData['temp_max']\n tempMin = mainData['temp_min']\n feelsLike = mainData['feels_like']\n weather = listInfo['weather']\n weatherDesc = weather[0]\n description = weatherDesc['description']\n\n print('Weather information for %s\\nMax Temp: %sC, Min Temp: %sC, Feels like: %sC\\nWeather description: %s\\n' %\n (date, tempMax, tempMin, feelsLike, description))\n\n # print(listInfo)\n","repo_name":"Igor-Ono/checkWeather","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37510072208","text":"import numpy as np\n\nfrom collections.VanCollections import TracksData, Frame\nfrom matplotlib import pyplot as plt\nfrom functional import VanFunctional as F\n\nLOAD_PRE_COMPUTED_TRACKING = True\n\n\ndef process_tracks_stats(tracks_data: TracksData):\n \"\"\"\n This function computes tracks statistics.\n :param tracks_data: tracks data structure.\n :return: None.\n \"\"\"\n tracks_df = 
tracks_data.cords_data\n\n # Total number of tracks\n track_count = len(tracks_df['TrackId'].unique())\n print(f'Total number of tracks : {track_count}')\n\n # Total number of frames\n frame_count = len(tracks_df['FrameId'].unique())\n print(f'Total number of frames: {frame_count}')\n\n # track length statistics\n tracks_len_df = tracks_df[['TrackId', 'FrameId']].groupby('TrackId').count()\n tracks_len_df.sort_values(by='FrameId', inplace=True, ascending=False)\n print(f'Track minimum length: {tracks_len_df.min().item()}')\n print(f'Track maximum length: {tracks_len_df.max().item()}')\n print(f'Track mean length: {tracks_len_df.mean().item()}')\n\n # average number of tracks per image\n tracks_count_df = tracks_df[['TrackId', 'FrameId']].groupby('FrameId').count()\n print(f'Mean number of frame links: {tracks_count_df.mean().item()}')\n\n\ndef cut_image_region(left_x, y, region_size=100, img_width=1226, img_height=370):\n \"\"\"\n This function cuts a region around a track.\n :param left_x: start x.\n :param y: start y.\n :param region_size: region size.\n :param img_width: original image width\n :param img_height: original image height.\n :return: region boundaries.\n \"\"\"\n offset = np.floor(region_size / 2)\n scatter_x_offset = 0\n scatter_y_offset = 0\n\n # vertical bounds\n upper_bound = max(y - offset, 0)\n lower_bound = min(y + (region_size - offset), img_height)\n padding_size = region_size - np.abs(lower_bound - upper_bound)\n\n if upper_bound == 0:\n lower_bound += padding_size\n scatter_y_offset -= padding_size\n elif lower_bound == img_height:\n upper_bound -= padding_size\n scatter_y_offset += padding_size\n\n # horizontal bounds - left image\n left_bound = max(left_x - offset, 0)\n right_bound = min(left_x + (region_size - offset), img_width)\n padding_size = region_size - np.abs(right_bound - left_bound)\n\n if left_bound == 0:\n right_bound += padding_size\n scatter_x_offset -= padding_size\n elif right_bound == img_width:\n left_bound -= padding_size\n scatter_x_offset += padding_size\n\n return int(np.round(left_bound)), int(np.round(right_bound)), \\\n int(np.round(upper_bound)), int(np.round(lower_bound)), \\\n int(np.round(scatter_x_offset)), int(np.round(scatter_y_offset))\n\n\ndef visualize_track(tracks_data: TracksData, track_id: int, max_seq_len=10):\n \"\"\"\n This function visualizes a track.\n :param tracks_data: tracks data structure.\n :param track_id: track id.\n :param max_seq_len: maximum frames to visualize.\n :return: None.\n \"\"\"\n track_obj = tracks_data.get_track_object(track_id)\n start_frame = track_obj.get_start_frame()\n track_len = min(track_obj.get_len(), max_seq_len)\n\n for frame_id in range(start_frame, start_frame + track_len):\n cur_frame = Frame(frame_id)\n cur_track_cords = tracks_data.get_track_frame_cords(frame_id, track_id)\n\n # cutting region from left image\n x_min, x_max, y_min, y_max, left_x_offset, left_y_offset = cut_image_region(\n cur_track_cords[0], cur_track_cords[2])\n\n left_region = cur_frame.get_left_image()[y_min: y_max, x_min: x_max]\n\n # cutting region from right image\n x_min, x_max, y_min, y_max, right_x_offset, right_y_offset = cut_image_region(\n cur_track_cords[1], cur_track_cords[2])\n\n right_region = cur_frame.get_right_image()[y_min: y_max, x_min: x_max]\n\n fig, ax = plt.subplots(1, 2)\n ax[0].imshow(left_region, cmap='gray')\n ax[1].imshow(right_region, cmap='gray')\n\n ax[0].scatter(50 + left_x_offset, 50 + left_y_offset)\n ax[1].scatter(50 + right_x_offset, 50 + right_y_offset)\n\n 
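# The scatter markers above sit at the region centre (50, 50), shifted by the\n        # padding offsets that cut_image_region returns for tracks near the image border.\n        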
ax[0].set_title('left region')\n ax[1].set_title('right region')\n fig.suptitle(f'Frame #{frame_id}')\n\n plt.savefig(f'{track_id}_f{frame_id}.png')\n plt.show()\n\n\ndef get_connectivity_data(tracks_data: TracksData):\n \"\"\"\n This function computes the connectivity data.\n :param tracks_data: tracks data structure.\n :return: connectivity data.\n \"\"\"\n out_going_list = []\n num_frames = tracks_data.get_num_frames()\n\n cur_frame_tracks = set(tracks_data.get_tracks_by_frame(0))\n next_frame_tracks = set(tracks_data.get_tracks_by_frame(1))\n\n for next_frame_id in range(1, num_frames - 2):\n cur_out_going = cur_frame_tracks.intersection(next_frame_tracks)\n out_going_list.append(len(cur_out_going))\n\n cur_frame_tracks = next_frame_tracks\n next_frame_tracks = set(tracks_data.get_tracks_by_frame(next_frame_id + 1))\n\n return out_going_list\n\n\ndef plot_connectivity(connectivity_data: list):\n \"\"\"\n This function plots connectivity data.\n :param connectivity_data: connectivity data as list.\n :return: None.\n \"\"\"\n x_data = np.arange(len(connectivity_data))\n\n fig, ax = plt.subplots(figsize=(12, 8))\n ax.plot(x_data, connectivity_data)\n\n plt.xlabel('frame')\n plt.ylabel('outgoing tracks')\n plt.title('Connectivity')\n\n plt.show()\n\n\ndef plot_track_length_histogram(tracks_data: TracksData):\n \"\"\"\n This function plots the track length histogram.\n :param tracks_data: tracks data.\n :return: None.\n \"\"\"\n tracks_df = tracks_data.cords_data[['TrackId', 'FrameId']]\n tracks_df = tracks_df.groupby('TrackId').count()\n tracks_df.reset_index(None, drop=False, inplace=True)\n tracks_df = tracks_df.groupby('FrameId').count()\n\n hist_x, hist_y = [0], [0]\n hist_x.extend(list(tracks_df.index))\n hist_y.extend(list(tracks_df['TrackId']))\n\n fig, ax = plt.subplots(figsize=(12, 8))\n ax.plot(hist_x, hist_y)\n\n plt.xlabel('Track length')\n plt.ylabel('Track #')\n plt.title('Track length histogram')\n\n plt.show()\n\n\ndef track_re_projection(tracks_data: TracksData, track_id=-1, positions_lst=None):\n \"\"\"\n This function computes the re-projection error for a track.\n :param tracks_data: tracks data.\n :return: errors list, start frame.\n \"\"\"\n if track_id < 0:\n track_id = tracks_data.get_track_of_len(10)\n\n track_obj = tracks_data.get_track_object(track_id)\n last_frame = track_obj.get_start_frame() + track_obj.get_len() - 1\n\n # getting the track's coordinates\n last_track_cords = tracks_data.get_track_frame_cords(last_frame, track_id)\n\n # loading the ground truth data\n if positions_lst is None:\n gt_list = F.get_camera_ground_truth()\n else:\n gt_list = positions_lst\n\n k, m1, m2 = F.get_calib()\n\n # computing the last camera matrices\n last_left_ext = gt_list[last_frame]\n last_right_rotation = last_left_ext[:, :-1]\n last_right_translation = (last_left_ext[:, -1] + m2[:, -1]).reshape(3, 1)\n last_right_ext = np.hstack([last_right_rotation, last_right_translation])\n\n # computing triangulation for the last frame\n track_3d_cords = F.compute_triangulation(k, last_left_ext, last_right_ext,\n (last_track_cords[0], last_track_cords[2]),\n (last_track_cords[1], last_track_cords[2]))\n track_3d_cords = track_3d_cords.reshape(4, 1)\n track_3d_cords = track_3d_cords / track_3d_cords[-1, :]\n dist_list = []\n\n for frame_id in range(track_obj.get_start_frame(), track_obj.get_start_frame() + track_obj.get_len()):\n cur_track_cords = tracks_data.get_track_frame_cords(frame_id, track_id)\n cur_left_img_cords = [cur_track_cords[0], cur_track_cords[2]]\n 
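# cur_track_cords holds (x_left, x_right, y); both cameras share the same row y.\n        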
cur_right_img_cords = [cur_track_cords[1], cur_track_cords[2]]\n\n cur_left_img_cords = np.array(cur_left_img_cords).reshape(2, 1)\n cur_right_img_cords = np.array(cur_right_img_cords).reshape(2, 1)\n\n cur_left_ext = gt_list[frame_id]\n cur_right_r = cur_left_ext[:, :-1]\n cur_right_t = (cur_left_ext[:, -1] + m2[:, -1]).reshape(3, 1)\n cur_right_ext = np.hstack([cur_right_r, cur_right_t])\n\n # projecting the points to pixels\n cur_left_proj = k @ cur_left_ext @ track_3d_cords\n cur_left_proj = cur_left_proj[:-1, :] / cur_left_proj[-1, :]\n\n cur_right_proj = k @ cur_right_ext @ track_3d_cords\n cur_right_proj = cur_right_proj[:-1, :] / cur_right_proj[-1, :]\n\n # computing the average error for current frame\n cur_err = np.linalg.norm(cur_left_img_cords - cur_left_proj) + \\\n np.linalg.norm(cur_right_img_cords - cur_right_proj)\n dist_list.append(cur_err / 2)\n\n return dist_list, track_obj.get_start_frame()\n\n\ndef plot_re_projection_error(dist_list: list, start_frame: int):\n \"\"\"\n This function plots the re-projection error.\n :param dist_list: errors list.\n :param start_frame: start frame.\n :return: None.\n \"\"\"\n x_vals = np.arange(start_frame, start_frame + len(dist_list), 1).astype(int)\n\n fig, ax = plt.subplots(figsize=(12, 8))\n ax.plot(x_vals, dist_list)\n\n plt.xlabel('frame')\n plt.ylabel('average re-projection error')\n plt.title('Re-projection error per frame')\n\n plt.show()\n\n\ndef plot_inliers_data(tracks_data: TracksData):\n \"\"\"\n This function plots the inliers data.\n :param tracks_data: tracks data.\n :return: None.\n \"\"\"\n cur_data = tracks_data.get_supporters_data()\n x_data = np.array(list(cur_data.keys()))\n y_data = np.array(list(cur_data.values())) * 100\n\n fig, ax = plt.subplots(figsize=(12, 8))\n ax.plot(x_data, y_data)\n\n plt.xlabel('frame')\n plt.ylabel('inliers percentage')\n plt.title('Q5 - inliers percentage per frame')\n\n plt.show()\n\n\nif __name__ == '__main__':\n if not LOAD_PRE_COMPUTED_TRACKING:\n F.track_frames(2560, 'try1.pkl', True)\n\n x = TracksData.from_pickle('try1.pkl')\n\n # ----------------------------------------------------- Question 2\n process_tracks_stats(x)\n\n # ----------------------------------------------------- Question 3\n cur_id = x.get_track_of_len(10)\n visualize_track(x, cur_id, max_seq_len=10)\n\n # ----------------------------------------------------- Question 4\n c_data = get_connectivity_data(x)\n plot_connectivity(c_data)\n\n # ----------------------------------------------------- Question 5\n plot_inliers_data(x)\n\n # ----------------------------------------------------- Question 6\n plot_track_length_histogram(x)\n\n # ----------------------------------------------------- Question 7\n re_projection_errs, start = track_re_projection(x)\n plot_re_projection_error(re_projection_errs, start)\n","repo_name":"dortal721/VANProject","sub_path":"ex_code/slam_ex4.py","file_name":"slam_ex4.py","file_ext":"py","file_size_in_byte":10924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2876284479","text":"import threading\nimport simpleubjson\nimport json\nimport os\nimport time\nimport bson\nimport msgpack\n\nclass CompressorThread(threading.Thread):\n INPUT_FOLDER_SIZE_COUNTER = 0\n OUTPUT_FOLDER_SIZE_COUNTER = 0\n TOTAL_COMPRESSION_DURATION = 0\n\n def __init__(self, items, output_folder, compression_type, extension):\n threading.Thread.__init__(self)\n self.extension = extension\n self.items = items\n self.output_folder = output_folder\n 
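# compression_type selects one of the encoders below: 'ubjson', 'bson' or 'msgpack'.\n        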
self.compression_type = compression_type\n\n def get_json_as_object(self, json_file_location):\n json_file = open(json_file_location)\n json_raw = json_file.read()\n json_file.close()\n return json.loads(json_raw)\n\n def compress_file_ub_json(self, json_file_location):\n parsed_json = self.get_json_as_object(json_file_location)\n compr_started_time = time.time()\n ubjson_bytes = simpleubjson.encode(parsed_json)\n duration_compression = time.time() - compr_started_time\n CompressorThread.TOTAL_COMPRESSION_DURATION += duration_compression\n return ubjson_bytes\n\n def compress_file_bson(self, json_file_location):\n parsed_json = self.get_json_as_object(json_file_location)\n compr_started_time = time.time()\n bson_bytes = bson.encode_array(parsed_json, [])\n duration_compression = time.time() - compr_started_time\n CompressorThread.TOTAL_COMPRESSION_DURATION += duration_compression\n return bson_bytes\n\n def compress_file_msgpack(self, json_file_location):\n parsed_json = self.get_json_as_object(json_file_location)\n compr_started = time.time()\n msgpack_bytes = msgpack.packb(parsed_json, use_bin_type=True)\n duration_compression = time.time() - compr_started\n CompressorThread.TOTAL_COMPRESSION_DURATION += duration_compression\n return msgpack_bytes\n\n def write_bytes_to_location(self, output_file_location, bytes_output):\n bin_json = open(output_file_location, \"wb\")\n bin_json.write(bytes_output)\n bin_json.close()\n\n def update_stats(self, initial_file_location, compressed_file_location, file_name):\n initial_size = os.path.getsize(initial_file_location)\n compressed_size = os.path.getsize(compressed_file_location)\n CompressorThread.INPUT_FOLDER_SIZE_COUNTER += initial_size\n CompressorThread.OUTPUT_FOLDER_SIZE_COUNTER += compressed_size\n\n def compress_and_store_file(self, input_location, output_location, file_name):\n bytes_compressed = self.call_compression_function(input_location)\n self.write_bytes_to_location(output_location, bytes_compressed)\n self.update_stats(input_location, output_location, file_name)\n\n def call_compression_function(self, input_location):\n if self.compression_type == 'ubjson':\n return self.compress_file_ub_json(input_location)\n elif self.compression_type == 'bson':\n return self.compress_file_bson(input_location)\n elif self.compression_type == 'msgpack':\n return self.compress_file_msgpack(input_location)\n\n def run(self):\n for item in self.items:\n basename = os.path.basename(item)\n output_file = self.output_folder + basename.split(self.extension)[0]\n self.compress_and_store_file(item, output_file, basename)","repo_name":"LSDE-Flickr-ML-Classification/json-compression-test","sub_path":"compressor_thread.py","file_name":"compressor_thread.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"844753442","text":"# Databricks notebook source\n# MAGIC %sql\n# MAGIC CREATE DATABASE IF NOT EXISTS groupe8;\n\n# COMMAND ----------\n\ndf = spark.read.csv(\"/mnt/groupe8/Characters.csv\",sep=';',header=True)\nlistOfHouse = ['Gryffindor', 'Slytherin', 'Ravenclaw', 'Hufflepuff']\nformatDate = \"yyyy-mm\"\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import *\nfrom datetime import datetime\n\ndata = df.filter(col(\"House\").isin(listOfHouse))\ndata = data.select(\"Gender\", \"Wand\", \"Patronus\", \"Species\", \"Blood status\", \"Loyalty\", \"Skills\", \"Birth\", \"House\", \"Eye colour\").withColumnRenamed(\"Blood status\", 
\"Bloodstatus\").withColumnRenamed(\"Eye colour\", \"Eyecolour\")\ndata = data.withColumn(\"Patronus\", when(data.Patronus == \"Unknown\", \"null\") .when(data.Patronus == \"None\", \"null\").otherwise(data.Patronus))\n\n# COMMAND ----------\n\ndata = data.withColumn(\"Birth\", regexp_replace('Birth', 'Late', '28'))\ndata = data.withColumn(\"Birth\", regexp_replace('Birth', 'or earlier', ''))\ndata = data.withColumn(\"Birth\", regexp_replace('Birth', 'prior to', ''))\ndata = data.withColumn(\"Birth\", expr(\"CASE WHEN Birth LIKE '%–%' THEN 'null' \" + \n \"WHEN Birth LIKE '%Pre%' THEN 'null' \"+\n \"WHEN Birth LIKE '%pre%' THEN 'null' \"+\n \"WHEN Birth LIKE '%c.%' THEN 'null' \" +\n \"WHEN Birth LIKE '%Post%' THEN 'null' \"+\n \"WHEN Birth LIKE '%century%' THEN 'null' \"+\n \"WHEN Birth LIKE '%In or before%' THEN 'null' \"+\n \"ELSE Birth END\"))\n\n# COMMAND ----------\n\ndata.write.format(\"delta\").mode(\"overwrite\").option(\"userMetadata\", \"init\").saveAsTable(\"groupe8.pottertable3\")\n\n# COMMAND ----------\n\ndisplay(data)\n\n# COMMAND ----------\n\ndisplay(df)\n\n# COMMAND ----------\n\n\n","repo_name":"nitharsan21/Azure_Data","sub_path":"EtudeData.py","file_name":"EtudeData.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4478940750","text":"#is a blue print in oop\n#an object/instance\n#syntax\n#class name_of_the_class():\n #the blue print attributes\n\n\nclass Person():\n name = 'Developer'\n\nd1 = Person()\nd2 = Person()\n\nprint(d1.name)\nprint(d2.name)\n\n\n#all classes has a function called _init_()\n# which is always executed when the class is being initiated\n#Use the _init_() function to assign values to properties or other operations that are neccessary\n\n\n\n\nclass Animal():\n country = 'kenya'#class property\n def __init__(self,name):\n self.thename = name\n\n def sound(self):\n if self.thename == 'cutty':\n print('mweeew')\n\n def kitty(self):\n if self.thename == 'cutty':\n print('such a playerful cat')\n def fear(self):\n if self.thename == 'Betty':\n print('betty is not feeling well')\n def white(self):\n if self.thename == 'cutty':\n print('cutty is a deaf cat')\n\n\n def sounds(self):\n if self.thename == 'bob':\n print('woooof')\n\n\n\ncat = Animal('cutty')\nprint(cat.thename)\ncat2= Animal('Betty')\nprint(cat.thename)\nprint(cat.country)\nprint('bob is a nice puppy')\ndog = Animal('bob')\n\n#deleting a property\n#del object.property\n#el cat.thename\nprint(cat.thename)\nprint(cat2.thename)\n#deleting an object\n#del object\ndel cat\nprint(cat)\n\ncat.sound()\ncat.kitty()\ncat2.fear()\ncat.white()\ndog.sounds()\n\n#modifying objectproperty\n#object.property='new value'\ncat.country = 'Uganda'\nprint(cat.country)\n\n\n\n","repo_name":"muigaipeter/Python_class","sub_path":"workingfolder/classes/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25845301531","text":"import scapy.all as scapy\nimport optparse\n\n\ndef get_user_input():\n parse = optparse.OptionParser()\n parse.add_option(\"-i\", \"--ipaddress\", dest=\"ip_address\", help=\"Enter IP Address\")\n\n (user_input, arguments) = parse.parse_args()\n\n if not user_input.ip_address:\n print(\"Enter IP Address\")\n\n return user_input\n\n\ndef scan_my_network(ip):\n # Create ARP Request\n arp_request = scapy.ARP(pdst=ip)\n # scapy.ls(scapy.ARP())\n\n # Broadcast\n 
broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n    combined = broadcast/arp_request  # combine the two packets into a single packet\n\n    # Response\n    (answered_list, unanswered_list) = scapy.srp(combined, timeout=1)  # send the packet (answered/unanswered hosts are returned)\n    answered_list.summary()\n\n\nuser_ip_address = get_user_input()\nscan_my_network(user_ip_address.ip_address)\n","repo_name":"DilaraGule/Ethical_Hacker_Course_net_scanner","sub_path":"net_scanner.py","file_name":"net_scanner.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25868968153","text":"\"\"\" Helper classes \"\"\"\n\nimport base64\nimport os\nfrom collections.abc import Callable\nfrom email.mime.text import MIMEText\nfrom urllib import parse\n\nimport facebook\nimport httplib2\nimport requests as r\nimport soundcloud\nimport twitter\nfrom googleapiclient import discovery, errors\nfrom oauth2client.file import Storage\n\nfrom bot_chucky.errors import BotChuckyError\nfrom bot_chucky.utils import split_text\n\n\nclass FacebookData:\n    def __init__(self, token):\n        \"\"\"\n        :param token: Facebook Page token\n        :param _api: Instance of the GraphAPI object\n        \"\"\"\n        self.token = token\n        self._api = facebook.GraphAPI(self.token)\n\n    def get_user_name(self, _id):\n        \"\"\"\n        :param _id: find user object by _id\n        :return: first name of user, type -> str\n        \"\"\"\n        if not isinstance(_id, str):\n            raise ValueError('id must be a str')\n        user = self._api.get_object(_id)\n        return user['first_name'] if user else None\n\n\nclass WeatherData:\n    \"\"\"\n    Class which collects weather data\n    \"\"\"\n    def __init__(self, api_token):\n        \"\"\"\n        :param api_token: Open Weather TOKEN\n        \"\"\"\n        self.token = api_token\n\n    def get_current_weather(self, city_name):\n        \"\"\"\n        :param city_name: Open weather API, find by city name\n        :return: dictionary object with information\n\n        for example:\n\n        {'weather': [{'id': 800, 'main': 'Clear', 'description': 'clear sky'}]}\n        \"\"\"\n        api_url = 'http://api.openweathermap.org' \\\n                  '/data/2.5/weather?q={0}&APPID={1}'.format(city_name, self.token)\n\n        info = r.get(api_url).json()\n        return info\n\n\nclass TwitterData:\n    \"\"\"\n    Class which collects Twitter data\n    \"\"\"\n    def __init__(self, tokens):\n        \"\"\"\n        :param tokens: Dictionary of all tokens\n                       [consumer_key, consumer_secret, access_token_key,\n                       access_token_secret]\n                       required to initialize the Twitter Api\n        \"\"\"\n        self.api = twitter.Api(\n            consumer_key=tokens['consumer_key'],\n            consumer_secret=tokens['consumer_secret'],\n            access_token_key=tokens['access_token_key'],\n            access_token_secret=tokens['access_token_secret']\n        )\n\n    def send_tweet(self, status):\n        if status:\n            try:\n                return {\n                    'success': True,\n                    'tweet': self.api.PostUpdate(status)\n                }\n            except twitter.error.TwitterError as TWE:\n                return {\n                    'detail': TWE.message[0]['message'],\n                    'success': False\n                }\n\n\nclass StackExchangeData:\n    \"\"\"\n    Class which collects StackExchange data\n    \"\"\"\n    _default_parameters = {\n        'order': 'desc',\n        'sort': 'activity',\n        'site': 'stackoverflow',\n    }\n\n    def get_stack_answer_by(self, **kwargs):\n        \"\"\"\n        :param kwargs: create a query from the arguments,\n                       for example:\n                       tag='Python' will search by tag,\n                       title='Update Python' will search by title,\n                       etc.\n        :return: an array with links\n        \"\"\"\n        if len(kwargs) > 1:\n            raise BotChuckyError('The argument must be one')\n\n        for key in kwargs.keys():\n            query = kwargs.get(key)\n            self._default_parameters.update({key: 
query})\n\n if not isinstance(query, str):\n raise TypeError(f'{query} must be a string')\n\n encode_query = parse.urlencode(self._default_parameters)\n\n stack_url = f'https://api.stackexchange.com/2.2/search/advanced?' \\\n f'{encode_query}'\n\n questions = r.get(stack_url).json()\n links = [obj['link'] for obj in questions['items']]\n return links\n\n\nclass SoundCloudData:\n \"\"\"\n Class to gather soundcloud data, tracks etc\n \"\"\"\n def __init__(self, client_id):\n \"\"\"\n client_id = Client ID, must be registered\n \"\"\"\n self.client_id = client_id\n self._api = soundcloud.Client(client_id=self.client_id)\n\n def resolve_track(self, url):\n \"\"\"\n Resolve a track name\n :param url: permalink to a track (str)\n \"\"\"\n try:\n track = self._api.get('/resolve', str(url))\n\n return {\n 'success': True,\n 'track': track.id\n }\n except Exception as e:\n return {\n 'success': False,\n 'detail': f'Error: {e.message}, Code: {e.response.status_code}'\n }\n\n def search(self, artist=None):\n \"\"\"\n Search for tracks by artist, or artist by track\n :param artist: search by artist, returns tracks and info, type -> str\n \"\"\"\n self.artist = artist\n\n if self.artist is not None:\n try:\n artists = self._api.get('/users', q=self.artist)\n tracks = self._api.get('/tracks', q=self.artist)\n return {\n 'success': True,\n 'artists': artists,\n 'tracks': tracks\n }\n except Exception as e:\n return {\n 'success': False,\n 'detail': f'Error: {e.message}, Code: '\n f'{e.response.status_code}'\n }\n\n\nclass GmailData:\n \"\"\"\n Class which collect Gmail Data\n \"\"\"\n def __init__(self):\n self.api = self._create_gmail_api()\n\n def send_mail(self, to, subject, body):\n \"\"\"\n :param to: Email address of the receiver\n :param subject: Subject of the email\n :param body: Body of the email\n \"\"\"\n message = self._create_message(to, subject, body)\n try:\n message = self.api.users().messages().send(\n userId='me',\n body=message\n ).execute()\n return {\n \"success\": True,\n \"message\": message\n }\n except errors.HttpError as error:\n return {\n \"success\": False,\n \"detail\": str(error)\n }\n\n def _create_gmail_api(self):\n try:\n credentials = self._get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n return service\n except AttributeError:\n return ''\n\n def _get_credentials(self):\n \"\"\"Gets valid user credentials from storage.\n :return: Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n return credentials\n\n def _create_message(self, to, subject, body):\n \"\"\"\n Create a message for an Email.\n :param to: Email address of the receiver\n :param subject: Subject of the email\n :param body: Body of the email\n \"\"\"\n message = MIMEText(body)\n message['to'] = to\n message['subject'] = subject\n return {\n 'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()\n }\n\n\nclass ChuckyCustomGenerator(Callable):\n \"\"\"\n warnings:: Class not completed yet\n description:: Class will allow to add customs unique words/functions,\n If user want to create own realization of the bot,\n he should use the CustomGenerator class.\n future:: It will be imported into BotChucky class.\n\n :Example:\n # 
first create custom functions\n def hello_python():\n return 'Hello Python!'\n\n def news_python():\n return 'Python news!'\n\n my_config = {\n '#Python': hello_python\n }\n\n # Create instance of ChuckyGenerator\n bot = ChuckyCustomGenerator()\n bot.config = my_config\n\n # If we get some text from messenger\n # And we pass an argument to the bot\n\n my_message = 'Hello I want to learn #Python'\n bot(my_message)\n\n The bot will return the result of a custom function: 'Hello Python!'\n\n Update our config, and add topics:\n\n # Add topics\n # For example\n # If we got text with #Python and 'bye' word\n my_config = {\n '#Python': {'news': news_python}\n }\n bot.config = my_config\n\n my_message = 'Hey #Python, and send me your news'\n bot(my_message)\n\n bot will return the result of a custom function: 'Python news!'\n \"\"\"\n config = {}\n\n def get_text(self, text):\n return split_text(text)\n\n @property\n def config_keys(self):\n return self.config.keys()\n\n def check_and_run(self, text):\n func = None\n for key in self.config_keys:\n if key not in text:\n msg = 'Sorry, could you repeat please?'\n return msg\n if key in text:\n func = self.config.get(key)\n if isinstance(func, Callable):\n return func()\n else:\n for topic in self.config.get(key):\n if topic in text:\n func = self.config[key][topic]\n return func()\n\n def __call__(self, text, **kwargs):\n text = self.get_text(text)\n return self.check_and_run(text)\n\n def __str__(self):\n return f'{self.__class__.__name__}' \\\n f'(Your config: {self.config})'\n","repo_name":"sk364/Bot-Chucky","sub_path":"bot_chucky/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17566880331","text":"import json\nimport datetime \n\nfrom django.http import JsonResponse\nfrom django.views import View\n\nfrom orders.models import WishList, OrderList, Order, PaymentMethod\nfrom users.models import Address\nfrom users.utils import login_confirm\n\nclass WishListVew(View):\n @login_confirm\n def get(self, request):\n user = request.user\n products = [wishlist.product for wishlist in WishList.objects.filter(user=user)]\n\n result = [\n {\n 'product_id' : product.id,\n 'product_name' : product.name,\n 'product_thumbnail_url' : product.thumbnail_url,\n 'product_selections' : [\n {\n 'size' : product_selection.size,\n 'price' : product_selection.price\n } for product_selection in product.productselection_set.all() \n ]\n } for product in products \n ]\n\n return JsonResponse({'result': result}, status=200)\n\n @login_confirm\n def post(self, request):\n user = request.user\n data = json.loads(request.body)\n\n if WishList.objects.filter(user=user, product_id=data['product_id']).exists():\n return JsonResponse({'result': 'ALREADY_EXIT_ERROR'}, status=400)\n\n WishList.objects.create(user=user, product_id=data['product_id'])\n\n return JsonResponse({'result': 'SUCCESS'}, status=201)\n\nclass ModifyWishListVew(View):\n @login_confirm\n def delete(self, request, product_id):\n user = request.user\n\n if not WishList.objects.filter(user=user, product_id=product_id).exists():\n return JsonResponse({'message': 'DOES_NOT_EXIST_ERROR'}, status=400)\n\n WishList.objects.filter(user=user, product_id=product_id).delete()\n \n return JsonResponse({'result': 'SUCCESS'}, status=204)\n\nclass CartListView(View):\n @login_confirm\n def get(self, request):\n user = request.user\n order = Order.objects.get(user=user, 
status__name='장바구니')\n\n result = {\n 'order_id' : order.id,\n 'products' : [\n {\n 'product_id' : order_list.product_selection.product.id,\n 'product_name' : order_list.product_selection.product.name,\n 'product_size' : order_list.product_selection.size,\n 'product_price' : order_list.product_selection.price,\n 'product_quantity' : order_list.quantity\n } for order_list in OrderList.objects.filter(order=order)\n ]\n }\n\n return JsonResponse({'result': result}, status=200)\n\n @login_confirm\n def post(self, request):\n user = request.user\n data = json.loads(request.body)\n\n if not Order.objects.filter(user=user, status__name='장바구니').exists():\n order = Order.objects.create(user=user, status_id=1)\n OrderList.objects.create(order=order, product_selection_id=data['product_selection_id'], quantity=1)\n else:\n order = Order.objects.get(user=user, status__name='장바구니')\n if OrderList.objects.filter(order=order, product_selection_id=data['product_selection_id']).exists():\n orderlist = OrderList.objects.get(order=order, product_selection_id=data['product_selection_id'])\n orderlist.quantity += 1\n orderlist.save()\n else:\n OrderList.objects.create(order=order, product_selection_id=data['product_selection_id'], quantity=1)\n\n return JsonResponse({'result': 'SUCCESS'}, status=201)\n \nclass ModifyCartListView(View):\n @login_confirm\n def patch(self, request, product_selection_id):\n user = request.user\n data = json.loads(request.body)\n order = Order.objects.get(user=user, status__name='장바구니')\n\n if not OrderList.objects.filter(order=order, product_selection=product_selection_id).exists():\n return JsonResponse({'message': 'DOES_NOT_EXIST_ERROR'}, status=400)\n \n orderlist = OrderList.objects.get(order=order, product_selection=product_selection_id)\n orderlist.quantity = data['quantity']\n orderlist.save()\n\n return JsonResponse({'result': 'SUCCESS'}, status=200)\n\n @login_confirm\n def delete(self, request, product_selection_id):\n user = request.user\n order = Order.objects.get(user=user, status__name='장바구니')\n\n if not OrderList.objects.filter(order=order, product_selection=product_selection_id).exists():\n return JsonResponse({'message': 'DOES_NOT_EXIST_ERROR'}, status=400)\n\n OrderList.objects.get(order=order, product_selection=product_selection_id).delete()\n \n return JsonResponse({'result': 'SUCCESS'}, status=204)\n\nclass OrderView(View):\n @login_confirm\n def get(self, request):\n user = request.user\n orders = Order.objects.prefetch_related('orderlist_set').filter(user=user, status__name='구매완료')\n\n result = [\n {\n 'address' : order.address.address if order.address else None,\n 'memo' : order.memo if order.memo else None,\n 'payment_method' : order.payment_method.name if order.payment_method else None,\n 'purchased_at' : order.purchased_at,\n 'total_price' : order.total_price,\n 'free_delivery' : order.free_delivery,\n 'products' : [\n {\n 'product_id' : order_list.product_selection.product.name,\n 'product_name' : order_list.product_selection.product.name,\n 'product_size' : order_list.product_selection.size,\n 'product_price' : order_list.product_selection.price,\n 'product_quantity' : order_list.quantity\n } for order_list in order.orderlist_set.all()\n ]\n } for order in orders\n ]\n\n return JsonResponse({'result': result}, status=200)\n\n @login_confirm\n def post(self, request):\n try:\n user = request.user\n data = json.loads(request.body)\n\n if not Order.objects.prefetch_related('orderlist_set').filter(id=data['order_id']).exists():\n return 
JsonResponse({'message': 'DOES_NOT_EXIST_ERROR'}, status=400)\n\n            if Order.objects.get(id=data['order_id']).status_id == 2:\n                return JsonResponse({'message': 'ALREADY_PAYED_ERROR'}, status=400)\n\n            order = Order.objects.prefetch_related('orderlist_set').get(id=data['order_id'])\n            address, created = Address.objects.get_or_create(user=user, address=data['address'])\n            payment_method = PaymentMethod.objects.get(id=data['payment_method_id'])\n            total_price = sum([order_list.product_selection.price * order_list.quantity for order_list in order.orderlist_set.all()])\n\n            FREE_DELIVERY = 30000\n\n            order.status_id = 2\n            order.address = address\n            order.memo = data.get('memo', None)\n            order.payment_method = payment_method if payment_method else None\n            order.total_price = total_price\n            order.free_delivery = total_price > FREE_DELIVERY\n            order.purchased_at = datetime.datetime.now()\n            order.save()\n\n            return JsonResponse({'result': 'SUCCESS'}, status=200)\n\n        except KeyError:\n            return JsonResponse({'message': 'KEY_ERROR'}, status=400)","repo_name":"hwaya2828/WESOP-backend","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74733526330","text":"\"\"\"\n-- HOW IT WORKS --\n# Honestly, the algorithm is described very well in the sprint's theory section: \"Implementing binary search with recursion\".\nSince the array has been rotated, before applying binary search we additionally need to\ncheck which half of the array is sorted, for example by comparing the left end with the middle element.\nAfter this check we use the recursive binary search.\n\n-- PROOF OF CORRECTNESS --\nThe problem statement says \"... the array contains only unique elements\".\n - In the case right > mid we consider the interval [mid; right), so the left\n   half of the array can be discarded and the search continues only in the right half, i.e. the new search\n   bounds are [mid; right), and this half-interval is guaranteed to be sorted.\n - In the case left < mid we consider the interval [left; mid), so the right\n   half can be discarded and the search continues only in the left half, i.e. the new search\n   bounds are [left; mid) - this half-interval is sorted as well.\n\n-- TIME COMPLEXITY --\nBinary search halves the search interval at every step, so the time complexity is O(log(n)).\n\n-- SPACE COMPLEXITY --\nWe store the input array and use recursive calls. In the worst case (see the time complexity) the\ncomplexity is O(logN), since we make O(logN) calls and keep each call's frame on the stack.\nThis is also due to the fact that Python does not support tail-call optimization.\n\"\"\"\n\n\"\"\"\n-- ID of the accepted submission --\n56013520\n\"\"\"\n\n\"\"\"\nI went back to the first version of the search implementation. Thank you very much for the advice! At first I got confused\nby the intervals..., which is why, to pass all the test cases, I wrote those functions that located the boundaries. But now I have figured them out.\nThanks :) !\n\"\"\"\n\n\n# Compute the middle of the array.\ndef count_mid(left, right):\n    return (right + left) // 2\n\n\n# Recursive binary search.\ndef binary_search(array_, left_, right_, element_):\n    # Find the middle of the array.\n    mid = count_mid(left_, right_)\n\n    # If the middle of the array is the element we are looking for, return the result right away.\n    if array_[mid] == element_:\n        return mid\n\n    # Base case of the recursion.\n    if right_ - left_ <= 0:\n        return -1\n\n    # Case when the left part is sorted.\n    if array_[left_] <= array_[mid]:\n        if array_[left_] <= element_ < array_[mid]:\n            return binary_search(array_, left_, mid, element_)\n        else:\n            return binary_search(array_, mid + 1, right_, element_)\n\n    # Case when the right part is sorted.\n    if array_[left_] > array_[mid]:\n        if array_[mid] < element_ <= array_[right_]:\n            return binary_search(array_, mid + 1, right_, element_)\n        else:\n            return binary_search(array_, left_, mid, element_)\n\n\n# Function that searches for the element in the array.\ndef broken_search(array_, element_):\n    left_ = 0\n    right_ = len(array_) - 1\n\n    return binary_search(array_, left_, right_, element_)\n\n\nif __name__ == '__main__':\n    n = int(input())  # Length of the array.\n    element = int(input())  # Element of the array whose index we need to find.\n    array = list(map(int, input().strip().split()))[:n]  # Input array.\n\n    print(broken_search(array, element))\n","repo_name":"DimaZzZz101/Yandex_Practicum_Algorithms","sub_path":"Sprint_3/Final_tasks/Task_1/search_in_broken_array.py","file_name":"search_in_broken_array.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74875693370","text":"from random import randint\nfrom timeit import repeat\n\n\ndef quick_sort(array):\n\n    # Base cases\n    if len(array) < 2:\n        return array\n\n    low, same, high = [], [], []\n\n    # Select the pivot randomly\n    pivot = array[randint(0, len(array) - 1)]\n\n    for item in array:\n        # Elements smaller than the pivot go to the low list\n        if item < pivot:\n            low.append(item)\n        # Elements equal to the pivot go to the same list.\n        elif item == pivot:\n            same.append(item)\n        # Elements larger than the pivot go to the high list.
\n elif item > pivot:\n high.append(item)\n\n # The final result combines the sorted low, same, sorted\n return quick_sort(low) + same + quick_sort(high)\n\n# Test cases\n\n# Performance evaluation\ndef check_performance(algorithm):\n\n ARRAY_LENGTH = 1000\n\n # Generate an array of `ARRAY_LENGTH` random integer between 0 and 999\n array = [randint(0, 1000) for i in range(ARRAY_LENGTH)]\n\n # Execute the code ten times and return the time that each execution took\n stmt = f\"{algorithm}({array})\"\n setup_code = f\"from __main__ import {algorithm}\"\n times = repeat(setup=setup_code, stmt=stmt, repeat=3, number=10)\n\n # Display the name of the algorithm and the minimum time it took to run\n print(f\"Algorithm: {algorithm}, minimum execution time: {min(times)}\")\n\n\nif __name__ == \"__main__\":\n\n check_performance(\"quick_sort\")","repo_name":"edab/DSA_Quick_Reference","sub_path":"solutions/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"39974825123","text":"\"\"\"\n Factory with static methods (python equivalence) to transform\n Foaf instance in Lucene Document and inverse process\n\"\"\"\nfrom PyLucene import Document, Field\n#from futil.foaf.foaf import Foaf\n\nfields = {\n \"name\" : ( Field.Store.YES, Field.Index.TOKENIZED),\n \"nick\" : ( Field.Store.YES, Field.Index.TOKENIZED),\n# \"sha\" : ( Field.Store.YES, Field.Index.UN_TOKENIZED),\n \"uri\" : ( Field.Store.YES, Field.Index.UN_TOKENIZED),\n \"friends\": (Field.Store.YES, Field.Index.UN_TOKENIZED),\n \"geolat\": (Field.Store.YES, Field.Index.UN_TOKENIZED),\n \"geolong\": (Field.Store.YES, Field.Index.UN_TOKENIZED)\n}\n\n\"\"\"\n Helper class to create static methods\n http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52304\n\"\"\"\nclass Static:\n def __init__(self, anycallable):\n self.__call__ = anycallable\n\n\nclass FoafDocumentFactory:\n\n def getDocumentFromFOAF(foaf):\n doc = Document()\n for attr, value in foaf.iteritems():\n if ( fields.has_key(attr)):\n # Now is always a list!\n for x in value:\n if isinstance(x, tuple): #for example (sha, uri)\n doc.add(Field(attr, x[1], fields[attr][0], fields[attr][1]))\n else:\n doc.add(Field(attr, x, fields[attr][0], fields[attr][1]))\n\n else:\n pass\n # DEBUG information print \"E: Field \" + attr + \" ignored in index\"\n return doc\n getDocumentFromFOAF = Static(getDocumentFromFOAF)\n\n def getFOAFFromDocument(doc):\n f = {}\n for key in fields.iterkeys():\n values = doc.getValues(key)\n if not values:\n continue\n if len(values) > 1:\n f[key] = doc.getValues(key)\n else:\n f[key] = values[0]\n return f\n getFOAFFromDocument = Static(getFOAFFromDocument)\n\n","repo_name":"BackupTheBerlios/futil-svn","sub_path":"trunk/src/futil/index/foafDocumentFactory.py","file_name":"foafDocumentFactory.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41849779328","text":"\"\"\"ocrarian file manager config\"\"\"\nfrom pathlib import Path\nfrom appdirs import AppDirs\n\nfrom ocrarian import APP_NAME\n\n\nclass Config(AppDirs):\n \"\"\"Config class for ocrarian\n This class is responsible for creating configuration directory and settings file.\n It also handles settings load, save and delete.\"\"\"\n\n def __init__(self):\n super().__init__(appname=APP_NAME)\n self.user_docs_dir = Path(\"~/Documents\").expanduser()\n 
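# expanduser() resolves '~' to the real home directory before any path checks below.\n        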
self.create_directories()\n\n    def create_directories(self):\n        \"\"\"Create config and docs directories.\"\"\"\n        # pylint: disable=no-member\n        # Instance of 'user_config_dir' has no 'exists' member (no-member)\n        # Instance of 'user_config_dir' has no 'mkdir' member (no-member)\n        if not self.user_config_dir.exists():\n            self.user_config_dir.mkdir()\n        if not self.user_docs_dir.exists():\n            self.user_docs_dir.mkdir()\n        if not self.user_cache_dir.exists():\n            self.user_cache_dir.mkdir()\n","repo_name":"ocrarian/ocrarian.py","sub_path":"ocrarian/common/file_manager/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"73808510013","text":"import os\nimport shlex\nimport contextlib\nfrom functools import partial\n\nfrom tornado import gen\nfrom tornado.process import Subprocess\nfrom tornado import ioloop\nfrom tornado import iostream\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass ProcessMeta(type):\n    PROCESSES = {}\n\n    def __call__(cls, name, *args, **kwargs):\n        if name in ProcessMeta.PROCESSES:\n            raise RuntimeError(\"Duplicate process '{}'\".format(name))\n\n        inst = type.__call__(cls, name, *args, **kwargs)\n        ProcessMeta.PROCESSES[name] = inst\n        return inst\n\n\nclass CommandMixin(object):\n    def command_prefix(self):\n        return []\n\n    def command_suffix(self):\n        return []\n\n    def __enter__(self):\n        pass\n\n    def __exit__(self, *_):\n        pass\n\n\nclass Argument(CommandMixin):\n    def __init__(self, *args):\n        self.args = args\n\n    def __repr__(self):\n        return \"<Argument {}>\".format(\" \".join(self.args))\n\n    def command_suffix(self):\n        return list(self.args)\n\n\nclass Environ(CommandMixin):\n    def __init__(self, *args, **kwargs):\n        self.environ = dict(args, **kwargs)\n\n    def __repr__(self):\n        return \"<Environ {}>\".format(\" \".join(self.environ.keys()))\n\n    def __enter__(self):\n        self._original_state = {}\n        for key in self.environ:\n            if key in os.environ:\n                self._original_state[key] = os.environ[key]\n            os.environ[key] = self.environ[key]\n\n    def __exit__(self, *_):\n        for key in self.environ:\n            if key in self._original_state:\n                os.environ[key] = self._original_state[key]\n            else:\n                del os.environ[key]\n        del self._original_state\n\n\nclass Wrapper(CommandMixin):\n    def __init__(self, *args):\n        self.prefix = args\n\n    def command_prefix(self):\n        return list(self.prefix)\n\n\nclass Process(object):\n    __metaclass__ = ProcessMeta\n\n    command = []\n\n    def __init__(self, name, *mixins, **kwargs):\n        self.name = name\n        self.kwargs = kwargs\n        self.mixins = mixins\n\n        self.proc = None\n        self.exitcode = None\n        self.subs = {}\n\n    def __repr__(self):\n        return \"<Process {} {}>\".format(\n            self.name,\n            \" \".join(self.format_command()))\n\n    def __str__(self):\n        return self.name\n\n    def command_vars(self):\n        return dict(name=self.name, **self.kwargs)\n\n    def format_command(self):\n        _vars = self.command_vars()\n\n        cmd = self.command[:]\n\n        for mixin in self.mixins:\n            cmd = mixin.command_prefix() + cmd + mixin.command_suffix()\n\n        if isinstance(cmd, (str, unicode)):\n            cmd = shlex.split(cmd)\n\n        for i, part in enumerate(cmd):\n            cmd[i] = part.format(**_vars)\n\n        return cmd\n\n    @property\n    def running(self):\n        return self.proc is not None\n\n    @property\n    def pid(self):\n        return self.proc.pid if self.proc else None\n\n    def run(self):\n        logger.info(\"Running %s\", \" \".join(self.command))\n\n        with contextlib.nested(*self.mixins):\n            self.proc = Subprocess(\n                self.format_command(),\n                stdout=Subprocess.STREAM,\n                
stderr=Subprocess.STREAM)\n\n self._exit_future = self._wait_for_exit()\n self._start_read('stdout', self.proc.stdout)\n self._start_read('stderr', self.proc.stderr)\n\n @gen.coroutine\n def stop(self, kill=False):\n proc = self.proc.proc\n if kill:\n logger.info(\"Killing %r [PID %i]\", self, self.pid)\n proc.kill()\n else:\n logger.info(\"Terminating %r [PID %i]\", self, self.pid)\n proc.terminate()\n\n exitcode = yield self._exit_future\n raise gen.Return(exitcode)\n\n def subscribe(self, id, cb):\n logger.info(\"Subscription: %r added\", id)\n self.subs[id] = cb\n\n def unsubscribe(self, id):\n try:\n del self.subs[id]\n logger.info(\"Subscription: %r removed\", id)\n except KeyError:\n pass\n\n @gen.coroutine\n def _wait_for_exit(self):\n exitcode = yield self.proc.wait_for_exit(False)\n logger.info(\"Process %r stopped\", self)\n self.proc = None\n self.exitcode = exitcode\n self.subs = {}\n raise gen.Return(exitcode)\n\n def _start_read(self, label, stream):\n loop = ioloop.IOLoop.current()\n loop.add_future(self._read(label, stream), lambda f: f)\n\n @gen.coroutine\n def _read(self, label, stream):\n try:\n while self.running:\n msg = yield stream.read_until('\\n')\n for sub_cb in self.subs.itervalues():\n sub_cb(self, label, msg)\n except iostream.StreamClosedError:\n pass\n","repo_name":"xlevus/python-gurgle","sub_path":"gurgle/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17501992090","text":"import csv\nimport pymysql\n\n# Connect to the database\ndb = pymysql.connect(host='localhost', user='UNAME', password='PASSWORD', database='DB_NAME')\ncursor = db.cursor()\n\n# Insert movies from CSV into the database\ndef insert_movies_from_csv():\n with open('data.csv', 'r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n insert_movie(row['name'], row['time'])\n\n# Insert movies into the database\ndef insert_movie(name, time):\n try:\n query = \"INSERT INTO movies (name, time) VALUES (%s, %s)\"\n cursor.execute(query, (name, time))\n db.commit()\n print(f\"Movie '{name}' loaded successfully!\")\n except Exception as e:\n db.rollback()\n print(\"Error:\", e)\n\nif __name__ == \"__main__\":\n insert_movies_from_csv()\n db.close()\n","repo_name":"Cyber-Zypher/Cinema-Tickets-Booking-System","sub_path":"load_movie.py","file_name":"load_movie.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30835178842","text":"# https://www.tutorialspoint.com/gensim/gensim_creating_tf_idf_matrix.htm\n# https://honingds.com/blog/natural-language-processing-with-python/\n\nimport itertools\nfrom collections import defaultdict\n\nfrom gensim.corpora import Dictionary\nfrom nltk import word_tokenize\n\n\ndef lda_gensim_parse_post(classification, blogs):\n tokenized_docs = [word_tokenize(doc) for doc in blogs]\n dictionary = Dictionary(tokenized_docs)\n bag_of_words_corpus = [dictionary.doc2bow(tokenized_doc) for tokenized_doc in tokenized_docs]\n\n total_word_count = defaultdict(int)\n for word_id, word_count in itertools.chain.from_iterable(bag_of_words_corpus):\n total_word_count[word_id] += word_count\n\n # Create a sorted list from the defaultdict: sorted_word_count\n sorted_word_count = sorted(total_word_count.items(), key=lambda w: w[1], reverse=True)\n\n # Print the top 1 words across all documents alongside the count\n for 
word_id, word_count in sorted_word_count[:1]:\n print(classification + \" is talking about '\" + str(dictionary.get(word_id)) + \"', with \" + str(\n word_count) + \" occurence\")\n","repo_name":"jaycedel/TextMiningAssignment2","sub_path":"lda_gensim_topic_modelling.py","file_name":"lda_gensim_topic_modelling.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"10901800018","text":"import scipy.io\nimport numpy as np\nfrom tqdm import tqdm\n\nclass CSL_data_preprocessing:\n \n def load_from_mat(self,fileAddress):\n \"\"\"\n Load data from a MATLAB file.\n\n Args:\n fileAddress (str): The address of the MATLAB file.\n\n Returns:\n numpy.ndarray: The data stored under the key 'Data' in the MATLAB file.\n \"\"\"\n return scipy.io.loadmat(fileAddress)[\"gestures\"]\n\n def extra_data(self,fileAddress):\n \"\"\"\n Load and concatenate data from multiple MATLAB files, generating corresponding labels.\n\n Args:\n fileAddress (str): The address of the directory containing the MATLAB files.\n\n Returns:\n tuple: A tuple containing two numpy.ndarrays. The first element is the concatenated data,\n and the second element is the corresponding labels. The third element is the number of classes\n \"\"\"\n data = None\n label = None\n num_class = 26\n for i in tqdm(range(1, num_class+1),desc=\"Processing Files\"):\n for trial in tqdm(range(10),desc=\"Processing trials in file\"):\n if data is None:\n data = self.load_from_mat(\n f\"{fileAddress}/gest{i}.mat\")[trial][0].T\n label = np.repeat(i-1,data.shape[0])\n else:\n temp = self.load_from_mat(\n f\"{fileAddress}/gest{i}.mat\")[trial][0].T\n data = np.concatenate((data,temp),axis=0)\n label = np.concatenate((label,np.repeat(i-1,temp.shape[0])),axis=0)\n return data,label,num_class\n\n def NormalizeData(self,data):\n # data = (data - data.mean())/(data.std())\n return data\n","repo_name":"MIC-Laboratory/CNN-HD-sEMG-Classifier","sub_path":"utils/CSL_data_preprocessing.py","file_name":"CSL_data_preprocessing.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22961731692","text":"def digits(n: int):\n count = 0\n while n != 0:\n n //= 10\n count += 1\n return count\n\ndef power(x: int, y: int):\n if y == 0:\n return 1\n elif y == 1:\n return x\n return x * power(x, y-1)\n\ndef get_digit(num: int, index: int):\n return num // power(10, index) % 10\n\ndef split_at(num: int, index: int):\n # Index starting from the right\n return int(str(num)[:-index]), int(str(num)[-index:])\n\ndef third_grade_multiplication(a: int, b: int):\n suma = 0\n for i in range(digits(b)):\n digit = get_digit(b, i)\n suma += a * digit * power(10, i)\n return suma\n\ndef rec_int_mult(a: int, b:int):\n pass\n\ndef karatsuba_multiplication(a: int, b: int):\n if a < 10 or b < 10:\n return a * b\n \n # Size of the numbers\n m = min(digits(a), digits(b)) // 2\n\n # Split the digit sequences in the middle\n high1, low1 = split_at(a, m)\n high2, low2 = split_at(b, m)\n\n # Recursive calls to numbers approximately half the size\n z0 = karatsuba_multiplication(low1, low2)\n z1 = karatsuba_multiplication(low1 + high1, low2 + high2)\n z2 = karatsuba_multiplication(high1, high2)\n \n return z2 * power(10, m * 2) + (z1 - z2 - z0) * power(10, m) + z0\n\n\nif __name__ == '__main__':\n\n test_cases = [\n (3, 4, 3 * 4),\n (11, 12, 11 * 12),\n (111, 12, 111 * 12),\n (123456, 33, 123456 * 33),\n 
(654, 987654, 654 * 987654),\n ]\n \n for a, b, solution in test_cases:\n result = third_grade_multiplication(a, b)\n string = f'third_grade_multiplication({a:6}, {b:6}) = {result:9}'\n string += ' ' * (60 - len(string))\n print(string, f'\\t\\tTest: {\"OK\" if solution == result else \"NOT OK\"}')\n\n result = karatsuba_multiplication(a, b)\n string = f' karatsuba_multiplication({a:6}, {b:6}) = {result:9}'\n string += ' ' * (60 - len(string))\n print(string, f'\\t\\tTest: {\"OK\" if solution == result else \"NOT OK\"}\\n')","repo_name":"daalgi/algorithms","sub_path":"divide-and-conquer/integer_multiplication.py","file_name":"integer_multiplication.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"69820941054","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n # @Time : 2019-04-26 17:22\n # @Author : Awiny\n # @Site :\n # @Project : pytorch_i3d\n # @File : charades_checkpoints.py\n # @Software: PyCharm\n # @Github : https://github.com/FingerRec\n # @Blog : http://fingerrec.github.io\n\"\"\"\nimport scipy.io\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #close the warning\n\"\"\" Defines functions used for checkpointing models and storing model scores \"\"\"\nimport os\nimport torch\nimport shutil\nfrom collections import OrderedDict\n\n\ndef ordered_load_state(model, chkpoint):\n \"\"\"\n Wrapping the model with parallel/dataparallel seems to\n change the variable names for the states\n This attempts to load normally and otherwise aligns the labels\n of the two statese and tries again.\n \"\"\"\n try:\n model.load_state_dict(chkpoint)\n except Exception: # assume order is the same, and use new labels\n print('keys do not match model, trying to align')\n modelkeys = model.state_dict().keys()\n fixed = OrderedDict([(z,y)\n for (x,y),z in zip(chkpoint.items(), modelkeys)])\n model.load_state_dict(fixed)\n\n\ndef load(args, model, optimizer):\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n chkpoint = torch.load(args.resume)\n if isinstance(chkpoint, dict) and 'state_dict' in chkpoint:\n args.start_epoch = chkpoint['epoch']\n mAP = chkpoint['mAP']\n ordered_load_state(model, chkpoint['state_dict'])\n optimizer.load_state_dict(chkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, chkpoint['epoch']))\n return mAP\n else:\n ordered_load_state(model, chkpoint)\n print(\"=> loaded checkpoint '{}' (just weights)\"\n .format(args.resume))\n return 0\n else:\n raise ValueError(\"no checkpoint found at '{}'\".format(args.resume))\n return 0\n\n\ndef score_file(scores, filename):\n with open(filename, 'w') as f:\n for key, val in sorted(scores.items()):\n f.write('{} {}\\n'.format(key, val))\n\n\ndef save(epoch, args, model, optimizer, is_best, scores):\n state = {\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'mAP': scores['mAP'],\n 'optimizer': optimizer.state_dict(),\n }\n filename = \"{}/model.pth.tar\".format('checkpoints/charades')\n score_file(scores, \"{}/model_{:03d}.txt\".format('checkpoints/charades', epoch+1))\n torch.save(state, filename)\n if is_best:\n bestname = \"{}/model_best.pth.tar\".format('checkpoints/charades')\n score_file(scores, \"{}/model_best.txt\".format('checkpoints/charades', epoch+1))\n shutil.copyfile(filename, 
bestname)\n","repo_name":"FingerRec/RHE","sub_path":"tools/utils/charades_checkpoints.py","file_name":"charades_checkpoints.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"26307274064","text":"import os, time\nimport zipfile\n\n\ndef origin_zip():\n    # Prepare an empty archive named demoreport.zip\n    # Open it in append mode 'a' so several files can be added\n    with zipfile.ZipFile(file='../demoreport.zip', mode='a') as f:\n        # Add the file to be compressed, giving it a new file name\n        f.write(r'C:\\Users\\janti\\PycharmProjects\\autotest-android\\YYandroid\\UItest\\report\\demoreport.html',\n                'demoreport11_5.html')\n        f.close()\n\n\ndef upgrade_zipfile():\n    with open('../data/result.csv',encoding='UTF-8') as f:\n        text=f.read()\n    # print(text)\n    with zipfile.ZipFile(file='../test_result_set.zip', mode='a') as z:\n        for filename in os.listdir('../UItest/report/screenshot'):\n            if filename in text:\n                z.write('../UItest/report/screenshot/%s' % filename, filename)\n        z.write('../UItest/report/test_report.html','UItestreport.html')\n        for videofile in os.listdir('../data'):\n            if videofile in text:\n                print(videofile)\n                # pass\n                z.write('../data/%s'%videofile,videofile)\n        z.close()\n\n\n\n\nif __name__ == '__main__':\n    t1 = time.time()\n    upgrade_zipfile()\n    t2 = time.time()\n    print(t2 - t1)\n","repo_name":"jantionMH/fuckyou","sub_path":"YYandroid/Utility/zipsource.py","file_name":"zipsource.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10349804013","text":"# 15489 : Pascal's Triangle\n# https://www.acmicpc.net/problem/15489\n\nr, c, w = map(lambda x: int(x), input().split())\nheight0 = r-1\nwidth0 = c-1\nheight1 = r-1 + w\n# print(height0, width0, height1)\n\npascal = [[1] * i for i in range(1, height1+1)]\nfor i in range(1, height1):\n    for j in range(1, i):\n        pascal[i][j] = pascal[i-1][j-1] + pascal[i-1][j]\n# print(pascal)\n\nresult = 0\nindex = 1\nfor i in range(height0, height1):\n    for j in range(width0, width0+index):\n        # print(pascal[i][j])\n        result += pascal[i][j]\n    index += 1\n\nprint(result)","repo_name":"Ir2placeable/Area51","sub_path":"python/DP/silver_4/15489.py","file_name":"15489.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33845033286","text":"from .deformation import calc_deformation\nfrom .utils import sample_from_discrete_uniform\nimport numpy\nimport torch\n\nimport pyro\nimport pyro.distributions as dist\nimport pyro.primitives as prim\n\n\nclass GeoModel:\n    def __init__(self, heads, reference_times, observed_deformations):\n        # initializations\n        self.heads = heads\n        self.reference_times = reference_times\n        self.observed_deformations = torch.FloatTensor(observed_deformations)\n\n    @pyro.infer.config_enumerate\n    def model(self):\n        
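\"\"\"added sketch of the intent: sample log10-scale Cauchy priors for Kv/Sskv/Sske and a discrete-uniform nclay, run calc_deformation, then score the interpolated curve against the observed deformations under a Normal(scale=2) likelihood.\"\"\"\n        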
kv = prim.sample(name=\"kv\", fn=dist.Cauchy(loc=-5, scale=3)).item()\n        sskv = prim.sample(name=\"sskv\",\n                           fn=dist.Cauchy(loc=-3.5, scale=3)).item()\n        sske = prim.sample(name=\"sske\", fn=dist.Cauchy(loc=-5, scale=3)).item()\n        nclay = sample_from_discrete_uniform(name=\"nclay\",\n                                             values=list(range(5, 11)))\n        claythick = 5\n        interp_times, defm, _, _ = calc_deformation(\n            time=self.reference_times, head=self.heads, Kv=10**kv,\n            Sskv=10**sskv, Sske=10**sske, claythick=claythick,\n            nclay=nclay)\n\n        aligned_deformation = torch.FloatTensor(\n            numpy.interp(self.reference_times, interp_times, defm))\n\n        for i in pyro.plate(\"data_loop\", len(self.observed_deformations)):\n            prim.sample(name=\"data_{}\".format(i),\n                        fn=dist.Normal(self.observed_deformations[i], 2),\n                        obs=aligned_deformation[i])\n\n        return kv, sskv, sske, nclay\n\n    def model_importance_sampling(self, observed_deformations):\n        kv = prim.sample(name=\"kv\", fn=dist.Cauchy(loc=-5, scale=3)).item()\n        sskv = prim.sample(name=\"sskv\",\n                           fn=dist.Cauchy(loc=-3.5, scale=3)).item()\n        sske = prim.sample(name=\"sske\", fn=dist.Cauchy(loc=-5, scale=3)).item()\n        # print(kv, sskv, sske)\n        nclay = sample_from_discrete_uniform(name=\"nclay\",\n                                             values=list(range(5, 11)))\n        interp_times, defm, _, _ = calc_deformation(\n            time=self.reference_times, head=self.heads, Kv=10**kv,\n            Sskv=10**sskv, Sske=10**sske, claythick=5, nclay=nclay)\n\n        aligned_deformation = torch.FloatTensor(\n            numpy.interp(self.reference_times, interp_times, defm))\n\n        for i in pyro.plate(\"data_loop\", len(observed_deformations)):\n            prim.sample(name=\"data_{}\".format(i),\n                        fn=dist.Normal(observed_deformations[i], 2),\n                        obs=aligned_deformation[i])\n\n        return kv, sskv, sske, nclay\n\n\ndef read_from_file(file_location: str):\n    \"\"\"Read the input data from the given file\n\n    Args:\n        file_location (str): The location of the given input file.\n\n    Returns:\n        (List[int], List[int], List[int]): Three lists of the same length,\n        corresponding to: heads, reference time and observed deformation\n        measurements.\n    \"\"\"\n    with open(file_location) as f:\n        heads = [float(h) for h in f.readline().split()]\n        reference_times = [float(h) for h in f.readline().split()]\n        observed_deformations = [float(h) for h in f.readline().split()]\n    if len(heads) != len(reference_times) != len(observed_deformations):\n        raise RuntimeError(\"Heads, reference times and observed deformations\"\n                           \" were expected to have the same length but they\"\n                           \" didn't.\")\n    return heads, reference_times, observed_deformations\n","repo_name":"Ganoash/CSE4340-PP","sub_path":"Problems/GEO/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41754580104","text":"from pico2d import *\nimport gobj\nfrom missile import Missile\nimport gfw\n\nclass Player:\n    KEY_MAP = {\n        (SDL_KEYDOWN, SDLK_UP): (0, 1),\n        (SDL_KEYDOWN, SDLK_DOWN): (0, -1),\n        (SDL_KEYDOWN, SDLK_LEFT): (-1, 0),\n        (SDL_KEYDOWN, SDLK_RIGHT): ( 1, 0),\n        (SDL_KEYUP, SDLK_UP): (0, -1),\n        (SDL_KEYUP, SDLK_DOWN): (0, 1),\n        (SDL_KEYUP, SDLK_LEFT): ( 1, 0),\n        (SDL_KEYUP, SDLK_RIGHT): (-1, 0)\n    }\n    KEYDOWN_Z = (SDL_KEYDOWN, SDLK_z)\n    KEYDOWN_SHIFT = (SDL_KEYDOWN, SDLK_LSHIFT)\n    KEYUP_Z = (SDL_KEYUP, SDLK_z)\n    KEYUP_SHIFT = (SDL_KEYUP, SDLK_LSHIFT)\n\n    SHOOTHING_INTERVAL = 0.05\n    ROTATING_SLOWEFF = 3.14 / 180\n\n    def __init__(self):\n        self.image = gfw.image.load('./res/player.png')\n        self.slowEffImage = gfw.image.load('./res/eff_sloweffect.png') # temporary; original values: 0,0,64,64\n        gobj.set_image_alpha(self.slowEffImage, 220)\n        self.pos = get_canvas_width() // 2, 100\n        self.delta = 0, 0\n        self.fidx = 0\n        self.action = 0\n        self.time = 0\n        self.shooting_time = 0\n        self.speed = 300\n        self.degree = 0\n        self.death = 0\n        self.deathtime = 0\n        self.slowing = False\n        self.shooting = False\n        self.nodamage = False\n        Player.player = self\n\n        global BOUNDARY_LEFT, BOUNDARY_RIGHT, BOUNDARY_DOWN, BOUNDARY_UP\n        BOUNDARY_LEFT = 16\n        BOUNDARY_DOWN = 24\n        BOUNDARY_RIGHT = get_canvas_width() - BOUNDARY_LEFT\n        BOUNDARY_UP = get_canvas_height() - BOUNDARY_DOWN\n\n    def update(self):\n        x, y = self.pos\n        dx, dy = 
self.delta\n x += dx * self.speed * gfw.delta_time\n y += dy * self.speed * gfw.delta_time\n\n x = clamp(BOUNDARY_LEFT, x, BOUNDARY_RIGHT)\n y = clamp(BOUNDARY_DOWN, y, BOUNDARY_UP)\n self.pos = x, y\n\n self.time += gfw.delta_time\n self.shooting_time += gfw.delta_time\n frame = self.time * 10\n if self.action != 0:\n self.fidx = (int(frame) % 4) + 4\n else:\n self.fidx = int(frame) % 8\n\n if self.shooting and self.shooting_time > Player.SHOOTHING_INTERVAL:\n self.fire()\n\n if self.nodamage:\n self.deathtime += gfw.delta_time\n gobj.set_image_alpha(self.image, 125)\n if self.deathtime > 3:\n self.nodamage = False\n self.deathtime = 0\n gobj.set_image_alpha(self.image, 255)\n self.degree += Player.ROTATING_SLOWEFF\n\n def draw(self):\n width, height = 32, 48\n sx = self.fidx * width\n sy = (self.image.h - 48) - (self.action * height)\n\n self.image.clip_draw(sx, sy, width, height, *self.pos)\n\n if self.slowing:\n self.slowEffImage.clip_composite_draw(0,0,64,64, self.degree, '', *self.pos ,64,64)\n\n def handle_event(self, e):\n pair = (e.type, e.key)\n if pair in Player.KEY_MAP:\n self.delta = gobj.pos_add(self.delta, Player.KEY_MAP[pair])\n dx = self.delta[0]\n self.action = \\\n 1 if dx < 0 else \\\n 2 if dx > 0 else 0\n\n if pair == Player.KEYDOWN_Z:\n self.shooting = True\n elif pair == Player.KEYUP_Z:\n self.shooting = False\n\n if pair == Player.KEYDOWN_SHIFT:\n self.slowing = True\n self.speed = 100\n elif pair == Player.KEYUP_SHIFT:\n self.slowing = False\n self.speed = 300\n\n\n def fire(self):\n self.shooting_time = 0\n x, y = self.pos\n halfX = 16 # 32 // 2\n halfY = 24 # 48 // 2\n m1 = Missile(x - halfX, y + halfY)\n m2 = Missile(x + halfX, y + halfY)\n gfw.world.add(gfw.layer.missile, m1)\n gfw.world.add(gfw.layer.missile, m2)\n\n def get_BB(self):\n hw = 2\n hh = 2\n return (self.pos[0] - hw, self.pos[0] + hw, self.pos[1] - hh, self.pos[1] +hh)\n\n","repo_name":"Brw-fox/2DGP","sub_path":"기말 프로젝트/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20941697715","text":"\"\"\" database.py\n created by: Lucca Giusti (solucca)\n This module uses functional programming to handle\n data from the mariadb mysql database\n\"\"\"\n\nimport mysql.connector\nfrom typing import Dict, List, Union\nimport json\n\nDATABASE_CONFIG = {}\nwith open(\"./credentials.json\", \"r\") as f:\n DATABASE_CONFIG = json.load(f)\n\n\ndef create_type_table(payload: dict) -> str:\n \"\"\"Creates a table for the entity type provided in the payload\"\"\"\n entity_type: str = payload.get(\"type\")\n\n # Connect to the MariaDB database\n conn = mysql.connector.connect(**DATABASE_CONFIG)\n cursor = conn.cursor()\n\n try:\n # Create the SQL query to create the table\n table_name = entity_type.lower()\n columns = [\n \"id INT AUTO_INCREMENT PRIMARY KEY\",\n \"entity_id VARCHAR(64)\",\n \"timestamp DATETIME DEFAULT CURRENT_TIMESTAMP\",\n ]\n\n for key, data in payload.items():\n if key in [\"id\", \"type\"]:\n continue\n\n # Add columns based on the data type\n if data[\"type\"] == \"str\":\n columns.append(f\"{key} VARCHAR(255)\")\n elif data[\"type\"] == \"float\":\n columns.append(f\"{key} FLOAT\")\n elif data[\"type\"] == \"int\":\n columns.append(f\"{key} INT\")\n elif data[\"type\"] == \"datetime\":\n columns.append(f\"{key} DATETIME\")\n else:\n raise ValueError(f\"Invalid attribute type: {data['type']}\")\n\n # Construct the CREATE TABLE query\n create_query = f\"CREATE 
TABLE IF NOT EXISTS {table_name} ({', '.join(columns)})\"\n\n # Execute the query\n cursor.execute(create_query)\n conn.commit()\n\n finally:\n # Close the cursor and connection\n cursor.close()\n conn.close()\n\n return table_name\n\n\ndef table_exists(table_name: str) -> bool:\n cnx = mysql.connector.connect(**DATABASE_CONFIG)\n cursor = cnx.cursor()\n cursor.execute(\"SHOW TABLES;\")\n tables = []\n for name in cursor:\n tables.append(name[0])\n cursor.close()\n cnx.close()\n return table_name.lower() in tables\n\n\ndef match_schema(payload: dict) -> bool:\n if not table_exists(payload.get(\"type\")):\n return False\n entity_type = payload.get(\"type\").lower()\n cnx = mysql.connector.connect(**DATABASE_CONFIG)\n cursor = cnx.cursor()\n try:\n cursor.execute(f\"DESCRIBE {entity_type};\")\n data = [i[0] for i in cursor.fetchall()]\n for key, value in payload.items():\n if key in [\"id\", \"type\"]:\n continue\n elif not key in data:\n return False\n return True\n\n finally:\n cursor.close()\n cnx.close()\n\n\ndef get_columns(type: str) -> Union[List, Dict]:\n \"\"\" Get Columns of table\n \"\"\"\n try:\n cnx = mysql.connector.connect(**DATABASE_CONFIG)\n cursor = cnx.cursor()\n cursor.execute(f\"DESCRIBE {type};\")\n out = [{\"Field\": i[0], \"Type\": i[1]} for i in cursor.fetchall()]\n return out\n except Exception as e:\n return {\"error\" : str(e)}\n finally:\n cnx.close()\n cursor.close()\n\n\ndef insert_entity(payload: dict) -> bool:\n \"\"\"Saves the Entity in a Table.\n If there is no Table for this Type of entity, one is created.\n \"\"\"\n entity_id, entity_type = payload.get(\"id\"), payload.get(\"type\").lower()\n\n if not table_exists(entity_type):\n create_type_table(payload)\n if not match_schema(payload):\n return False\n\n # Construct the SQL insert query\n columns = [\"entity_id\"]\n values = [entity_id]\n\n for key, data in payload.items():\n if key in [\"id\", \"type\"]:\n continue\n\n # Extract the value and add it to the lists\n columns.append(key)\n values.append(data[\"value\"])\n\n placeholders = \", \".join([\"%s\" for _ in values])\n sql = f\"INSERT INTO {entity_type} ({', '.join(columns)}) VALUES ({placeholders})\"\n\n # Connect to the MariaDB database\n cnx = mysql.connector.connect(**DATABASE_CONFIG)\n cursor = cnx.cursor()\n\n try:\n # Execute the SQL query\n cursor.execute(sql, values)\n cnx.commit()\n\n finally:\n # Close the cursor and connection\n cursor.close()\n cnx.close()\n\n return True\n\n\ndef get_entity(entity_id: str, n: int = None) -> Dict[str, object]:\n \"\"\"Saves the Entity in a Table.\n If there is no Table for this Type of entity, one is created.\n \"\"\"\n if not \":\" in entity_id:\n return {\"Error\": \"Wrong Format :\"}\n entity_type, entity_id = entity_id.split(\":\")\n table_name = entity_type.lower()\n\n if not table_exists(entity_type):\n return {\"Error\": f\"No data for the type {entity_type}\"}\n if not n:\n n = 1\n\n # Connect to the MariaDB database\n conn = mysql.connector.connect(**DATABASE_CONFIG)\n cursor = conn.cursor()\n\n try:\n # Construct the SELECT query\n select_query = f\"SELECT * FROM {table_name} WHERE entity_id = %s ORDER BY timestamp DESC LIMIT {n}\"\n cursor.execute(select_query, (entity_id,))\n\n data = cursor.fetchmany(n)\n out = []\n for row in data:\n entry = {}\n for i in range(1, len(cursor.column_names)):\n entry[cursor.column_names[i]] = row[i]\n out.append(entry)\n\n return out\n\n finally:\n # Close the cursor and connection\n cursor.close()\n conn.close()\n\n\ndef modify_type_table(type: str, 
new_column: dict):\n    \"\"\" new_column example: \n    {\"field\":\"humidity\", \"type\":\"float\"}\n    \"\"\"\n    if not table_exists(type): return {\"error\": f\"Type {type} does not exist\"}\n    columns = get_columns(type)\n    for column in columns:\n        if column[\"Field\"] == new_column[\"field\"]:\n            return {\"error\": f\"Field {new_column['field']} already exists\"}\n    \n    if new_column[\"type\"] == \"str\":\n        datatype = \"VARCHAR(255)\"\n    elif new_column[\"type\"] == \"float\":\n        datatype = \"FLOAT\"\n    elif new_column[\"type\"] == \"int\":\n        datatype = \"INT\"\n    elif new_column[\"type\"] == \"datetime\":\n        datatype = \"DATETIME\"\n    else:\n        return {\"error\": f\"Datatype {new_column['type']} does not exist\"}\n    try:\n        cnx = mysql.connector.connect(**DATABASE_CONFIG)\n        cursor = cnx.cursor()\n        cursor.execute(f\"ALTER TABLE {type} ADD {new_column['field']} {datatype};\")\n        return {'success': f'added column: {type}'}\n    \n    except Exception as e:\n        return {'error':str(e)}\n    finally:\n        cursor.close()\n        cnx.close()\n\n\nif __name__ == \"__main__\":\n    print(\"Running test on database\")\n    print(f\"Testing connection: {table_exists('SoilSensor')}\")\n    payload = {\n        \"id\": \"01\",\n        \"type\": \"SoilSensor\",\n        \"temperature\": {\"type\": \"float\", \"value\": 15.0},\n        \"soil_humidity\": {\"type\": \"int\", \"value\": 28},\n        \"last_water\": {\"type\": \"datetime\", \"value\": \"2020-07-26 20:20:20\"},\n    }\n    payload2 = {\n        \"id\": \"02\",\n        \"type\": \"weather\",\n        \"temperature\": {\"type\": \"float\", \"value\": 15.0},\n    }\n    print(get_entity(\"weather:02\"))\n","repo_name":"solucca/HomeSystem","sub_path":"backend/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17272572816","text":"'other solution'\n\n\ndef isSolved(board):\n    for i in range(0,3):\n        if board[i][0] == board[i][1] == board[i][2] != 0:\n            return board[i][0]\n        elif board[0][i] == board[1][i] == board[2][i] != 0:\n            return board[0][i]\n\n    if board[0][0] == board[1][1] == board[2][2] != 0:\n        return board[0][0]\n    elif board[0][2] == board[1][1] == board[2][0] != 0:\n        return board[0][2]\n\n    elif 0 not in board[0] and 0 not in board[1] and 0 not in board[2]:\n        return 0\n    else:\n        return -1\n\n\n\n\n'just barely passed'\n\n\ndef alleq(lis):\n    if max(lis) == min(lis) and min(lis) > 0:\n        return max(lis)\n    return False\n\n# print(alleq([1,1,1]))\n\ndef isSolved(board):\n\n    for i in range(3):\n        if alleq(board[i]):\n            return alleq(board[i])\n    for j in range(3):\n        column = [ board[0][j], board[1][j], board[2][j]]\n        if alleq(column):\n            return alleq(column)\n    left = [board[0][0], board[1][1], board[2][2]]\n    if alleq(left): return alleq(left)\n    right = [board[0][2], board[1][1], board[2][0]]\n    if alleq(right): return alleq(right)\n    if any([ 0 in row for row in board]): return -1\n    return 0\n\n\n\n\n\ndef alleq(lis):\n    if max(lis) == min(lis):\n        return max(lis)\n    return False\n\n# print(alleq([1,1,1]))\n\ndef isSolved(board):\n    if any([ 0 in row for row in board]): return -1\n    for i in range(3):\n        if alleq(board[i]):\n            return alleq(board[i])\n    for j in range(3):\n        column = [ board[0][j], board[1][j], board[2][j]]\n        if alleq(column):\n            return alleq(column)\n    left = [board[0][0], board[1][1], board[2][2]]\n    if alleq(left): return alleq(left)\n    right = [board[0][2], board[1][1], board[2][0]]\n    if alleq(right): return alleq(right)\n    return 0\n\nboard = [[1,1,2],\n         [2,1,2],\n         [2,1,2]]\n
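# added note: player 1 holds the entire middle column of this board, so isSolved(board) is expected to print 1\n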
print(isSolved(board))\n'''\n# You can use test.expect(boolean, [optional] string) to test your code\ntest.expect(isSolved( [[1,1,1],\n                       [2,1,2],\n                       [2,1,2]] ) is -1)\n'''\n'''descriptor tic-tac-toe checker\n\nIf we were to set up a Tic-Tac-Toe game, we would want to know whether the board's current state is solved, wouldn't we? Our goal is to create a function that will check that for us!\n\nAssume that the board comes in the form of a 3x3 array, where the value is 0 if a spot is empty, 1 if it is an X, or 2 if it is an O, like so:\n\n[[0,0,1],\n [0,1,2],\n [2,1,0]]\n\nWe want our function to return -1 if the board is not solved yet, 1 if X won, 2 if O won, or 0 if it's a cat's game (i.e. a draw).\n\nYou may assume that the board passed in is valid in the context of a game of Tic-Tac-Toe.\n\n\n'''\n\n","repo_name":"shubham25namdeo/Leetcode","sub_path":"codewars/tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"32962659490","text":"#!/usr/bin/python\n#\n# UiHandler.py - handler for the RadioPi user interface\n#\n# (c) David Haworth\n\nimport os\nimport string\nimport traceback\n\nfrom LcdHandler import LcdHandler\nfrom MainMenu import MainMenu\nfrom Browser import Browser\nfrom StationList import StationList\nfrom MountMenu import MountMenu\nfrom SysMenu import SysMenu\nfrom NetMenu import NetMenu\nfrom MessageScreen import MessageScreen\nfrom AskYesNo import AskYesNo\n\n# UI modes\nmode_Disconnected = 0\nmode_StartupWait = 1\nmode_Startup = 2\nmode_Home = 3\nmode_Menu = 4\n\n# Home screen positions\nrow_artist\t= 1\ncol_artist\t= 1\nrow_album\t= 2\ncol_album\t= 1\nrow_title\t= 3\ncol_title\t= 1\nrow_time\t= 4\ncol_time\t= 1\nrow_dur\t\t= 4\ncol_dur\t\t= 6\nrow_vol\t\t= 4\ncol_vol\t\t= 18\t# 3 digits\n\n\nclass UiHandler:\n\tdef __init__(self, eq, mpd):\n\t\tself.eq = eq\t\t\t\t\t\t# Event queue\n\t\tself.mpd = mpd\t\t\t\t\t\t# MPD handler\n\t\tself.mode = mode_Disconnected\t\t# Current mode\n\t\tself.count = 0\n#----\n\t\tself.vol = -1\t\t\t\t\t\t# State of info on screen. 
Only update if changed.\n\t\tself.time = -1\n\t\tself.artist = \"\"\n\t\tself.album = \"\"\n\t\tself.title = \"\"\n\t\tself.dur = -1\n\t\tself.state = \"\"\n\t\tself.force = True\t\t\t\t\t# Forced update\n#----\n\t\tself.menu = None\n\t\tself.menustack = []\n\t\tself.lcd = LcdHandler()\n\t\tif self.lcd.Open():\n\t\t\tself.mode = mode_StartupWait\n\t\t\tself.count = 2\n\n#===========================================================\n# Enter the menu system\n#===========================================================\n\tdef Enter(self):\n\t\tself.menustack = []\t\t\t\t\t\t\t\t\t# Clear out existing menus (if any)\n\t\tself.menu = MainMenu(self, self.lcd, self.eq)\t\t# Create the main menu.\n\t\tself.menu.Show()\n\n#===========================================================\n# Enter the music browser at the specified place.\n#===========================================================\n\tdef EnterBrowser(self, dir):\n\t\tself.menustack.append(self.menu)\t\t\t\t\t# Push current menu.\n\t\tself.menu = Browser(self, self.lcd, self.eq, dir)\t# Create a browser.\n\t\tself.menu.Show()\n\n#===========================================================\n# Enter the station list menu.\n#===========================================================\n\tdef EnterStationList(self):\n\t\tself.menustack.append(self.menu)\t\t\t\t\t# Push current menu.\n\t\tself.menu = StationList(self, self.lcd, self.eq)\t# Create the menu.\n\t\tself.menu.Show()\n\n#===========================================================\n# Enter the mount menu.\n#===========================================================\n\tdef EnterMountMenu(self):\n\t\tself.menustack.append(self.menu)\t\t\t\t\t# Push current menu.\n\t\tself.menu = MountMenu(self, self.lcd, self.eq)\t\t# Create the menu.\n\t\tself.menu.Show()\n\n#===========================================================\n# Enter the network menu.\n#===========================================================\n\tdef EnterNetMenu(self):\n\t\tself.menustack.append(self.menu)\t\t\t\t\t# Push current menu.\n\t\tself.menu = NetMenu(self, self.lcd, self.eq)\t\t# Create the menu.\n\t\tself.menu.Show()\n\n#===========================================================\n# Enter the system menu.\n#===========================================================\n\tdef EnterSysMenu(self):\n\t\tself.menustack.append(self.menu)\t\t\t\t\t# Push current menu.\n\t\tself.menu = SysMenu(self, self.lcd, self.eq)\t\t# Create the menu.\n\t\tself.menu.Show()\n\n#===========================================================\n# Show a message.\n#===========================================================\n\tdef ShowMessage(self, m, ack):\n\t\tself.menustack.append(self.menu)\t\t\t\t\t\t\t# Push current menu.\n\t\tself.menu = MessageScreen(self, self.lcd, self.eq, m, ack)\t# Create the message screen.\n\t\tself.menu.Show()\n\n#===========================================================\n# Ask a question\n#===========================================================\n\tdef AskYesNo(self, m):\n\t\tself.menustack.append(self.menu)\t\t\t\t\t# Push current menu.\n\t\tself.menu = AskYesNo(self, self.lcd,self.eq, m)\t\t# Create the yes/no screen.\n\t\tself.menu.Show()\n\n#===========================================================\n# Receive the answer from AskYesNo\n#===========================================================\n\tdef Answer(self, ans):\n\t\tif len(self.menustack) == 0:\n\t\t\tself.menu = None\n\t\t\tself.ModeHome()\n\t\telse:\n\t\t\tself.menu = 
self.menustack.pop()\n\t\t\tself.menu.Event(ans)\n\t\t\tself.menu.Show()\n\n#===========================================================\n# Go back up a level.\n#===========================================================\n\tdef Back(self):\n\t\tif len(self.menustack) == 0:\n\t\t\tself.menu = None\n\t\t\tself.ModeHome()\n\t\telse:\n\t\t\tself.menu = self.menustack.pop()\n\t\t\tself.menu.Show()\n\n#===========================================================\n# Handle a timer event\n#===========================================================\n\tdef Timer(self):\n\t\tif self.mode == mode_Disconnected:\t\t# If disconnected, attempt to open\n\t\t\tif self.lcd.Open():\n\t\t\t\tself.mode = mode_StartupWait\n\t\t\t\tself.count = 2\n\n\t\telif self.mode == mode_StartupWait:\t\t# After successful open, let the terminal settle\n\t\t\tself.count -= 1\n\t\t\tif self.count <= 0:\n\t\t\t\tself.StartupScreen()\n\t\t\t\tself.mode = mode_Startup\n\t\t\t\tself.force = True\n\t\t\t\tself.count = 2\n\n\t\telif self.mode == mode_Startup:\t\t\t# Display splash screen.\n\t\t\ttry:\n\t\t\t\tself.count -= 1\n\t\t\t\tif self.count <= 0:\n\t\t\t\t\tself.mode = mode_Home\n\t\t\t\t\tself.force = True\n\t\t\t\t\tself.HomeScreen()\t\t\t# Redraw home screen\n\t\t\texcept:\n\t\t\t\tself.mode = mode_Disconnected\n\t\t\t\tprint(traceback.format_exc())\n\n\t\telif self.mode == mode_Home:\t\t\t# Update home screen\n\t\t\ttry:\n\t\t\t\tself.HomeScreen()\n\t\t\texcept:\n\t\t\t\tself.mode = mode_Disconnected\n\t\t\t\tprint(traceback.format_exc())\n\n\t\telse:\t\t\t\t\t\t\t\t\t# After some inactivity, revert to home screen.\n\t\t\tself.count -= 1\n\t\t\tif self.count <= 0:\n\t\t\t\tself.count = 0\n\t\t\t\tself.mode = mode_Home\n\t\t\t\tself.force = True\n\n\t\treturn False\n\n#===========================================================\n# Handle a user input event\n#===========================================================\n\tdef Event(self, evt):\n\t\tif self.mode < mode_Home:\t\t\t\t# Ignore events during start sequence.\n\t\t\treturn False\n\n\t\tself.count = 30\t\t\t\t\t\t\t# Restart the inactivity timer\n\n\t\tif self.mode == mode_Home:\t\t\t\t# On home screen, 'menu', 'ok', 'right' or 'Rl' enters menu.\n\t\t\t# Translate rotary control events and requeue those not for me.\n\t\t\tif evt == \"R+\":\n\t\t\t\tself.eq.PutEvent(\"vol+\")\n\t\t\t\treturn True\n\t\t\tif evt == \"R-\":\n\t\t\t\tself.eq.PutEvent(\"vol-\")\n\t\t\t\treturn True\n\t\t\tif evt == \"Rs\":\n\t\t\t\tself.eq.PutEvent(\">/||\")\n\t\t\t\treturn True\n\t\t\tif evt == \"Rl\":\t\t# Handle this one locally (don't requeue)\n\t\t\t\tevt = \"menu\"\n\n\t\t\tif evt == \"menu\" or evt == \"ok\" or evt == \"right\":\n\t\t\t\tself.mode = mode_Menu\n\t\t\t\tself.Enter()\n\t\t\t\treturn True\n\n\t\t\treturn False\t\t\t\t\t\t# Ignore all other events while on home screen.\n\n\t\t# Translate rotary control events for menu mode and handle locally (don't requeue)\n\t\tif evt == \"R+\":\n\t\t\tevt = \"down\"\n\t\telif evt == \"R-\":\n\t\t\tevt = \"up\"\n\t\telif evt == \"Rl\":\n\t\t\tevt = \"home\"\n\t\telif evt == \"Rs\":\n\t\t\tevt = \"ok\"\n\n\t\tif evt == \"home\":\t\t\t\t\t\t# Anywhere in the menus, 'home' exits back to the home screen\n\t\t\tself.menu = None\t\t\t\t\t# Clear the menu stack\n\t\t\tself.menustack = []\n\t\t\tself.ModeHome()\n\t\t\treturn True\n\n\t\t# Menu mode. 
Handle up/down/left/back as navigation on existing menu stack.\n\t\tif evt == \"back\" or evt == \"left\" :\n\t\t\tself.Back()\n\t\t\treturn True\n\n\t\telif evt == \"up\" or evt == \"R-\":\n\t\t\tself.menu.PtrUp()\n\t\t\treturn True\n\n\t\telif evt == \"down\" or evt == \"R+\":\n\t\t\tself.menu.PtrDown()\n\t\t\treturn True\n\n\t\t# All other events in menu mode: pass on to the individual menu\n\t\treturn self.menu.Event(evt)\n\n\n#===========================================================\n# StartupScreen() - display startup screen\n#===========================================================\n\n\tdef StartupScreen(self):\n\t\tif self.force:\n\t\t\tself.lcd.HomeAndClear()\n\t\t\tself.lcd.WriteAt(1, 1, \"RadioPi\\r\\n\")\n\t\t\tself.lcd.WriteAt(4, 1, \"waiting for mpd\")\n\t\tself.force = False\n\n#===========================================================\n# ModeHome() - switch to 'home' mode (leave menu)\n#===========================================================\n\n\tdef ModeHome(self):\n\t\tself.mode = mode_Home\n\t\tself.force = True\n\t\tself.HomeScreen()\n\n#===========================================================\n# HomeScreen() - read mpd status and display it\n#===========================================================\n\n\tdef HomeScreen(self):\n\t\tl_artist = \"\"\n\t\tl_album = \"\"\n\t\tl_title = \"\"\n\t\tl_dur = -1\n\t\tl_time = -1\n\t\tl_vol = -1\n\t\tl_file = \"\"\n\t\tl_name = \"\"\n\t\tl_state = \"\"\n\t\tinternetRadio = False;\n\n\t\ts = self.mpd.Status()\n\t\tif s:\n\t\t\tfor k in s.keys():\n\t\t\t\tif k == \"volume\":\n\t\t\t\t\tl_vol = int(s[\"volume\"])\n\t\t\t\telif k == \"state\":\n\t\t\t\t\tl_state = s[\"state\"]\n\t\t\t\telif k == \"elapsed\":\n\t\t\t\t\tl_time = int(float(s[\"elapsed\"]))\n\n\t\tif l_state == \"stop\":\n\t\t\tl_time = -1\n\n\t\ts = self.mpd.CurrentSong()\n\t\tif s:\n\t\t\tfor k in s.keys():\n\t\t\t\tif k == \"artist\":\n\t\t\t\t\tl_artist = s[\"artist\"]\n\t\t\t\telif k == \"album\":\n\t\t\t\t\tl_album = s[\"album\"]\n\t\t\t\telif k == \"title\":\n\t\t\t\t\tl_title = s[\"title\"]\n\t\t\t\telif k == \"time\":\n\t\t\t\t\tl_dur = int(s[\"time\"])\n\t\t\t\telif k == \"file\":\n\t\t\t\t\tl_file = s[\"file\"]\n\t\t\t\telif k == \"name\":\n\t\t\t\t\tl_name = s[\"name\"]\n\n\t\t\tif l_file[0:7] == \"http://\" or l_file[0:8] == \"https://\":\n\t\t\t\tinternetRadio = True\n\t\t\t\tl_artist = l_name\n\t\t\t\tif l_album == \"\":\n\t\t\t\t\tif string.find(l_title, \" - \") >= 0:\n\t\t\t\t\t\tl_album, l_title = string.split(l_title, \" - \", 1)\n\t\t\t\t\telif string.find(l_title, \"-\") >= 0:\n\t\t\t\t\t\tl_album, l_title = string.split(l_title, \"-\", 1)\n\t\t\telif l_title == \"\" and l_file != \"\":\n\t\t\t\tx, l_title = os.path.split(l_file)\n\n\t\telse:\n\t\t\t# No track\n\t\t\tl_artist = \" RadioPi\"\n\t\t\tl_album = \"\"\n\t\t\tl_title = \"===== no track =====\"\n\n\t\tif self.force:\n\t\t\tself.lcd.HomeAndClear()\n\n\t\tif self.force or self.artist != l_artist:\n\t\t\tself.artist = l_artist\n\t\t\tself.lcd.WriteAt(row_artist, col_artist, self.artist)\n\t\t\tself.lcd.ClearEol()\n\n\t\tif self.force or self.album != l_album:\n\t\t\tself.album = l_album\n\t\t\tself.lcd.WriteAt(row_album, col_album, self.album)\n\t\t\tself.lcd.ClearEol()\n\n\t\tif self.force or self.title != l_title:\n\t\t\tself.title = l_title\n\t\t\tself.lcd.WriteAt(row_title, col_title, self.title);\n\t\t\tself.lcd.ClearEol()\n\n\t\tif internetRadio:\n\t\t\tself.dur = -2\n\t\t\tself.time = -2\n\t\t\tbuf = \"%-16s\" % (\"Stream: \"+l_state)\n\t\t\tself.lcd.WriteAt(row_time, 
col_time, buf)\n\t\telif l_dur < 0:\n\t\t\tif self.force or self.dur != l_dur:\n\t\t\t\tself.dur = l_dur\n\t\t\t\tself.time = -1\n\t\t\t\tself.lcd.WriteAt(row_time, col_time, \" \")\n\t\telse:\n\t\t\tif self.force or self.time != l_time:\n\t\t\t\tself.time = l_time\n\t\t\t\tif l_time < 0:\n\t\t\t\t\ts_time = \"--:--\"\n\t\t\t\telse:\n\t\t\t\t\tmins = l_time / 60\n\t\t\t\t\tsecs = l_time % 60\n\t\t\t\t\ts_time = \"%02d:%02d\"%(mins, secs)\n\t\t\t\tself.lcd.WriteAt(row_time, col_time, s_time)\n\n\t\t\tif self.force or self.dur != l_dur:\n\t\t\t\tself.dur = l_dur\n\t\t\t\tmins = l_dur / 60\n\t\t\t\tsecs = l_dur % 60\n\t\t\t\ts_time = \"/%02d:%02d\"%(mins, secs)\n\t\t\t\tself.lcd.WriteAt(row_dur, col_dur, s_time)\n\t\t\n\t\tif self.force or self.vol != l_vol:\n\t\t\tself.vol = l_vol\n\t\t\tif l_vol < 0:\n\t\t\t\ts_vol = \"---\"\n\t\t\telse:\n\t\t\t\ts_vol = \"%3d\"%(l_vol)\n\t\t\tself.lcd.WriteAt(row_vol, col_vol, s_vol)\n\n\t\tself.force = False\n\n","repo_name":"TheLancashireman/RadioPi","sub_path":"controller/UiHandler.py","file_name":"UiHandler.py","file_ext":"py","file_size_in_byte":10967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23266172573","text":"'''\nGiven an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0?\nFind all unique triplets in the array which gives the sum of zero.\nNotice that the solution set must not contain duplicate triplets.\nInput: nums = [-1,0,1,2,-1,-4]\nOutput: [[-1,-1,2],[-1,0,1]]\n'''\n\n\nclass Solution:\n def threeSum(self, nums):\n if len(nums) < 3:\n return []\n nums.sort()\n ans = []\n i = 0\n while i < len(nums) - 2:\n left = i + 1\n right = len(nums) - 1\n while left < right:\n temp = nums[left] + nums[right] + nums[i]\n if temp == 0:\n ans.append([nums[left], nums[right], nums[i]])\n left += 1\n while left < right and nums[left] == nums[left - 1]:\n left += 1\n elif temp < 0:\n left += 1\n else:\n right -= 1\n while i < len(nums) - 2 and nums[i] == nums[i + 1]:\n i += 1\n i += 1\n return ans\n","repo_name":"ryanSoftwareEngineer/algorithms","sub_path":"arrays and matrices/15_3Sum.py","file_name":"15_3Sum.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33674869197","text":"import os\nimport re\nimport sys\nfrom maint.lib import maintainers\n\nfrom style.repo import GitRepo\n\n\ndef _printErrorQuit(error_message):\n \"\"\"\n Print an error message, followed my a help message and inform failure.\n\n @param error_message A message describing the error that caused the\n failure.\n \"\"\"\n print(error_message)\n\n print(\n \"The commit has been cancelled, but a copy of it can be found in \"\n + sys.argv[1]\n + \" : \"\n )\n\n print(\n \"\"\"\n--------------------------------------------------------------------------\n \"\"\"\n )\n print(open(sys.argv[1], \"r\").read())\n print(\n \"\"\"\n--------------------------------------------------------------------------\n \"\"\"\n )\n\n print(\n \"\"\"\nThe first line of a commit must contain one or more gem5 tags separated by\ncommas (see MAINTAINERS.yaml for the possible tags), followed by a colon and\na commit title. There must be no leading nor trailing whitespaces.\n\nThis header line must then be followed by an empty line. 
A detailed message,\nalthough highly recommended, is not mandatory and can follow that empty line.\n\ne.g.:\n cpu: Refactor branch predictors\n\n Refactor branch predictor code to improve its readability, moving functions\n X and Y to the base class...\n\ne.g.:\n mem,mem-cache: Improve packet class readability\n\n The packet class...\n\"\"\"\n )\n sys.exit(1)\n\n\ndef _validateTags(commit_header):\n \"\"\"\n Check if all tags in the commit header belong to the list of valid\n gem5 tags.\n\n @param commit_header The first line of the commit message.\n \"\"\"\n\n # List of valid tags\n maintainer_dict = maintainers.Maintainers.from_file()\n valid_tags = [tag for tag, _ in maintainer_dict]\n\n # Remove non-tag 'pmc' and add special tags not in MAINTAINERS.yaml\n valid_tags.remove(\"pmc\")\n valid_tags.extend([\"RFC\", \"WIP\"])\n\n tags = \"\".join(commit_header.split(\":\")[0].split()).split(\",\")\n if any(tag not in valid_tags for tag in tags):\n invalid_tag = next((tag for tag in tags if tag not in valid_tags))\n _printErrorQuit(\"Invalid Gem5 tag: \" + invalid_tag)\n\n\n# Go to git directory\nos.chdir(GitRepo().repo_base())\n\n# Get the commit message\ncommit_message = open(sys.argv[1]).read()\n\n# The first line of a commit must contain at least one valid gem5 tag, and\n# a commit title\ncommit_message_lines = commit_message.splitlines()\ncommit_header = commit_message_lines[0]\ncommit_header_match = re.search(\n \"^(fixup! )?(\\S[\\w\\-][,\\s*[\\w\\-]+]*:.+\\S$)\", commit_header\n)\nif commit_header_match is None:\n _printErrorQuit(\"Invalid commit header\")\nif commit_header_match.group(1) == \"fixup! \":\n sys.exit(0)\n_validateTags(commit_header_match.group(2))\n\n# Make sure commit title does not exceed threshold. This line is limited to\n# a smaller number because version control systems may add a prefix, causing\n# line-wrapping for longer lines\ncommit_title = commit_header.split(\":\")[1]\nmax_header_size = 65\nif len(commit_header) > max_header_size:\n _printErrorQuit(\n \"The commit header (tags + title) is too long (\"\n + str(len(commit_header))\n + \" > \"\n + str(max_header_size)\n + \")\"\n )\n\n# Then there must be at least one empty line between the commit header and\n# the commit description\nif commit_message_lines[1] != \"\":\n _printErrorQuit(\n \"Please add an empty line between the commit title and \"\n \"its description\"\n )\n\n# Encourage providing descriptions\nif re.search(\n \"^(Signed-off-by|Change-Id|Reviewed-by):\", commit_message_lines[2]\n):\n print(\"Warning: Commit does not have a description\")\n\nsys.exit(0)\n","repo_name":"gem5/gem5","sub_path":"util/git-commit-msg.py","file_name":"git-commit-msg.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":1196,"dataset":"github-code","pt":"78"} +{"seq_id":"21274466922","text":"import pyttsx3\r\nimport speech_recognition as sr\r\nimport datetime\r\nimport os\r\nimport openai\r\nimport wikipedia\r\nimport subprocess\r\nimport pywhatkit\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom pyautogui import moveTo,write,leftClick\r\nimport pyjokes\r\nimport webbrowser\r\nimport pyautogui\r\nimport datetime\r\nimport colorama\r\nfrom colorama import Fore\r\nimport sys\r\nfrom PyQt5 import QtGui\r\nfrom PyQt5.QtCore import QTimer,QTime,QDate\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom jarvisSuperUI import Ui_Form\r\n\r\n\r\nengine = pyttsx3.init('sapi5')\r\nvoices = 
engine.getProperty('voices')\r\nengine.setProperty('voices',voices[0].id)\r\n\r\n\r\n \r\n \r\n \r\ndef speak(audio):\r\n \r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\nclass MainThread(QThread):\r\n def __init__(self):\r\n \r\n super(MainThread,self).__init__()\r\n \r\n def run(self):\r\n self.TaskExection()\r\n\r\n def commands(self):\r\n \r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n \r\n r.pause_threshold = 1\r\n r.adjust_for_ambient_noise(source , duration=1)\r\n audio=r.listen(source)\r\n try:\r\n \r\n query = r.recognize_google(audio, language='en-in')\r\n print(f\"You Just Said: {query}\\n\")\r\n except Exception as e:\r\n print(e)\r\n \r\n query=\"none\"\r\n \r\n return query\r\n \r\n\r\n def wishings(self):\r\n hour = int(datetime.datetime.now().hour)\r\n if hour >=0 and hour<12:\r\n \r\n speak('Good morning BOSS')\r\n elif hour>=12 and hour<17:\r\n \r\n speak(\"Good Afternoon BOSS\")\r\n elif hour >=17 and hour<21:\r\n \r\n speak(\"Good Evening BOSS\")\r\n else:\r\n \r\n speak(\"Good Night BOSS\")\r\n\r\n\r\n def TaskExection(self):\r\n \r\n def open_another_file(self):\r\n \r\n speak('you switched to hand sign mode')\r\n os.startfile(\"hand_assistant.py\")\r\n \r\n self.wishings()\r\n while True:\r\n self.query = self.commands().lower()\r\n if 'time' in self.query:\r\n strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n speak(\"Sir, The time is: \" + strTime)\r\n print(strTime)\r\n elif 'question' in self.query or 'doubts' in self.query or 'question' in self.query or 'doubt' in self.query:\r\n openai.api_key = \"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\"\r\n speak('ask anything')\r\n prompt = self.commands().lower()\r\n if prompt in \"none\":\r\n continue\r\n print(\"Your query:\", prompt)\r\n response = openai.Completion.create(engine=\"text-davinci-002\", prompt=prompt, max_tokens=2000)\r\n a= response[\"choices\"][0][\"text\"]\r\n print(a)\r\n speak(a)\r\n\r\n elif 'wikipedia' in self.query:\r\n speak(\"Searching in wikipedia\")\r\n try:\r\n self.query=self.query.replace(\"wikipedia\", '')\r\n results = wikipedia.summary(self.query, sentences=1)\r\n speak(\"According to Wikipedia..\")\r\n print(results)\r\n speak(results)\r\n except:\r\n print(\"No results found..\")\r\n speak(\"no results found\")\r\n\r\n elif 'play' in self.query:\r\n playquery=self.query.replace('play','')\r\n speak(\"Playing \" + playquery)\r\n pywhatkit.playonyt(playquery)\r\n elif 'gmail' in self.query:\r\n speak('Opening Gmail....')\r\n url = \"https://www.gmail.com\"\r\n webbrowser.open_new_tab(url)\r\n elif 'youtube' in self.query:\r\n speak('Opening....')\r\n url = \"https://www.youtube.com\"\r\n webbrowser.open_new_tab(url)\r\n elif 'music' in self.query:\r\n speak('Playing')\r\n music_dir = 'fav songs'\r\n songs = os.listdir(music_dir)\r\n print(songs) \r\n os.startfile(os.path.join(music_dir, songs[0])) \r\n \r\n elif 'temperature' in self.query:\r\n speak('sure, Tell me the city')\r\n print(\"Tell me the city\")\r\n city = self.commands().lower()\r\n print(city)\r\n url = f\"https://www.google.com/search?q=temperature+{city}\"\r\n r = requests.get(url)\r\n soup = BeautifulSoup(r.text, \"html.parser\")\r\n temp = soup.find(\"div\", class_=\"BNeawe\").text\r\n temp = temp.split(\":\")[-1].split()[0]\r\n z= f\"The temperature in {city} is {temp}.\"\r\n print(z)\r\n speak(z)\r\n continue\r\n \r\n\r\n elif 'screenshot ' in self.query:\r\n now = datetime.datetime.now()\r\n filename = \"screenshot{}.png\".format(now.strftime(\"%Y-%m-%d-%H-%M-%S\"))\r\n 
pyautogui.screenshot(filename)\r\n\r\n \r\n\r\n\r\n elif 'joke' in self.query:\r\n jarvisJoke = pyjokes.get_joke()\r\n print(jarvisJoke)\r\n speak(jarvisJoke)\r\n else:\r\n speak(\"please say the command again\")\r\n\r\ndef open_another_file():\r\n \r\n speak('you switched to hand sign mode')\r\n \r\n os.startfile(\"hand_assistant.py\")\r\n \r\n \r\n \r\n\r\nstartExecution = MainThread()\r\n\r\nclass Main(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.ui = Ui_Form()\r\n self.ui.setupUi(self)\r\n\r\n self.ui.startPushButton.clicked.connect(self.startTask)\r\n self.ui.quitPushButton.clicked.connect(open_another_file)\r\n\r\n def startTask(self, text):\r\n self.ui.movie = QtGui.QMovie(\"GUI files\\\\telex.gif\")\r\n self.ui.ironManBackground.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n # ironmanGIF\r\n self.ui.movie = QtGui.QMovie(\"GUI files\\\\listeningGIF.gif\")\r\n self.ui.ironManGIF.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n # dateLabel\r\n self.ui.movie = QtGui.QMovie(\"GUI files\\\\gggf.jpg\")\r\n self.ui.dateLabel.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n # timeLabel\r\n self.ui.movie = QtGui.QMovie(\"GUI files\\\\gggf.jpg\")\r\n self.ui.timeLabel.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n # startLabelNotButton\r\n self.ui.movie = QtGui.QMovie(\"GUI files\\\\20230401_185949_0000.png\")\r\n self.ui.startLabelNotButton.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n # quitLabelNotButton\r\n self.ui.movie = QtGui.QMovie(\"GUI files\\\\20230401_191740_0000.png\")\r\n self.ui.quitLabelNotButton.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n # earthGIF\r\n self.ui.movie = QtGui.QMovie(\"GUI files\\\\Earth.gif\")\r\n self.ui.earthGIF.setMovie(self.ui.movie)\r\n self.ui.movie.start()\r\n\r\n timer = QTimer(self)\r\n timer.timeout.connect(self.showTime)\r\n timer.start(1000)\r\n startExecution.start()\r\n\r\n def showTime(self):\r\n currentTime = QTime.currentTime()\r\n currentDate = QDate.currentDate()\r\n labelTime = currentTime.toString('hh:mm:ss')\r\n labelDate = currentDate.toString(Qt.ISODate)\r\n self.ui.dateTextBrowser.setText(f\"Date: {labelDate}\")\r\n self.ui.timeTextBrowser.setText(f\"Date: {labelTime}\")\r\n \r\n\r\napp = QApplication(sys.argv)\r\njarvis = Main()\r\njarvis.show()\r\nexit(app.exec_())\r\n","repo_name":"Aseefmohamed/Virtual-Personal-Assistant-by-using-Hand-Gesture-and-Voice-Assistant-for-Disabilities","sub_path":"Telex/jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30956839533","text":"import re\nimport sys\n\n\ndef main():\n print(parse(input(\"HTML: \")))\n\n\ndef parse(s):\n try:\n matches = re.search(r'^(?:.+)\"(?:https?.+youtube.+com/)(?:embed/)(.+?)\"(?:.+)$', s)\n if not matches:\n return \"None\"\n else:\n html = (\"https://youtu.be/\" + matches.group(1))\n return html\n\n except(ValueError):\n return \"None\"\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"KimagureWizard/CS50","sub_path":"cs50p/week7/watch_f/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"456024227","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 20 20:48:46 2023\n\n@author: dev\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\ndef threshold_mask(mask):\n _, 
thresholded = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)\n    return thresholded\n\ndef combine_masks(mask1, mask2):\n    combined = np.maximum(mask1, mask2)\n    return combined\n\n# Path to the folders containing the masks\nmask_folder1 = './ISIC 2016 maskgen outputs for UNET/MASKS/BW/'\nmask_folder2 = './ISIC 2016 maskgen outputs for UNET/MASKS/REFERENCE/'\n\n# Get the list of mask file names from folder1\nmask_files = os.listdir(mask_folder1)\n\n# Iterate over each mask file\nfor file in tqdm(mask_files, total=1279):\n    # Load masks from both folders\n    mask1_path = os.path.join(mask_folder1, file)\n    mask2_path = os.path.join(mask_folder2, file)\n    \n    mask1 = cv2.imread(mask1_path, cv2.IMREAD_GRAYSCALE)\n    mask2 = cv2.imread(mask2_path, cv2.IMREAD_GRAYSCALE)\n    \n    # Perform thresholding\n    mask1 = threshold_mask(mask1)\n    mask2 = threshold_mask(mask2)\n    \n    # Combine the masks\n    combined_mask = combine_masks(mask1, mask2)\n    \n    # Save the resulting combined mask\n    output_path = './ISIC 2016 maskgen outputs for UNET/MASKS/' + file\n    if not cv2.imwrite(output_path, combined_mask):\n        raise Exception('Image not saved')\n","repo_name":"DevBhuyan/Unsup-Segmentation","sub_path":"union.py","file_name":"union.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40004092258","text":"from flask import Flask, request, jsonify\nimport uuid\nfrom langchain.document_loaders import TextLoader\nfrom langchain.indexes import VectorstoreIndexCreator\nimport constants\n\nfrom google.cloud import texttospeech\nimport os, json\nimport openai\nfrom typing import Tuple\nfrom flask_cors import CORS, cross_origin\n\nimport io\nimport pydub\nimport ffmpeg\n\napp = Flask(__name__)\ncors = CORS(app)\n\nopenai.api_key = constants.OPEN_AI_KEY\nos.environ[\"OPENAI_API_KEY\"] = constants.OPEN_AI_KEY\n\nhost = '127.0.0.1'\nport = 8080\ndb = {}\nloader = TextLoader('resources/jio.txt')\nindex = VectorstoreIndexCreator().from_loaders([loader])\n\n@app.route('/healthcheck')\ndef hello_world():\n    return 'Team Ode to Code goes to Dubai'\n\n\n@app.route('/query', methods=['POST'])\ndef raise_query():\n    print(\"here\")\n    session_id = request.headers.get(\"session-id\")\n    print(session_id)\n    # print(session_id)\n    # print(str(request.files.keys()))\n    # print(str(request.form.keys()))\n    # print(str(request.get_json()))\n    text = None\n    print(request)\n    if 'audio' in request.files:\n        text = convert_audio_to_text(request.files['audio'])\n\n    elif 'text' in request.get_json():\n        text = request.json['text']\n    else:\n        return jsonify({'message': 'No file part in the request'}), 400\n    print(f\"Incoming text: {text}\")\n    translated_text, language = translate_to_english(text)\n    if translated_text:\n        english_response, category = append_user_query_to_conversation(session_id, translated_text)\n        regional_response = translate_from_english(f\"{language};{english_response}\")\n        # convert_text_to_audio(language+\"-IN\", regional_response)\n        return jsonify({'category': category, 'message': regional_response}), 200\n    else:\n        return jsonify({'message': 'Could not understand/translate the message, please try again'}), 200\n\n\n@app.route('/createSession', methods=['POST'])\ndef create_session():\n    session_uuid = str(uuid.uuid1())\n    db[session_uuid] = \"\"\n    return session_uuid\n\n\n@app.route('/activeSessions', methods=['GET'])\ndef get_all_active_sessions():\n    return str(db.keys())\n\n\ndef convert_audio_to_text(audio_file) -> str:\n    
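\"\"\"added note: transcribe the uploaded audio with OpenAI Whisper, using a temporary input.mp3 written to disk\"\"\"\n    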
audio_file.save(\"input.mp3\")\n audio = open(\"input.mp3\", \"rb\")\n transcription = openai.Audio.transcribe(\"whisper-1\", audio)\n audio.close()\n os.remove('input.mp3')\n return transcription.text.strip()\n\ndef translate_to_english(text) -> Tuple[str, str]:\n prompt = f\"\"\"Assume you are a assistant that translates source language to English. \n Response should have the source language in BCP-47 format and then the translation in English.\n Dont use your brain, just translate.\n Q: what is going on\n A: en, what is going on\n \n Q: nenu chaala manchi manishini\n A: te, I am a very good human\n \n Q: {text}\n A:\n \"\"\"\n translate_response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=prompt,\n max_tokens=constants.MAX_TOKENS,\n temperature=0,\n )\n try:\n split_values = translate_response.choices[0].text.strip().split(\",\")\n language = str(split_values[0])\n if language.startswith(\"ur\"):\n language = \"hi\"\n english_text = split_values[1].strip()\n except Exception as e:\n print(f\"ERROR: Translated text by model:<{translate_response.choices[0].message.content.strip()}>\")\n english_text = None\n language = \"en\"\n print(f\"English_text:{english_text}\")\n return english_text, language\n\ndef translate_from_english(text) -> str:\n print(f\"Text for translation:{text}\")\n prompt = f\"\"\"Assume you are a assistant that translates English to target language. \n Target language is specified in BCP-47 format as first two characters.\n Dont translate if already in English\n Q: en;what is going on\n A: what is going on\n \n Q: te;I am a very good human\n A: నేను ఒక చాలా మంచి మానవుడు\n \n Q: {text}\n A:\n \"\"\"\n reverse_translate_response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=prompt,\n max_tokens=constants.MAX_TOKENS,\n temperature=0,\n )\n print(f\"Translated to regional:<{reverse_translate_response.choices[0].text}>\")\n return reverse_translate_response.choices[0].text.strip()\n\n\ndef append_user_query_to_conversation(session_id: str, text: str) -> Tuple[str, str]:\n try:\n # conversation = db.get(session_id)\n conversation = db.get(session_id)\n if not conversation:\n conversation = f\"\"\"You are a helpful and knowledgeable customer service assistant. \n Below are the categories which the request can be a part of.\n {constants.JIO_CATEGORIES_AND_SAMPLE_QA} \n Send the response in json format.\n If you dont understand, send category as Others.\n \"\"\"\n conversation += f\"\\nCustomer: {text}\"\n query_conversation = conversation + \"\"\"\\nAssistant: {\"Category\": {}, \"Solution\": {}}\"\"\"\n else:\n conversation += f\"\\nCustomer: {text}\"\n query_conversation = conversation + \"\"\"\\nAssistant: {\"Category\": {}, \"Solution\": {}}\"\"\"\n # print(f\"Input to the model: {text}\")\n # print(f\"Input to the model(conversation): {conversation}\")\n model_response = index.query(query_conversation)\n print(f\"Response from the model: {model_response}\")\n model_json = json.loads(model_response)\n conversation += str(\"\\nAssistant: \") + str(model_json)\n db[session_id] = conversation\n print(conversation)\n print(\"\\n------------------\\n\")\n return model_json[\"Solution\"], model_json[\"Category\"]\n except Exception as e:\n return \"I am unable to answer this. 
Our executive will reach out to you within 2 hours.\", \"Others\"\n#shree\ndef classify_query(query: str) -> str:\n pass\n\n\n\n\nif __name__ == '__main__':\n app.run(host, port)\n #initialise all models\n\n","repo_name":"vishalpuri674/Multilingual-AI-Chatbot-with-Advanced-Translation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23428274602","text":"import json_validate\nimport json_to_schema_validate\nimport os\n\n\ndef validate(data, schema, schema_name, data_name):\n des_data = json_validate.json_deserialize(data, file_name=data_name)\n des_schema = json_validate.json_deserialize(schema, file_name=schema_name)\n json_to_schema_validate.required_field_validate(schema=des_schema, data=des_data, schema_name=schema_name, data_name=data_name)\n json_to_schema_validate.properties_field_validate(schema=des_schema, data=des_data, schema_name=schema_name, data_name=data_name)\n\n\nfor schema_name in os.listdir(path='task_folder/schema'):\n schema_path = \"task_folder/schema/\"+schema_name\n schema_file = open(schema_path)\n schema = schema_file.read()\n for data_name in os.listdir(path='task_folder/event'):\n data_path = \"task_folder/event/\"+data_name\n data_file = open(data_path)\n data = data_file.read()\n validate(data=data, schema=schema, schema_name=schema_name, data_name=data_name)\n data_file.close()\n schema_file.close()\n\n\n","repo_name":"Argushub/TestTask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37599008992","text":"import hashlib\nimport re\nimport time\n\nfrom .common import InfoExtractor\nfrom ..networking.exceptions import HTTPError\nfrom ..utils import (\n ExtractorError,\n classproperty,\n float_or_none,\n traverse_obj,\n url_or_none,\n)\n\n\nclass DacastBaseIE(InfoExtractor):\n _URL_TYPE = None\n\n @classproperty\n def _VALID_URL(cls):\n return fr'https?://iframe\\.dacast\\.com/{cls._URL_TYPE}/(?P[\\w-]+)/(?P[\\w-]+)'\n\n @classproperty\n def _EMBED_REGEX(cls):\n return [rf']+\\bsrc=[\"\\'](?P{cls._VALID_URL})']\n\n _API_INFO_URL = 'https://playback.dacast.com/content/info'\n\n @classmethod\n def _get_url_from_id(cls, content_id):\n user_id, media_id = content_id.split(f'-{cls._URL_TYPE}-')\n return f'https://iframe.dacast.com/{cls._URL_TYPE}/{user_id}/{media_id}'\n\n @classmethod\n def _extract_embed_urls(cls, url, webpage):\n yield from super()._extract_embed_urls(url, webpage)\n for content_id in re.findall(\n rf']+\\bsrc=[\"\\']https://player\\.dacast\\.com/js/player\\.js\\?contentId=([\\w-]+-{cls._URL_TYPE}-[\\w-]+)[\"\\']', webpage):\n yield cls._get_url_from_id(content_id)\n\n\nclass DacastVODIE(DacastBaseIE):\n _URL_TYPE = 'vod'\n _TESTS = [{\n 'url': 'https://iframe.dacast.com/vod/acae82153ef4d7a7344ae4eaa86af534/1c6143e3-5a06-371d-8695-19b96ea49090',\n 'info_dict': {\n 'id': '1c6143e3-5a06-371d-8695-19b96ea49090',\n 'ext': 'mp4',\n 'uploader_id': 'acae82153ef4d7a7344ae4eaa86af534',\n 'title': '2_4||Adnexal mass characterisation: O-RADS US and MRI||N. 
Bharwani, London/UK',\n 'thumbnail': 'https://universe-files.dacast.com/26137208-5858-65c1-5e9a-9d6b6bd2b6c2',\n },\n 'params': {'skip_download': 'm3u8'},\n }]\n _WEBPAGE_TESTS = [{\n 'url': 'https://www.dacast.com/support/knowledgebase/how-can-i-embed-a-video-on-my-website/',\n 'info_dict': {\n 'id': 'b6674869-f08a-23c5-1d7b-81f5309e1a90',\n 'ext': 'mp4',\n 'title': '4-HowToEmbedVideo.mp4',\n 'uploader_id': '3b67c4a9-3886-4eb1-d0eb-39b23b14bef3',\n 'thumbnail': 'https://universe-files.dacast.com/d26ab48f-a52a-8783-c42e-a90290ba06b6.png',\n },\n 'params': {'skip_download': 'm3u8'},\n }, {\n 'url': 'https://gist.githubusercontent.com/bashonly/4ad249ef2910346fbdf3809b220f11ee/raw/87349778d4af1a80b1fcc3beb9c88108de5858f5/dacast_embeds.html',\n 'info_dict': {\n 'id': 'e7df418e-a83b-7a7f-7b5e-1a667981e8fa',\n 'ext': 'mp4',\n 'title': 'Evening Service 2-5-23',\n 'uploader_id': '943bb1ab3c03695ba85330d92d6d226e',\n 'thumbnail': 'https://universe-files.dacast.com/337472b3-e92c-2ea4-7eb7-5700da477f67',\n },\n 'params': {'skip_download': 'm3u8'},\n }]\n\n def _real_extract(self, url):\n user_id, video_id = self._match_valid_url(url).group('user_id', 'id')\n query = {'contentId': f'{user_id}-vod-{video_id}', 'provider': 'universe'}\n info = self._download_json(self._API_INFO_URL, video_id, query=query, fatal=False)\n access = self._download_json(\n 'https://playback.dacast.com/content/access', video_id,\n note='Downloading access JSON', query=query, expected_status=403)\n\n error = access.get('error')\n if error in ('Broadcaster has been blocked', 'Content is offline'):\n raise ExtractorError(error, expected=True)\n elif error:\n raise ExtractorError(f'Dacast API says \"{error}\"')\n\n hls_url = access['hls']\n hls_aes = {}\n\n if 'DRM_EXT' in hls_url:\n self.report_drm(video_id)\n elif '/uspaes/' in hls_url:\n # From https://player.dacast.com/js/player.js\n ts = int(time.time())\n signature = hashlib.sha1(\n f'{10413792000 - ts}{ts}YfaKtquEEpDeusCKbvYszIEZnWmBcSvw').digest().hex()\n hls_aes['uri'] = f'https://keys.dacast.com/uspaes/{video_id}.key?s={signature}&ts={ts}'\n\n for retry in self.RetryManager():\n try:\n formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4', m3u8_id='hls')\n except ExtractorError as e:\n # CDN will randomly respond with 403\n if isinstance(e.cause, HTTPError) and e.cause.status == 403:\n retry.error = e\n continue\n raise\n\n return {\n 'id': video_id,\n 'uploader_id': user_id,\n 'formats': formats,\n 'hls_aes': hls_aes or None,\n **traverse_obj(info, ('contentInfo', {\n 'title': 'title',\n 'duration': ('duration', {float_or_none}),\n 'thumbnail': ('thumbnailUrl', {url_or_none}),\n })),\n }\n\n\nclass DacastPlaylistIE(DacastBaseIE):\n _URL_TYPE = 'playlist'\n _TESTS = [{\n 'url': 'https://iframe.dacast.com/playlist/943bb1ab3c03695ba85330d92d6d226e/b632eb053cac17a9c9a02bcfc827f2d8',\n 'playlist_mincount': 28,\n 'info_dict': {\n 'id': 'b632eb053cac17a9c9a02bcfc827f2d8',\n 'title': 'Archive Sermons',\n },\n }]\n _WEBPAGE_TESTS = [{\n 'url': 'https://gist.githubusercontent.com/bashonly/7efb606f49f3c6e07ea0327de5a661d1/raw/05a16eac830245ea301fb0a585023bec71e6093c/dacast_playlist_embed.html',\n 'playlist_mincount': 28,\n 'info_dict': {\n 'id': 'b632eb053cac17a9c9a02bcfc827f2d8',\n 'title': 'Archive Sermons',\n },\n }]\n\n def _real_extract(self, url):\n user_id, playlist_id = self._match_valid_url(url).group('user_id', 'id')\n info = self._download_json(\n self._API_INFO_URL, playlist_id, note='Downloading playlist JSON', query={\n 'contentId': 
f'{user_id}-playlist-{playlist_id}',\n 'provider': 'universe',\n })['contentInfo']\n\n def entries(info):\n for video in traverse_obj(info, ('features', 'playlist', 'contents', lambda _, v: v['id'])):\n yield self.url_result(\n DacastVODIE._get_url_from_id(video['id']), DacastVODIE, video['id'], video.get('title'))\n\n return self.playlist_result(entries(info), playlist_id, info.get('title'))\n","repo_name":"yt-dlp/yt-dlp","sub_path":"yt_dlp/extractor/dacast.py","file_name":"dacast.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","stars":60520,"dataset":"github-code","pt":"78"} +{"seq_id":"31313237501","text":"# https://leetcode.com/problems/reverse-words-in-a-string-iii\n\nclass Solution:\n def reverseWords(self, s: str) -> str:\n words = s.split(' ')\n \n for i in range(len(words)):\n words[i] = words[i][::-1]\n \n return ' '.join(words)\n\nSolution().reverseWords(s = \"Let's take LeetCode contest\") == \"s'teL ekat edoCteeL tsetnoc\"\nSolution().reverseWords(s = \"God Ding\") == \"doG gniD\"\n","repo_name":"milanogc/leetcode-python3","sub_path":"reverse-words-in-a-string-iii.py","file_name":"reverse-words-in-a-string-iii.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37020868870","text":"\"\"\"\nModifications made by Achilleas Koutsou:\n\n- Original functions renamed to `distance` and `pairwise` to comply with the\nother modules in spikerlib.\n\n- Added `pairwise_mp` and `interval` functions. See individual\nfunction docstrings for description.\n\n\nKreuz SPIKE-distance\nKreuz, Chicharro, Greschner, Andrzejak, 2011, Journal of Neuroscience Methods.\nand\nKreuz T, Chicharro D, Houghton C, Andrzejak RG, Mormann F, 2013,\nJournal of Neurophysiology.\n\n\n######################## Original documentation follows #######################\n\nComment by Thomas Kreuz:\n\nThis Python code (including all further comments) was written by Jeremy Fix (see http://jeremy.fix.free.fr/),\nbased on Matlab code written by Thomas Kreuz.\n\nThe SPIKE-distance is described in this paper:\n\nKreuz T, Chicharro D, Houghton C, Andrzejak RG, Mormann F:\nMonitoring spike train synchrony.\nJ Neurophysiol 109, 1457-1472 (2013).\n\nThe Matlab codes as well as more information can be found at http://www.fi.isc.cnr.it/users/thomas.kreuz/sourcecode.html.\n\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nimport multiprocessing\nimport itertools\n\n\ndef _find_corner_spikes(t, train, ibegin, start, end):\n \"\"\"\n Return the times (t1,t2) of the spikes in train[ibegin:]\n such that t1 < t and t2 >= t\n \"\"\"\n if(ibegin == 0):\n tprev = start\n else:\n tprev = train[ibegin-1]\n for idts, ts in enumerate(train[ibegin:]):\n if(ts >= t):\n return np.array([tprev, ts]), idts+ibegin\n tprev = ts\n return np.array([train[-1],end]), idts+ibegin\n\n\ndef distance(st_one, st_two, start, end, nsamples):\n \"\"\"\n\n Computes the bivariate SPIKE distance of Kreuz et al. (2012) t1 and t2 are\n 1D arrays with the spiking times of two neurones It returns the array of\n the values of the distance between time ti and te with N samples. 
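For example (illustrative spike times only):\n\n t, d = distance(np.array([0.1, 0.5, 0.9]), np.array([0.2, 0.6]), 0.0, 1.0, 100)\n\n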
The\n arrays t1, t2 and values ti, te are unit less\n\n \"\"\"\n t = np.linspace(start+(end-start)/nsamples, end, nsamples)\n st_one = np.insert(st_one, 0, start)\n st_one = np.append(st_one, end)\n st_two = np.insert(st_two, 0, start)\n st_two = np.append(st_two, end)\n\n # We compute the corner spikes for all the time instants we consider\n # corner_spikes is a 4 column matrix [t, tp1, tf1, tp2, tf2]\n corner_spikes = np.zeros((nsamples,5))\n\n ibegin_one = 0\n ibegin_two = 0\n corner_spikes[:,0] = t\n for itc, tc in enumerate(t):\n corner_spikes[itc,1:3], ibegin_t1 = _find_corner_spikes(tc, st_one,\n ibegin_one,\n start, end)\n corner_spikes[itc,3:5], ibegin_t2 = _find_corner_spikes(tc, st_two,\n ibegin_two,\n start, end)\n\n #print corner_spikes\n xisi = np.zeros((nsamples,2))\n xisi[:,0] = corner_spikes[:,2] - corner_spikes[:,1]\n xisi[:,1] = corner_spikes[:,4] - corner_spikes[:,3]\n norm_xisi = np.sum(xisi,axis=1)**2.0\n\n # We now compute the smallest distance between the spikes in st_two\n # and the corner spikes of st_one\n # with np.tile(st_two,(N,1)) we build a matrix :\n # np.tile(st_two,(N,1)) = [st_two st_two st_two]' -\n # np.tile(reshape(corner_spikes,(N,1)), st_two.size) =\n # [corner corner corner]'\n\n dp1 = np.min(np.fabs(np.tile(st_two,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,1],(nsamples,1)),\n st_two.size)),\n axis=1)\n df1 = np.min(np.fabs(np.tile(st_two,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,2],(nsamples,1)),\n st_two.size)),\n axis=1)\n # And the smallest distance between the spikes in st_one and the corner spikes of st_two\n dp2 = np.min(np.fabs(np.tile(st_one,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,3],\n (nsamples,1)),st_one.size)),\n axis=1)\n df2 = np.min(np.fabs(np.tile(st_one,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,4],(nsamples,1)),\n st_one.size)),\n axis=1)\n\n xp1 = t - corner_spikes[:,1]\n xf1 = corner_spikes[:,2] - t\n xp2 = t - corner_spikes[:,3]\n xf2 = corner_spikes[:,4] - t\n\n S1 = (dp1 * xf1 + df1 * xp1)/xisi[:,0]\n S2 = (dp2 * xf2 + df2 * xp2)/xisi[:,1]\n\n inst_dist = (S1 * xisi[:,1] + S2 * xisi[:,0]) / (norm_xisi/2.0)\n\n return t, inst_dist\n\ndef _find_prev_spikes(t, spiketrains):\n prv = []\n for st in spiketrains:\n prv.append(max(st[st < t]))\n return prv\n\ndef _find_next_spikes(t, spiketrains):\n nxt = []\n for st in spiketrains:\n nxt.append(min(st[st >= t]))\n return nxt\n\ndef multivariate(spiketrains, start, end, nsamples):\n \"\"\"\n Multivariate version as described in Kreuz et al., 2011.\n\n Parameters\n ==========\n spiketrains : is an array of spike time arrays\n\n start : the initial time of the recordings\n\n end : the end time of the recordings\n\n nsamples : the number of samples used to compute the distance\n \"\"\"\n t = np.linspace(start+(end-start)/nsamples, end, nsamples)\n N = len(spiketrains)\n\n strains_se = []\n for idx in range(N):\n newst = np.insert(spiketrains[idx], 0, start)\n strains_se.append(np.append(newst, end))\n\n # different between t and previous and next spikes for each t\n prev_spikes = np.zeros((nsamples, N))\n next_spikes = np.zeros((nsamples, N))\n dprev_spikes = np.zeros((nsamples, N))\n dnext_spikes = np.zeros((nsamples, N))\n for idx, ti in enumerate(t):\n prev_spikes[idx] = _find_prev_spikes(ti, strains_se)\n next_spikes[idx] = _find_next_spikes(ti, strains_se)\n dprev_spikes[idx] = ti-prev_spikes[idx]\n dnext_spikes[idx] = next_spikes[idx]-ti\n\n # mean interval from t to previous/next spike on each spiketrain (over t)\n meanp = 
np.mean(dprev_spikes, axis=1)\n meanf = np.mean(dnext_spikes, axis=1)\n # stdev interval from t to previous/next spike on each spiketrain (over t)\n sigmap = np.std(prev_spikes, axis=1)\n sigmaf = np.std(next_spikes, axis=1)\n # mean inter-spike interval around each t\n xisi = meanp+meanf\n\n mvdist = ((sigmap*meanf)+(sigmaf*meanp))/(xisi**2)\n return t, mvdist\n\ndef pairwise(spiketrains, start, end, nsamples):\n \"\"\"\n Calculate the instantaneous average over all the pairwise distances\n\n Parameters\n ==========\n spiketrains : is an array of spike time arrays\n\n start : the initial time of the recordings\n\n end : the end time of the recordings\n\n nsamples : the number of samples used to compute the distance\n\n spiketrains is a list of arrays of shape (N, T) with N spike trains\n \"\"\"\n # remove empty spike trains\n spiketrains = [sp for sp in spiketrains if len(sp)]\n d = np.zeros((nsamples,))\n n_trains = len(spiketrains)\n t = 0\n for i, t1 in enumerate(spiketrains[:-1]):\n for t2 in spiketrains[i+1:]:\n tij, dij = distance(t1, t2, start, end, nsamples)\n if(i == 0):\n t = tij # The times are only dependent on ti, te, and N\n d = d + dij\n d = d / float(n_trains * (n_trains-1) /2)\n return t,d\n\n\ndef pairwise_mp(spiketrains, start, end, N):\n \"\"\"\n Calculates the multivariate distance (the instantaneous average over all\n the pairwise distances) using Python's multiprocessing.Pool() to\n run sets of calculations in parallel.\n\n Arguments have the same meaning as for `pairwise`.\n\n NB: This function has a slight (on the order 1E-16) deviation from the\n single-process version of the function `pairwise`. The cause of\n the difference has yet to be determined.\n \"\"\"\n # remove empty spike trains\n spiketrains = [sp for sp in spiketrains if len(sp)]\n count = len(spiketrains)\n idx_all = range(count-1)\n pool = multiprocessing.Pool()\n pool_results = pool.map(_all_dist_to_end,\n zip(idx_all, itertools.repeat(spiketrains),\n itertools.repeat(start),\n itertools.repeat(end),\n itertools.repeat(N)))\n pool.close()\n pool.join()\n # Each pool calculated a different number of distance pairs\n # due to the nature of the `_all_dist_to_end` function.\n # We need to organise them into a proper 2D array.\n times = pool_results[0][0]\n distances = []\n for pr in pool_results:\n distances.extend(pr[1])\n return times, np.mean(distances, axis=0)\n\n\ndef _all_dist_to_end(args):\n \"\"\"\n Helper function for parallel pairwise distance calculations.\n \"\"\"\n idx = args[0]\n spiketrains = args[1]\n start = args[2]\n end = args[3]\n N = args[4]\n num_spiketrains = len(spiketrains)\n distances = []\n for jdx in range(idx+1, num_spiketrains):\n dist = distance(spiketrains[idx], spiketrains[jdx], start, end, N)\n times = dist[0] # should be the same for all\n distances.append(dist[1])\n return times, distances\n\n\ndef pairwise_interval(inputspikes, outputspikes, samples=1, mp=True):\n \"\"\"\n Calculates the mean pairwise SPIKE-distance in intervals defined\n by a separate spike train. This function is used to calculate the distance\n between *input* spike trains based on the interspike intervals of the\n *output* spike train. The result is therefore the distance between the\n input spikes that caused each response.\n\n NB: This is NOT the ISI distance. 
It is the spike distance calculated in\n successive intervals.\n\n Parameters\n ==========\n inputspikes : A list or array of spike trains whose pairwise distance will\n be calculated\n\n outputspikes : A single spike train to be used to calculate the\n intervals\n\n samples : The number of samples to use to for each interval\n\n mp : Set to True to use the multiprocessing implementation\n of the pairwise calculation function or False to use the\n single process version (default: True)\n\n \"\"\"\n times = []\n krdists = []\n pairwise_func = pairwise_mp if mp else pairwise\n for prv, nxt in zip(outputspikes[:-1], outputspikes[1:]):\n krd = pairwise_func(inputspikes, prv, nxt, samples)\n times.append(krd[0])\n krdists.append(krd[1])\n return times, krdists\n\n\ndef interval_multivariate(inputspikes, outputspikes, samples=1):\n \"\"\"\n Calculates the multivariate SPIKE-distance in intervals defined\n by a separate spike train. This function is used to calculate the distance\n between *input* spike trains based on the interspike intervals of the\n *output* spike train. The result is therefore the distance between the\n input spikes that caused each response.\n\n NB: This is NOT the ISI distance. It is the spike distance calculated in\n successive intervals.\n\n Parameters\n ==========\n inputspikes : A list or array of spike trains whose pairwise distance will\n be calculated\n\n outputspikes : A single spike train to be used to calculate the\n intervals\n\n samples : The number of samples to use to for each interval\n\n \"\"\"\n times = []\n krdists = []\n for prv, nxt in zip(outputspikes[:-1], outputspikes[1:]):\n krd = multivariate(inputspikes, prv, nxt, samples)\n times.append(krd[0])\n krdists.append(krd[1])\n return times, krdists\n","repo_name":"achilleas-k/spikerlib","sub_path":"metrics/kreuz.py","file_name":"kreuz.py","file_ext":"py","file_size_in_byte":11619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28904953260","text":"from realog import debug\nimport json\nimport os\nimport random\nimport copy\nfrom dateutil import parser\nimport datetime\n\nfrom db import conn\n\ndef file_read_data(path):\n\tif not os.path.isfile(path):\n\t\treturn \"\"\n\tfile = open(path, \"r\")\n\tdata_file = file.read()\n\tfile.close()\n\treturn data_file\n\ndebug.info(\"Load old BDD: \")\n\ndata = file_read_data('bdd_video.json')\nmy_old_bdd = json.loads(data)\n\ndebug.info(\"create the table:\")\n\nc = conn.cursor()\n\n# Create table\nc.execute('''\nCREATE TABLE video (\n\tid INTEGER PRIMARY KEY,\n\tdeleted INTEGER,\n\tcreate_date INTEGER NOT NULL,\n\tmodify_date INTEGER NOT NULL,\n\tname TEXT NOT NULL,\n\tdescription TEXT,\n\tcovers TEXT,\n\tdata_id INTEGER,\n\ttype_id INTEGER,\n\tunivers_id INTEGER,\n\tgroup_id INTEGER,\n\tsaison_id INTEGER,\n\tdate VARCHAR,\n\tepisode INTEGER,\n\ttime INTEGER)\n''')\n\ndef list_to_string(data):\n\tout = \"\";\n\tfor elem in data:\n\t\tif out != \"\":\n\t\t\tout += \"/\"\n\t\tout +=str(elem)\n\treturn out\n\n#sqlite3 bdd_group.db3 \"SELECT * from data\"\n\ndebug.info(\"insert elements: \")\niii = 0;\nfor elem in my_old_bdd:\n\tiii+=1;\n\tdebug.info(\"[\" + str(iii) + \"/\" + str(len(my_old_bdd)) + \"] send new element\")\n\tid = elem[\"id\"]\n\ttime = elem[\"create_date\"].replace(\"Z\",\"\").replace(\"H\",\" \");\n\ttmp_time = parser.parse(time)\n\tdebug.info(\" => \" + str(tmp_time) + \" from \" + elem[\"create_date\"])\n\tnew_time = int(tmp_time.timestamp())\n\tmodify_time = 
int(datetime.datetime.utcnow().timestamp());\n\tname = elem[\"name\"]\n\tif \"description\" not in elem.keys():\n\t\tdescription = None\n\telse:\n\t\tdescription = elem[\"description\"]\n\tif \"covers\" not in elem.keys():\n\t\tcovers = []\n\telse:\n\t\tcovers = elem[\"covers\"]\n\t\tif covers == None:\n\t\t\tcovers = [];\n\tif \"data_id\" not in elem.keys():\n\t\tdata_id = None\n\telse:\n\t\tdata_id = elem[\"data_id\"]\n\tif \"type_id\" not in elem.keys():\n\t\ttype_id = None\n\telse:\n\t\ttype_id = elem[\"type_id\"]\n\tif \"univers_id\" not in elem.keys():\n\t\tunivers_id = None\n\telse:\n\t\tunivers_id = elem[\"univers_id\"]\n\tif \"group_id\" not in elem.keys():\n\t\tgroup_id = None\n\telse:\n\t\tgroup_id = elem[\"group_id\"]\n\tif \"saison_id\" not in elem.keys():\n\t\tsaison_id = None\n\telse:\n\t\tsaison_id = elem[\"saison_id\"]\n\tif \"date\" not in elem.keys():\n\t\tdate = None\n\telse:\n\t\tdate = elem[\"date\"]\n\tif \"episode\" not in elem.keys():\n\t\tepisode = None\n\telse:\n\t\tepisode = elem[\"episode\"]\n\tif \"time\" not in elem.keys():\n\t\ttime = None\n\telse:\n\t\ttime = elem[\"time\"]\n\trequest_insert = (id, new_time, modify_time, name, description, list_to_string(covers), data_id, type_id, univers_id, group_id, saison_id, date, episode, time)\n\tc.execute('INSERT INTO video VALUES (%s,0,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', request_insert)\n\n# Save (commit) the changes\nconn.commit()\n\n# def dict_factory(cursor, row):\n# d = {}\n# for idx, col in enumerate(cursor.description):\n# d[col[0]] = row[idx]\n# return d\n\n# conn.row_factory = dict_factory\n# c = conn.cursor()\n# c.execute('SELECT * FROM video WHERE deleted=false')\n# results = c.fetchall()\n# print(results)\n\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\nconn.close()\n\n","repo_name":"neruyzo/karideo","sub_path":"back/transfer_bdd/v0.0...v1.0/transfert_video.py","file_name":"transfert_video.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11169533026","text":"from classes.cl_cliente import *\nfrom classes.cl_eap import *\nimport pandas as pd\nfrom functions.functions import data_pt\n\n\ndef lista_eap():\n cnx = con()\n with cnx.cursor() as c:\n c.execute(\"SELECT * FROM cl_eap\")\n return c.fetchall()\n\n\ndef relatorio_eap():\n relatorio = []\n clientes = lista_clientes()\n dados_eap = lista_eap()\n for cl in clientes:\n dados = [\n cl['graduacao_txt'],\n f\"{cl['re']}-{cl['dc']}\",\n cl['nome'],\n '',\n '',\n ''\n ]\n for eap in dados_eap:\n if cl['re'] == eap['re']:\n dados = [\n cl['graduacao_txt'],\n f\"{cl['re']}-{cl['dc']}\",\n cl['nome'],\n eap['periodo_ead'],\n data_pt(eap['data_eap']),\n data_pt(eap['send_mail']),\n ]\n relatorio.append(dados)\n\n df = pd.DataFrame(columns=['Graduação', 'RE', 'Nome', 'Periodo', 'Data EAP', 'E-mail (aviso)'],\n data=relatorio)\n\n return df\n","repo_name":"marcelojacomini/admin_pm","sub_path":"relatorios/rel_eap.py","file_name":"rel_eap.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32741934161","text":"#Change these parameters\nX_YEARS = 3 #THIS CHANGES HOW MANY YEARS BACK IT LOOKS FOR STOCK DATA, BUT NOT EARNINGS DATA\nDAYS_SINCE_MARKET_DAY = 1 #DON'T TOUCH UNLESS IT GIVES YOU AN ERROR, THEN ENTER RANDOM NUMBERS 0-6 UNTIL IT WORKS. 
AFTER TODAY 0 SHOULD WORK EXCEPT ON WEEKENDS\n\n#Don't change anything below this line\n#-----------------------------------------\n\nimport requests\nfrom datetime import date\n\nold = (str(date.today().month - 1), str(date.today().day - DAYS_SINCE_MARKET_DAY), str(date.today().year - X_YEARS))\n\ndef get_change_ni(symb):\n url = \"http://financials.morningstar.com/ajax/ReportProcess4CSV.html?t=\" + symb + \"&reportType=is&period=12&dataType=A&order=asc&columnYear=5&number=1\"\n IS = requests.get(url).text.split(',')\n i = 1\n for line in IS:\n if \"Net income\" in line:\n break\n i += 1\n if i >= len(IS):\n return\n if IS[i] != None:\n curr = \"\"\n for char in IS[i + 5]:\n if char in \"0123456789\":\n curr += char\n else:\n break\n return (IS[i + 2], curr)\n \ndef mapper(symb):\n \"Given a stock symbol, return a list containing SYMB, x_years performance, and PE\"\n quote = requests.get(\"http://finance.yahoo.com/d/quotes.csv?s=\" + symb + \"&f=l1r\")\n price, PE = quote.text.split(',')\n if price != \"N/A\":\n price = float(price)\n if PE != \"N/A\\n\":\n PE = float(PE)\n else:\n PE = \"N/A\"\n #MAKE IT WORK FOR NON-MARKET DAYS\n old_price_url = \"http://ichart.finance.yahoo.com/table.csv?g=d&ignore=.csv&f=\" + old[2] + \"&e=\" + old[1] + \"&c=\" + old[2] + \"&b=\" + old[1] + \"&a=\" + old[0] + \"&d=\" + old[0] + \"&s=\" + symb\n old_price = requests.get(old_price_url)\n try:\n assert type(PE) is not str\n old_price_num = float(old_price.text.split(',')[-1]) #10 for close, -1 for adj close\n except:\n #For some reason, this is not returning an entry. Leading causes are a none market day and the company did not exist back then\n return symb + ',' + 'NA' + ',' + 'NA'\n eps = get_change_ni(symb)\n if eps == None or eps[0] == \"\" or eps[1] == \"\":\n return symb + ',NA,NA'\n change_pe = int(eps[0]) - int(eps[1]) #- PE\n change = old_price_num - price\n return symb + ',' + str(change_pe) + ',' + str(change)\n\ndef filt(stats):\n _, pe, price = stats.split(',')\n if price != 'NA' and pe != 'NA':\n if float(price) > 0 and int(pe) < 0:\n return True \n return False\nnasdaq = \"http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nasdaq&render=download\"\nnyse = \"http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nyse&render=download\"\nexchanges = {\"nasdaq\" : nasdaq, \"nyse\" : nyse}\n\ndbutils.fs.mkdirs(\"dbfs:/tmp\")\n\nfor name, exchange in exchanges.items():\n csv_data = requests.get(exchange) \n localpath=\"/tmp/\" + name + \"_raw.csv\"\n file = open(localpath, 'w')\n file.write(csv_data.text)\n file.close()\n df = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('file:' + localpath)\n symbs = df.select('Symbol').collect()\n data = []\n for symb in symbs:\n data.append(symb['Symbol'])\n rdd = sc.parallelize(data)\n mapRDD = rdd.map(mapper).filter(filt).collect()\n for row in mapRDD:\n print(row.split(',')[0])\n","repo_name":"IanMadlenya/assest_hunter","sub_path":"pe.py","file_name":"pe.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72949482172","text":"# python scraper to fetch most up-to-date active cases\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nwebsite = 'https://www.worldometers.info/coronavirus/#countries' # url for the site\nwebsite_url = requests.get(website).text\nsoup = BeautifulSoup(website_url, 'html.parser')\n\nmy_table = 
soup.find('tbody')\n\ntable_data = []\nfor row in my_table.findAll('tr'):\n row_data = []\n for cell in row.findAll('td'):\n row_data.append(cell.text)\n if len(row_data) > 0:\n data_item = {\"Country\": row_data[1],\n \"ActiveCases\": row_data[8],\n }\n table_data.append(data_item)\n\ndf = pd.DataFrame(table_data)\ndf = df.drop(range(0, 8))\ndf.to_csv('active_cases.csv', index=False)\n","repo_name":"yuchenghuo/covid-simulator","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"3332842045","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import accuracy_score\r\nimport pickle\r\n\r\nheart_data = pd.read_csv('heart_new.csv') # Kaggle Dataset\r\nheart_data = heart_data.drop(['Race'], axis=1)\r\n\r\nheart_data['HeartDisease'].replace({'Yes':1,'No':0},inplace=True)\r\nheart_data['Smoking'].replace({'Yes':3,'No':2},inplace=True)\r\nheart_data['AlcoholDrinking'].replace({'Yes':3,'No':2},inplace=True)\r\nheart_data['Stroke'].replace({'Yes':3,'No':2},inplace=True)\r\nheart_data['DiffWalking'].replace({'Yes':3,'No':2},inplace=True)\r\nheart_data['Sex'].replace({'Male':3,'Female':2},inplace=True)\r\nheart_data['Diabetic'].replace({'Yes':3,'No':2,'No, borderline diabetes':4,'Yes (during pregnancy)':5},inplace=True)\r\nheart_data['PhysicalActivity'].replace({'Yes':3,'No':2},inplace=True)\r\nheart_data['Asthma'].replace({'Yes':3,'No':2},inplace=True)\r\nheart_data['AgeCategory'].replace({'18-24':2,'25-29':3,'30-34':4,'35-39':5,'40-44':6,'45-49':7,'50-54':8,'55-59':9,'60-64':10,'65-69':11,'70-74':12,'75-79':13,'80 or older':14},inplace=True)\r\nheart_data['GenHealth'].replace({'Very good':2,'Fair':3,'Good':4,'Poor':5,'Excellent':6},inplace=True)\r\n\r\nX = heart_data.drop(columns='HeartDisease', axis=1)\r\nY = heart_data['HeartDisease']\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, stratify=Y, random_state=3)\r\n\r\n# Logistic Regression\r\nmodel = LogisticRegression(max_iter=1025)\r\n\r\n# Training the logistic regression model with training data\r\nmodel.fit(X_train, Y_train)\r\n\r\n# accuracy on training data\r\nX_test_prediction = model.predict(X_test)\r\ntesting_data_accuracy = accuracy_score(X_test_prediction, Y_test)\r\nprint('Accuracy on testing data: ', testing_data_accuracy)\r\n\r\n# Creating a pickle file for the classifier\r\nfilename = 'heart-disease-prediction-Logistic-Regression-model.pkl'\r\npickle.dump(model, open(filename, 'wb'))","repo_name":"ilyas7273ilyas/HerokuDemo01","sub_path":"disease_prediction.py","file_name":"disease_prediction.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37007593594","text":"import numpy as np\n\ndef SolVgGravity(x,y):\n\n eta=-np.sin(x*x*y*y+x*y+5.)+1.+0.001 #epsilon;\n\n costerm=np.cos(x*x*y*y+x*y+5.)\n\n deta_dx=-y*(2.*x*y+1.)*costerm\n deta_dy=-x*(2.*x*y+1.)*costerm\n\n dpdx=2. * x *y*y +y \n dpdy=2. * x*x*y +x \n\n exx= 3.*x*x * y +2.*x +y +1.\n eyy=-3.*x*x * y -2.*x -y -1.\n\n exy=0.5*(x*x*x + x -3.*x*y*y -2.*y)\n eyx=0.5*(x*x*x + x -3.*x*y*y -2.*y)\n\n dexxdx= 6.*x*y+2.\n deyxdy=-3.*x*y-1.\n\n dexydx= 0.5*(3.*x*x +1. 
-3.*y*y)\n deyydy= -3.*x*x -1.\n\n gx =-dpdx + 2.*eta*dexxdx + 2.*deta_dx*exx + 2.*eta*deyxdy + 2.*deta_dy*eyx\n gy =-dpdy + 2.*eta*dexydx + 2.*deta_dx*exy + 2.*eta*deyydy + 2.*deta_dy*eyy\n\n return -gx,-gy\n","repo_name":"cedrict/fieldstone","sub_path":"python_codes/fieldstone_112/solvg.py","file_name":"solvg.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"} +{"seq_id":"4553886905","text":"import unittest\nimport urllib.request\nimport badsec\n\n\nclass MyTestCase(unittest.TestCase):\n\n    def test_connect_to_badsec_server_using_request(self):\n        '''\n        This returns OK as I'm checking for a response and that the request succeeds, which it does\n        :return:\n        '''\n\n        headers = dict()\n        headers['accept'] = 'application/json'\n        headers['User-Agent'] = \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:48.0) Gecko/20100101 Firefox/48.0\"\n        states_url = 'https://disease.sh/v3/covid-19/states'\n\n        auth_request = urllib.request.Request(states_url, headers=headers, method='GET')\n        auth_response, succeeded = badsec.connect_to_badsec_server_using_request(auth_request)\n        self.assertEqual(auth_response.code, 200)\n        self.assertTrue(succeeded)\n\n    def test_connect_to_badsec_server_using_bad_request(self):\n        '''\n        This returns OK as I'm checking for lack of a response and that the request does not succeed\n        If you want to make this not succeed, change either self.assertIsNone to self.assertIs or assertFalse to assertTrue\n        :return:\n        '''\n        headers = dict()\n        headers['accept'] = 'application/json'\n        headers['User-Agent'] = \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:48.0) Gecko/20100101 Firefox/48.0\"\n        bad_url = 'https://disease.sh/v3/covid-19/state'\n\n        auth_request = urllib.request.Request(bad_url, headers=headers, method='GET')\n        auth_response, succeeded = badsec.connect_to_badsec_server_using_request(auth_request)\n        self.assertIsNone(auth_response)\n        self.assertFalse(succeeded)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"rcgottlieb/badsec","sub_path":"test_badsec.py","file_name":"test_badsec.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72832154491","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport urllib\n\nUBUNTU_DISTROS = ['precise', 'quantal', 'raring', 'saucy', 'trusty', 'utopic', 'vivid', 'wily', 'xenial', 'yakkety', 'zesty', 'artful', 'bionic', 'cosmic', 'disco', 'eoan', 'focal']\nDEBIAN_DISTROS = ['jessie', 'stretch', 'buster', 'bullseye']\nOS_DISTROS = UBUNTU_DISTROS + DEBIAN_DISTROS\nARCHES = ['i386', 'amd64', 'armhf', 'arm64', 'source']\nROS1_DISTROS = ['boxturtle', 'cturtle', 'diamondback', 'electric', 'fuerte', 'groovy', 'hydro', 'indigo', 'jade', 'kinetic', 'lunar', 'melodic', 'noetic']\nROS2_DISTROS = ['ardent', 'bouncy', 'crystal', 'dashing', 'eloquent', 'foxy', 'galactic', 'humble']\nROS2_ROLLING_DISTROS = ['rolling']\n\nROS_DISTROS = ROS1_DISTROS + ROS2_DISTROS + ROS2_ROLLING_DISTROS\n\nMIN_DATA_THRESHOLD = 0.001\n\ndef count_d(res):\n    return -1 * res.count_downloads()\n\nclass Results:\n    def __init__(self, package, rosdistro):\n        self.urls = {}\n        self.name = package\n        self.rosdistro = rosdistro\n    def add_url(self, url, count):\n        if url in self.urls:\n            self.urls[url] += count\n        else:\n            self.urls[url] = count\n\n    def count_downloads(self, arch=None, distro=None):\n        total = 0\n        for url, count in self.urls.items():\n            if not arch or arch == 
get_arch_from_url(url):\n                if not distro or distro == get_distro_from_url(url):\n                    total += count\n        return total\n\ndef get_beginning_url(url):\n    if not url.endswith('.deb') and not url.endswith('.dsc'):\n        #print(\"not a debian package %s\" % url)\n        return None\n    bn = os.path.basename(url)\n    #print(\"bn %s\" % bn)\n    return bn.split('_')[0]\n    \ndef get_arch_from_url(url):\n    if url.endswith('.dsc'):\n        return 'source'\n    if not url.endswith('.deb'):\n        #print(\"not a debian package %s\" % url)\n        return None\n    bn = os.path.basename(url)\n    bn = bn[:-4]\n    # print(\"bn %s\" % bn)\n    return bn.split('_')[-1]\n    \ndef get_distro_from_url(url):\n    \"\"\" Detect which distro we're using. This implementation has the potential to overmatch.\"\"\"\n    for distro in OS_DISTROS:\n        if distro in url:\n            return distro\n    return None\n\ndef get_package_info_from_url(basename_beginning):\n    name_elements = basename_beginning.split('-')\n    if len(name_elements) < 3:\n        # print(\"package name too short: %s\" % basename_beginning)\n        return None\n    if name_elements[0] != 'ros':\n        print(\"not a ros package name: %s\" % basename_beginning)\n        return None\n\n    distro = name_elements[1]\n    return distro\n\ndef get_package_version_from_url(basename_beginning):\n    #print(basename_beginning)\n    basename_beginning = urllib.unquote(basename_beginning)\n    v = basename_beginning.split('_')[1].split('-')[0]\n    #print('v is %s' % v)\n    return v\n\ndef count_versions(urls):\n    versions = set()\n    for u in urls:\n        versions.add(get_package_version_from_url(u))\n    return len(versions)\n\n#def count_rosdistros(urls):\n#    rosdistros = set()\n#    for u in urls:\n#        rosdistros.add(get_package_version_from_url(u))\n#    return len(rosdistros)\n\nparser = argparse.ArgumentParser(description=\"process awstats files\")\nparser.add_argument('filename', help=\"filename to load\", type=str, nargs='+')\n\n\nargs = parser.parse_args()\n\nprint(\"Loading %s\" % args.filename)\n\nresults = {}\nother_packages = {}\n\n\nAWSTATS_DOWNLOAD_SECTION = {\n    '6.9': 'SIDER',\n    '7.4': 'DOWNLOADS'\n}\n\nfor filename in args.filename:\n\n    with open(filename, 'r') as fh:\n        inside = False\n        skipped_lines = 0\n        processed_lines = 0\n        shadow_fixed = 0\n        awstats_version = None\n        for line in fh:\n            if not awstats_version:\n                if 'AWSTATS DATA FILE' in line:\n                    awstats_version = line.split()[3]\n            # print(\"processing line %s\" % line)\n            if not inside and 'BEGIN_%s ' % AWSTATS_DOWNLOAD_SECTION[awstats_version] in line:\n                inside = True\n                continue\n            if inside and ('END_%s' % AWSTATS_DOWNLOAD_SECTION[awstats_version]) in line:\n                inside = False\n                continue\n            if not inside:\n                skipped_lines += 1\n                # print(\"SKIPPED, not inside\")\n                continue\n            if '/ros-shadow-fixed' in line:\n                shadow_fixed += 1\n                # print(\"SKIPPED, shadow_fixed\")\n                continue\n            if '/ros2-testing' in line:\n                shadow_fixed += 1\n                # print(\"SKIPPED, ros2-testing\")\n                continue\n            elements = line.strip().split()\n            if len(elements) < 4:\n                print(\"Too few elements in %s\" % elements)\n                # print(\"line: %s\" % line)\n                skipped_lines += 1\n                continue\n            processed_lines += 1\n\n            url = elements[0]\n            count = int(elements[1])\n            # print(\"url %s\" % url)\n            beg = get_beginning_url(url)\n            arch = get_arch_from_url(url)\n            if not beg:\n                #print(\"failing here %s --- %s\" % (beg, url))\n                continue\n            rosdistro = get_package_info_from_url(beg)\n            # print(\"%s -- %s\" % (rosdistro, beg))\n            if beg in results:\n                results[beg].add_url(url, count)\n            else:\n                results[beg] = Results(beg, rosdistro)\n                results[beg].add_url(url, count)\n                # print(\"hello %s\" % beg)\n\n\ns = 
sorted(results.values(), key=count_d)\nfor i in range(0, min(100000, len(results))):\n if s[i].name[0:3] == 'ros' or s[i].name[0:3] == 'pyt' :\n versions = count_versions(s[i].urls)\n #rosdistros = count_rosdistros(s[i].urls)\n print(\"%s: %s versions: %s\" % (s[i].name, s[i].count_downloads(), versions))\n\n\n\n\n# non_hydro = [r for r in results.values() if r.rosdistro != 'hydro']\n\ntotal_downloads = 0\nfor r in results.values():\n total_downloads += r.count_downloads()\n\nunique_urls = 0\nfor r in results.values():\n unique_urls += len(r.urls)\n\narch_stats = {}\narchdistro_stats = {}\nrd_stats = {}\nprint(\"Breakdown by rosdistro:\")\nfor rd in ROS_DISTROS:\n rd_stats[rd] = sum([r.count_downloads() for r in results.values() if r.rosdistro == rd]) * 100.0/total_downloads\n print(\"%s: %.2f %%\" % (rd, rd_stats[rd]))\nprint(\"Breakdown by Arch:\")\nfor arch in ARCHES:\n arch_stats[arch] = sum([r.count_downloads(arch) for r in results.values()]) * 100.0/total_downloads\n print(\"%s: %.2f %%\" % (arch, arch_stats[arch]))\n for distro in OS_DISTROS:\n archdistro = '%s_%s' % (distro, arch)\n archdistro_stats[archdistro] = sum([r.count_downloads(arch=arch, distro=distro) for r in results.values()]) * 100.0/total_downloads\n\nprint(\"Results larger than %s%%\" % MIN_DATA_THRESHOLD)\nfor k, v in sorted(archdistro_stats.items()):\n if v > MIN_DATA_THRESHOLD:\n print(\"%s: %.2f %%\" % (k, v))\n\nprint(\"Unique debian package versions: %s\" % unique_urls)\nprint(\"Number of different packages: %s\" % len(results))\nprint(\"total deb downloads: %s\" % total_downloads)\nprint(\"skipped lines: %s\" % skipped_lines)\nprint(\"processed lines: %s\" % processed_lines)\nprint(\"shadow_fixed lines skipped: %s\" % shadow_fixed)\n","repo_name":"osrf/ros_metrics_analysis","sub_path":"scripts/analyze_awstats.py","file_name":"analyze_awstats.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71050227771","text":"from keras.datasets import imdb\n\nfrom keras import layers, models, optimizers\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\ndef vectorize_sequence(sequences, dimension=1000):\n\n results = np.zeros((len(sequences), dimension))\n\n for i, sequence in enumerate(sequences):\n\n results[i,sequence] = 1.\n\n return results\n\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words = 1000)\n# print(\"Max index in train data: {max([max(sequence) for sequence in train_data])}\")\n\n# print(\"Train label: train_labels[0] = {train_labels[0]})\n\n# print({len(train_data[0])})\nword_index = imdb.get_word_index() # maps words to index, commonly called as w_to_i\n\nreverse_word_index = dict( [ (value, key) for (key, value) in word_index.items()] ) # this is just opposite, i_to_w\n\ndecode_review = ' '.join( [reverse_word_index.get(i - 3, '?') for i in train_data[0] ])\n# print(f\"Encoded Review: \\n {train_data[0]} \\n\")\n\nprint(decode_review)\n\nx_train = vectorize_sequence(train_data) # mapping one hot encoding to a particular index\n\nx_test = vectorize_sequence(test_data) # mapping one hot encoding to a particular index\ny_train = np.asarray(train_labels).astype('float32')\n\ny_test = np.asarray(test_labels).astype('float32')\n# print(\"A data point appears like\" (x_train[0]))\n\n# print(\"Actual appears as: {y_train[0]}\")\n# Validation separation\n\n\n\nx_val = x_train[:1000]\n\npartial_x_train = x_train[1000:]\n\n\n\ny_val = y_train[:1000]\n\npartial_y_train = 
y_train[1000:]\n# Model definition\n\n\n\nmodel = models.Sequential()\n\nmodel.add(layers.Dense(16, activation='relu', input_shape=(1000,), name=\"Input_Layer\"))\n\nmodel.add(layers.Dense(16, activation='relu', name=\"Hidden_Layer_1\"))\n\nmodel.add(layers.Dense(1, activation='sigmoid', name=\"Output_Layer\"))\n\nprint(model.summary())\n\n# Compiling the model\n\n\n\nmodel.compile(optimizer=optimizers.RMSprop(lr = 0.001),\n\n loss='binary_crossentropy',\n\n metrics=['accuracy'])\n\nhistory = model.fit(partial_x_train,\n\n partial_y_train,\n\n epochs = 4,\n\n batch_size = 128,\n\n validation_data = (x_val, y_val))\n\nhistory_dict = history.history\n\nacc = history_dict[\"acc\"]\n\nval_acc = history_dict[\"val_acc\"]\n\nloss_values = history_dict['loss']\n\nval_loss_values = history_dict['val_loss']\n\nprint (loss_values)\nepochs = range(1, len(acc) + 1)\nprint (epochs)\nimport matplotlib.pyplot as plt\n\nplt.figure(1)\n\nplt.subplot(211)\n\nplt.plot(epochs, loss_values, 'bo', label='Training Loss')\n\nplt.plot(epochs, val_loss_values, 'b', label='Validation Loss')\n\nplt.title('Training and validation loss')\n\nplt.ylabel('Loss')\n\nplt.legend()\n\nplt.show()\nplt.subplot(212)\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\n\nplt.plot(epochs, val_acc, 'b', label = 'Validation acc')\n\nplt.title('Training and validation accuracy')\n\nplt.xlabel('Epochs')\n\nplt.ylabel('Loss')\n\nplt.legend()\n\nplt.show()\n# Running Predict\n\ntest = x_test[1].reshape(1, -1)\n\nprint(f\"Model's Prediction: {model.predict(test)[0]}\")\n\nprint(f\"Actual: {y_test[1]}\")","repo_name":"aorursy/new-nb-1","sub_path":"avni09_imdb-keras.py","file_name":"avni09_imdb-keras.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26086831125","text":"from stemseg.config import cfg\nfrom stemseg.data.video_dataset import VideoDataset\nfrom stemseg.data.instance_duplicator import InstanceDuplicator\nfrom stemseg.structures.mask import BinaryMask, BinaryMaskSequenceList\n\nimport math\nimport numpy as np\nimport random\n\n\nclass DavisDataLoader(VideoDataset):\n def __init__(self, base_dir, vds_json_file,\n samples_to_create=-1,\n apply_augmentation=False,\n single_instance_duplication=False,\n background_as_ignore_region=True):\n\n super(DavisDataLoader, self).__init__(base_dir, vds_json_file, cfg.INPUT.NUM_FRAMES, apply_augmentation)\n\n self.filter_zero_instance_frames()\n\n self.samples = self.create_training_subsequences(samples_to_create)\n\n self.instance_duplicator = InstanceDuplicator()\n self.single_instance_duplication = single_instance_duplication\n self.background_as_ignore_region = background_as_ignore_region\n\n def create_training_subsequences(self, num_subsequences):\n frame_range = list(range(cfg.DATA.DAVIS.FRAME_GAP_LOWER, cfg.DATA.DAVIS.FRAME_GAP_UPPER + 1))\n subseq_length = self.clip_length\n\n # filter sequences which are too short\n min_sequence_length = frame_range[0] + 1 # so that multiple, different subsequences can be generated\n sequences = [seq for seq in self.sequences if len(seq) > min_sequence_length]\n\n # compute number of sub-sequences to create from each video sequence\n total_frames = sum([len(seq) for seq in sequences])\n samples_per_seq = [max(1, int(math.ceil((len(seq) / total_frames) * num_subsequences))) for seq in sequences]\n\n subseq_span_range = frame_range.copy()\n subsequence_idxes = []\n # num_intermediate_frames = subseq_length - 2\n\n for sequence, num_samples in 
zip(sequences, samples_per_seq):\n for _ in range(num_samples):\n subseq_span = min(random.choice(subseq_span_range), len(sequence) - 1)\n max_start_idx = len(sequence) - subseq_span - 1\n assert max_start_idx >= 0\n\n start_idx = 0 if max_start_idx == 0 else random.randint(0, max_start_idx)\n end_idx = start_idx + subseq_span\n sample_idxes = np.round(np.linspace(start_idx, end_idx, subseq_length)).astype(np.int32).tolist()\n\n assert len(set(sample_idxes)) == len(sample_idxes) # sanity check: ascertain no duplicate indices\n subsequence_idxes.append((sequence.id, sample_idxes))\n\n # because of rounding up the number of samples to create per sequence, we will always have more than the\n # required number of samples. So we randomly select the required number.\n assert len(subsequence_idxes) >= num_subsequences, \\\n \"{} should be >= {}\".format(len(subsequence_idxes), num_subsequences)\n\n subsequence_idxes = random.sample(subsequence_idxes, num_subsequences)\n random.shuffle(subsequence_idxes)\n\n sequences = {seq.id: seq for seq in sequences}\n subsequences = [\n sequences[video_id].extract_subsequence(frame_idxes)\n for video_id, frame_idxes in subsequence_idxes\n ]\n\n return subsequences\n\n def parse_sample_at(self, idx):\n sample = self.samples[idx]\n\n images = sample.load_images()\n masks = sample.load_masks() # list(T))\n\n if len(sample.instance_ids) == 1 and self.single_instance_duplication:\n masks_flat = [mask[0] for mask in masks]\n augmented_images, augmented_masks = self.instance_duplicator(images, masks_flat)\n if augmented_images is not None: # duplication was successful\n images = augmented_images\n masks = list(zip(*augmented_masks)) # list(N, list(T)) --> list(T, list(N))\n\n if self.background_as_ignore_region:\n fg_masks = [np.any(np.stack(masks_t, 0), 0) for masks_t in masks]\n ignore_masks = [BinaryMask((fg_mask == 0).astype(np.uint8)) for fg_mask in fg_masks]\n else:\n ignore_masks = [BinaryMask(np.zeros_like(masks[0][0], np.uint8)) for _ in range(len(masks))]\n\n masks = [\n [BinaryMask(mask) for mask in masks_t]\n for masks_t in masks\n ]\n\n masks = BinaryMaskSequenceList(masks)\n instance_categories = [1 for _ in range(masks.num_instances)]\n\n return images, masks, instance_categories, {\"ignore_masks\": ignore_masks, \"seq_name\": sample.id}\n\n def __len__(self):\n return len(self.samples)\n","repo_name":"sabarim/STEm-Seg","sub_path":"stemseg/data/davis_data_loader.py","file_name":"davis_data_loader.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"78"} +{"seq_id":"1301163115","text":"import numpy as np\nimport numpy.linalg as la\nfrom scipy.sparse import issparse, lil_array\n\n\ndef core_covariance_matrix(lap_b, lap_c, stderr: np.ndarray):\n if issparse(lap_c):\n lap_c = lap_c.todense()\n lap_c_inv = la.inv(lap_c)\n\n err_diag = lil_array((stderr.shape[0], stderr.shape[0]))\n err_diag.setdiag(stderr ** 2)\n tmp = lap_b @ err_diag @ lap_b.T\n res = lap_c_inv @ tmp @ lap_c_inv.T\n return res\n\n\ndef perturbation_variance(lap_q: np.ndarray, core_coefficients: np.ndarray,\n core_covariance: np.ndarray, core_edge_count: int):\n temp = lap_q @ core_covariance\n unscaled_variance = 2 * np.trace(temp @ temp) + \\\n 4 * core_coefficients @ temp @ lap_q @ core_coefficients\n return unscaled_variance / core_edge_count ** 
2\n","repo_name":"mikethenut/BNPA","sub_path":"perturbationx/toponpa/statistics/variance.py","file_name":"variance.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2714117901","text":"#Embedded file name: ui/cocoa/notification_center.py\nimport os\nfrom AppKit import NSObject, NSUserNotificationCenter, NSUserNotification\nfrom PyObjCTools import AppHelper\nfrom objc import typedSelector\nfrom Queue import Empty, Queue\nfrom dropbox.gui import message_sender, assert_message_queue\nfrom dropbox.mac.version import MAC_VERSION, MAVERICKS\nfrom dropbox.trace import unhandled_exc_handler, TRACE\n\nclass NotificationCenterBubbler(NSObject):\n POPUP_DURATION = 5\n\n @assert_message_queue\n def __new__(cls, app):\n return NotificationCenterBubbler.alloc().initWithApp_(app)\n\n def initWithApp_(self, app):\n self = super(NotificationCenterBubbler, self).init()\n if self is None:\n return\n self._app = app\n self._current = None\n self._q = Queue()\n NSUserNotificationCenter.defaultUserNotificationCenter().setDelegate_(self)\n return self\n\n @typedSelector('v@:@@')\n @assert_message_queue\n def userNotificationCenter_didActivateNotification_(self, center, notification):\n self.nextbubble()\n user_info = notification.userInfo() or {}\n if user_info.get('pid') != unicode(os.getpid()):\n TRACE(\"NCBubbler: pid doesn't match %r vs %r\", user_info, os.getpid())\n return\n ctx_ref = user_info.get('ctx_ref')\n if ctx_ref is not None:\n try:\n self._app.bubble_context.thunk_and_expire_context_ref(int(ctx_ref))\n except Exception:\n unhandled_exc_handler()\n\n @typedSelector('B@:@@')\n @assert_message_queue\n def userNotificationCenter_shouldPresentNotification_(self, center, notification):\n return True\n\n @typedSelector('v@:@@')\n @assert_message_queue\n def userNotificationCenter_didDeliverNotification_(self, center, notification):\n if MAC_VERSION >= MAVERICKS:\n if notification in center.deliveredNotifications():\n center.removeAllDeliveredNotifications()\n else:\n center.removeDeliveredNotification_(notification)\n\n def still_running(self):\n return True\n\n @typedSelector('v@:**@')\n @message_sender(AppHelper.callAfter)\n def do_bubble(self, message, caption, ctx_ref = None):\n TRACE('NotificationCenterController: Bubbling: %s', message)\n if self._current:\n TRACE('Queuing Notification: %s', message)\n self._q.put_nowait((message, caption, ctx_ref))\n return\n user_info = {u'pid': unicode(os.getpid())}\n if ctx_ref is not None:\n user_info[u'ctx_ref'] = unicode(ctx_ref)\n n = NSUserNotification.alloc().init()\n n.setTitle_(caption)\n n.setInformativeText_(message)\n n.setHasActionButton_(False)\n n.setUserInfo_(user_info)\n self._current = n\n NSUserNotificationCenter.defaultUserNotificationCenter().deliverNotification_(n)\n AppHelper.callLater(self.POPUP_DURATION, self.timeout, n)\n\n @typedSelector('v@:@@')\n @assert_message_queue\n def timeout(self, notification):\n if self._current == notification:\n if MAC_VERSION >= MAVERICKS:\n NSUserNotificationCenter.defaultUserNotificationCenter().removeAllDeliveredNotifications()\n self.nextbubble()\n\n @typedSelector('v@:@')\n @assert_message_queue\n def nextbubble(self):\n self._current = None\n try:\n message, caption, ctx_ref = self._q.get(False)\n self.do_bubble(message, caption, ctx_ref)\n except Empty:\n 
pass\n","repo_name":"bizonix/DropBoxLibrarySRC","sub_path":"pyc_decrypted/latest/ui/cocoa/notification_center.py","file_name":"notification_center.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"7328570729","text":"from threading import Thread, Condition\nimport time\n\ndef printer_thread_func():\n global prime_holder\n global found_prime\n\n while not exit_prog:\n cond_var.acquire()\n while not found_prime and not exit_prog:\n cond_var.wait()\n cond_var.release()\n\n if not exit_prog:\n print(prime_holder)\n\n prime_holder = None\n cond_var.acquire()\n found_prime = False\n cond_var.notify()\n cond_var.release()\n\ndef finder_thread_func():\n global prime_holder\n global found_prime\n\n i = 2\n \n while not exit_prog:\n\n while not is_prime(i):\n i += 1\n \n prime_holder = i\n cond_var.acquire()\n found_prime = True\n cond_var.notify()\n cond_var.acquire()\n\n cond_var.acquire()\n while found_prime and not exit_prog:\n cond_var.wait()\n cond_var.release()\n \n i += 1\n\n\n\ndef is_prime(num):\n if num == 2 or num == 3:\n return True\n \n div = 2\n \n while div <= num / 2:\n if num % div == 0:\n return False\n div += 1\n \n return True\n\n\ncond_var = Condition()\nfound_prime = False\nprime_holder = None\nexit_prog = False\n \nprinter_thread = Thread(target=printer_thread_func)\nprinter_thread.start()\n \nfinder_thread = Thread(target=finder_thread_func)\nfinder_thread.start()\n \n# Let the threads run for 5 seconds\ntime.sleep(3)\n \n# Let the threads exit\nexit_prog = True\ncond_var.acquire()\ncond_var.notifyAll()\ncond_var.release()\n \nprinter_thread.join()\nfinder_thread.join()","repo_name":"sky-bot/Interview_Preparation","sub_path":"Threads/improvement1.py","file_name":"improvement1.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16572693994","text":"#!/usr/bin/env python3\nimport configparser\nimport sys\n\nconfig = configparser.ConfigParser()\nconfig.read('/ops/utils/env_vars.ini')\n\n\ndef get_config(env: str, setting: str) -> str:\n \"\"\" Read value from config file \"\"\"\n return config.get(env, setting)\n\n\nif __name__ == \"__main__\":\n \"\"\" \n Helper function to parse the env_vars.ini file that's mapped in the docker compose. 
Usage:\n    python parse_config.py foss4g_ops github_oauth \n    \"\"\"\n    try:\n        env = sys.argv[1]\n        setting = sys.argv[2]\n    except IndexError:\n        sys.stderr.write(\"Env or setting not supplied\")\n        sys.exit(1)\n\n    setting_value = get_config(env, setting)\n    print(setting_value)\n","repo_name":"thinkWhere/foss4g-serverless-ops","sub_path":"ops/utils/parse_config.py","file_name":"parse_config.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70411158972","text":"'''\n    Should contain only Task methods.\n    avoid having any utility functions inside this file.\n    Please do not corrupt/pollute this file with unnecessary imports, or functions\n'''\n\nimport os\nfrom .models import Notification, CustomToken, FoodEvent, STATUS\nfrom celery import Celery, shared_task\nfrom celery.utils.log import get_task_logger\nfrom exponent_server_sdk import PushClient, PushMessage \nfrom django.core.mail import EmailMultiAlternatives\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.db.models import Q\nimport logging\n\nlogger = get_task_logger(__name__)\napp = Celery()\n\ndef send_push_message(user, title, message, notification_type):\n    try:\n        Notification.objects.create(user=user, title=title, message=message, notificationType=notification_type)\n        \n        if CustomToken.objects.filter(user=user).exists():\n            custom_token = CustomToken.objects.get(user=user)\n\n            try:\n                PushClient().publish(\n                    PushMessage(\n                        to=custom_token.expoPushToken,\n                        title=title,\n                        body=message,\n                    )\n                )\n            except Exception as e:\n                print('==========>',str(e))\n        else:\n            print('Custom token for user does not exist') \n    except Exception as e:\n        print('Cannot send message', str(e))\n\n@shared_task(name='checking_event_status')\ndef event_status_check():\n    try:\n        today_date = timezone.now()\n        active_food_events_list = FoodEvent.objects.filter(Q(eventStartDate__lte=today_date) & Q(eventEndDate__gte=today_date), active=False)\n        for food_events in active_food_events_list:\n            food_events.active = True\n            food_events.save()\n        expired_food_events_list = FoodEvent.objects.filter(Q(eventStartDate__gt=today_date) | Q(eventEndDate__lt=today_date), active=True)\n        for food_events in expired_food_events_list:\n            food_events.active = False\n            food_events.save()\n        return ({'success': True, 'message': 'Message is sent'})\n    except Exception as e:\n        return ({'success': False, 'message': str(e)})\n\n@shared_task(name='pending_events_email')\ndef pending_events_reminder():\n    try:\n        pending_event_list = FoodEvent.objects.filter(status=STATUS[2][0]).order_by('-eventStartDate')[:5]\n        detailslist = []\n        for pending_events in pending_event_list:\n            jobdetailscard = open(os.path.join(settings.PROJECT_DIR,'emailTemplates/EventDetailsCard.txt')).read()\n            details = jobdetailscard.replace('{{event_name}}',str(pending_events.name)).replace('{{event_food}}',str(pending_events.additionalInfo)).replace('{{event_location}}', str(pending_events.address)).replace('{{event_date}}',str(pending_events.eventStartDate.date())).replace('{{event_time}}',str(pending_events.eventStartDate.time())).replace('{{event_organizer}}', str(pending_events.createdBy.name))\n            detailslist.append(details) \n        emaildetailstext = open(os.path.join(settings.PROJECT_DIR,'emailTemplates/emaildetails.html'), mode = 'w')\n        emaildetailstext.writelines(detailslist)\n        emaildetailstext.close() \n        htmlstring = open(os.path.join(settings.PROJECT_DIR,'emailTemplates/emaildetails.html')).read()\n        subject = 
f'Approval Pending for New Food Events'\n\n email_from = settings.DEFAULT_SENDER\n recipient_list = ['srao@climatehealers.org','climatehealers@climatehealers.org']\n\n finalhtmlcontent = open(os.path.join(settings.PROJECT_DIR,'emailTemplates/PendingEventsNotification.html')).read()\n email_text = finalhtmlcontent.replace('{{base_url}}', settings.PRODUCTION_URL).replace('{{details}}', htmlstring)\n try:\n msg = EmailMultiAlternatives(subject=subject, body=email_text, from_email=email_from, to=recipient_list)\n msg.attach_alternative(email_text, \"text/html\")\n msg.send()\n return ({'success': True, 'message': 'Message is sent'})\n except Exception as e:\n return ({'success': False, 'error': str(e)})\n\n except Exception as e:\n return ({'success': False, 'error': str(e)})\n","repo_name":"ClimateHealers/foodhealers-backends","sub_path":"app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8507786031","text":"#!/usr/bin/python\n\nimport sys\nimport re\nimport math\n\nfilename = '/usr/local/google/home/skamens/advent_of_code_2020/day9/input.txt'\n\nlast25 = []\n\ndef check25(value):\n for i in last25[0:24] :\n for j in last25[1:] :\n if ((i != j) and ((i+j) == value)) :\n return True\n\n return False\n\n\n# Read the input\n\npreambleSize = 25\ncurrentPos = 0\npreambleRemaining = preambleSize\n\nkeyNumber = 0\n\nwith open(filename) as f_obj:\n\n for line in f_obj:\n\n if (preambleRemaining > 0) :\n last25.append(int(line))\n preambleRemaining -= 1\n continue\n\n # Now we've pre-seeded the 25, so now we can check\n\n if not check25(int(line)) :\n keyNumber = int(line)\n break\n\n last25[currentPos] = int(line)\n currentPos = (currentPos + 1) % preambleSize\n\n\n# \n# Part 2 - now we have the key number. Let's look for a contiguous range that\n# equals that number. 
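The loop below keeps a sliding window: it grows the window on the right and\n# shrinks it from the left whenever the running sum overshoots the key.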
\n#\n# Try to do it a little efficiently, just for fun\n\ncontiguousNumbers = []\n\nsumOfNumbers = 0\n\nwith open(filename) as f_obj:\n\n for line in f_obj:\n\n num = int(line)\n contiguousNumbers.append(num)\n sumOfNumbers += num\n\n if ((sumOfNumbers == keyNumber) and (len(contiguousNumbers) > 1)):\n print (contiguousNumbers)\n print(contiguousNumbers[0] + contiguousNumbers[-1])\n break\n\n while sumOfNumbers > keyNumber :\n print (contiguousNumbers, sumOfNumbers, keyNumber)\n sumOfNumbers -= contiguousNumbers.pop(0)\n\n if (sumOfNumbers == keyNumber) :\n print (\"DONE\", contiguousNumbers, min(contiguousNumbers) + max(contiguousNumbers))\n break\n\n \n\n","repo_name":"skamens/advent_of_code_2020","sub_path":"day9/encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22825050100","text":"import random\nimport PySimpleGUI as sg\nfrom Scripts.Algorithm_Functions.Base64_Functions import DeCode_Base64,EnCode_Base64\nfrom Scripts.Algorithm_Functions.Trash_Functions import Add_Trash,Remove_Trash\n\n\n#Decrypt a string using a key \ndef decrypt(string, key):\n try:\n key = DeCode_Base64(key)\n string =DeCode_Base64(string)\n\n string_length=len(string)\n decrypted_string = \"\"\n\n counter=0\n for i in range(string_length):\n decrypted_char = chr(ord(string[i]) - int(key[counter]))\n decrypted_string += decrypted_char\n\n counter=counter+1\n if(counter+1==len(key)):\n counter=0\n\n\n decrypted_string = Remove_Trash(decrypted_string)\n\n return decrypted_string\n\n#if cannot decrypt with given key return given string.\n except:\n sg.popup_ok('Error')\n\n#Encrypt a string in a specific method and give a decryption key\ndef encrypt(string):\n try:\n string = Add_Trash(string)\n\n encrypted_string =\"\"\n key=[]\n for char in string:\n randnum=random.randint(0, 9)\n encrypted_char = chr(ord(char) + randnum)\n key.append(randnum)\n encrypted_string += encrypted_char\n\n key =''.join(map(str,key))\n return EnCode_Base64(encrypted_string), EnCode_Base64(key)\n except:\n sg.popup_ok('Error')\n\ndef encrypt_with_key(string,key):\n try:\n string = Add_Trash(string)\n\n key = DeCode_Base64(key)\n\n string_length=len(string)\n\n encrypted_string =\"\"\n\n counter=0\n for i in range(string_length):\n encrypted_char = chr(ord(string[i]) + int(key[counter]))\n encrypted_string += encrypted_char\n\n counter=counter+1\n if(counter+1==len(key)):\n counter=0\n\n \n\n return EnCode_Base64(encrypted_string), EnCode_Base64(key)\n except:\n sg.popup_ok('Error')\n\n","repo_name":"Lashaka/String-Encrypter","sub_path":"Scripts/Cryptography.py","file_name":"Cryptography.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37600138792","text":"import re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n js_to_json,\n)\n\n\nclass OnDemandKoreaIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?ondemandkorea\\.com/(?P[^/]+)\\.html'\n _GEO_COUNTRIES = ['US', 'CA']\n _TESTS = [{\n 'url': 'https://www.ondemandkorea.com/ask-us-anything-e351.html',\n 'info_dict': {\n 'id': 'ask-us-anything-e351',\n 'ext': 'mp4',\n 'title': 'Ask Us Anything : Jung Sung-ho, Park Seul-gi, Kim Bo-min, Yang Seung-won - 09/24/2022',\n 'description': 'A talk show/game show with a school theme where celebrity guests appear as “transfer students.”',\n 'thumbnail': 
r're:^https?://.*\\.jpg$',\n        },\n        'params': {\n            'skip_download': 'm3u8 download'\n        }\n    }, {\n        'url': 'https://www.ondemandkorea.com/work-later-drink-now-e1.html',\n        'info_dict': {\n            'id': 'work-later-drink-now-e1',\n            'ext': 'mp4',\n            'title': 'Work Later, Drink Now : E01',\n            'description': 'Work Later, Drink First follows three women who find solace in a glass of liquor at the end of the day. So-hee, who gets comfort from a cup of soju af',\n            'thumbnail': r're:^https?://.*\\.png$',\n            'subtitles': {\n                'English': 'mincount:1',\n            },\n        },\n        'params': {\n            'skip_download': 'm3u8 download'\n        }\n    }]\n\n    def _real_extract(self, url):\n        video_id = self._match_id(url)\n        webpage = self._download_webpage(url, video_id, fatal=False)\n\n        if not webpage:\n            # Page sometimes returns captcha page with HTTP 403\n            raise ExtractorError(\n                'Unable to access page. You may have been blocked.',\n                expected=True)\n\n        if 'msg_block_01.png' in webpage:\n            self.raise_geo_restricted(\n                msg='This content is not available in your region',\n                countries=self._GEO_COUNTRIES)\n\n        if 'This video is only available to ODK PLUS members.' in webpage:\n            raise ExtractorError(\n                'This video is only available to ODK PLUS members.',\n                expected=True)\n\n        if 'ODK PREMIUM Members Only' in webpage:\n            raise ExtractorError(\n                'This video is only available to ODK PREMIUM members.',\n                expected=True)\n\n        title = self._search_regex(\n            r'class=[\"\\']episode_title[\"\\'][^>]*>([^<]+)',\n            webpage, 'episode_title', fatal=False) or self._og_search_title(webpage)\n\n        jw_config = self._parse_json(\n            self._search_regex((\n                r'(?P<options>{\\s*[\\'\"]tracks[\\'\"].*?})[)\\];]+$',\n                r'playlist\\s*=\\s*\\[(?P<options>.+)];?$',\n                r'odkPlayer\\.init.*?(?P<options>{[^;]+}).*?;',\n            ), webpage, 'jw config', flags=re.MULTILINE | re.DOTALL, group='options'),\n            video_id, transform_source=js_to_json)\n        info = self._parse_jwplayer_data(\n            jw_config, video_id, require_title=False, m3u8_id='hls',\n            base_url=url)\n\n        info.update({\n            'title': title,\n            'description': self._og_search_description(webpage),\n            'thumbnail': self._og_search_thumbnail(webpage)\n        })\n        return info\n","repo_name":"yt-dlp/yt-dlp","sub_path":"yt_dlp/extractor/ondemandkorea.py","file_name":"ondemandkorea.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":60520,"dataset":"github-code","pt":"78"}
+{"seq_id":"71876211452","text":"import os\nimport time\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom utils import load_data, save_history_plot\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout\nfrom tensorflow.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n\nBASE_DIR = str(int(np.ceil(time.time())))\n\nif not os.path.exists(BASE_DIR):\n    os.makedirs(BASE_DIR)\n    print(f'[INFO] {BASE_DIR} created')\n\nX, y = load_data('data.npy')\n\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nencoder = LabelEncoder()\ny_train = encoder.fit_transform(y_train.reshape(-1,))\ny_test = encoder.transform(y_test.reshape(-1,))\njoblib.dump(encoder, f'{BASE_DIR}\\\\labelencoder.pkl')\n\nclass_names = 
encoder.classes_\n\nprint(f'\\n\\t[INFO] Shape of x_train: {x_train.shape}')\nprint(f'\\t[INFO] Shape of y_train: {y_train.shape}')\nprint(f'\\t[INFO] Shape of x_test: {x_test.shape}')\nprint(f'\\t[INFO] Shape of y_test: {y_test.shape}')\n\ninput_shape = (x_train.shape[1], x_train.shape[2], 1)\n\nmodel = Sequential()\n\nmodel.add(Conv2D(16, 2, input_shape=input_shape, activation='relu'))\nmodel.add(MaxPooling2D(2))\n\nmodel.add(Conv2D(32, 2, activation='relu'))\nmodel.add(MaxPooling2D(2))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(64, 2, activation='relu'))\nmodel.add(MaxPooling2D(2))\n\nmodel.add(Conv2D(128, 2, activation='relu'))\nmodel.add(MaxPooling2D(2))\nmodel.add(Dropout(0.2))\n\nmodel.add(GlobalAveragePooling2D())\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(loss='sparse_categorical_crossentropy',\n              optimizer=Adam(lr=1e-3, decay=1e-5),\n              metrics=['sparse_categorical_accuracy'])\n\nmodel.summary(line_length=150)\n\nplot_model(model, to_file=f'{BASE_DIR}\\\\model.png', show_shapes=True,\n           dpi=200, expand_nested=True)\n\nmc = ModelCheckpoint(f'{BASE_DIR}\\\\model.h5', save_best_only=True,\n                     monitor='val_loss')\n\nes = EarlyStopping(patience=30, monitor='val_loss')\n\nhistory = model.fit(x_train, y_train, batch_size=32, epochs=150,\n                    validation_split=0.2, callbacks=[mc, es])\n\nmodel.save(f'{BASE_DIR}\\\\model.h5')\n\njson_config = model.to_json(indent=4)\nwith open(f'{BASE_DIR}\\\\model_config.json', 'w') as f:\n    f.write(json_config)\n\nmodel.save_weights(f'{BASE_DIR}\\\\weights.h5')\nsave_history_plot(history, BASE_DIR)\n\nprint('\\n\\t[INFO] Saved model and training history plot')\n\nloss, acc = model.evaluate(x_test, y_test, verbose=0)\nprint(f'\\t[INFO] Accuracy: {acc} Loss: {loss}')\n\ny_pred = np.argmax(model.predict(x_test), axis=1)\n\nmatrix = confusion_matrix(y_test, y_pred)\nmatrix = matrix / matrix.astype(float).sum(axis=0)\ndf = pd.DataFrame(matrix, index=class_names, columns=class_names)\n\nfig = plt.figure(figsize=(12, 12))\nhm = sns.heatmap(df, annot=True, cmap='coolwarm')\nhm.yaxis.set_ticklabels(hm.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=10)\nhm.xaxis.set_ticklabels(hm.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=10)\nplt.savefig(f'{BASE_DIR}\\\\confusion_matrix.png')\n\nprint('\\t[INFO] Saved confusion matrix plot')\n\nos.rename(BASE_DIR, BASE_DIR + '_loss_' + str(loss) + '__accuracy__' + str(acc))\nprint('\\t[DONE]')\n","repo_name":"Gautam-J/BirdCall-Identification","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"12854766914","text":"\"\"\"\nUnit tests for the docker state\n\"\"\"\n\nimport pytest\n\nimport salt.modules.dockermod as docker_mod\nimport salt.states.docker_image as docker_state\nfrom tests.support.mock import MagicMock, patch\n\n\n@pytest.fixture\ndef configure_loader_modules():\n    return {\n        docker_mod: {\"__context__\": {\"docker.docker_version\": \"\"}},\n        docker_state: {\"__opts__\": {\"test\": False}},\n    }\n\n\ndef test_present_already_local():\n    \"\"\"\n    According to the following sls,\n\n    .. code-block:: yaml
\n\n        image:latest:\n          docker_image.present:\n            - force: true\n\n    if ``image:latest`` is already downloaded locally, the state\n    should not report changes.\n    \"\"\"\n    docker_inspect_image = MagicMock(return_value={\"Id\": \"abcdefghijkl\"})\n    docker_pull = MagicMock(\n        return_value={\n            \"Layers\": {\"Already_Pulled\": [\"abcdefghijkl\"], \"Pulled\": []},\n            \"Status\": \"Image is up to date for image:latest\",\n            \"Time_Elapsed\": 1.1,\n        }\n    )\n    docker_list_tags = MagicMock(return_value=[\"image:latest\"])\n    docker_resolve_tag = MagicMock(return_value=\"image:latest\")\n    __salt__ = {\n        \"docker.list_tags\": docker_list_tags,\n        \"docker.pull\": docker_pull,\n        \"docker.inspect_image\": docker_inspect_image,\n        \"docker.resolve_tag\": docker_resolve_tag,\n    }\n    with patch.dict(docker_state.__dict__, {\"__salt__\": __salt__}):\n        ret = docker_state.present(\"image:latest\", force=True)\n    assert ret == {\n        \"changes\": {},\n        \"result\": True,\n        \"comment\": \"Image 'image:latest' was pulled, but there were no changes\",\n        \"name\": \"image:latest\",\n    }\n\n\ndef test_present_and_force():\n    \"\"\"\n    According to the following sls,\n\n    .. code-block:: yaml\n\n        image:latest:\n          docker_image.present:\n            - force: true\n\n    if ``image:latest`` is not downloaded and force is true, the state\n    should pull a new image successfully.\n    \"\"\"\n    docker_inspect_image = MagicMock(return_value={\"Id\": \"1234567890ab\"})\n    docker_pull = MagicMock(\n        return_value={\n            \"Layers\": {\"Pulled\": [\"abcdefghijkl\"]},\n            \"Status\": \"Image 'image:latest' was pulled\",\n            \"Time_Elapsed\": 1.1,\n        }\n    )\n    docker_list_tags = MagicMock(side_effect=[[], [\"image:latest\"]])\n    docker_resolve_tag = MagicMock(return_value=\"image:latest\")\n    __salt__ = {\n        \"docker.list_tags\": docker_list_tags,\n        \"docker.pull\": docker_pull,\n        \"docker.inspect_image\": docker_inspect_image,\n        \"docker.resolve_tag\": docker_resolve_tag,\n    }\n    with patch.dict(docker_state.__dict__, {\"__salt__\": __salt__}):\n        ret = docker_state.present(\"image:latest\", force=True)\n    assert ret == {\n        \"changes\": {\n            \"Layers\": {\"Pulled\": [\"abcdefghijkl\"]},\n            \"Status\": \"Image 'image:latest' was pulled\",\n            \"Time_Elapsed\": 1.1,\n        },\n        \"result\": True,\n        \"comment\": \"Image 'image:latest' was pulled\",\n        \"name\": \"image:latest\",\n    }\n","repo_name":"saltstack/salt","sub_path":"tests/pytests/unit/states/test_docker_image.py","file_name":"test_docker_image.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"}
+{"seq_id":"20605321284","text":"import torch\nfrom torch.nn import Module\n\nfrom ltron.config import Config\n\nfrom ltron_torch.models.padding import cat_padded_seqs\nfrom ltron_torch.models.positional_encoding import LearnedPositionalEncoding\nfrom ltron_torch.models.embedding import TileEmbedding, TokenEmbedding\nfrom ltron_torch.models.heads import LinearMultiheadDecoder\n\nclass HandTableEmbeddingConfig(Config):\n    tile_h = 16\n    tile_w = 16\n    tile_c = 3\n    table_h = 256\n    table_w = 256\n    hand_h = 96\n    hand_w = 96\n    \n    max_sequence_length = 1024\n    \n    embedding_dropout = 0.1\n    \n    token_vocabulary = 2\n    \n    def set_dependents(self):\n        assert self.table_h % self.tile_h == 0\n        assert self.table_w % self.tile_w == 0\n        self.table_tiles_h = self.table_h // self.tile_h\n        self.table_tiles_w = self.table_w // self.tile_w\n        self.table_tiles = self.table_tiles_h * self.table_tiles_w\n        \n        assert self.hand_h % self.tile_h == 0\n        assert self.hand_w % self.tile_w == 0\n        
self.hand_tiles_h = self.hand_h // self.tile_h\n self.hand_tiles_w = self.hand_w // self.tile_w\n self.hand_tiles = self.hand_tiles_h * self.hand_tiles_w\n \n self.spatial_tiles = self.table_tiles + self.hand_tiles\n\nclass HandTableEmbedding(Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n \n # build the tokenizers\n self.tile_embedding = TileEmbedding(\n config.tile_h,\n config.tile_w,\n config.tile_c,\n config.encoder_channels,\n config.embedding_dropout,\n )\n self.token_embedding = TokenEmbedding(\n config.token_vocabulary,\n config.encoder_channels,\n config.embedding_dropout,\n )\n \n # let's give this another look soon\n if self.config.factor_cursor_distribution:\n self.table_cursor_embedding = TokenEmbedding(\n config.table_decoder_pixels,\n config.encoder_channels,\n config.embedding_dropout,\n )\n self.table_polarity_embedding = TokenEmbedding(\n 2, config.encoder_channels, config.embedding_dropout)\n \n self.hand_cursor_embedding = TokenEmbedding(\n config.hand_decoder_pixels,\n config.encoder_channels,\n config.embedding_dropout,\n )\n self.hand_polarity_embedding = TokenEmbedding(\n 2, config.encoder_channels, config.embedding_dropout)\n \n # build the positional encodings\n self.spatial_position_encoding = LearnedPositionalEncoding(\n config.encoder_channels, config.spatial_tiles)\n self.temporal_position_encoding = LearnedPositionalEncoding(\n config.encoder_channels, config.max_sequence_length)\n \n def forward(self,\n table_tiles, table_t, table_yx, table_pad,\n hand_tiles, hand_t, hand_yx, hand_pad,\n token_x,\n table_cursor_yx,\n table_cursor_p,\n hand_cursor_yx,\n hand_cursor_p,\n token_t, token_pad,\n ):\n \n # linearize table_yx and hand_yx\n table_w = self.config.table_tiles_w\n table_yx = table_yx[...,0] * table_w + table_yx[...,1]\n hand_w = self.config.hand_tiles_w\n hand_yx = hand_yx[...,0] * hand_w + hand_yx[...,1]\n \n # cat table and hand tiles\n tile_x, tile_pad = cat_padded_seqs(\n table_tiles, hand_tiles, table_pad, hand_pad)\n tile_t, _ = cat_padded_seqs(table_t, hand_t, table_pad, hand_pad)\n tile_yx, _ = cat_padded_seqs(table_yx, hand_yx, table_pad, hand_pad)\n \n # make the tile embeddings\n tile_x = self.tile_embedding(tile_x)\n tile_pt = self.temporal_position_encoding(tile_t)\n tile_pyx = self.spatial_position_encoding(tile_yx)\n tile_x = tile_x + tile_pt + tile_pyx\n \n # make the tokens\n token_x = self.token_embedding(token_x)\n token_pt = self.temporal_position_encoding(token_t)\n token_x = token_x + token_pt\n \n if self.config.factor_cursor_distribution:\n table_cursor_yx = self.table_cursor_embedding(table_cursor_yx)\n #table_cursor_yx = table_cursor_yx + token_pt\n table_cursor_p = self.table_polarity_embedding(table_cursor_p)\n #table_cursor_p = table_cursor_p + token_pt\n # THIS IS ALL SO GROSS\n if table_cursor_yx.shape[0] == token_pt.shape[0]//2:\n table_pt = token_pt[::2]\n table_t = token_t[::2]\n table_pad = (token_pad/2).long()\n else:\n table_pt = token_pt\n table_t = token_t\n table_pad = token_pad\n table_x = table_cursor_yx + table_cursor_p + table_pt\n \n hand_cursor_yx = self.hand_cursor_embedding(hand_cursor_yx)\n #hand_cursor_yx = hand_cursor_yx + token_pt\n hand_cursor_p = self.hand_polarity_embedding(hand_cursor_p)\n #hand_cursor_p = hand_cursor_p + token_pt\n if hand_cursor_yx.shape[0] == token_pt.shape[0]//2:\n hand_pt = token_pt[::2]\n hand_t = token_t[::2]\n hand_pad = (token_pad/2).long()\n else:\n hand_pt = token_pt\n hand_t = token_t\n hand_pad = token_pad\n hand_x = hand_cursor_yx 
+ hand_cursor_p + hand_pt\n        \n        # all these cat_padded_seqs could probably be done more efficiently\n        # in a single function that rolls them all together at once\n        #table_x, table_pad = cat_padded_seqs(\n        #    table_cursor_yx, table_cursor_p, token_pad, token_pad)\n        #table_t, _ = cat_padded_seqs(\n        #    token_t, token_t, token_pad, token_pad)\n        #hand_x, hand_pad = cat_padded_seqs(\n        #    hand_cursor_yx, hand_cursor_p, token_pad, token_pad)\n        #hand_t, _ = cat_padded_seqs(\n        #    token_t, token_t, token_pad, token_pad)\n        \n        cursor_x, cursor_pad = cat_padded_seqs(\n            table_x, hand_x, table_pad, hand_pad)\n        cursor_t, _ = cat_padded_seqs(\n            table_t, hand_t, table_pad, hand_pad)\n        token_x, new_token_pad = cat_padded_seqs(\n            token_x, cursor_x, token_pad, cursor_pad)\n        token_t, _ = cat_padded_seqs(\n            token_t, cursor_t, token_pad, cursor_pad)\n        token_pad = new_token_pad\n        \n        # concatenate the tile and discrete tokens\n        x, pad = cat_padded_seqs(tile_x, token_x, tile_pad, token_pad)\n        t, _ = cat_padded_seqs(tile_t, token_t, tile_pad, token_pad)\n        \n        return x, t, pad\n","repo_name":"aaronwalsman/ltron-torch-eccv22","sub_path":"ltron_torch/models/hand_table_embedding.py","file_name":"hand_table_embedding.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"}
+{"seq_id":"41653511483","text":"import json\nimport os\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom detector import detect\n\ndet = detect.Detect()\n\n\ndef hello(request):\n    return render(request, 'index.html')\n\n\ndef upload(request):\n    data = {}\n    if request.method == \"POST\":\n        fp = request.FILES.get(\"file\")\n        # fp is the uploaded file object\n        if fp:\n            path = os.path.join('static/', 'img/' + fp.name)  # local save path for the upload; img is the folder under static/ that holds images\n            # fp.name  # the file name\n            # yield = fp.chunks()  # stream the file contents\n            # fp.read()  # read the file contents directly\n            if fp.multiple_chunks():  # large uploads (over 2.5MB) arrive in multiple chunks\n                # chunked (large-file) branch\n                file_yield = fp.chunks()  # write the file chunk by chunk\n                with open(path, 'wb') as f:\n                    for buf in file_yield:  # the else runs only when the for loop finishes without a break\n                        f.write(buf)\n                    else:\n                        data['code'] = 1\n            else:\n                with open(path, 'wb') as f:\n                    f.write(fp.read())\n                print(\"small file upload finished\")\n            # path = os.path.abspath(path)\n\n            data['code'] = 1\n            data['path'] = det.detect(path)\n    else:\n        data['code'] = 0\n    return HttpResponse(json.dumps(data), content_type=\"application/json,charset=utf-8\")\n","repo_name":"qq979249745/pedestrian_detector","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"30753989258","text":"import re\n\nf = open(\"reports/filter_spacer_count.txt\", \"r\")\n\nspacercount = f.readlines()\noutputAnnotation = open(\"reports/annotated_hits.txt\", \"w\")\noutputFastap = open(\"reports/hits_proteins.fasta\", \"w\")\n##### id_region {genomeid : start position of each filtered spacer} #####\nid_region = {}\n\nfor line in spacercount:\n    if bool(re.search('\\w+(?=.txt)', line)):\n        key = line.strip(\".txt\\n\")\n        id_region[key] = []\n        continue\n    else:\n        value = re.findall('[0-9]{3,}', line)\n        value = map(int, value)\n        # value = re.findall('\\[(.+)\\]', line)\n        for v in value:\n            id_region[key].append(v)\n\nsize = len(id_region)\ncount = 1\n\nfor key in id_region:\n    # keeping track of progress\n    print(\"Processing entry %i out of %i\\n\" % (count, size))\n    count += 1\n\n    # parsing annotation file\n    outputAnnotation.write(str(key) + \"\\n\")\n    annotation_file = 
open(\"annotations/\"+key+\".txt\", \"r\")\n annotation = annotation_file.read()\n annotation = annotation.split(\"FEATURES\")[1].split(\"ORIGIN\")[0].replace(\"\\n \", \"\").split(\"\\n\")\n\n # region_features{start position of feature : feature}\n region_features = {}\n for line in annotation:\n if bool(re.findall('\\d+\\.\\.\\d+', line)):\n start = re.findall('\\d+(?=\\.\\.\\d+)', line)[0]\n stop = re.findall('(?<=\\.\\.)\\d+', line)[0]\n start = int(start)\n stop = int(stop)\n region_features[start] = [stop, line]\n else:\n continue\n\n spacer_feature = {}\n # for key in id_region:\n for value in id_region[key]:\n for region in reversed(sorted(region_features)):\n if value > region :\n if value < region_features[region][0]:\n spacer_feature[value] = region_features[region][1]\n else:\n spacer_feature[value] = \"Not annotated\\n\"\n break\n \n for a in spacer_feature:\n outputAnnotation.write(str(a) + \"\\n\")\n outputFastap.write(\"\\n>\" + str(key))\n outputFastap.write(\"\\t\" + str(a) )\n line = spacer_feature[a].split(\"/\")\n for entry in line:\n if (\"Not annotated\" in entry):\n outputFastap.write(\"\\n\" + entry.rstrip(\"\\n\"))\n if (\"product\" in entry):\n outputFastap.write(\"\\t\" + entry)\n if (\"translation\" in entry):\n outputFastap.write(\"\\n\" + entry.strip(\"translation=\\\"\").rstrip(\"\\\"\"))\n line = \"\\n\\t\".join(line)\n\n outputAnnotation.write(line + \"\\n\\n\")\n annotation_file.close()\n\nf.close()\noutputAnnotation.close()\noutputFastap.close()\n\n","repo_name":"0mician/TheOmicians","sub_path":"CRISPR-Genomics/annotate-hits.py","file_name":"annotate-hits.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"40816356518","text":"# -*- coding: utf-8 -*-\n\"\"\"\nhttps://mitpress.mit.edu/sicp/full-text/book/book-Z-H-15.html#%_thm_2.32\n\"\"\"\nfrom Chapter2.themes.lisp_list_structured_data import lisp_list, cdr, append, car\nfrom Chapter2.themes.mapping_over_lists import map\nfrom utils import let\n\n\ndef subsets(s):\n if s is None:\n return lisp_list()\n with let(subsets(cdr(s))) as (rest,):\n return append(\n rest,\n tuple((None,None)) if rest is None else\n map(\n lambda x: append(lisp_list(car(s)), x),\n rest\n )\n )\n\ndef run_the_magic():\n print(subsets(lisp_list(1, 2, 3)))\n\n\nif __name__ == '__main__':\n run_the_magic()\n","repo_name":"aoyono/sicpy","sub_path":"Chapter2/exercises/exercise2_32.py","file_name":"exercise2_32.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7364261786","text":"#import libraries\nimport pygame\nimport random\nimport os\n\n\n# Constants\nWIDTH = 1280\nHEIGHT = 800\nFPS = 60\nSEA_BLUE = (0, 105, 148)\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\n\n# Setup folders for assets\ngame_folder = os.path.dirname(__file__)\nimage_folder = os.path.join(game_folder, \"img\")\n\n\n# Create sprites\nclass Player(pygame.sprite.Sprite):\n # Player\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(os.path.join(image_folder, \"fishTile_074.png\")).convert() # image for the sprite\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect() # rectangle that encloses the sprite\n self.rect.center = (int(WIDTH/2), int(HEIGHT/2)) # Set sprite location to center of screen\n self.speedx= 0\n self.speedy = 0\n\n def update(self):\n self.speedx = 0\n self.speedy = 0\n keystate = 
pygame.key.get_pressed()\n if keystate[pygame.K_LEFT] or keystate[pygame.K_a]:\n self.speedx = -5\n if keystate[pygame.K_RIGHT] or keystate[pygame.K_d]:\n self.speedx = 5\n if keystate[pygame.K_UP] or keystate[pygame.K_w]:\n self.speedy = -2\n if keystate[pygame.K_DOWN] or keystate[pygame.K_s]:\n self.speedy = 2\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.left > WIDTH:\n self.rect.right = 0\n if self.rect.right < 0:\n self.rect.left = WIDTH\n if self.rect.top < 0:\n self.rect.top = 0\n if self.rect.bottom > HEIGHT:\n self.rect.bottom = HEIGHT\n\n\n# Setup pygame\npygame.init()\npygame.mixer.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Always a Bigger Fish\")\nclock = pygame.time.Clock()\nrunning = True\n\n# Create sprite group\nall_sprites = pygame.sprite.Group()\nplayer = Player()\nall_sprites.add(player)\n\n\nwhile running:\n # Add ability to close game\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n all_sprites.update()\n\n # Draw\n screen.fill(SEA_BLUE)\n all_sprites.draw(screen)\n\n # Display game\n pygame.display.flip()\n\n clock.tick(FPS) # 60 fps\n\npygame.quit()","repo_name":"GitItRachel/PygameHackathon","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7112856941","text":"import pygame, sys, random\n\npygame.init()\nscreen = pygame.display.set_mode((1280,720))\nclock = pygame.time.Clock()\npygame.mouse.set_visible(True)\n\n\nbackground4_png = pygame.image.load('background4.png')\ntrampoline3_png = pygame.image.load('trampoline3.png')\ng3_png = pygame.image.load('g3.png')\ng1falling_png = pygame.image.load('g1falling.png')\ng2_png = pygame.image.load('g2.png')\n#cloud1_png = pygame.image.load('cloud1.png')\nclouds_png = pygame.image.load('clouds.png')\n#g4_png = pygame.image.load('g4.png')\nplaybutton_png = pygame.image.load('playbutton.png')\n\n\n#game_font = pygame.font.Font(None,60)\n#text_surface = game_font.render('GAME OVER',True,(0,240,170))\n#text_rect = text_surface.get_rect(center = (640,360))\n\npb_pos_x = 0\npb_pos_y = 0\n\nt1_pos_x = 550\nt1_pos_y = 550\n\ntrampoline_rect=(t1_pos_x,t1_pos_y)\n\ng3_pos_x = 550\ng3_pos_y = 300\n#g3_rect=(g3_pos_x,g3_pos_y)\ng3_rect=(t1_pos_x,t1_pos_y)\n\n\ng3_top_max_pos = 50\ng3_bottom_max_pos = g3_pos_y\n\nspeed= -20\n\ncollided = True\nstarted = False\ncan_miss = False\n\nplaybutton_rect = playbutton_png.get_rect(center =(pb_pos_x,pb_pos_y))\n\nwhile True:\n g3_rect=g3_png.get_rect(center =(g3_pos_x,g3_pos_y))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEMOTION:\n x, y = event.pos\n trampoline_rect = trampoline3_png.get_rect(center =(x,t1_pos_y))\n if x - g3_pos_x >-250 and x - g3_pos_x <90 :\n collided = True\n elif started:\n collided = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if playbutton_rect.collidepoint(event.pos):\n started = True\n can_miss = True\n\n screen.blit(background4_png,(0,0))\n screen.blit(trampoline3_png,(trampoline_rect))\n screen.blit(clouds_png,(0,0))\n if not started:\n screen.blit(playbutton_png,(0,0))\n\n if collided and started:\n screen.blit(g3_png,g3_rect)\n elif can_miss:\n screen.blit(g1falling_png,(0,0))\n started = False\n\n\n if g3_pos_y <= g3_top_max_pos or g3_pos_y > g3_bottom_max_pos :\n speed = speed * -1\n g3_pos_y = g3_pos_y + speed\n\n #if g1_pos_y > 
g1_top_max_pos:\n #g1_pos_y = g1_pos_y - 1\n #if g1_pos_y <= g1_top_max_pos:\n #g1_pos_y = g1_pos_y + 1\n\n\n pygame.display.update()\n clock.tick(120)\n","repo_name":"pariganakakreeda/jumpoline","sub_path":"jumpoline.py","file_name":"jumpoline.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37729993222","text":"import importlib\nimport logging\nimport os\nimport sys\nfrom collections import namedtuple\nfrom typing import Iterable, List, Optional, Tuple\n\n\n_LOGGER = logging.getLogger(__name__)\n\n__all__ = [\"SupportedTasks\", \"AliasedTask\"]\n\n\nclass AliasedTask:\n \"\"\"\n A task that can have multiple aliases to match to.\n For example, question_answering which can alias to qa as well\n\n :param name: the name of the task such as question_answering or text_classification\n :param aliases: the aliases the task can go by in addition to the name such as\n qa, glue, sentiment_analysis, etc\n \"\"\"\n\n def __init__(self, name: str, aliases: List[str]):\n self._name = name\n self._aliases = aliases\n\n @property\n def name(self) -> str:\n \"\"\"\n :return: the name of the task such as question_answering\n \"\"\"\n return self._name\n\n @property\n def aliases(self) -> List[str]:\n \"\"\"\n :return: the aliases the task can go by such as qa, glue, sentiment_analysis\n \"\"\"\n return self._aliases\n\n def matches(self, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether the given instance matches.\n Checks the current name as well as any aliases.\n Everything is compared at lower case and \"-\" and whitespace\n are replaced with \"_\".\n :return: True if task does match the current instance, False otherwise\n \"\"\"\n task = task.lower().replace(\"-\", \"_\")\n\n # replace whitespace with \"_\"\n task = \"_\".join(task.split())\n\n return task == self.name or task in self.aliases\n\n\nclass SupportedTasks:\n \"\"\"\n The supported tasks in the DeepSparse pipeline and system\n \"\"\"\n\n nlp = namedtuple(\n \"nlp\",\n [\n \"question_answering\",\n \"text_classification\",\n \"token_classification\",\n \"zero_shot_text_classification\",\n \"transformers_embedding_extraction\",\n ],\n )(\n question_answering=AliasedTask(\"question_answering\", [\"qa\"]),\n text_classification=AliasedTask(\n \"text_classification\", [\"glue\", \"sentiment_analysis\"]\n ),\n token_classification=AliasedTask(\"token_classification\", [\"ner\"]),\n zero_shot_text_classification=AliasedTask(\"zero_shot_text_classification\", []),\n transformers_embedding_extraction=AliasedTask(\n \"transformers_embedding_extraction\", []\n ),\n )\n\n chat = namedtuple(\"chat\", [\"chatbot\", \"chat\"])(\n chatbot=AliasedTask(\"chatbot\", []), chat=AliasedTask(\"chat\", [])\n )\n text_generation = namedtuple(\n \"text_generation\", [\"text_generation\", \"opt\", \"bloom\"]\n )(\n text_generation=AliasedTask(\"text_generation\", []),\n opt=AliasedTask(\"opt\", []),\n bloom=AliasedTask(\"bloom\", []),\n )\n code_generation = namedtuple(\"code_generation\", [\"code_generation\", \"codegen\"])(\n code_generation=AliasedTask(\"code_generation\", []),\n codegen=AliasedTask(\"codegen\", []),\n )\n\n image_classification = namedtuple(\"image_classification\", [\"image_classification\"])(\n image_classification=AliasedTask(\n \"image_classification\",\n [\"image_classification\"],\n ),\n )\n\n yolo = namedtuple(\"yolo\", [\"yolo\"])(\n yolo=AliasedTask(\"yolo\", [\"yolo\"]),\n )\n yolov8 = 
namedtuple(\"yolov8\", [\"yolov8\"])(\n yolov8=AliasedTask(\"yolov8\", [\"yolov8\"]),\n )\n yolact = namedtuple(\"yolact\", [\"yolact\"])(\n yolact=AliasedTask(\"yolact\", [\"yolact\"]),\n )\n\n haystack = namedtuple(\"haystack\", [\"information_retrieval_haystack\"])(\n information_retrieval_haystack=AliasedTask(\n \"information_retrieval_haystack\", [\"haystack\"]\n ),\n )\n embedding_extraction = namedtuple(\"embedding_extraction\", [\"embedding_extraction\"])(\n embedding_extraction=AliasedTask(\n \"embedding_extraction\", [\"embedding_extraction\"]\n ),\n )\n open_pif_paf = namedtuple(\"open_pif_paf\", [\"open_pif_paf\"])(\n open_pif_paf=AliasedTask(\"open_pif_paf\", [\"open_pif_paf\"]),\n )\n\n all_task_categories = [\n nlp,\n image_classification,\n yolo,\n yolov8,\n yolact,\n haystack,\n embedding_extraction,\n open_pif_paf,\n text_generation,\n chat,\n code_generation,\n ]\n\n @classmethod\n def check_register_task(\n cls, task: str, extra_tasks: Optional[Iterable[str]] = None\n ):\n \"\"\"\n :param task: task name to validate and import dependencies for\n :param extra_tasks: valid task names that are not included in supported tasks.\n i.e. tasks registered to Pipeline at runtime\n \"\"\"\n if task == \"custom\":\n # custom task, register the CustomPipeline\n import deepsparse.pipelines.custom_pipeline # noqa: F401\n\n elif cls.is_text_generation(task):\n import deepsparse.transformers.pipelines.text_generation # noqa: F401\n\n elif cls.is_chat(task):\n import deepsparse.transformers.pipelines.chat # noqa: F401\n\n elif cls.is_code_generation(task):\n import deepsparse.transformers.pipelines.code_generation # noqa: F401\n\n elif cls.is_nlp(task):\n # trigger transformers pipelines to register with Pipeline.register\n import deepsparse.transformers.pipelines # noqa: F401\n\n elif cls.is_image_classification(task):\n # trigger image classification pipelines to\n # register with Pipeline.register\n import deepsparse.image_classification.pipelines # noqa: F401\n\n elif cls.is_yolact(task):\n # trigger yolo pipelines to register with Pipeline.register\n import deepsparse.yolact.pipelines # noqa: F401\n\n elif cls.is_yolo(task):\n # trigger yolo pipelines to register with Pipeline.register\n import deepsparse.yolo.pipelines # noqa: F401\n\n elif cls.is_yolov8(task):\n # trigger yolo pipelines to register with Pipeline.register\n import deepsparse.yolov8.pipelines # noqa: F401\n\n elif cls.is_haystack(task):\n # trigger haystack pipeline as well as transformers pipelines to\n # register with Pipeline.register\n import deepsparse.transformers.haystack # noqa: F401\n\n elif cls.is_embedding_extraction(task):\n # trigger embedding_extraction pipelines to register with\n # Pipeline.register\n import deepsparse.pipelines.embedding_extraction # noqa :F401\n\n elif cls.is_open_pif_paf(task):\n # trigger embedding_extraction pipelines to register with\n # Pipeline.register\n import deepsparse.open_pif_paf.pipelines # noqa :F401\n\n all_tasks = set(cls.task_names() + (list(extra_tasks or [])))\n if task not in all_tasks:\n raise ValueError(\n f\"Unknown Pipeline task {task}. 
Currently supported tasks are \"\n f\"{list(all_tasks)}\"\n )\n\n @classmethod\n def is_chat(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is a chat task\n :return: True if it is a chat task, False otherwise\n \"\"\"\n return any(chat_task.matches(task) for chat_task in cls.chat)\n\n @classmethod\n def is_text_generation(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is a text generation task\n such as codegen\n :return: True if it is a text generation task, False otherwise\n \"\"\"\n return any(\n text_generation_task.matches(task)\n for text_generation_task in cls.text_generation\n )\n\n @classmethod\n def is_code_generation(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is a text generation task\n such as codegen\n :return: True if it is a text generation task, False otherwise\n \"\"\"\n return any(\n code_generation_task.matches(task)\n for code_generation_task in cls.code_generation\n )\n\n @classmethod\n def is_nlp(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is an nlp task\n such as question_answering\n :return: True if it is an nlp task, False otherwise\n \"\"\"\n return any([nlp_task.matches(task) for nlp_task in cls.nlp])\n\n @classmethod\n def is_cv(cls, task: str) -> bool:\n return (\n cls.is_yolo(task)\n or cls.is_yolov8(task)\n or cls.is_yolact(task)\n or cls.is_image_classification(task)\n or cls.is_open_pif_paf(task)\n )\n\n @classmethod\n def is_image_classification(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is an image\n classification task\n :return: True if it is an image classification task, False otherwise\n \"\"\"\n return any([ic_task.matches(task) for ic_task in cls.image_classification])\n\n @classmethod\n def is_yolo(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is an image\n segmentation task using YOLO\n :return: True if it is an segmentation task using YOLO, False otherwise\n \"\"\"\n return any([yolo_task.matches(task) for yolo_task in cls.yolo])\n\n @classmethod\n def is_yolov8(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is an image\n segmentation task using YOLOv8\n :return: True if it is an segmentation task using YOLOv8, False otherwise\n \"\"\"\n return any([yolov8_task.matches(task) for yolov8_task in cls.yolov8])\n\n @classmethod\n def is_yolact(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is an image\n segmentation task using YOLO\n :return: True if it is an segmentation task using YOLO, False otherwise\n \"\"\"\n return any([yolact_task.matches(task) for yolact_task in cls.yolact])\n\n @classmethod\n def is_haystack(cls, task: str) -> bool:\n \"\"\"\n :param task: the name of the task to check whether it is a haystack task\n :return: True if it is a haystack task, False otherwise\n \"\"\"\n return any([haystack_task.matches(task) for haystack_task in cls.haystack])\n\n @classmethod\n def is_embedding_extraction(cls, task):\n \"\"\"\n :param task: the name of the task to check whether it is an\n embedding_extraction task\n :return: True if it is an embedding_extraction task, False otherwise\n \"\"\"\n return any(\n embedding_extraction_task.matches(task)\n for embedding_extraction_task in cls.embedding_extraction\n )\n\n @classmethod\n def is_open_pif_paf(cls, task):\n \"\"\"\n :param task: the name 
of the task to check whether it is an\n            open_pif_paf task\n        :return: True if it is an open_pif_paf task, False otherwise\n        \"\"\"\n        return any(\n            open_pif_paf_task.matches(task) for open_pif_paf_task in cls.open_pif_paf\n        )\n\n    @classmethod\n    def task_names(cls):\n        task_names = [\"custom\"]\n        for task_category in cls.all_task_categories:\n            for task in task_category:\n                unique_aliases = (\n                    alias for alias in task._aliases if alias != task._name\n                )\n                task_names += (task._name, *unique_aliases)\n        return task_names\n\n\ndef dynamic_import_task(module_or_path: str) -> str:\n    \"\"\"\n    Dynamically imports `module` with importlib, and returns the `TASK`\n    attribute on the module (something like `importlib.import_module(module).TASK`).\n\n    Example contents of `module`:\n    ```python\n    from deepsparse.pipeline import Pipeline\n    from deepsparse.transformers.pipelines.question_answering import (\n        QuestionAnsweringPipeline,\n    )\n\n    TASK = \"my_qa_task\"\n    Pipeline.register(TASK)(QuestionAnsweringPipeline)\n    ```\n\n    NOTE: this modifies `sys.path`.\n\n    :raises FileNotFoundError: if path does not exist\n    :raises RuntimeError: if the imported module does not contain `TASK`\n    :raises RuntimeError: if the module doesn't register the task\n    :return: The task from the imported module.\n    \"\"\"\n    parent_dir, module_name = _split_dir_and_name(module_or_path)\n    if not os.path.exists(os.path.join(parent_dir, module_name + \".py\")):\n        raise FileNotFoundError(\n            f\"Unable to find file for {module_or_path}. \"\n            f\"Looked for {module_name}.py under {parent_dir if parent_dir else '.'}\"\n        )\n\n    # add parent_dir to sys.path so we can import the file as a module\n    sys.path.append(os.curdir)\n    if parent_dir:\n        _LOGGER.info(f\"Adding {parent_dir} to sys.path\")\n        sys.path.append(parent_dir)\n\n    # do the import\n    _LOGGER.info(f\"Importing '{module_name}'\")\n    module_or_path = importlib.import_module(module_name)\n\n    if not hasattr(module_or_path, \"TASK\"):\n        raise RuntimeError(\n            \"When using --task import:<module>, \"\n            \"module must set the `TASK` attribute.\"\n        )\n\n    task = getattr(module_or_path, \"TASK\")\n    _LOGGER.info(f\"Using task={repr(task)}\")\n\n    return task\n\n\ndef _split_dir_and_name(module_or_path: str) -> Tuple[str, str]:\n    \"\"\"\n    Examples:\n    - `a` -> `(\"\", \"a\")`\n    - `a.b` -> `(\"a\", \"b\")`\n    - `a.b.c` -> `(\"a/b\", \"c\")`\n\n    :return: module split into directory & name\n    \"\"\"\n    if module_or_path.endswith(\".py\"):\n        # assume path\n        split_char = os.sep\n        module_or_path = module_or_path.replace(\".py\", \"\")\n    else:\n        # assume module\n        split_char = \".\"\n    *dirs, module_name = module_or_path.split(split_char)\n    parent_dir = os.sep if dirs == [\"\"] else os.sep.join(dirs)\n    return parent_dir, module_name\n","repo_name":"neuralmagic/deepsparse","sub_path":"src/deepsparse/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":14097,"program_lang":"python","lang":"en","doc_type":"code","stars":2498,"dataset":"github-code","pt":"78"}
+{"seq_id":"35244669941","text":"import sys\nfrom heapq import heappush, heappop\ninput = sys.stdin.readline\n\n\ndef find(idx, i):\n    while i != idx[i]:\n        idx[i] = idx[idx[i]]\n        i = idx[i]\n    return i\n\n\ndef union(idx, x, y):\n    xset = find(idx, x)\n    yset = find(idx, y)\n    if xset != yset:\n        idx[xset] = yset\n        return True\n    else:\n        return False\n\n\ndef kruskal(graph, n_node):\n    idx = list(range(n_node + 1))\n    weight = 0\n    cnt = n_node - 1\n    while cnt > 0:\n        c, a, b = heappop(graph)\n        if union(idx, a, b):\n            weight += c\n            cnt -= 1\n    return weight\n\n\ndef 
main():\n n_node, n_edge = map(int, input().split())\n\n graph = []\n for _ in range(n_edge):\n a, b, c = map(int, input().split())\n heappush(graph, (c, a, b))\n\n print(kruskal(graph, n_node))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lapis42/boj","sub_path":"boj1197_kruskal_mst.py","file_name":"boj1197_kruskal_mst.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"972092792","text":"from rdflib import Graph, Namespace, URIRef, Literal, exceptions\nimport rdflib\nimport rdflib.plugin\nfrom Framework.Data.SpatialRelation import SpatialRelation\nfrom Framework.Data.ResolutionForTransformation import ResolutionForTransformation\nfrom Framework.Data.Data import Data\nimport Framework.namespace_util as NSUtil\n\nclass Transformation:\n def __init__(self,transformation, domain_path, base_ontology_path, extention_ontology_path, domain_supported_data_streams):\n self.domain_path = domain_path\n self.base_ontology_path =base_ontology_path\n self.extention_ontology_path = extention_ontology_path\n self.spatial_relations = None\n self.time_relations = None\n self.transformation = transformation\n self.model_requeued_data_types = None\n self.RDF = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')\n self.PRIVVULN = Namespace('https://ontology.hviidnet.com/2020/01/03/privacyvunl.ttl#')\n self.PRIVVULNV2 = Namespace('https://ontology.hviidnet.com/2020/01/03/privacyvunlV2.ttl#')\n self.domain_supported_data_streams = domain_supported_data_streams\n self.template_name = None\n self.template_output_subject = None\n self.template_output_data_type = None\n self.template_output_domain_data_type = None\n\n\n def get_template_output_subject(self):\n if self.template_output_subject is None:\n self.template_output_subject, self.template_output_data_type, self.template_output_domain_data_type = self._find_output_for_template()\n return self.template_output_subject\n\n def get_template_template_output_data_type(self):\n if self.template_output_data_type is None:\n self.template_output_subject, self.template_output_data_type, self.template_output_domain_data_type = self._find_output_for_template()\n return self.template_output_data_type\n\n def get_template_output_domain_data_type(self):\n if self.template_output_domain_data_type is None:\n self.template_output_subject, self.template_output_data_type, self.template_output_domain_data_type = self._find_output_for_template()\n return self.template_output_domain_data_type\n\n def get_spatial_relations_for_transformation(self):\n if self.spatial_relations is None:\n self.spatial_relations = self._find_spatial_resolution_for_transformation()\n return self.spatial_relations\n\n def get_time_relations_for_transformation(self):\n if self.time_relations is None:\n self.time_relations = self._find_time_resolution_for_transformation()\n return self.time_relations\n\n def get_model_requeued_data_types(self):\n if self.model_requeued_data_types is None:\n self.model_requeued_data_types = self._find_data_types_for_template()\n return self.model_requeued_data_types\n\n def get_template_name(self):\n if self.template_name is None:\n self.template_name = self._find_template_name()\n return self.template_name\n\n def _find_spatial_resolution_for_transformation(self):\n transformation_temp = self.transformation\n transformation_temp.parse(self.domain_path)\n transformation_temp.parse(self.extention_ontology_path)\n\n 
transformation_temp.parse(self.base_ontology_path)\n\n q = rdflib.plugins.sparql.prepareQuery(\"\"\"\n SELECT ?data ?transformationName ?input ?output\n WHERE {\n ?dataTypes rdfs:subClassOf pv:Data .\n ?data rdf:type ?dataTypes .\n ?req pv:feeds ?data .\n ?req rdf:type pv2:Constraint .\n ?req pv:feeds ?transformationName .\n ?transformationName rdf:type pv:Transformation .\n\n ?srTypes rdf:type pv2:SpatialResolution .\n ?srTypes pv2:spatialInput ?input .\n ?srTypes pv2:spatialOutput ?output .\n ?req pv:feeds ?srTypes\n }\n \"\"\"\n ,\n initNs = NSUtil.get_binding_namespaces()\n )\n ro = transformation_temp.query(q) #, initBindings={'transformation': transformation_name}\n spatial_relations = {}\n\n for row in ro:\n if not row[0] in spatial_relations : spatial_relations[row[0]] = []\n spatial_relations[row[0]].append(SpatialRelation(row[0],row[1], row[2],row[3]))\n return spatial_relations\n\n def _find_time_resolution_for_transformation(self):\n transformation_temp = self.transformation\n transformation_temp.parse(self.domain_path)\n transformation_temp.parse(self.extention_ontology_path)\n\n transformation_temp.parse(self.base_ontology_path)\n\n q = rdflib.plugins.sparql.prepareQuery(\"\"\"\n SELECT ?req ?data ?temporalResolution ?spatialResolution ?transformationName ?trTypes ?input ?output ?trName\n WHERE {\n ?dataTypes rdfs:subClassOf pv:Data .\n ?data rdf:type ?dataTypes .\n ?req pv:feeds ?data .\n ?req rdf:type pv2:Constraint .\n ?req pv2:TemporalResolution ?temporalResolution .\n # ?req pv2:SpatialResolution ?spatialResolution .\n ?req pv:feeds ?transformationName .\n ?transformationName rdf:type pv:Transformation .\n\n ?trTypes rdfs:subClassOf pv2:TimeResolution .\n ?trName rdf:type ?trTypes .\n ?trName pv2:TimeInput ?input .\n ?trName pv2:TimeOutput ?output .\n ?req pv:feeds ?trName\n }\n \"\"\"\n ,\n initNs = NSUtil.get_binding_namespaces()\n )\n ro = transformation_temp.query(q) #, initBindings={'transformation': transformation_name}\n dataTypes = []\n\n for row in ro:\n dataTypes.append(ResolutionForTransformation(row[0],row[1],float(row[2].value),row[3],row[4],row[5],float(row[6].value),float(row[7].value),row[8]))\n return dataTypes\n\n def _find_data_types_for_template(self):\n constraint_List = self._find_constraints_data_types_for_template(self.transformation)\n model_data_types = []\n for cName in constraint_List:\n data_type = self._find_used_data_input_for_constraints(self.transformation,cName)\n if data_type.domain_data_type:\n model_data_types.append(data_type)\n return model_data_types\n\n def _find_constraints_data_types_for_template(self,template):\n constraint_List = []\n for s in template.subjects(self.RDF.type, self.PRIVVULNV2.Constraint):\n if (s, self.PRIVVULN.feeds, self.get_template_name()) in template:\n constraint_List.append(s)\n else:\n print(\"Constraint element %s does not ref to the template model\"%s, \" in template %s\"%self.template_name)\n return constraint_List\n\n def _find_used_data_input_for_constraints(self,template,name_of_constraint):\n dataType = None\n temporalResolution = None\n spatialResolution = None\n for o in template.objects(name_of_constraint,self.PRIVVULN.feeds):\n if o in self.domain_supported_data_streams:\n dataType = o\n break\n if dataType is None:\n print(name_of_constraint, \" uses not support data stream or no stream defined\")\n else:\n try:\n #Only one transformation or PrivacyAttacks per model.\n temporalResolution = template.value(predicate = self.PRIVVULNV2.TemporalResolution, subject=name_of_constraint, any = 
False)\n except rdflib.exceptions.UniquenessError:\n return\n spatialResolutions = []\n for spatialResolution in template.objects(name_of_constraint,self.PRIVVULNV2.spatialRequirement):\n spatialResolutions.append(spatialResolution)\n temporalResolution = float(temporalResolution.value) if temporalResolution is not None else None\n return Data(dataType, temporalResolution, spatial_resolutions=spatialResolutions)\n\n def _find_template_name(self):\n transformationName = \"\"\n try:\n #Only one transformation\n transformationName = self.transformation.value(predicate = self.RDF.type, object = self.PRIVVULN.Transformation , any = False, default=\"\")\n except rdflib.exceptions.UniquenessError:\n return None\n return transformationName\n\n def _find_output_for_template(self):\n model_temp = self.transformation\n model_temp.parse(self.domain_path)\n\n model_temp.parse(self.base_ontology_path)\n\n q = rdflib.plugins.sparql.prepareQuery(\"\"\"\n SELECT ?outputSubject ?dataType ?outputDataType\n WHERE {\n ?dataType rdfs:subClassOf pv:Data .\n ?outputSubject rdf:type ?dataType .\n\n ?transformationName rdf:type pv:Transformation .\n ?transformationName pv:feeds ?outputSubject .\n ?outputSubject rdf:type ?outputDataType .\n ?outputDataType rdf:type ?dataType .\n }\n \"\"\",\n initNs = NSUtil.get_binding_namespaces())\n\n ro = model_temp.query(q)\n dataTypes = []\n\n for row in ro:\n return row[0], row[1], row[2]","repo_name":"EBC-Annex-79/Tool-chain_for_supporting_privacy_risk_assessments","sub_path":"Framework/Data/Transformation.py","file_name":"Transformation.py","file_ext":"py","file_size_in_byte":9524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26586302386","text":"# Here we'll use the module names_function_1\n\nfrom names_function_1 import get_formatted_name # We import the function we have in that program\n\nprint(\"Enter 'q' to exit at any time.\")\n\nwhile True:\n first = input(\"\\nPlease enter your first name: \")\n if first == 'q':\n break\n\n last = input(\"\\nPlease enter your last name: \")\n if last == 'q':\n break\n\n middle = input(\"\\nDo you have a middle name? 
If so type it, otherwise press Enter: \")\n\n formatted_name = get_formatted_name(first, last, middle)\n print(f\"\\tNeatly formatted name: {formatted_name}\")","repo_name":"dnewbie25/Python-Quick-Reference","sub_path":"Example Exercises/Basic Exercises/Testing Code/names_2.py","file_name":"names_2.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74337157051","text":"import unittest\n\nfrom tournament_winner import tournament_winner\n\n\nclass TestTournamentWinner(unittest.TestCase):\n def test_works(self):\n competitions = [\n ['Panthers', 'Lionesses'],\n ['Lionesses', 'Dolphins'],\n ['Dolphins', 'Panthers']\n ]\n results = [0, 0, 1]\n\n winner = tournament_winner(competitions, results)\n\n self.assertEqual('Dolphins', winner)\n","repo_name":"kenny-cfg/tournament-practice-coding","sub_path":"test_tournament_winner.py","file_name":"test_tournament_winner.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3835538611","text":"import torch\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision\nimport torch.utils.data as data_utils\n\n\ndef load_cifar_10_other(augment=True, batch_size=64):\n # print('augment:', augment, 'batch_size:', batch_size) # true, 64\n # Data loading code\n normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],\n std=[x/255.0 for x in [63.0, 62.1, 66.7]])\n\n if augment:\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n else:\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n\n kwargs = {'num_workers': 1, 'pin_memory': True}\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, download=True,\n transform=transform_train),\n batch_size=batch_size, shuffle=True, **kwargs)\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, transform=transform_test),\n batch_size=batch_size, shuffle=True, **kwargs)\n \n # print('trainloader.data.shape:', train_loader.dataset)\n \n return train_loader, val_loader\n\ndef load_cifar_10(batch_size=64, perc_size=1):\n # mean = [0.49139968, 0.48215841, 0.44653091]\n mean = [x/255.0 for x in [125.3, 123.0, 113.9]]\n # mean = [0.5, 0.5, 0.5]\n # stdv = [0.24703223, 0.24348513, 0.26158784]\n stdv = [x/255.0 for x in [63.0, 62.1, 66.7]]\n # stdv = [0.5, 0.5, 0.5]\n train_transform = transforms.Compose([\n # transforms.RandomRotation(10),\n # transforms.RandomAffine(0, shear=10, scale=(0.8,1.2)),\n # transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean, stdv)])\n \n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, stdv)])\n \n batch_size = batch_size # 4 \n\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)\n # trainset = data_utils.Subset(trainset, torch.arange(int(trainset.data.shape[0]*perc_size)))\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True)\n # 
print('trainloader.data.shape:', trainloader.dataset)\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)\n # testset = data_utils.Subset(testset, torch.arange(int(testset.data.shape[0]*perc_size)))\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=True, num_workers=1, pin_memory=True)\n # print('testloader.data.shape:', testloader.dataset)\n classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n # return trainloader, testloader, classes\n return trainloader, testloader\n\ndef load_mnist(batch_size=128, perc_size=1):\n \n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n \n dataset1 = torchvision.datasets.MNIST('../data', train=True, download=True,\n transform=transform)\n dataset2 = torchvision.datasets.MNIST('../data', train=False,\n transform=transform)\n \n trainloader = torch.utils.data.DataLoader(dataset1, batch_size=batch_size, shuffle=True, num_workers=2)\n testloader = torch.utils.data.DataLoader(dataset2, batch_size=batch_size, shuffle=False, num_workers=2)\n \n classes = [str(i) for i in range(10)] # 0, 1, ... ,10\n \n return trainloader, testloader, classes\n\ndef subset_dataset(dataset, test_classes, train_classes):\n \"\"\"\n exclude the test_classes to create trainloader\n \"\"\"\n test_idx = sum(dataset.targets==i for i in test_classes).bool().nonzero().flatten()\n train_idx = sum(dataset.targets==i for i in train_classes).bool().nonzero().flatten()\n train_subset = torch.utils.data.Subset(dataset, train_idx)\n test_subset = torch.utils.data.Subset(dataset, test_idx)\n return train_subset, test_subset\n\ndef load_fewshot_mnist(batch_size=128, perc_size=1, test_classes=[0, 1, 2]):\n \"\"\"\n Join both train and test sets\n Segregate into two sets on the basis of select classes\n \"\"\"\n \n classes = [i for i in range(10)] # 0, 1, ... 
, 10\n \n train_classes = [clas for clas in classes if clas not in test_classes]\n \n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n \n dataset1 = torchvision.datasets.MNIST('../data', train=True, download=True,\n transform=transform)\n dataset2 = torchvision.datasets.MNIST('../data', train=False,\n transform=transform)\n \n dataset1_train, dataset1_test = subset_dataset(dataset1, test_classes, train_classes)\n dataset2_train, dataset2_test = subset_dataset(dataset2, test_classes, train_classes)\n \n trainset = torch.utils.data.ConcatDataset([dataset1_train, dataset2_train])\n testset = torch.utils.data.ConcatDataset([dataset1_test, dataset2_test])\n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)\n testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=2)\n\n# for i, (x, y) in enumerate(trainloader):\n# print(y)\n# if i>=30:\n# break\n \n# print(y.unique())\n# import pdb; pdb.set_trace()\n \n return trainloader, testloader, test_classes\n","repo_name":"joeljosephjin/torch-cnn","sub_path":"data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5648235459","text":"import tensorflow as tf\nimport numpy as np\nfrom mnist_app.mnist_cnn.models import ConvNet\nimport cv2\nimport base64\n\ndef predict(img_path=None, checkpoint_path=None):\n # Convert image to numpy array size [1, 28, 28, 1]\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (28, 28))\n\n cv2.imwrite('out.jpg', img)\n encode_string = base64.b64encode(open('out.jpg', 'rb').read())\n\n print(encode_string)\n img = np.asarray(img, dtype=np.int32)\n img = np.reshape(img, [1, 28, 28, 1])\n\n # Load model trained and make prediction\n g = ConvNet(is_training=False)\n\n with tf.Session(graph=g.graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))\n result = sess.run(\n tf.argmax(g.logits, 1), feed_dict={g.X: img})\n return int(result[0]), encode_string\n\nif __name__ == '__main__':\n predict()","repo_name":"trinhvanson1997/Mnist-App","sub_path":"mnist_cnn/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39030906748","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\n\ndef bfs(v):\n q = deque()\n q.append(v)\n visited[v] = 1\n\n while q:\n node = q.popleft()\n\n if node == G:\n return cnt[G]\n for i in (node+U, node-D):\n if 0 < i <= F and visited[i] == 0:\n visited[i] = 1\n cnt[i] = cnt[node] + 1\n q.append(i)\n if cnt[G] == 0:\n return \"use the stairs\"\n\n\nF, S, G, U, D = map(int, input().split())\nvisited = [0 for _ in range(F+1)]\ncnt = [0 for _ in range(F+1)]\nprint(bfs(S))\n","repo_name":"yoseph0310/Algorithm_Python","sub_path":"BaekJoon/골드/5/5014_스타트링크.py","file_name":"5014_스타트링크.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5598343967","text":"from email.mime import image\nfrom numpy import imag\nimport pygame\nimport random\nimport 
os\r\n\r\nFPS=60\r\nWHITE=(255,255,255)\r\nBLACK=(0,0,0)\r\nRED=(255,0,0)\r\nGREEN=(0,255,0)\r\nWIDTH=500\r\nHEIGHT=600\r\n\r\n# initialize the game and create the window\r\npygame.init()\r\nscreen=pygame.display.set_mode((WIDTH,HEIGHT))\r\npygame.display.set_caption(\"承承遊戲\")\r\nclock=pygame.time.Clock()\r\n\r\n# load images\r\nbackground_img=pygame.image.load(os.path.join(\"pygame\",\"image\",\"background.png\")).convert()\r\nbullet_img=pygame.image.load(os.path.join(\"pygame\",\"image\",\"bullet.PNG\")).convert()\r\nplayer_img=pygame.image.load(os.path.join(\"pygame\",\"image\",\"player.PNG\")).convert()\r\nrock_img=pygame.image.load(os.path.join(\"pygame\",\"image\",\"rock.PNG\")).convert()\r\n\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image=pygame.transform.scale(player_img,(50,50))\r\n        self.image.set_colorkey(WHITE)\r\n        self.rect=self.image.get_rect()\r\n        self.radius=22\r\n        #pygame.draw.circle(self.image,GREEN,self.rect.center, self.radius)\r\n        self.rect.centerx=WIDTH/2\r\n        self.rect.bottom=HEIGHT-10\r\n        self.speedx=8\r\n\r\n    def update(self):\r\n        key_pressed=pygame.key.get_pressed()\r\n        if key_pressed[pygame.K_RIGHT]: \r\n            self.rect.x += self.speedx\r\n        if key_pressed[pygame.K_LEFT]:\r\n            self.rect.x -= self.speedx\r\n\r\n        if self.rect.right >WIDTH:\r\n            self.rect.right=WIDTH\r\n        if self.rect.left <0:\r\n            self.rect.left=0\r\n\r\n    def shoot(self):\r\n        bullet=Bullet(self.rect.centerx,self.rect.top)\r\n        all_sprites.add(bullet)\r\n        bullets.add(bullet)\r\n\r\n\r\nclass Rock(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image=pygame.transform.scale(rock_img,(43,42))\r\n        self.image.set_colorkey(WHITE)\r\n        self.rect=self.image.get_rect()\r\n        self.radius=self.rect.width*0.9/2\r\n        #pygame.draw.circle(self.image,GREEN,self.rect.center, self.radius)\r\n        self.rect.x=random.randrange(0,WIDTH-self.rect.width)\r\n        self.rect.y=random.randrange(-100,-40)\r\n        self.speedx=random.randrange(-3,3)\r\n        self.speedy=random.randrange(2,6)\r\n        \r\n\r\n    def update(self):\r\n        self.rect.x += self.speedx\r\n        self.rect.y += self.speedy\r\n        if self.rect.top > HEIGHT or self.rect.left>WIDTH or self.rect.right<0:\r\n            self.rect.x=random.randrange(0,WIDTH-self.rect.width)\r\n            self.rect.y=random.randrange(-100,-40)\r\n            self.speedx=random.randrange(-3,3)\r\n            self.speedy=random.randrange(2,10)\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n    def __init__(self,x,y):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image=pygame.transform.scale(bullet_img,(28,42))  # scale dimensions must be integers\r\n        self.image.set_colorkey(WHITE)\r\n        self.rect=self.image.get_rect()\r\n        self.rect.centerx=x\r\n        self.rect.bottom=y\r\n        self.speedy=-10\r\n        \r\n\r\n    def update(self):\r\n        self.rect.y += self.speedy\r\n        if self.rect.bottom <0:\r\n            self.kill()\r\n\r\n\r\nall_sprites=pygame.sprite.Group()\r\nrocks=pygame.sprite.Group()\r\nbullets=pygame.sprite.Group()\r\nplayer=Player()\r\nall_sprites.add(player)\r\nfor i in range(8):\r\n    rock=Rock()\r\n    all_sprites.add(rock)\r\n    rocks.add(rock)\r\n\r\n\r\n\r\n# game loop\r\nrunning=True\r\nwhile running:\r\n    clock.tick(FPS)\r\n    # handle input\r\n    for event in pygame.event.get():\r\n        if event.type==pygame.QUIT:\r\n            running=False\r\n        elif event.type==pygame.KEYDOWN:\r\n            if event.key==pygame.K_SPACE:\r\n                player.shoot()\r\n    # update the game\r\n    all_sprites.update()\r\n    hits=pygame.sprite.groupcollide(rocks,bullets,True,True)\r\n    for hit in hits:\r\n        rock=Rock()\r\n        all_sprites.add(rock)\r\n        rocks.add(rock)\r\n\r\n    hits=pygame.sprite.spritecollide(player,rocks,False,pygame.sprite.collide_circle)\r\n    if hits:\r\n        running=False\r\n    \r\n    # draw the frame\r\n    screen.fill(WHITE)\r\n    screen.blit(pygame.transform.scale(background_img,(500,700)),(0,0))\r\n    
all_sprites.draw(screen)\n pygame.display.update()\n\npygame.quit()","repo_name":"Taocheng-Lin/Pygame-Project_00","sub_path":"game1.py","file_name":"game1.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40504496882","text":"import os\n\nfrom evoflow import logger\n\ntry:\n from pywinauto import Application as App\nexcept ImportError:\n logger.debug(\n \"Can't import pywin32, try to install with:\\nconda install pywin32\\nOR\\npip install pywin32==227\"\n )\n\nfrom evoflow.controller.log_controller import logger\n\nDATA_PATH = \"data\"\nSTART_CATIA = True\n\n\nclass Global:\n REMOTE_EXECUTE = False\n __caa = None\n custom_env = None\n ocr_engine = None\n\n def set_env(self, env):\n Global.custom_env = env\n return Global.custom_env\n\n @property\n def caa(self):\n if Global.__caa is None:\n try:\n logger.info(\"Opening CATIA Application ... \")\n if Global.custom_env is not None:\n Global.__caa = start_catia(Global.custom_env)\n else:\n Global.__caa = start_catia(None)\n except:\n logger.error(\"Can't start CATIA\")\n return Global.__caa\n","repo_name":"maycuatroi/evo-flow","sub_path":"evoflow/entities/global_vars.py","file_name":"global_vars.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37878198774","text":"\n\"\"\"\n\n@author: David P Fleming, University of Washington, Oct 2018\n@email dflemin3 (at) uw (dot) edu\n\nThis script examines our stellar evolution model, a bicubic interpolation of the\nBaraffe+2015 stellar evolution model, coupled with the Matt+2015 magnetic\nbraking model.\n\n\"\"\"\n\nimport numpy as np\nimport os\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n#Typical plot parameters that make for pretty plots\nmpl.rcParams['figure.figsize'] = (9,8)\nmpl.rcParams['font.size'] = 25.0\n\n## for Palatino and other serif fonts use:\nmpl.rc('font',**{'family':'serif'})\nmpl.rc('text', usetex=True)\n\n# Read in output files\npath = \"../Sims/StellarEvolution/\"\n\n# saOutputOrder Time -Radius -RotPer RadGyra\ng = np.genfromtxt(os.path.join(path,\"stellar.g.forward\"))\nk = np.genfromtxt(os.path.join(path,\"stellar.k.forward\"))\nm = np.genfromtxt(os.path.join(path,\"stellar.m.forward\"))\ngr = np.genfromtxt(os.path.join(path,\"stellar.gr.forward\"))\nkr = np.genfromtxt(os.path.join(path,\"stellar.kr.forward\"))\nmr = np.genfromtxt(os.path.join(path,\"stellar.mr.forward\"))\n\n# All on same time grid\ntime = g[:,0]\n\n# Find time indices of approximate ZAMS times from Henny Lamers'\n# stellar evolution notes\nind_g = np.argmin(np.fabs(time-6.2e7))\nind_k = np.argmin(np.fabs(time-1.0e8))\nind_m = np.argmin(np.fabs(time-3.0e8))\n\n# Plot!\nfig, ax = plt.subplots(ncols=3, figsize=(20,6))\n\n# Left panel: Stellar radius evolution\nax[0].plot(time, g[:,1], lw=2.5, color=\"C0\", label=r\"$1$ M$_{\\odot}$\")\nax[0].plot(time, k[:,1], lw=2.5, color=\"C1\", label=r\"$0.7$ M$_{\\odot}$\")\nax[0].plot(time, m[:,1], lw=2.5, color=\"C2\", label=r\"$0.2$ M$_{\\odot}$\")\n\n# Plot points to indicate ZAMS\nax[0].scatter(time[ind_g], g[ind_g,1], s=75, color=\"C0\")\nax[0].scatter(time[ind_k], k[ind_k,1], s=75, color=\"C1\")\nax[0].scatter(time[ind_m], m[ind_m,1], s=75, color=\"C2\")\n\n# Format\nax[0].legend(loc=\"best\", framealpha=0, fontsize=20)\nax[0].set_ylabel(\"Radius [R$_{\\odot}$]\", fontsize=25)\nax[0].set_xlabel(\"Time [yr]\", 
fontsize=25)\n\nax[0].set_xlim(1.0e6,time[-1])\nax[0].set_xscale(\"log\")\n\n# Middle panel: Stellar radius of gyration evolution\nax[1].plot(time, g[:,5], lw=2.5, color=\"C0\")\nax[1].plot(time, k[:,3], lw=2.5, color=\"C1\")\nax[1].plot(time, m[:,3], lw=2.5, color=\"C2\")\n\n# Plot points to indicate ZAMS\nax[1].scatter(time[ind_g], g[ind_g,5], s=75, color=\"C0\")\nax[1].scatter(time[ind_k], k[ind_k,3], s=75, color=\"C1\")\nax[1].scatter(time[ind_m], m[ind_m,3], s=75, color=\"C2\")\n\n# Format\nax[1].set_ylabel(\"Radius of Gyration\", fontsize=25)\nax[1].set_xlabel(\"Time [yr]\", fontsize=25)\nax[1].set_xlim(1.0e6,time[-1])\nax[1].set_xscale(\"log\")\n\n# Right panel: Stellar rotation period evolution\nax[2].plot(time, g[:,2], lw=2.5, color=\"C0\")\nax[2].plot(time, k[:,2], lw=2.5, color=\"C1\")\nax[2].plot(time, m[:,2], lw=2.5, color=\"C2\")\n\n# Plot points to indicate ZAMS\nax[2].scatter(time[ind_g], g[ind_g,2], s=75, color=\"C0\")\nax[2].scatter(time[ind_k], k[ind_k,2], s=75, color=\"C1\")\nax[2].scatter(time[ind_m], m[ind_m,2], s=75, color=\"C2\")\n\n# Format\nax[2].set_ylabel(\"Rotation Period [d]\", fontsize=25)\nax[2].set_xlabel(\"Time [yr]\", fontsize=25)\nax[2].set_xlim(1.0e6,time[-1])\nax[2].set_ylim(0.5e-1,1.0e2)\nax[2].set_xscale(\"log\")\nax[2].set_yscale(\"log\")\n\n# Save!\nfig.tight_layout()\nfig.savefig(\"../Plots/stellarExample.pdf\", bbox_inches=\"tight\")\n","repo_name":"dflemin3/sync","sub_path":"Analysis/stellar.py","file_name":"stellar.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"1582911025","text":"# -*-coding:utf-8-*-\n\"\"\"\nThis module demonstrates the pretrained model's results\n\"\"\"\nimport glob\nimport cv2\nimport numpy as np\nfrom argparse import ArgumentParser\nimport os\nfrom config import IMG_HEIGHT, IMG_WIDTH, NEIGHBOUR_NUM, T, EPSILON\nfrom model import build_model\n\n\ndef parse_argument():\n \"\"\"\n Parse command-line arguments\n :return:\n \"\"\"\n ap = ArgumentParser()\n ap.add_argument(\"-o\", \"--output\", default='../results/', help=\"colorization result output folder\")\n ap.add_argument(\"-i\", \"--input\", default='../data/images/test/', help=\"input test images folder\")\n args = vars(ap.parse_args())\n return args\n\n\ndef get_images(path):\n \"\"\"\n Read images from disk\n :param path:\n :return:\n \"\"\"\n filenames = glob.glob(path + \"*\")\n images_bgr = []\n images_gray = []\n for i in range(len(filenames)):\n print(\"processing image{}\".format(i+1))\n\n filename = filenames[i]\n bgr = cv2.imread(filename) # read the color image\n bgr = cv2.resize(bgr, (IMG_HEIGHT, IMG_WIDTH), interpolation=cv2.INTER_CUBIC)\n\n gray = cv2.imread(filename, 0) # read the grayscale image\n gray = cv2.resize(gray, (IMG_HEIGHT, IMG_WIDTH), interpolation=cv2.INTER_CUBIC)\n images_bgr.append(bgr)\n images_gray.append(gray)\n return np.array(images_bgr), np.array(images_gray)\n\n\ndef test(args):\n \"\"\"\n Run prediction\n :param args\n :return:\n \"\"\"\n if not os.path.exists('../models/training_best_weights_128end.h5'):\n print(\"no model in root/models\")\n else:\n model = build_model()\n model.load_weights('../models/training_best_weights_128end.h5')\n h, w = IMG_HEIGHT // 4, IMG_WIDTH // 4\n q_ab = np.load(\"../data/params/pts_in_hull.npy\")\n number_q = q_ab.shape[0]\n\n images_bgr, images_gray = get_images(args['input'])\n raws = []\n grays = []\n preds = []\n for i in range(images_bgr.shape[0]):\n x_test = np.empty((1, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.float32)\n x_test[0, :, :, 0] = images_gray[i] / 255.\n x_colorized = model.predict(x_test).reshape(h*w, number_q)\n\n # adjust the predicted probabilities\n 
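# annealed-mean readjustment as in Zhang et al. 2016: dividing log-probabilities by a temperature T (assumed < 1) sharpens the softmax before taking the expectation\n 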
x_colorized = np.exp(np.log(x_colorized+EPSILON) / T)\n x_colorized = x_colorized / np.sum(x_colorized, 1)[:, np.newaxis]\n # convert probabilities to expected a/b values using the bin centers\n q_a = q_ab[:, 0].reshape((1, 313))\n q_b = q_ab[:, 1].reshape((1, 313))\n x_a = np.sum(x_colorized*q_a, 1).reshape((h, w))\n x_b = np.sum(x_colorized*q_b, 1).reshape((h, w))\n x_a = cv2.resize(x_a, (IMG_HEIGHT, IMG_WIDTH), interpolation=cv2.INTER_CUBIC)\n x_b = cv2.resize(x_b, (IMG_HEIGHT, IMG_WIDTH), interpolation=cv2.INTER_CUBIC)\n x_a = x_a + 128\n x_b = x_b + 128\n out_lab = np.zeros((IMG_HEIGHT, IMG_WIDTH, 3), dtype=np.int32)\n out_lab[:, :, 0] = cv2.cvtColor(cv2.cvtColor(images_gray[i], cv2.COLOR_GRAY2BGR), cv2.COLOR_BGR2LAB)[:, :, 0]\n out_lab[:, :, 1] = x_a\n out_lab[:, :, 2] = x_b\n out_lab = out_lab.astype(np.uint8)\n out_bgr = cv2.cvtColor(out_lab, cv2.COLOR_LAB2BGR)\n out_bgr = out_bgr.astype(np.uint8)\n\n if not os.path.exists(args['output']):\n os.mkdir(args['output'])\n\n cv2.imwrite(args['output'] + 'img{}_raw.png'.format(i), images_bgr[i])\n raws.append(images_bgr[i])\n cv2.imwrite(args['output'] + 'img{}_gray.png'.format(i), images_gray[i])\n grays.append(images_gray[i])\n cv2.imwrite(args['output'] + 'img{}_pred.png'.format(i), out_bgr)\n preds.append(out_bgr)\n\n import matplotlib.pyplot as plt\n for i in range(3):\n plt.subplot(3, 3, i+1)\n plt.imshow(cv2.cvtColor(raws[i], cv2.COLOR_BGR2RGB))\n plt.subplot(3, 3, i+3+1)\n plt.imshow(grays[i], cmap='gray')\n plt.subplot(3, 3, i+6+1)\n plt.imshow(cv2.cvtColor(preds[i], cv2.COLOR_BGR2RGB))\n plt.savefig('zc.png')\n\n\nif __name__ == '__main__':\n args_ = parse_argument()\n test(args_)\n","repo_name":"luanshiyinyang/Colorization","sub_path":"scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"} +{"seq_id":"8091263743","text":"import asyncio\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Annotated\n\nimport aiohttp\nimport imageio.v3 as iio\nfrom graia.ariadne.app import Ariadne\nfrom graia.ariadne.event.message import GroupMessage\nfrom graia.ariadne.message.chain import MessageChain\nfrom graia.ariadne.message.element import At, Image\nfrom graia.ariadne.message.parser.twilight import (FullMatch, ResultValue,\n Twilight, WildcardMatch)\nfrom graia.ariadne.model import Group, Member\nfrom graia.saya import Channel\nfrom graiax.shortcut.saya import listen, dispatch\nfrom PIL import Image as IMG\n\nchannel = Channel.current()\n\nchannel.name(\"petpet\")\nchannel.description(\"发送'摸头@某人'制作摸头GIF\")\nchannel.author(\"I_love_study\")\n\n# avatar position for each animation frame\n\n# each tuple is:\n# (x1, y1, x2, y2)\n# where (x1, y1) is the top-left corner and (x2, y2) the bottom-right corner\nframe_spec = [\n [27, 31, 86, 90],\n [22, 36, 91, 90],\n [18, 41, 95, 90],\n [22, 41, 91, 91],\n [27, 28, 86, 91]\n]\n\n# squish offsets\n\n# difference between the avatar's actual position at maximum squish and the values above\nsquish_factor = [\n (0, 0, 0, 0),\n (-7, 22, 8, 0),\n (-8, 30, 9, 6),\n (-3, 21, 5, 9),\n (0, 0, 0, 0)\n]\n# downward offset of the template for each frame at maximum squish\nsquish_translation_factor = [0, 20, 34, 21, 0]\n\ndef make_petpet(file, squish=0):\n profile_pic = IMG.open(file)\n hands = IMG.open(Path(__file__).parent/'sprite.png')\n gifs = []\n for i,spec in enumerate(frame_spec):\n # apply the squish offset to each coordinate\n for j, s in enumerate(spec):\n spec[j] = int(s + squish_factor[i][j] * squish)\n hand = hands.crop((112*i,0,112*(i+1),112))\n reprofile = profile_pic.resize(\n (int((spec[2] - spec[0]) * 1.2), int((spec[3] - spec[1]) * 1.2)),\n IMG.Resampling.LANCZOS)\n gif_frame = IMG.new('RGB', (112, 112), (255, 255, 255))\n 
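# paste the resized avatar first, then overlay the hand sprite so it sits on top\n 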
gif_frame.paste(reprofile, (spec[0], spec[1]))\n gif_frame.paste(hand, (0, int(squish * squish_translation_factor[i])), hand)\n gifs.append(gif_frame)\n return iio.imwrite(\"<bytes>\", gifs, extension=\".gif\", loop=0, duration=40, subrectangles=True)\n\n@listen(GroupMessage)\n@dispatch(Twilight(FullMatch(\"摸头\"), WildcardMatch() @ \"para\"))\nasync def petpet(app: Ariadne, group: Group, member: Member, para: Annotated[MessageChain, ResultValue()]):\n user = para.get_first(At).target if para.has(At) else member.id\n profile_url = f\"https://q2.qlogo.cn/headimg_dl?dst_uin={user}&spec=640\"\n async with aiohttp.request(\"GET\", profile_url) as r:\n profile = BytesIO(await r.read())\n gif = await asyncio.to_thread(make_petpet, profile)\n await app.send_group_message(group, MessageChain([Image(data_bytes=gif)]))","repo_name":"I-love-study/A_Simple_QQ_Bot","sub_path":"modules/entertain/petpet/petpet.py","file_name":"petpet.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"7844972792","text":"import pyglet\nwindow = pyglet.window.Window()\n\nlabel = pyglet.text.Label('Ahoj!', x=10, y=30)\n\n@window.event\ndef on_text(text):\n existujici_text = label.text\n label.text = existujici_text + text\n\n@window.event\ndef on_draw():\n window.clear()\n label.draw()\n\npyglet.app.run()\nprint('Hotovo!')\n","repo_name":"paaja90/pyladies","sub_path":"lekce 01/python_intro - udalosti.py","file_name":"python_intro - udalosti.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74280428413","text":"from collections import defaultdict\r\n\r\n\r\ndef dfs(idx, end_idx, subtotal, direction):\r\n global ans\r\n print(\"----------------\")\r\n print(\"ans, idx, end_idx, subtotal: \", ans, idx, end_idx, subtotal)\r\n print(\"left\", left)\r\n if idx == end_idx:\r\n if direction == \"right\":\r\n print(\"if, idx, s - subtotal, left[s - subtotal]: \", idx, s - subtotal, left[s - subtotal])\r\n ans += left[s - subtotal]\r\n else:\r\n print(\"else\", idx, subtotal)\r\n left[subtotal] += 1\r\n return\r\n\r\n dfs(idx + 1, end_idx, subtotal, direction)\r\n dfs(idx + 1, end_idx, subtotal + numbers[idx], direction)\r\n\r\n\r\nans = 0\r\nn, s = map(int, input().split())\r\nnumbers = list(map(int, input().split()))\r\nleft = defaultdict(int)\r\n\r\n#print(left)\r\ndfs(0, n//2, 0, \"left\")\r\ndfs(n//2, n, 0, \"right\")\r\nprint(left)\r\nprint(ans)\r\nprint(ans if s != 0 else ans - 1)\r\n\r\n","repo_name":"goodsosbva/BOJ_BruteForce","sub_path":"1208.py","file_name":"1208.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70477447292","text":"#Conditionals: \n# conditional if/else to check whether a dict or a list was passed to the mean function: \ndef mean(my_input):\n if type(my_input)== dict: \n # here we could also use:\n #if isinstance(my_input, dict): \n the_mean= sum(my_input.values())/len(my_input)\n \n else:\n the_mean= sum(my_input)/len(my_input)\n\n return the_mean\ntoday_expenses=[20.0, 20.0, 8.0]\nOther_expense={\"Electricity\": 146, \"rent\":162, \"meal\": 100, \"Health_insaurance\":107}\nprint(\"Mean of list \",today_expenses,\"=\",mean(today_expenses))\nprint(\"\\nMean of dict \",Other_expense,\"=\",mean(Other_expense))\n\n# check whether the string is at least 8 characters long\ndef str_check(my_input):\n if len(my_input)<8: \n return False\n else: 
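# 8 or more characters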
\n return True\nprint(\"\\n str checked as '\",str_check(\"Somewords\"),\"'\")\n\n#nested elif chain\ndef temperatureUpdate(reading): \n if reading>25: \n return \"Hot\"\n elif reading <=25 and reading >=15:\n return \"Warm\"\n else:\n return \"Cold\"\nprint(temperatureUpdate(50))\n","repo_name":"azaharudue/pyprojects","sub_path":"corePy/3_conditionals.py","file_name":"3_conditionals.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23185940632","text":"from kafka import KafkaConsumer\nimport json\nfrom db_connection import Session\nfrom models import Video, Category\n\nTOPIC_POST_VIDEO = \"video-post-event\"\n\ndef post_video(user_id, title, resume=None, category=None, category_id=None):\n with Session() as session:\n try: \n video = Video(user_id=user_id, title=title, resume=resume, category_id=category_id, category=category)\n session.add(video)\n session.commit()\n return None\n except Exception as e: \n return str(e)\n finally:\n session.close()\n\ndef delete_video(id):\n with Session() as session:\n try: \n video = (session.query(Video).filter(Video.video_id == id).first())\n if video:\n session.delete(video)\n session.commit()\n return video\n else: \n return 'not found'\n except Exception as e: \n return str(e)\n finally:\n session.close()\n\ndef add_like_video(id):\n with Session() as session:\n try: \n session.query(Video).filter_by(video_id=id).update({'video_rating': Video.video_rating + 1})\n session.commit()\n return None\n except Exception as e: \n return str(e)\n finally:\n session.close()\n\n\nconsumer = KafkaConsumer(\n bootstrap_servers=['kafka1:9092'],\n group_id='group1',\n value_deserializer=lambda v: json.loads(v.decode('ascii')),\n key_deserializer=lambda v: json.loads(v.decode('ascii')),\n max_poll_records=10,\n auto_offset_reset='earliest',\n session_timeout_ms=6000,\n heartbeat_interval_ms=3000\n )\nconsumer.subscribe(topics=[TOPIC_POST_VIDEO])\n\n\n\ntry:\n for message in consumer:\n print(message.key)\n post_video(\n user_id=message.value[\"user_id\"], \n title=message.value[\"title\"], \n resume=message.value[\"resume\"], \n category_id=message.value[\"category_id\"], \n category=message.value[\"category\"]\n )\n \n\nexcept Exception as e:\n print(str(e))","repo_name":"MizzleTheHorse/video-platform","sub_path":"video_consumer_post/video_consumer.py","file_name":"video_consumer.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41453617651","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def levelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n from collections import defaultdict\n self.levelList = defaultdict(list)\n \n def getLevelOrder(root, level):\n if not root:\n return\n self.levelList[level].append(root.val)\n getLevelOrder(root.left, level+1)\n getLevelOrder(root.right, level+1)\n \n getLevelOrder(root, 0)\n return list(self.levelList.values())\n 
\n","repo_name":"stavanmehta/leetcode","sub_path":"python_submission/102.binary-tree-level-order-traversal.198566579.ac.py","file_name":"102.binary-tree-level-order-traversal.198566579.ac.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39491534282","text":"import socket\nimport json\nclass jioServer:\n\t'Server Class for communication and calling function'\n\tdef __init__(self,port):\n\t\tself.port = port\n\t\tself.host = socket.gethostbyname(socket.gethostname())\n\t\tself.s = socket.socket()\n\t\tself.s.bind((self.host, self.port)) \n\tdef listen(self):\n\t\tself.s.listen(5)\n\t\tprint('JIOSERVER :: Host...',self.host)\n\t\tprint(\"JIOSERVER :: Listening on port...\",self.port)\n\t\tself.c, self.addr = self.s.accept()\n\t\tprint (\"JIOSERVER :: Connected with Client on \",self.addr)\n\t\t\n\tdef recv_msg_call_func(self,func_file):\n\t\twhile True:\n\t\t\tprint(\"JIOSERVER :: Receiving message...\")\n\t\t\tmsg = self.c.recv(1024)\n\t\t\tprint(\"LOG :: Message received : \",msg)\n\t\t\tif msg=='exit' or msg=='':\n\t\t\t\tbreak;\n\t\t\tjson_msg = json.loads(msg)\n\t\t\tfunc_name = json_msg['func_name']\n\t\t\targs = json_msg['args']\n\t\t\ttry:\n\t\t\t\tret_msg = getattr(func_file,func_name)(*args)\n\t\t\texcept Exception as e:\n\t\t\t\tret_msg = 'Error :: '+str(e)\n\t\t\tself.c.send(ret_msg)\n\t\tself.c.close()\n\n\n\n\n\n\n\n\n\t\n","repo_name":"niteshvijay1995/ChotaBaadal","sub_path":"jioServer.py","file_name":"jioServer.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"12829284551","text":"import logging\nimport os\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nfrom dotenv import find_dotenv, load_dotenv\nfrom yacs.config import CfgNode\nfrom yacs.config import CfgNode as CN\nfrom yacs.config import _assert_with_logging, _valid_type\n\nfrom config import _C\n\n\ndef check_cfg(C):\n pass\n\n\ndef get_cfg_defaults():\n \"\"\"\n Get a yacs CfgNode object with default values\n \"\"\"\n # Return a clone so that the defaults will not be altered\n # It will be subsequently overwritten with local YAML.\n return _C.clone()\n\ndef save_to_yaml(cfg, path_output):\n \"\"\"\n Save the current config to a YAML file.\n :param cfg: CfgNode object to be saved\n :param path_output: path to output files\n \"\"\"\n path_output = Path(path_output)\n path_output.parent.mkdir(parents=True, exist_ok=True)\n with open(path_output, \"w\") as f:\n f.write(cfg.dump())\n \ndef load_from_yaml(path_cfg_data, path_cfg_override=None, list_cfg_override=None):\n \"\"\"\n Load a config from a YAML file.\n :param path_cfg_data: path to path_cfg_data files\n :param path_cfg_override: path to path_cfg_override actual\n :param list_cfg_override: [key1, value1, key2, value2, ...]\n :return: cfg_base incorporating the overwrite.\n \"\"\"\n cfg_base = get_cfg_defaults()\n if path_cfg_data is not None:\n cfg_base.merge_from_file(path_cfg_data)\n if path_cfg_override is not None:\n cfg_base.merge_from_file(path_cfg_override)\n if list_cfg_override is not None:\n cfg_base.merge_from_list(list_cfg_override)\n return cfg_base\n\ndef convert_to_dict(cfg_node):\n def _convert_to_dict(cfg_node, key_list):\n _VALID_TYPES = {tuple, list, str, int, float, bool, type(None)}\n if not isinstance(cfg_node, CfgNode):\n _assert_with_logging(\n _valid_type(cfg_node),\n \"Key {} with value {} is not a valid type; 
valid types: {}\".format(\n \".\".join(key_list), type(cfg_node), _VALID_TYPES\n ),\n )\n return cfg_node\n else:\n cfg_dict = dict(cfg_node)\n for k, v in cfg_dict.items():\n cfg_dict[k] = _convert_to_dict(v, key_list + [k])\n return cfg_dict\n\n return _convert_to_dict(cfg_node, [])\n\n\ndef combine_cfgs(\n path_cfg_data: Path = None,\n path_cfg_override: Path = None,\n list_cfg_override: List = None,\n):\n \"\"\"\n An internal facing routine thaat combined CFG in the order provided.\n :param path_output: path to output files\n :param path_cfg_data: path to path_cfg_data files\n :param path_cfg_override: path to path_cfg_override actual\n :param list_cfg_override: [key1, value1, key2, value2, ...]\n :return: cfg_base incorporating the overwrite.\n \"\"\"\n if path_cfg_data is not None:\n path_cfg_data = Path(path_cfg_data)\n if path_cfg_override is not None:\n path_cfg_override = Path(path_cfg_override)\n # Path order of precedence is:\n # Priority 1, 2, 3, 4, 5 respectively\n # .env > List > other CFG YAML > data.yaml > default.yaml\n\n # Load default lowest tier one:\n # Priority 5:\n cfg_base = get_cfg_defaults()\n\n # Merge from the path_data\n # Priority 4:\n if path_cfg_data is not None and path_cfg_data.exists():\n cfg_base.merge_from_file(path_cfg_data.absolute())\n\n # Merge from other cfg_path files to further reduce effort\n # Priority 3:\n if path_cfg_override is not None and path_cfg_override.exists():\n cfg_base.merge_from_file(path_cfg_override.absolute())\n\n # Merge from List\n # Priority 2:\n if list_cfg_override is not None:\n cfg_base.merge_from_list(list_cfg_override)\n\n # Merge from .env\n # Priority 1:\n list_cfg = update_cfg_using_dotenv()\n if list_cfg is not []:\n cfg_base.merge_from_list(list_cfg)\n\n check_cfg(cfg_base)\n\n return cfg_base\n\n\ndef update_cfg_using_dotenv() -> list:\n \"\"\"\n In case when there are dotenvs, try to return list of them.\n # It is returning a list of hard overwrite.\n :return: empty list or overwriting information\n \"\"\"\n # If .env not found, bail\n if find_dotenv() == \"\":\n warnings.warn(\".env files not found. YACS config file merging aborted.\")\n return []\n\n # Load env.\n load_dotenv(find_dotenv(), verbose=True)\n\n # Load variables\n list_key_env = {\n \"DATASET.ROOT_DIR\",\n \"DATASET.VOXEL_INDEX_DIR\",\n \"MODEL.BACKBONE.PRETRAINED_WEIGHT_DIR\",\n \"TRAINER.CALLBACKS.CHECKPOINT.ROOT_DIR\",\n \"RESULTS_DIR\",\n }\n\n # Instantiate return list.\n path_overwrite_keys = []\n logging.info(\"merge from .env\")\n # Go through the list of key to be overwritten.\n for key in list_key_env:\n\n # Get value from the env.\n value = os.getenv(key)\n logging.info(f\"{key}={value}\")\n # If it is none, skip. As some keys are only needed during training and others during the prediction stage.\n if value is None:\n continue\n\n # Otherwise, adding the key and the value to the dictionary.\n path_overwrite_keys.append(key)\n path_overwrite_keys.append(value)\n\n return path_overwrite_keys\n\n\n# The flatten and unflatten snippets are from an internal lfads_tf2 implementation.\n\n\ndef flatten_dict(dictionary, level=[]):\n \"\"\"Flattens a dictionary by placing '.' between levels.\n\n This function flattens a hierarchical dictionary by placing '.'\n between keys at various levels to create a single key for each\n value. It is used internally for converting the configuration\n dictionary to more convenient formats. 
Implementation was\n inspired by `this StackOverflow post\n `_.\n\n Parameters\n ----------\n dictionary : dict\n The hierarchical dictionary to be flattened.\n level : str, optional\n The string to append to the beginning of this dictionary,\n enabling recursive calls. By default, an empty string.\n\n Returns\n -------\n dict\n The flattened dictionary.\n\n See Also\n --------\n lfads_tf2.utils.unflatten : Performs the opposite of this operation.\n\n \"\"\"\n\n tmp_dict = {}\n for key, val in dictionary.items():\n if type(val) == dict:\n tmp_dict.update(flatten_dict(val, level + [key]))\n else:\n tmp_dict[\".\".join(level + [key])] = val\n return tmp_dict\n\n\ndef unflatten(dictionary):\n \"\"\"Unflattens a dictionary by splitting keys at '.'s.\n\n This function unflattens a hierarchical dictionary by splitting\n its keys at '.'s. It is used internally for converting the\n configuration dictionary to more convenient formats. Implementation was\n inspired by `this StackOverflow post\n `_.\n\n Parameters\n ----------\n dictionary : dict\n The flat dictionary to be unflattened.\n\n Returns\n -------\n dict\n The unflattened dictionary.\n\n See Also\n --------\n lfads_tf2.utils.flatten : Performs the opposite of this operation.\n\n \"\"\"\n\n resultDict = dict()\n for key, value in dictionary.items():\n parts = key.split(\".\")\n d = resultDict\n for part in parts[:-1]:\n if part not in d:\n d[part] = dict()\n d = d[part]\n d[parts[-1]] = value\n return resultDict\n\n\ndef dict_to_list(config):\n config_list = []\n for key, val in config.items():\n # print(key, val, type(val))\n config_list.append(key)\n config_list.append(val)\n return config_list\n","repo_name":"huzeyann/MemoryEncodingModel","sub_path":"mem/config_utils.py","file_name":"config_utils.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"78"} +{"seq_id":"4416166912","text":"#!/usr/bin/python3\n\n'''\nthe module that contains the function\n'''\n\n\nclass Student:\n \"\"\"A class of students\"\"\"\n def __init__(self, first_name, last_name, age):\n \"\"\"Initialization of the instance variables\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\"returns dict rpstn student attributes\"\"\"\n item = (list, dict, str, int, bool)\n obj_dict = {}\n if attrs is None:\n for key, value in self.__dict__.items():\n if isinstance(value, (list, dict, str, int, bool)):\n obj_dict[key] = value\n else:\n for key, value in self.__dict__.items():\n if key in attrs and isinstance(value, item):\n obj_dict[key] = value\n return obj_dict\n\n def reload_from_json(self, json):\n \"\"\"replaces all attributes of the Student instance\"\"\"\n for key in json:\n try:\n setattr(self, key, json[key])\n except FileNotFoundError:\n pass\n","repo_name":"OfficialEcho95/alx-higher_level_programming","sub_path":"0x0B-python-input_output/11-student.py","file_name":"11-student.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18926193820","text":"from typing import List\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Ellipse\nfrom matplotlib.cm import viridis\nfrom flylips.wing import Wing\n\n\ndef combined_plot(wings: List[Wing]):\n \"\"\"\n A function to plot the inferred ellipses for each wing.\n\n It will take a list of Wing 
objects and plot the inferred ellipse on the same\n plot. The plot will be saved to the current working directory.\n \"\"\"\n fig, ax = plt.subplots()\n colors = np.linspace(0, 1, len(wings))\n custom_legend = []\n labels = []\n for ix,wing in enumerate(wings):\n _, _, a, b, _ = wing.params\n if a < b:\n a, b = b, a\n ellipse = Ellipse(xy=[0,0], width=2*a, height=2*b, angle = 0, fill=False, color=viridis(colors[ix]), label = wing.name)\n custom_legend.append(Line2D([0], [0], color=viridis(colors[ix])))\n labels.append(wing.name)\n ax.add_patch(ellipse)\n plt.axis('equal')\n plt.legend(custom_legend, labels)\n plt.savefig('combined.png')\n plt.close()\n","repo_name":"andersgs/flylips","sub_path":"src/flylips/combined_plot.py","file_name":"combined_plot.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20263769295","text":"import random\n\nwith open('sowpods.txt') as f:\n words = list(f)\n\nword = random.choice(words).strip()\nwordList = list(word)\nboardList = [\"_ \" for i in wordList]\nwrongList = []\n\nwhile True:\n lives = 6 - len(wrongList)\n board = \"\".join(boardList)\n print(\"\")\n if lives == 6:\n print(\" -------\\n\"\n \"| \", board, \"\\n\"\n \"|\\n\"\n \"| \", \"incorrect letters:\", wrongList, \"\\n\")\n\n if lives == 5:\n print(\" -------\\n\"\n \"| O \", board, \"\\n\"\n \"|\\n\"\n \"| \", \"incorrect letters:\", wrongList, \"\\n\")\n\n if lives == 4:\n print(\" -------\\n\"\n \"| O \", board, \"\\n\"\n \"| / \\n\"\n \"| \", \"incorrect letters:\", wrongList, \"\\n\")\n\n if lives == 3:\n print(\" -------\\n\"\n \"| O \", board, \"\\n\"\n \"| /|\\n\"\n \"| \", \"incorrect letters:\", wrongList, \"\\n\")\n\n if lives == 2:\n print(\" -------\\n\"\n \"| O \", board, \"\\n\"\n \"| /|\\ \\n\"\n \"| \", \"incorrect letters:\", wrongList, \"\\n\")\n\n if lives == 1:\n print(\" -------\\n\"\n \"| O \", board, \"\\n\"\n \"| /|\\ \\n\"\n \"| / \", \"incorrect letters:\", wrongList, \"\\n\")\n\n if lives == 0:\n print(\" -------\\n\"\n \"| O \", board, \"\\n\"\n \"| /|\\ \\n\"\n \"| / \\ \", \"incorrect letters:\", wrongList, \"\\n\")\n print(\"u lose! the word was:\", word)\n break\n\n # win condition\n if \"_ \" not in boardList:\n print(\"u win!\")\n break\n\n guess = input(\"guess your letter: \").upper()\n\n # check if single letter and if already guessed\n if guess.isalpha() and len(guess) == 1:\n if guess in wrongList or (guess + \" \") in boardList:\n print(\"u already guessed {}. enter another letter\".format(guess))\n continue\n else:\n print(\"invalid input. 
please enter a single letter.\")\n continue\n\n pos = -1\n for i in wordList:\n pos += 1\n if guess == i:\n boardList[pos] = i + \" \"\n\n if guess not in wordList:\n wrongList.append(guess)\n","repo_name":"peaknees/hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39453626726","text":"from tkinter import *\n\nroot = Tk()\n\nroot.geometry(\"300x300\")\n\nroot.title(\" Simple Calculator\")\n\nw = Label(root, text=\"choose calculation\", font=\"helvetica\", fg=\"Green\")\nw.pack()\n\nb1 = Button(root, text=\"Addition\", width=30)\nb1.pack()\n\nroot.mainloop()\n\n\n\n\n","repo_name":"Yoorse/SimpleCalcutator","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38041616139","text":"import random\nimport socket\nfrom zeroconf import Zeroconf, ServiceInfo\nfrom parameter import REMOTE_NAME_DEFAULT\nfrom webserver import HTTPServerController\n\n\nclass Zeroauth:\n MAX_PORT = 65536\n MIN_PORT = 1024\n\n def __init__(self, deviceName=REMOTE_NAME_DEFAULT):\n self.hostname = socket.gethostname()\n try:\n self.ip = ip = socket.gethostbyname(self.hostname)\n except:\n self.hostname += '.local'\n self.ip = ip = socket.gethostbyname(self.hostname)\n\n self.deviceName = deviceName\n self.service = \"_spotify-connect._tcp.local.\"\n self.name = self.deviceName + \"._spotify-connect._tcp.local.\"\n self.port = random.randint(Zeroauth.MIN_PORT, Zeroauth.MAX_PORT)\n self.desc = {\"CPath\": \"/spotzc\", \"VERSION\": \"1.0\"}\n\n self.info = ServiceInfo(\n type_=self.service,\n name=self.name,\n addresses=[socket.inet_aton(self.ip)],\n port=self.port,\n properties=self.desc,\n # server=self.hostname,\n )\n\n self.zeroconf = Zeroconf()\n self.server = HTTPServerController(self.port)\n # self.register_status_listener = receiver_controller.register_status_listener\n self.register_listener = self.server.register_listener\n\n def start(self):\n self.server.start_server_thread()\n self.zeroconf.register_service(self.info)\n\n def stop(self):\n self.zeroconf.unregister_service(self.info)\n self.zeroconf.close()\n self.server.stop_server_thread()\n","repo_name":"ykelle/spotipy-control","sub_path":"zeroauth.py","file_name":"zeroauth.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"12941175034","text":"# https://leetcode.com/contest/biweekly-contest-94/problems/reward-top-k-students/\nfrom typing import List\n\nclass Solution:\n def topStudents(\n self,\n positive_feedback: List[str],\n negative_feedback: List[str],\n report: List[str],\n student_id: List[int],\n k: int\n ) -> List[int]:\n answer = []\n\n p = set(positive_feedback)\n n = set(negative_feedback)\n for i, rep in enumerate(report):\n point = 0\n rrr = rep.split(\" \")\n for word in rrr:\n if word in p:\n point += 3\n elif word in n:\n point -= 1\n\n answer.append((-point, student_id[i]))\n answer.sort()\n answer = [v for k, v in answer]\n answer = answer[:k]\n\n return answer\n\n\ndef read_data():\n # positive_feedback = [\"smart\", \"brilliant\", \"studious\"]\n # negative_feedback = [\"not\"]\n # report = [\"this student is not studious\",\"the student is smart\"]\n # student_id = [1, 2]\n # k = 2\n\n # positive_feedback = [\"fkeofjpc\", \"qq\", \"iio\"]\n # 
negative_feedback = [\"jdh\", \"khj\", \"eget\", \"rjstbhe\", \"yzyoatfyx\", \"wlinrrgcm\"]\n # report = [\n # \"rjstbhe eget kctxcoub urrmkhlmi yniqafy fkeofjpc iio yzyoatfyx khj iio\",\n # \"gpnhgabl qq qq fkeofjpc dflidshdb qq iio khj qq yzyoatfyx\",\n # \"tizpzhlbyb eget z rjstbhe iio jdh jdh iptxh qq rjstbhe\",\n # \"jtlghe wlinrrgcm jnkdbd k iio et rjstbhe iio qq jdh\",\n # \"yp fkeofjpc lkhypcebox rjstbhe ewwykishv egzhne jdh y qq qq\",\n # \"fu ql iio fkeofjpc jdh luspuy yzyoatfyx li qq v\",\n # \"wlinrrgcm iio qq omnc sgkt tzgev iio iio qq qq\",\n # \"d vhg qlj khj wlinrrgcm qq f jp zsmhkjokmb rjstbhe\"\n # ]\n # student_id = [96537918, 589204657, 765963609, 613766496, 43871615, 189209587, 239084671, 908938263]\n # k = 3\n\n positive_feedback = [\"m\", \"eveszfubew\"]\n negative_feedback = [\"iq\", \"etwuedg\", \"egpakyk\", \"da\", \"qkmhvgxg\", \"q\", \"zs\", \"ujmy\", \"mh\"]\n report = [\n \"eveszfubew jebebqp iq eveszfubew eveszfubew iq daej eveszfubew q da\",\n \"ohfz zs ujmy egpakyk eveszfubew pffeq q qkmhvgxg kdgqq ipp\",\n \"cceierguau mh da eveszfubew m etwuedg ikeft egpakyk ltnibxljfi m\",\n \"km m iq rab inooo ujmy tlrdyu yqhn m xlkhebs\",\n \"q etwuedg m eveszfubew ixrfzwmb m jyltumdwt dacmewk odbllqdiq eveszfubew\"\n ]\n student_id = [643903773, 468275834, 993893529, 509587004, 61125507]\n k = 5\n # Output:\n # [61125507, 509587004, 993893529, 643903773, 468275834]\n # Expected:\n # [61125507, 643903773, 993893529, 509587004, 468275834]\n\n return positive_feedback, negative_feedback, report, student_id, k\n\ndef main():\n positive_feedback, negative_feedback, report, student_id, k = read_data()\n sol = Solution()\n print(sol.topStudents(positive_feedback, negative_feedback, report, student_id, k))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ruslan515/Competitions","sub_path":"coding/Leetcode/Biweekly Contest 94/6274.py","file_name":"6274.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"30541998430","text":"def render(field, owner):\n if field.visibility != 'public':\n field.add_prop(field.visibility)\n\n if field.const:\n field.add_prop('const')\n\n if field.has_prop('type'):\n field.remove_all_props('type')\n if field.type is not None:\n if field.nullable:\n field.add_prop('type', field.type.name)\n else:\n field.add_prop('type', '!' 
+ field.type.name)\n\n result = ['/**', field.render_comment(), ' */']\n\n if field.value is not None:\n value_str = ' = %s' % field.value\n else:\n value_str = ''\n\n if owner.name is None or owner.name == '':\n result.append('%s%s;' % (field.name, value_str))\n elif field.static:\n if field.name.startswith(owner.name + '.'):\n result.append('%s%s;' % (field.name, value_str))\n else:\n result.append('%s.%s%s;' % (owner.name, field.name, value_str))\n else:\n result.append('%s.prototype.%s%s;' % (owner.name, field.name, value_str))\n\n return \"\\n\".join(result)\n","repo_name":"theRobinator/autocode","sub_path":"renderers/closure/fieldrenderer.py","file_name":"fieldrenderer.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9134677144","text":"import logging\nimport os\nimport socket\n\nfrom threading import Thread\n\n\nfrom gi.repository import GObject as gobject\n\nfrom constants import DATA_SOCKET, DATA_DIR\n\nclass SingleWindowSocket:\n\t\"\"\"\n\tCalled on startup to emulate a singleton.\n\n\tA socket file is one of the ways to makes sure there is only one\n\tinstance of the program. Otherwise, it will mess up downloads when\n\tthey both download to the same file.\n\t\"\"\"\n\tdef __init__(self, url, main):\n\t\tself.caller = main\n\t\tself.RUN = False # When true, start program.\n\t\ttry:\n\t\t\tos.makedirs(DATA_DIR)\n\t\texcept OSError as e:\n\t\t\t# Errno 17 means something already exists\n\t\t\tif e.errno == 17 and os.path.isdir(DATA_DIR):\n\t\t\t\t# No problem\n\t\t\t\tpass\n\t\t\telif e.errno == 17 and not os.path.isdir(DATA_DIR):\n\t\t\t\tlogging.warn('%s already exists, but is not a directory: ' + str(e))\n\t\t\telse:\n\t\t\t\tlogging.warn('Error creating data directory: ' + str(e))\n\n\t\tif os.path.exists(DATA_SOCKET):\n\t\t\ttry:\n\t\t\t\tself.sendUrl(url)\n\t\t\texcept socket.error as msg:\n\t\t\t\tlogging.error(\"Possible stale socket (%s). Starting server.\" % str(msg))\n\t\t\t\tos.remove(DATA_SOCKET)\n\t\t\t\tself.RUN = True\n\t\t\t\tThread(target=self.server).start()\n\t\telse:\n\t\t\tself.RUN = True\n\t\t\tThread(target = self.server).start()\n\n\tdef sendUrl(self, url):\n\t\t\"\"\"\n\t\tSends to currently running instance.\n\t\t\"\"\"\n\t\ts = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n\t\ts.connect(DATA_SOCKET)\n\t\ts.send(url)\n\t\ts.close()\n\n\tdef server(self):\n\t\t\"\"\"\n\t\tListens for urls and loads in this process.\n\t\t\"\"\"\n\t\ts = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n\t\ts.settimeout(None) # important! Otherwise default timeout will apply.\n\n\t\ttry:\n\t\t\tos.mkdir(DATA_DIR)\n\t\texcept OSError as e:\n\t\t\tif e.errno == 17:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tlogging.error('Error creating socket: %s.' 
% str(e))\n\t\ts.bind(DATA_SOCKET)\n\n\t\twhile True:\n\t\t\turl = s.recv(65536) # Wait for a url to load.\n\t\t\tif url == 'EXIT':\n\t\t\t\tos.remove(DATA_SOCKET)\n\t\t\t\treturn # End this thread to let program exit normally.\n\t\t\tgobject.idle_add(self.caller.gotoURL, url, True)\n\t\t\tgobject.idle_add(self.caller.window.present)\n","repo_name":"rbrito/tunesviewer","sub_path":"src/SingleWindowSocket.py","file_name":"SingleWindowSocket.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"78"} +{"seq_id":"28859333418","text":"import RPi.GPIO as GPIO\nimport time\n\ndac = [26, 19, 13, 6, 5, 11, 9, 10]\ncomp = 4\ntroyka = 17\narray = [0] * len(dac)\ndac_reverse = list(reversed(dac))\nleds = [24, 25, 8, 7, 12, 16, 20, 21]\n\n\ndef decimal2binary(value):\n return [int(bit) for bit in bin(value)[2:].zfill(8)]\n\n\ndef adc():\n for i in range(7, -1, -1):\n GPIO.output(dac_reverse[i], 1)\n array[7 - i] = 1\n time.sleep(0.001)\n if GPIO.input(comp) == 0:\n GPIO.output(dac_reverse[i], 0)\n array[7 - i] = 0\n return\n\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(leds, GPIO.OUT)\nGPIO.setup(dac, GPIO.OUT)\nGPIO.setup(troyka, GPIO.OUT, initial=GPIO.HIGH)\nGPIO.setup(comp, GPIO.IN)\n\ntry:\n while True:\n v = 0\n array = [0] * len(dac)\n GPIO.output(dac, array)\n adc()\n\n for i in range(8):\n v += array[i] * (2 ** (7 - i))\n volt = v * 3.3 / 256\n print(decimal2binary(v), v, ' ', volt)\n GPIO.output(leds, 0)\n\n for i in range(9):\n if v < i * 32 + 5:\n for j in range(i):\n GPIO.output(leds[j], 1)\n break\n\nfinally:\n GPIO.output(dac, 0)\n GPIO.cleanup()\n","repo_name":"BatyaPng/OIP","sub_path":"5 - ADC/5-3-adc-volume.py","file_name":"5-3-adc-volume.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1951537805","text":"import json\nimport logging\nimport os\nimport requests\nimport string\nimport time\n\nfrom ochopod.api import Tool\nfrom ochopod.bindings.generic.marathon import Pod\nfrom ochopod.core.tools import Shell\nfrom ochopod.core.utils import shell\nfrom ochopod.models.piped import Actor as Piped\nfrom ochopod.models.reactive import Actor as Reactive\nfrom os.path import join\n\n\nlogger = logging.getLogger('ochopod')\n\n\nif __name__ == '__main__':\n\n #\n # - generate a random 32 characters token (valid for the lifetime of the pod)\n # - use it to implement a SHA1 digest verification\n # - this token can also be defined when deploying the pod\n #\n settings = json.loads(os.environ['pod'])\n alphabet = string.letters + string.digits + '+/'\n randomized = ''.join(alphabet[ord(c) % len(alphabet)] for c in os.urandom(32))\n token = settings['token'] if 'token' in settings else randomized\n shell(\"echo 'jenkins:%s' | sudo -S chpasswd\" % token)\n\n class Run(Tool):\n \"\"\"\n Dedicated tool to upload/trigger CD scripts from the ochothon CLI. The tool will perform the SHA1\n signature and allow to specify arbitrary variables on the command line. 
The servo output will be passed back\n to the CLI.\n\n CLI usage:\n $ exec *.servo --force run my-scripts-folder script.py --variables key:value\n \"\"\"\n\n tag = 'run'\n\n def define_cmdline_parsing(self, parser):\n\n parser.add_argument('tgz', type=str, nargs=1, help='the CD bundle as a TGZ archive')\n parser.add_argument('scripts', type=str, nargs='+', help='1+ scripts to run')\n parser.add_argument('-v', '--variables', action='store', dest='variables', type=str, nargs='+', help='key:value mappings')\n\n def body(self, args, cwd):\n\n #\n # - force the output to be formatted in JSON\n # - add any variable defined on the command line using -v\n #\n headers = {'Accept': 'application/json'}\n if args.variables:\n for value in args.variables:\n tokens = value.split(':')\n headers['X-Var-%s' % tokens[0]] = tokens[1]\n\n #\n # - fetch the uploaded TGZ archive in our temp. directory\n # - compute its SHA1 digest\n # - format the corresponding X-Signature header\n #\n tgz = join(cwd, args.tgz[0])\n code, lines = shell('openssl dgst -sha1 -hmac \"%s\" %s' % (token, tgz))\n assert code == 0, 'failed to sign the archive'\n headers['X-Signature'] = 'sha1=%s' % lines[0].split(' ')[1]\n\n #\n # - fire a POST /run request to ourselves\n # - pass the response back to the CLI\n #\n with open(tgz, 'rb') as f:\n\n files = {'tgz': f.read()}\n reply = requests.post('http://localhost:5000/run/%s' % '+'.join(args.scripts), files=files, headers=headers)\n assert reply.status_code < 300, 'invalid response (HTTP %d)' % reply.status_code\n js = json.loads(reply.text)\n return 0 if js['ok'] else 1, js['log']\n\n class Model(Reactive):\n\n depends_on = ['portal']\n\n class Strategy(Piped):\n\n cwd = '/opt/servo'\n\n check_every = 60.0\n\n pid = None\n\n since = 0.0\n\n def sanity_check(self, pid):\n\n #\n # - simply use the provided process ID to start counting time\n # - this is a cheap way to measure the sub-process up-time\n # - display our token as well\n #\n now = time.time()\n if pid != self.pid:\n self.pid = pid\n self.since = now\n\n lapse = (now - self.since) / 3600.0\n\n return \\\n {\n 'token': token,\n 'uptime': '%.2f hours (pid %s)' % (lapse, pid)\n }\n\n def can_configure(self, cluster):\n\n assert len(cluster.dependencies['portal']) == 1, 'need 1 portal'\n\n def configure(self, cluster):\n\n #\n # - get our pod details\n #\n pod = cluster.pods[cluster.key]\n\n #\n # - query the ochothon proxy secret token via an internal POST /info\n #\n reply = requests.post('http://%s/info' % cluster.grep('portal', 8080))\n js = json.loads(reply.text)\n\n #\n # - look the ochothon portal up @ TCP 9000\n # - update the resulting connection string into a small JSON payload\n # - add the ochothon proxy token as well\n # - persist it into /opt/slave/.portal\n # - this will be used by the CI/CD scripts to issue commands\n #\n hints = \\\n {\n 'ip': cluster.grep('portal', 9000),\n 'token': js['metrics']['token'] if 'token' in js['metrics'] else ''\n }\n\n with open('/opt/servo/.portal', 'w') as f:\n f.write(json.dumps(hints))\n\n #\n # - pass the token and our local IP:port connection string (used for the callback\n # mechanism)\n #\n return 'python hook.py', \\\n {\n 'token': token,\n 'local': '%s:%d' % (pod['ip'], pod['ports']['5000'])\n }\n\n Pod().boot(Strategy, model=Model, tools=[Run, 
Shell])","repo_name":"autodesk-cloud/ci-ochopod","sub_path":"images/marathon/servo/resources/pod/pod.py","file_name":"pod.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"13364349691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 15 16:14:05 2022\n\n@author: romas\n\"\"\"\n\nimport learning_algorithms as la\n\nimport matplotlib.pyplot as plt\n\n\ndef calculate_layer(curr_layer, next_layer, in_arr, learning_algorithm):\n\n res = []\n\n i = 0\n\n while i < len(curr_layer):\n\n j = 0\n\n weight_arr = curr_layer[i].get_weights()\n\n while j < len(weight_arr):\n\n exit_value = weight_arr[j] * in_arr[i]\n\n curr_layer[i].set_exit_value(j, exit_value)\n\n curr_layer[i].update_S(exit_value)\n\n if i == 0:\n\n res.insert(j, exit_value)\n\n else:\n\n res[j] += exit_value\n\n j += 1\n\n i += 1\n\n j = 0\n\n while j < len(res):\n\n res[j] = next_layer[j].get_activation_function_value(res[j]+next_layer[j].get_bias())\n\n j += 1\n\n return res\n\ndef calculate_result(neur_arr, in_arr, learning_algorithm):\n\n i = 0\n\n while i < len(neur_arr)-1:\n\n in_arr = calculate_layer(neur_arr[i], neur_arr[i+1], in_arr, learning_algorithm)\n\n i += 1\n\n return in_arr\n\ndef calculate_error(exp_res, res, learning_algorithm):\n\n i = 0\n\n err = 0\n\n err_arr = []\n\n while i < len(exp_res):\n\n diff = exp_res[i]-res[i]\n\n err += diff\n\n if learning_algorithm == 0:\n\n err_arr.append(diff)\n\n i += 1\n\n if learning_algorithm == 0:\n\n return err, err_arr\n\n else:\n\n return err\n\ndef calculating_cycle(set_data, set_res, neur_arr, learning_algorithm):\n\n result = calculate_result(neur_arr, set_data, learning_algorithm)\n\n if learning_algorithm == 0:\n\n error, error_arr = calculate_error(set_res, result, learning_algorithm)\n\n return result, error, error_arr\n\n else:\n\n error = calculate_error(set_res, result, learning_algorithm)\n\n return result, error\n\ndef learning_cycle(neur_arr, neur_layer_arr, set_data, set_res, eta, batch, learning_algorithm, *args):\n\n result = []\n\n i = 0\n\n while i < len(set_data):\n\n err_for_backprop = []\n\n if learning_algorithm == 0:\n\n res, error_for_i, err_for_backprop = calculating_cycle(set_data[i], set_res[i], neur_arr, learning_algorithm)\n\n else:\n\n res, error_for_i = calculating_cycle(set_data[i], set_res[i], neur_arr, learning_algorithm)\n\n error_for_i = round(error_for_i, 6)\n\n result.append(res)\n\n if i == 0:\n\n error = error_for_i**2\n\n else:\n\n error += error_for_i**2\n\n if learning_algorithm == 0:\n\n delta_w, delta_w_bias = la.backpropagation(neur_arr, result, eta, err_for_backprop, batch, i, args[0], args[1])\n\n for n in neur_arr:\n\n for m in n:\n\n m.set_S(0)\n\n if (i+1)%batch == 0 and learning_algorithm == 1:\n\n neur_arr = la.genetic(neur_arr, neur_layer_arr, len(set_res), set_data, set_res, args[0], args[1], args[2], args[3], args[4], args[5], args[6])\n\n i += 1\n\n error /= 2\n\n if learning_algorithm == 1:\n\n return neur_arr, result, error\n\n return result, error, delta_w, delta_w_bias\n\n\ndef learning_process(error_threshold, epochs_threshold, set_data, set_res, neur_arr, neur_layer_arr, eta, batch, learning_algorithm, *args):\n\n error = 1\n\n err_arr = []\n\n delta_w = []\n\n delta_w_bias = []\n\n count = 0\n\n if learning_algorithm == 0:\n\n for n in neur_arr:\n\n temp_arr = []\n\n for m in n:\n\n temp_arr.append(0)\n\n delta_w.append(temp_arr)\n\n delta_w_bias.append(temp_arr)\n\n\n while error > 
error_threshold and count < epochs_threshold:\n\n if learning_algorithm == 0:\n\n result, error, delta_w, delta_w_bias = learning_cycle(neur_arr, neur_layer_arr, set_data, set_res, eta, batch, learning_algorithm, delta_w, delta_w_bias)\n\n else:\n\n neur_arr, result, error = learning_cycle(neur_arr, neur_layer_arr, set_data, set_res, eta, batch, learning_algorithm, args[0], args[1], args[2], args[3], args[4], args[5], args[6])\n\n err_arr.append(error)\n\n count += 1\n\n return result, neur_arr, err_arr\n\ndef calculate_set_res(neur_arr, set_data, set_res):\n\n result = []\n\n i = 0\n\n while i < len(set_data):\n\n res, error_for_i = calculating_cycle(set_data[i], set_res[i], neur_arr, 1)\n\n error_for_i = round(error_for_i, 6)\n\n result.append(res)\n\n if i == 0:\n\n error = error_for_i**2\n\n else:\n\n error += error_for_i**2\n\n i += 1\n\n error /= 2\n\n return result, error\n\ndef main_calculation(error_threshold, epochs_threshold, tr_set_data, tr_set_res, test_set_data, test_set_res, neur_arr, neur_layer_arr, eta, batch, learning_algorithm, *args):\n\n in_parm_for_graph = 1\n\n\n if learning_algorithm == 0:\n\n print(\"Алгоритм зворотного поширення помилки\")\n\n elif learning_algorithm == 1:\n\n print(\"Генетичний алгоритм\")\n\n\n if learning_algorithm == 0:\n\n tr_res, neur_arr, tr_err_arr = learning_process(error_threshold, epochs_threshold, tr_set_data, tr_set_res, neur_arr, neur_layer_arr, eta, batch, learning_algorithm)\n\n else:\n\n tr_res, neur_arr, tr_err_arr = learning_process(error_threshold, epochs_threshold, tr_set_data, tr_set_res, neur_arr, neur_layer_arr, eta, batch, learning_algorithm, args[0], args[1], args[2], args[3], args[4], args[5], args[6])\n\n\n tr_err = tr_err_arr[-1]\n\n tr_res, tr_err = calculate_set_res(neur_arr, tr_set_data, tr_set_res)\n\n tr_err_arr.append(tr_err)\n\n\n i = 0\n\n while i < len(tr_res):\n\n j = 0\n\n while j < len(tr_res[i]):\n\n tr_res[i][j] = round(tr_res[i][j], 6)\n\n j += 1\n\n i += 1\n\n\n print()\n\n print(\"Навчальна вибірка\")\n\n print(f\"Похибка: {tr_err}\")\n\n print()\n\n\n plt.figure(1+learning_algorithm*3)\n\n plt.title(f\"Навчальна вибірка, похибка {round(tr_err, 3)}\")\n \n plt.xlabel(\"ΔK\", fontsize = \"x-large\")\n \n plt.ylabel(\"da/dN\", fontsize = \"x-large\")\n\n i = 0\n\n while i < len(tr_res):\n\n plt.scatter(tr_set_data[i][in_parm_for_graph], tr_set_res[i], c = \"red\", marker=\"D\")\n\n plt.scatter(tr_set_data[i][in_parm_for_graph], tr_res[i], c = \"blue\", marker=\"^\")\n\n i += 1\n\n plt.legend([\"Очікуваний результат\" , \"Отриманий результат\"], loc = \"upper right\")\n\n plt.show()\n\n\n plt.figure(2+learning_algorithm*3)\n\n\n if learning_algorithm == 0:\n\n plt.title(\"Крива навчання мережі зворотним поширенням помилки\")\n\n elif learning_algorithm == 1:\n\n plt.title(\"Крива навчання мережі генетичним алгоритмом\")\n\n\n\n i = 0\n\n x = []\n\n while i < len(tr_err_arr):\n\n x.append(i)\n\n i += 1\n\n plt.xlabel(\"x\", fontsize = \"x-large\")\n \n plt.ylabel(\"y\", fontsize = \"x-large\")\n\n plt.plot(x, tr_err_arr)\n\n plt.show()\n\n\n print(\"Тестова вибірка\")\n\n test_res, test_err = calculate_set_res(neur_arr, test_set_data, test_set_res)\n\n i = 0\n\n while i < len(test_res):\n\n j = 0\n\n while j < len(test_res[i]):\n\n test_res[i][j] = round(test_res[i][j], 6)\n\n j += 1\n\n i += 1\n\n\n print(f\"Похибка: {test_err}\")\n\n print()\n\n\n plt.figure(3+learning_algorithm*3)\n\n plt.title(f\"Тестов�� вибірка, похибка {round(test_err, 3)}\")\n \n plt.xlabel(\"ΔK\", fontsize = \"x-large\")\n \n 
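# axes follow the fatigue crack-growth convention: stress-intensity range ΔK vs. growth rate da/dN\n 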
plt.ylabel(\"da/dN\", fontsize = \"x-large\")\n\n i = 0\n\n while i < len(test_res):\n\n plt.scatter(test_set_data[i][in_parm_for_graph], test_set_res[i], c = \"red\", marker=\"D\")\n\n plt.scatter(test_set_data[i][in_parm_for_graph], test_res[i], c = \"blue\", marker=\"^\")\n\n i += 1\n\n plt.legend([\"Очікуваний результат\" , \"Отриманий результат\"], loc = \"upper right\")\n\n plt.show()","repo_name":"Romchyk-S/Thesis","sub_path":"Програма/calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":8148,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"23552837741","text":"import os\nimport xlsxwriter\n\n# Set the directory path you want to extract folder names from\npath = r\"F:\\20230224_056_MFup_Checked\"\n\n# Create an Excel workbook and worksheet\nworkbook = xlsxwriter.Workbook('folder_names.xlsx')\nworksheet = workbook.add_worksheet()\n\n# Set the row and column variables\nrow = 0\ncol = 0\n\n# Loop through the directory and get the folder names\nfor folder in os.listdir(path):\n if os.path.isdir(os.path.join(path, folder)):\n worksheet.write(row, col, folder)\n row += 1\n\n# Close the workbook\nworkbook.close()\n","repo_name":"FedericoMollica/IMPORT_CODES","sub_path":"Folder_Names_to_excel_column.py","file_name":"Folder_Names_to_excel_column.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"37800551920","text":"import pandas as pd\npd.options.mode.chained_assignment = None\n\nPATH = r'/Users/nicole/Downloads/FoodServiceData_23_0.csv'\n\ndf = pd.read_csv(PATH) \ndf_grades = (df.loc[df['Grade']== 'C'])\n\n\ndf_grades['EstablishmentName'] = df_grades['EstablishmentName'].str.split(' #', expand = True)\ndf_list = (df_grades.loc[:, 'EstablishmentName']).values.tolist()\n# print(df_list)\n\n\ndef frequency(x_list):\n list_dict = dict()\n for i in x_list:\n if i in list_dict:\n list_dict[i] += 1\n else:\n list_dict[i] = 1\n list_dict = {key:value for key, value in list_dict.items()}\n return list_dict\n\n\nlist_dict = frequency(df_list)\n\nhighest_freq = sorted(list_dict, key=list_dict.get, reverse = True)[:3]\nhighest_freq_list = []\nsep = ', '\n\ndef change(y):\n y = sorted(y, reverse = True)\n return y \n\n\nfor key, value in list_dict.items():\n if key in highest_freq:\n highest_freq_list.append(value)\n\nprint('{0} are the food service establishments with the greatest frequency of \\'C\\' grades. 
\nThey appear {1} times, respectively.'.format(sep.join(highest_freq), sep.join(str(i) for i in change(highest_freq_list))))","repo_name":"nicxle/data-analysis-projects","sub_path":"foodservicedata_sort_2.py","file_name":"foodservicedata_sort_2.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9025164491","text":"import sys\nimport heapq as h\nimport copy\ninput = sys.stdin.readline\n\nn = int(input())\nnameToIdx = {}\nnames = [0 for _ in range(2*n)]\nidx = 0\ngraph = [[] for _ in range(2*n)]\nind = [0 for _ in range(2*n)]\n\nfor _ in range(n):\n a,b = input().split()\n if a not in nameToIdx:\n nameToIdx[a] = idx\n names[idx] = a\n idx+=1\n if b not in nameToIdx:\n nameToIdx[b] = idx\n names[idx] = b\n idx += 1\n graph[nameToIdx[a]].append(nameToIdx[b])\n ind[nameToIdx[b]] += 1\n\nans = []\nq = []\nfor i in range(idx):\n if ind[i] == 0:\n h.heappush(q, names[i])\n\ntq = []\nwhile q:\n cur = h.heappop(q)\n\n ans.append(cur)\n for nxt in graph[nameToIdx[cur]]:\n ind[nxt] -= 1\n if ind[nxt] == 0:\n h.heappush(tq, names[nxt])\n\n if len(q) == 0:\n q = copy.deepcopy(tq)\n tq = []\n\nflag = True\nfor i in ind:\n if i != 0:\n flag = False\n\nif flag:\n for i in ans:\n print(i)\nelse:\n print(-1)","repo_name":"HiGeuni/Problem-Solving","sub_path":"BaekJoon/23509_topological_sort.py","file_name":"23509_topological_sort.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23904073727","text":"import os.path\nfrom .basepage import BasePage\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom UItest.common.loggen import LogGen\nfrom selenium import webdriver\nfrom time import sleep\nlogger = LogGen(logger=\"MainPage\").getlog()\n#define the elements used on the main page (userid and the exit button), located via xpath\nclass MainPage(BasePage):\n userid_loc = (By.XPATH,'.//*[@id=\\'top\\']/div[3]/ul/li[6]')\n #exit_btn_loc = (By.XPATH,'.//*[@id=\\'ECS_MEMBERZONE\\']/font/a[2]')\n exit_btn_loc = (By.XPATH, './/*[ @id=\\'top\\']/div[3]/ul/li[1]/a/span')\n xitongguanli_loc = (By.ID,'firstMenu0')\n yewuzhunbeizhanghu_loc = (By.ID,'firstMenu12')\n #open the given link and log the action\n def open(self,base_url):\n self.open(self.base_url,self.pagetitle)\n logger.info(\"打开连接: %s \" % base_url)\n #show the userid and log the action\n def show_userid(self):\n userid = self.find_element(*self.userid_loc).text\n logger.info(\"当前用户id是: %s \" % userid)\n return userid\n def open_page(self,driver):\n self.find_element(*self.xitongguanli_loc).click()\n print('DEBUG_已经点击了系统管理')\n sleep(3)\n self.find_element(*self.yewuzhunbeizhanghu_loc).click()\n print('DEBUG_已经点击了业务准备账户')\n #driver = webdriver.Chrome()\n sleep(2)\n driver.find_element_by_xpath(\"//span[text()=' 业务准备账户垫付']\").click()\n sleep(3)\n print('DEBUG_已经点击了业务准备账户垫付')\n target = driver.find_element_by_xpath(\"//*[text()='逾期明细手工垫付']\")\n driver.execute_script(\"arguments[0].scrollIntoView();\", target)\n print('DEBUG_移动滚动条到“逾期明细手工垫付”')\n sleep(2)\n driver.find_element_by_xpath(\"//li[text()='逾期明细手工垫付']\").click()\n logger.info('打开“逾期明细手工垫付”页面')\n\n #exit action: click the exit button and log the sign-out\n def exit_sys(self):\n self.find_element(*self.exit_btn_loc).click()\n 
logger.info(\"注销测试系统\")\n","repo_name":"UserWangjn/JieYueProject_2","sub_path":"UItest/pages/mainpage.py","file_name":"mainpage.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9914911652","text":"from django.shortcuts import render, HttpResponse\nfrom home.models import Task\n\n# Create your views here.\ndef home(request):\n context = {'success': False, 'name': \"Ronald\"}\n if request.method == \"POST\":\n title = request.POST[\"title\"]\n desc = request.POST[\"desc\"]\n # print(title, desc)\n ins = Task(taskTitle=title, taskDesc=desc)\n ins.save()\n context = {'success': True, 'name': \"Ronald\"}\n return render(request, 'index.html', context)\n\ndef tasks(request):\n allTasks = Task.objects.all()\n # [print(f'{item.taskTitle} : {item.taskDesc}') for item in allTasks]\n context = {'tasks': allTasks}\n return render(request, 'tasks.html', context)","repo_name":"Ronnie5562/Backend-dev","sub_path":"Django/todoList/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"14097312955","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n# Использовать словарь, содержащий следующие ключи: название пункта назначения рейса;\r\n# номер рейса; тип самолета. Написать программу, выполняющую следующие действия: ввод\r\n# с клавиатуры данных в список, состоящий из словарей заданной структуры; записи должны\r\n# быть размещены в алфавитном порядке по названиям пунктов назначения; вывод на экран\r\n# пунктов назначения и номеров рейсов, обслуживаемых самолетом, тип которого введен с\r\n# клавиатуры; если таких рейсов нет, выдать на дисплей соответствующее сообщение\r\n\r\nfrom datetime import date\r\nimport sys\r\n\r\nif __name__ == '__main__':\r\n school = {'1a': 30, '1б': 30,\r\n '2а': 30, '2б': 30,\r\n '3а': 30, '3б': 30,\r\n '4а': 25, '4б': 30,\r\n '5а': 16, '5б': 30,\r\n '6а': 20, '6б': 17,\r\n '7а': 17, '7б': 13,\r\n '8а': 22, '8б': 25,\r\n '9a': 29, '9б': 30,\r\n '10a': 18, '10б': 13,\r\n '11a': 7, '11б': 8\r\n }\r\n print(school)\r\n\r\n school['5б'] += 10\r\n school['9в'] = 20\r\n school.pop('5б', 30)\r\n\r\n sum = 0\r\n for item in school.values():\r\n sum += item\r\n print(school)\r\n print(\"Количество учеников во всех классах: \", sum)\r\n","repo_name":"EndMad/L6","sub_path":"Pr_3.py","file_name":"Pr_3.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13632085390","text":"from torch import nn\nfrom transformers import AutoModel\nimport torch\nfrom timm.models.vision_transformer import Block, Attention, Mlp\nfrom timm.models.mobilenetv3 import mobilenetv3_small_075\nfrom timm.models.resnet import Bottleneck\n\nclass Where2D(nn.Module):\n def __init__(self):\n super().__init__()\n self.encoder_buffer = nn.Linear(768, 12800)\n self.act = nn.GELU()\n self.decoder = nn.Sequential(\n nn.LayerNorm(256),\n Attention(256, num_heads=8, qkv_bias=False, attn_drop=0, proj_drop=0),\n nn.LayerNorm(256),\n Mlp(256, out_features=768),\n nn.GELU(),\n Block(768, 8),\n )\n\n def forward(self, input):\n x = self.encoder_buffer(input['pooler_output'])\n y = torch.where(x > 0, x, torch.zeros_like(x))\n y = torch.where(x < 0, y, torch.ones_like(x))\n z = self.act(y.view(-1, 50, 256))\n z = self.decoder(z)\n return z\n\n\nclass WhereIsFeatures(nn.Module):\n def 
__init__(self, embed_dims=1024):\n super().__init__()\n self.encoder = nn.Sequential(\n Block(512, 8),\n Block(512, 8),\n )\n self.decoder = nn.Sequential(\n Block(512, 8),\n Block(512, 8),\n nn.Sigmoid(),\n )\n self.buffer_e = nn.Sequential(\n Block(512, 8),\n Block(512, 8),\n Block(512, 8),\n nn.Linear(512, embed_dims, 1),\n )\n self.buffer_d = nn.Sequential(\n nn.Linear(embed_dims, 512, 1),\n nn.LayerNorm(512),\n Block(512, 8),\n Block(512, 8),\n Block(512, 8),\n nn.Sigmoid(),\n )\n self.sigmoid = nn.Sigmoid()\n\n def where(self, gt, hard_limit=False):\n x = self.buffer_e(gt)\n ecd = self.sigmoid(x)\n if hard_limit:\n y = torch.where(x > 0.5, ecd, torch.zeros_like(ecd))\n ecd = torch.where(x < 0.5, y, torch.ones_like(ecd))\n x = self.buffer_d(ecd)\n return x, ecd, gt\n\n def encode(self, inputs):\n return self.encoder(inputs.unsqueeze(1))\n\n def decode(self, inputs):\n return self.decoder(inputs)[:, 0]\n\n\nclass WhereIsCLIP(nn.Module):\n def __init__(self):\n super().__init__()\n self.encoder = AutoModel.from_pretrained('openai/clip-vit-base-patch32').vision_model\n self.buffer = nn.Linear(768, 8192)\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(8192, 1024, 2, stride=2), # 2\n nn.BatchNorm2d(1024),\n nn.GELU(),\n nn.ConvTranspose2d(1024, 512, 2, stride=2), # 4\n nn.BatchNorm2d(512),\n nn.GELU(),\n nn.ConvTranspose2d(512, 256, 2, stride=2), # 8\n nn.BatchNorm2d(256),\n nn.GELU(),\n nn.ConvTranspose2d(256, 128, 2, stride=2), # 16\n nn.BatchNorm2d(128),\n nn.GELU(),\n nn.ConvTranspose2d(128, 128, 2, stride=2), # 32\n nn.BatchNorm2d(128),\n nn.GELU(),\n nn.ConvTranspose2d(128, 128, 2, stride=2), # 64\n nn.BatchNorm2d(128),\n nn.GELU(),\n nn.ConvTranspose2d(128, 64, 2, stride=2), # 128\n nn.BatchNorm2d(64),\n nn.GELU(),\n nn.Conv2d(64, 64, 7), # 122\n nn.BatchNorm2d(64),\n nn.GELU(),\n nn.Conv2d(64, 64, 7), # 116\n nn.BatchNorm2d(64),\n nn.GELU(),\n nn.Conv2d(64, 64, 5), # 112\n nn.BatchNorm2d(64),\n nn.GELU(),\n nn.ConvTranspose2d(64, 32, 2, stride=2), # 224\n nn.BatchNorm2d(32),\n nn.GELU(),\n nn.Conv2d(32, 3, 1), # 224\n )\n\n def forward(self, input):\n with torch.no_grad():\n x = self.encoder(input) # batch, embed_dim\n x = self.buffer(x['pooler_output'])\n y = torch.where(x > 0, x, torch.zeros_like(x))\n y = torch.where(x < 0, y, torch.ones_like(x))\n z = self.decoder(y.unsqueeze(2).unsqueeze(2))\n return z\n\n\nclass SimPlerModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.backbone = mobilenetv3_small_075(pretrained=True, features_only=True)\n # self.buffer = nn.Conv2d(432, 8192, 1)\n self.encoder = nn.Sequential(\n nn.Conv2d(432, 2048, 3, padding=1),\n nn.BatchNorm2d(2048),\n nn.GELU(),\n Bottleneck(2048, 512),\n Bottleneck(2048, 512),\n Bottleneck(2048, 512),\n Bottleneck(2048, 512),\n )\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(2048, 256, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(),\n nn.ConvTranspose2d(256, 3, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.Sigmoid()\n )\n\n def forward(self, inputs):\n with torch.no_grad():\n x = self.backbone(inputs)[4]\n x = self.encoder(x)\n z = self.decoder(x)\n return z\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n self.conv2 = nn.Conv2d(16, 4, 3, padding=1)\n self.pool = nn.MaxPool2d(2, 2)\n self.buffer_e = nn.Conv2d(4, 1024, 1)\n self.buffer_d = nn.Conv2d(1024, 4, 1)\n\n self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)\n self.t_conv2 = nn.ConvTranspose2d(16, 3, 2, stride=2)\n\n 
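The back-to-back `torch.where` pairs used throughout this models file implement a hard 0/1 bottleneck. A standalone sketch of that binarization step (illustrative values only; note the hard threshold has no useful gradient, which is presumably why the surrounding `forward` methods keep the encoders inside `torch.no_grad()`):

```python
import torch

x = torch.tensor([-0.3, 0.1, 0.5, 0.9])
probs = torch.sigmoid(x)
# First keep values above the threshold and zero the rest...
y = torch.where(probs > 0.5, probs, torch.zeros_like(probs))
# ...then snap everything at or above the threshold to exactly 1.0.
code = torch.where(probs < 0.5, y, torch.ones_like(probs))
print(code)  # tensor([0., 1., 1., 1.])
```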
self.classifier = nn.Linear(256, 2)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def encode(self, x):\n x = self.relu(self.conv1(x))\n x = self.pool(x)\n x = self.relu(self.conv2(x))\n x = self.pool(x)\n return x\n\n def where(self, x):\n x = self.sigmoid(self.buffer_e(x))\n y = torch.where(x > 0.5, x, torch.zeros_like(x))\n y = torch.where(x < 0.5, y, torch.ones_like(x))\n x = self.relu(self.buffer_d(y))\n return x\n\n def decode(self, x):\n x = self.relu(self.t_conv1(x))\n x = self.sigmoid(self.t_conv2(x))\n return x\n\n def classify(self, x):\n x = self.classifier(x.view(-1, 256))\n return x\n\n def forward(self, x, classify):\n if classify:\n x = self.encode(x)\n return self.classify(x)\n else:\n with torch.no_grad():\n x = self.encode(x)\n x = self.where(x)\n return self.decode(x)\n\n\nclass BigToyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, 3, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.GELU(),\n nn.Conv2d(64, 128, 1),\n nn.BatchNorm2d(128),\n nn.GELU(),\n Bottleneck(128, 32),\n Bottleneck(128, 32),\n Bottleneck(128, 32),\n Bottleneck(128, 32),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(128, 256, 3, stride=2, padding=1),\n nn.BatchNorm2d(256),\n nn.GELU(),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n )\n self.pool = nn.MaxPool2d(2, 2)\n self.buffer_e = nn.Sequential(\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n nn.Conv2d(256, 8192, 1),\n )\n self.buffer_d = nn.Sequential(\n nn.Conv2d(8192, 256, 1),\n nn.BatchNorm2d(256),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n )\n\n self.t_conv1 = nn.Sequential(\n nn.ConvTranspose2d(256, 256, 2, stride=2),\n nn.GELU(),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n nn.ConvTranspose2d(256, 256, 2, stride=2),\n nn.GELU(),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n )\n self.t_conv2 = nn.Sequential(\n nn.ConvTranspose2d(256, 256, 2, stride=2),\n nn.GELU(),\n Bottleneck(256, 64),\n Bottleneck(256, 64),\n nn.ConvTranspose2d(256, 256, 2, stride=2),\n nn.GELU(),\n Bottleneck(256, 64),\n nn.Conv2d(256, 3, 1),\n )\n\n self.classifier = nn.Linear(1024, 2)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def decode(self, x):\n x = self.t_conv1(x)\n x = self.sigmoid(self.t_conv2(x))\n return x\n\n def encode(self, x):\n x = self.relu(self.conv1(x))\n x = self.pool(x)\n x = self.relu(self.conv2(x))\n x = self.pool(x)\n return x\n\n def where(self, gt, hard_limit=False):\n x = self.sigmoid(self.buffer_e(gt))\n if hard_limit:\n y = torch.where(x > 0.5, x, torch.zeros_like(x))\n x = torch.where(x < 0.5, y, torch.ones_like(x))\n x = self.buffer_d(x)\n return x, gt\n\n def classify(self, x):\n x = self.classifier(x.view(x.size(0), -1))\n return x\n\n def forward(self, x, classify, hard_limit, iswhere=False):\n if classify:\n x = self.encode(x)\n return self.classify(x)\n else:\n with torch.no_grad():\n x = self.encode(x)\n if not iswhere:\n x, _ = self.where(x, hard_limit) # where() returns a (features, gt) pair; only the features go to decode\n with torch.no_grad():\n x = self.decode(x)\n return x\n else:\n return self.decode(x)\n\n\nif __name__ == '__main__':\n model = SimPlerModel()\n model(torch.zeros((2, 3, 224, 224)))\n","repo_name":"DableUTeeF/where_is_this","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31906070531","text":"from typing import List\nclass 
Solution:\n def countBits(self, num: int) -> List[int]:\n temp = [0]\n for i in range(1,num+1):\n temp.append(temp[i>>1]+(i&1))\n return temp\nif __name__ == \"__main__\":\n a = Solution()\n print(a.countBits(5))\n\n#0000\n#0001\n#0010\n#0011\n#0100","repo_name":"H-Maktub/leetcode","sub_path":"题库_python/338.py","file_name":"338.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"55604413","text":"from typing import List, Dict\nfrom math import ceil\nfrom .. import T\n\n\nclass BTreeNode:\n def __init__(self, degree=3, leaf=False):\n \"\"\"\n Creates a BTree Node of degree 'm'\n @param degree: The maximum number of children in a node\n @param leaf: Whether this node is a leaf\n \"\"\"\n self.leaf = leaf\n self.degree = degree\n\n self.children: List['BTreeNode'] = []\n self.min_children = ceil(self.degree / 2)\n\n self.keys: Dict[T, int] = {}\n self.max_keys = self.degree - 1\n self.min_keys = ceil((self.degree / 2) - 1)\n\n def add_key(self, key: T):\n \"\"\"\n Adds a key to the node at index 0. Raises an exception if there is a key at that index\n @param key: Key to add\n \"\"\"\n self.__validate_key_size(key)\n self.add_key_at(key=key, index=0)\n\n def add_key_at(self, key: T, index: int):\n \"\"\"\n Adds a key to the node at a given index\n @param key: Key to add\n @param index: position to add key\n \"\"\"\n self.__validate_key_size(key)\n\n if index in self.keys.values():\n raise Exception(f\"Key at index {index} already exists\")\n\n self.keys[key] = index\n\n def remove_key(self, key: T) -> T:\n \"\"\"\n Removes a key from the node and returns it\n @param key: Key to remove\n @return: Key to remove\n \"\"\"\n if len(self.keys) == self.min_keys:\n raise Exception(f\"Deleting key {key} from {self} will violate minimum keys of this node.\")\n\n if key not in self.keys:\n raise Exception(f\"Key {key} does not exist on node\")\n\n self.keys.pop(key)\n return key\n\n def add_child(self, node: 'BTreeNode'):\n \"\"\"\n Adds a child to the list of children on this node\n @param node to add as a child to this node\n \"\"\"\n if len(self.children) == self.degree:\n raise Exception(\"Node has maximum children already\")\n self.children.append(node)\n\n def __validate_key_size(self, key: T):\n if len(self.keys) == self.max_keys:\n raise Exception(f\"Adding key {key} to {self} will violate maximum keys of this node.\")\n","repo_name":"BrianLusina/PythonSnips","sub_path":"datastructures/trees/btree/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"38263702544","text":"from fastapi import APIRouter, Depends\nfrom starlette.exceptions import HTTPException\nfrom src.auth import get_current_user\n\nfrom .models import (\n LoginRequest, LoginResponse,\n UserCreateRequest, UserCreateResponse, UserSingleResponse, UserListResponse,\n )\n\nfrom .service import (\n get_user_by_id, get_user_by_email, get_list_of_users,\n insert_user, login_user_service\n)\n\n\nrouter = APIRouter()\n\n\n@router.post(\"/login\", response_model=LoginResponse)\nasync def login_user(user_details: LoginRequest):\n \"\"\"\n Handles the 'login authentication' API for the user. 
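As a quick check on the B-tree capacity arithmetic in the node class above, a self-contained sketch (the helper name is mine):

```python
from math import ceil

def btree_limits(degree):
    # Order-m B-tree: at most m children and m - 1 keys per node;
    # non-root nodes keep at least ceil(m/2) children and ceil(m/2) - 1 keys.
    return {
        "max_children": degree,
        "min_children": ceil(degree / 2),
        "max_keys": degree - 1,
        "min_keys": ceil(degree / 2) - 1,
    }

print(btree_limits(3))  # {'max_children': 3, 'min_children': 2, 'max_keys': 2, 'min_keys': 1}
print(btree_limits(5))  # {'max_children': 5, 'min_children': 3, 'max_keys': 4, 'min_keys': 2}
```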
Returns an access token if verified.\n \"\"\"\n try:\n user = await login_user_service(user=user_details)\n return user\n \n except Exception as ex:\n raise HTTPException(status_code=500, detail=f\"Exception occurred: {ex}\")\n\n\n@router.post(\"/users\", response_model=UserCreateResponse)\nasync def create_user(user: UserCreateRequest):\n \"\"\"\n Handles the 'create user' API and creates a new user in the DB.\n \"\"\"\n try:\n user_dict = user.dict()\n user_data = await get_user_by_email(email=user_dict[\"email\"])\n if user_data is not None:\n raise ValueError()\n \n saved_user = await insert_user(user=user)\n return saved_user\n \n except ValueError as ex:\n raise HTTPException(status_code=406, detail=\"user already exists\")\n \n except Exception as ex:\n raise HTTPException(status_code=500, detail=f\"Exception occurred: {ex}\")\n \n\n\n@router.get(\"/users/{user_id}\", response_model=UserSingleResponse)\nasync def get_single_user(user_id: str, valid_user=Depends(get_current_user)):\n \"\"\"\n Handles the 'get user by user_id' API. Returns the user if available.\n \"\"\"\n try:\n user = await get_user_by_id(user_id=user_id)\n return user\n\n except ValueError as ex:\n raise HTTPException(status_code=406, detail=\"user with given id does not exist\")\n \n except Exception as ex:\n raise HTTPException(status_code=500, detail=f\"Exception occurred: {ex}\")\n\n\n@router.get(\"/users\", response_model=UserListResponse)\nasync def get_all_users(page: int = 1, size: int = 20, valid_user=Depends(get_current_user)):\n \"\"\"\n Handles the 'get all users' API. Returns a paginated list of users from the DB.\n \"\"\"\n try:\n users_list_paginated = await get_list_of_users(page=page, size=size)\n return users_list_paginated\n\n except Exception as ex:\n raise HTTPException(status_code=500, detail=f\"Exception occurred: {ex}\")","repo_name":"AnmolNagvanshi/cognavi-blog","sub_path":"src/blog_users/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9437963822","text":"import argparse\n\nfrom app.datasets.datasets import Datasets\nfrom app.datasets.regex import Regex\nfrom app.experiments.textcat_experiments import TextcatExperiment\nfrom app.training.training import Training\n\ndef main():\n path = '/home/andre-pereira/projects/aisolutions/dataset_version_control/backend'\n \n dataset_name = 'licitacoes_contratos'\n path_handle = path +'/app/models/event_search_licitacao_contrato.py'\n \n dataset_name2 = 'vida_funcional'\n path_handle2 = path + '/app/models/event_search_vida_funcional.py'\n \n # build the dataset\n df = Datasets.execute()\n # regex\n # save to prodigy\n Regex.execute(df, dataset_name)\n \n \n # train the machine-learning model\n # save the trained model\n \n Training.execute()\n \n # classify text\n # save to prodigy\n \n TextcatExperiment.execute()\n # confusion matrix, score and metrics\n # save metrics to the postgres database \n \n\n\nif __name__ == '__main__':\n main()","repo_name":"andrepreira/dataset_version_control","sub_path":"backend/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26062107330","text":"from art import logo\n\n#Calculator\n\n#Add\ndef add(n1, n2):\n return n1 + n2\n\n#Subtract\ndef subtract(n1, n2):\n return n1 - n2\n\n#Multiply\ndef multiply(n1, n2):\n return n1 * n2\n\n#Divide\ndef divide(n1, n2):\n return n1 / n2\n\noperations = {\n \"+\": add,\n \"-\": 
subtract,\n \"*\": multiply,\n \"/\": divide,\n}\n\ndef calculator():\n print(logo)\n \n final_answer = 0\n\n num1 = float(input(\"What's the first number?: \"))\n\n for symbol in operations:\n print(symbol)\n operation_symbol = input(\"Pick an operation from the line above: \")\n num2 = float(input(\"What's the second number?: \"))\n calculation_function = operations[operation_symbol]\n first_answer = calculation_function(num1, num2)\n final_answer = first_answer\n\n print(f\"{num1} {operation_symbol} {num2} = {first_answer}\")\n\n more = True\n while more == True:\n answer = input(f\"Type 'y' to continue calculating with {final_answer}, or type 'n' to start a new calculation: \").lower()\n\n if answer == \"y\":\n operation_symbol = input(\"Pick an operation: \")\n num3 = float(input(\"Pick the next number: \"))\n calculation_function = operations[operation_symbol]\n second_answer = calculation_function(final_answer, num3)\n print(f\"{final_answer} {operation_symbol} {num3} = {second_answer}\")\n final_answer = second_answer\n else: \n more = False\n calculator()\n\ncalculator()\n","repo_name":"jdaeira/Python-Bootcamp","sub_path":"Section10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29270091107","text":"e1 = 0\r\ne2 = 0\r\ne3 = 0\r\ndef valores():\r\n global e1, e2, e3\r\n e1 = int(input('Enter the length of one side: '))\r\n while(e1<=0):\r\n e1 = int(input('Enter the length of one side THAT IS NOT ZERO OR NEGATIVE: '))\r\n\r\n e2 = int(input('Enter the length of one side: '))\r\n while(e2<=0):\r\n e2 = int(input('Enter the length of one side THAT IS NOT ZERO OR NEGATIVE: '))\r\n\r\n e3 = int(input('Enter the length of one side: '))\r\n while(e3<=0):\r\n e3 = int(input('Enter the length of one side THAT IS NOT ZERO OR NEGATIVE: '))\r\n\r\ndef isTriangulo(e1, e2, e3):\r\n t1 = abs(e2 - e3) < e1 < e2 + e3\r\n t2 = abs(e1 - e3) < e2 < e1 + e3\r\n t3 = abs(e1 - e2) < e3 < e1 + e2\r\n if(t1 and t2 and t3):\r\n return True\r\n else:\r\n return False\r\n\r\ndef triangulo(e1, e2, e3):\r\n existe = isTriangulo(e1, e2, e3)\r\n if(existe):\r\n if ( e1 == e2 and e1 == e3 ):\r\n print('equilateral')\r\n elif (e1 == e2 or e1 == e3 or e2 == e3):\r\n print('isosceles')\r\n else:\r\n print('scalene')\r\n else:\r\n print('Not a triangle')\r\n\r\ndef run():\r\n valores()\r\n triangulo(e1, e2, e3)\r\n n = int(input('try again? (0 for no, 1 for yes) '))\r\n while n not in (0, 1):\r\n n = int(input('try again? (0 for no, 1 for yes) '))\r\n if (n == 0):\r\n print('bye')\r\n else:\r\n print(f'previous sides a = {e1}, b = {e2}, c = {e3}')\r\n run()\r\n\r\nrun()\r\n","repo_name":"RelampagoMarquin/teste","sub_path":"triangulo/triangulo.py","file_name":"triangulo.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"917109125","text":"\"\"\"\nFilter and subsample a sequence set.\n\"\"\"\n\nfrom Bio import SeqIO\nfrom collections import defaultdict\nfrom typing import Collection\nimport random, os, re\nimport pandas as pd\nimport numpy as np\nimport sys\nimport datetime\nfrom tempfile import NamedTemporaryFile\nimport treetime.utils\n\nfrom .index import index_sequences\nfrom .io import open_file, read_sequences, write_sequences\nfrom .utils import read_metadata, read_strains, get_numerical_dates, run_shell_command, shquote, is_date_ambiguous\n\ncomment_char = '#'\nMAX_NUMBER_OF_PROBABILISTIC_SAMPLING_ATTEMPTS = 10\n\n\ndef read_vcf(filename):\n if filename.lower().endswith(\".gz\"):\n import gzip\n file = gzip.open(filename, mode=\"rt\", encoding='utf-8')\n else:\n file = open(filename, encoding='utf-8')\n\n chrom_line = next(line for line in file if line.startswith(\"#C\"))\n file.close()\n headers = chrom_line.strip().split(\"\\t\")\n sequences = headers[headers.index(\"FORMAT\") + 1:]\n\n # because we need 'seqs to remove' for VCF\n return sequences, sequences.copy()\n\n\ndef write_vcf(input_filename, output_filename, dropped_samps):\n if _filename_gz(input_filename):\n input_arg = \"--gzvcf\"\n else:\n input_arg = \"--vcf\"\n\n if _filename_gz(output_filename):\n output_pipe = \"| gzip -c\"\n else:\n output_pipe = \"\"\n\n drop_args = [\"--remove-indv \" + shquote(s) for s in dropped_samps]\n\n call = [\"vcftools\"] + drop_args + [input_arg, shquote(input_filename), \"--recode --stdout\", output_pipe, \">\", shquote(output_filename)]\n\n print(\"Filtering samples using VCFTools with the call:\")\n print(\" \".join(call))\n run_shell_command(\" \".join(call), raise_errors = True)\n # remove vcftools log file\n try:\n os.remove('out.log')\n except OSError:\n pass\n\ndef read_priority_scores(fname):\n try:\n with open(fname, encoding='utf-8') as pfile:\n return defaultdict(float, {\n elems[0]: float(elems[1])\n for elems in (line.strip().split('\\t') if '\\t' in line else line.strip().split() for line in pfile.readlines())\n })\n except Exception as e:\n print(f\"ERROR: missing or malformed priority scores file {fname}\", file=sys.stderr)\n raise e\n\ndef filter_by_query(sequences, metadata_file, query):\n \"\"\"Filter a set of sequences using Pandas DataFrame querying against the metadata file.\n\n Parameters\n ----------\n sequences : list[str]\n List of sequence names to filter\n metadata_file : str\n Path to the metadata associated with the sequences\n query : str\n Query string for the dataframe.\n\n Returns\n -------\n list[str]:\n List of sequence names that match the given query\n \"\"\"\n filtered_meta_dict, _ = read_metadata(metadata_file, query)\n return [seq for seq in sequences if seq in filtered_meta_dict]\n\ndef register_arguments(parser):\n input_group = parser.add_argument_group(\"inputs\", \"metadata and sequences to be filtered\")\n input_group.add_argument('--metadata', required=True, metavar=\"FILE\", help=\"sequence metadata, as CSV or TSV\")\n input_group.add_argument('--sequences', '-s', help=\"sequences in FASTA or VCF format\")\n input_group.add_argument('--sequence-index', 
help=\"sequence composition report generated by augur index. If not provided, an index will be created on the fly.\")\n\n metadata_filter_group = parser.add_argument_group(\"metadata filters\", \"filters to apply to metadata\")\n metadata_filter_group.add_argument(\n '--query',\n help=\"\"\"Filter samples by attribute.\n Uses Pandas Dataframe querying, see https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#indexing-query for syntax.\n (e.g., --query \"country == 'Colombia'\" or --query \"(country == 'USA' & (division == 'Washington'))\")\"\"\"\n )\n metadata_filter_group.add_argument('--min-date', type=numeric_date, help=\"minimal cutoff for date; may be specified as an Augur-style numeric date (with the year as the integer part) or YYYY-MM-DD\")\n metadata_filter_group.add_argument('--max-date', type=numeric_date, help=\"maximal cutoff for date; may be specified as an Augur-style numeric date (with the year as the integer part) or YYYY-MM-DD\")\n metadata_filter_group.add_argument('--exclude-ambiguous-dates-by', choices=['any', 'day', 'month', 'year'],\n help='Exclude ambiguous dates by day (e.g., 2020-09-XX), month (e.g., 2020-XX-XX), year (e.g., 200X-10-01), or any date fields. An ambiguous year makes the corresponding month and day ambiguous, too, even if those fields have unambiguous values (e.g., \"201X-10-01\"). Similarly, an ambiguous month makes the corresponding day ambiguous (e.g., \"2010-XX-01\").')\n metadata_filter_group.add_argument('--exclude', type=str, nargs=\"+\", help=\"file(s) with list of strains to exclude\")\n metadata_filter_group.add_argument('--exclude-where', nargs='+',\n help=\"Exclude samples matching these conditions. Ex: \\\"host=rat\\\" or \\\"host!=rat\\\". Multiple values are processed as OR (matching any of those specified will be excluded), not AND\")\n metadata_filter_group.add_argument('--exclude-all', action=\"store_true\", help=\"exclude all strains by default. Use this with the include arguments to select a specific subset of strains.\")\n metadata_filter_group.add_argument('--include', type=str, nargs=\"+\", help=\"file(s) with list of strains to include regardless of priorities or subsampling\")\n metadata_filter_group.add_argument('--include-where', nargs='+',\n help=\"Include samples with these values. ex: host=rat. Multiple values are processed as OR (having any of those specified will be included), not AND. 
This rule is applied last and ensures any sequences matching these rules will be included.\")\n\n sequence_filter_group = parser.add_argument_group(\"sequence filters\", \"filters to apply to sequence data\")\n sequence_filter_group.add_argument('--min-length', type=int, help=\"minimal length of the sequences\")\n sequence_filter_group.add_argument('--non-nucleotide', action='store_true', help=\"exclude sequences that contain illegal characters\")\n\n subsample_group = parser.add_argument_group(\"subsampling\", \"options to subsample filtered data\")\n subsample_group.add_argument('--group-by', nargs='+', help=\"categories with respect to subsample; two virtual fields, \\\"month\\\" and \\\"year\\\", are supported if they don't already exist as real fields but a \\\"date\\\" field does exist\")\n subsample_limits_group = subsample_group.add_mutually_exclusive_group()\n subsample_limits_group.add_argument('--sequences-per-group', type=int, help=\"subsample to no more than this number of sequences per category\")\n subsample_limits_group.add_argument('--subsample-max-sequences', type=int, help=\"subsample to no more than this number of sequences\")\n probabilistic_sampling_group = subsample_group.add_mutually_exclusive_group()\n probabilistic_sampling_group.add_argument('--probabilistic-sampling', action='store_true', help=\"Enable probabilistic sampling during subsampling. This is useful when there are more groups than requested sequences. This option only applies when `--subsample-max-sequences` is provided.\")\n probabilistic_sampling_group.add_argument('--no-probabilistic-sampling', action='store_false', dest='probabilistic_sampling')\n subsample_group.add_argument('--priority', type=str, help=\"\"\"tab-delimited file with list of priority scores for strains (e.g., \"\\\\t\") and no header.\n When scores are provided, Augur converts scores to floating point values, sorts strains within each subsampling group from highest to lowest priority, and selects the top N strains per group where N is the calculated or requested number of strains per group.\n Higher numbers indicate higher priority.\n Since priorities represent relative values between strains, these values can be arbitrary.\"\"\")\n subsample_group.add_argument('--subsample-seed', help=\"random number generator seed to allow reproducible sub-sampling (with same input data). 
Can be number or string.\")\n\n output_group = parser.add_argument_group(\"outputs\", \"possible representations of filtered data (at least one required)\")\n output_group.add_argument('--output', '--output-sequences', '-o', help=\"filtered sequences in FASTA format\")\n output_group.add_argument('--output-metadata', help=\"metadata for strains that passed filters\")\n output_group.add_argument('--output-strains', help=\"list of strains that passed filters (no header)\")\n\n parser.set_defaults(probabilistic_sampling=True)\n\ndef run(args):\n '''\n filter and subsample a set of sequences into an analysis set\n '''\n # Validate arguments before attempting any I/O.\n # Don't allow sequence output when no sequence input is provided.\n if args.output and not args.sequences:\n print(\n \"ERROR: You need to provide sequences to output sequences.\",\n file=sys.stderr)\n return 1\n\n # Confirm that at least one output was requested.\n if not any((args.output, args.output_metadata, args.output_strains)):\n print(\n \"ERROR: You need to select at least one output.\",\n file=sys.stderr)\n return 1\n\n # Don't allow filtering on sequence-based information, if no sequences or\n # sequence index is provided.\n SEQUENCE_ONLY_FILTERS = [\n args.min_length,\n args.non_nucleotide\n ]\n if not args.sequences and not args.sequence_index and any(SEQUENCE_ONLY_FILTERS):\n print(\n \"ERROR: You need to provide a sequence index or sequences to filter on sequence-specific information.\",\n file=sys.stderr)\n return 1\n\n # Load inputs, starting with metadata.\n try:\n # Metadata are the source of truth for which sequences we want to keep\n # in filtered output.\n meta_dict, meta_columns = read_metadata(args.metadata)\n metadata_strains = set(meta_dict.keys())\n except ValueError as error:\n print(\"ERROR: Problem reading in {}:\".format(args.metadata))\n print(error)\n return 1\n\n #Set flags if VCF\n is_vcf = False\n is_compressed = False\n if args.sequences and any([args.sequences.lower().endswith(x) for x in ['.vcf', '.vcf.gz']]):\n is_vcf = True\n if args.sequences.lower().endswith('.gz'):\n is_compressed = True\n\n ### Check users has vcftools. If they don't, a one-blank-line file is created which\n # allows next step to run but error very badly.\n if is_vcf:\n from shutil import which\n if which(\"vcftools\") is None:\n print(\"ERROR: 'vcftools' is not installed! This is required for VCF data. 
\"\n \"Please see the augur install instructions to install it.\")\n return 1\n\n # Read in files\n\n # If VCF, open and get sequence names\n if is_vcf:\n vcf_sequences, _ = read_vcf(args.sequences)\n sequence_strains = set(vcf_sequences)\n elif args.sequences or args.sequence_index:\n # If FASTA, try to load the sequence composition details and strain\n # names to be filtered.\n index_is_autogenerated = False\n sequence_index_path = args.sequence_index\n\n # Generate the sequence index on the fly, for backwards compatibility\n # with older workflows that don't generate the index ahead of time.\n if sequence_index_path is None:\n # Create a temporary index using a random filename to avoid\n # collisions between multiple filter commands.\n index_is_autogenerated = True\n with NamedTemporaryFile(delete=False) as sequence_index_file:\n sequence_index_path = sequence_index_file.name\n\n print(\n f\"WARNING: A sequence index was not provided, so we are generating one.\",\n \"Generate your own index ahead of time with `augur index` and pass it with `augur filter --sequence-index`.\",\n file=sys.stderr\n )\n index_sequences(args.sequences, sequence_index_path)\n\n sequence_index = pd.read_csv(\n sequence_index_path,\n sep=\"\\t\"\n )\n\n # Remove temporary index file, if it exists.\n if index_is_autogenerated:\n os.unlink(sequence_index_path)\n\n # Calculate summary statistics needed for filtering.\n sequence_index[\"ACGT\"] = sequence_index.loc[:, [\"A\", \"C\", \"G\", \"T\"]].sum(axis=1)\n sequence_strains = set(sequence_index[\"strain\"].values)\n else:\n sequence_strains = None\n\n if sequence_strains is not None:\n # Calculate the number of strains that don't exist in either metadata or sequences.\n num_excluded_by_lack_of_metadata = len(sequence_strains - metadata_strains)\n num_excluded_by_lack_of_sequences = len(metadata_strains - sequence_strains)\n\n # Intersect sequence strain names with metadata strains.\n available_strains = metadata_strains & sequence_strains\n else:\n num_excluded_by_lack_of_metadata = None\n num_excluded_by_lack_of_sequences = None\n\n # When no sequence data are available, we treat the metadata as the\n # source of truth.\n available_strains = metadata_strains\n\n # Track the strains that are available to select by the filters below, after\n # accounting for availability of metadata and sequences.\n seq_keep = available_strains.copy()\n\n #####################################\n #Filtering steps\n #####################################\n\n # Exclude all strains by default.\n if args.exclude_all:\n num_excluded_by_all = len(available_strains)\n seq_keep = set()\n\n # remove strains explicitly excluded by name\n # read list of strains to exclude from file and prune seq_keep\n num_excluded_by_name = 0\n if args.exclude:\n try:\n to_exclude = read_strains(*args.exclude)\n num_excluded_by_name = len(seq_keep & to_exclude)\n seq_keep = seq_keep - to_exclude\n except FileNotFoundError as e:\n print(\"ERROR: Could not open file of excluded strains '%s'\" % args.exclude, file=sys.stderr)\n sys.exit(1)\n\n # exclude strain my metadata field like 'host=camel'\n # match using lowercase\n num_excluded_by_metadata = {}\n if args.exclude_where:\n for ex in args.exclude_where:\n try:\n col, val = re.split(r'!?=', ex)\n except (ValueError,TypeError):\n print(\"invalid --exclude-where clause \\\"%s\\\", should be of from property=value or property!=value\"%ex)\n else:\n to_exclude = set()\n for seq_name in seq_keep:\n if \"!=\" in ex: # i.e. 
property!=value requested\n if meta_dict[seq_name].get(col,'unknown').lower() != val.lower():\n to_exclude.add(seq_name)\n else: # i.e. property=value requested\n if meta_dict[seq_name].get(col,'unknown').lower() == val.lower():\n to_exclude.add(seq_name)\n\n num_excluded_by_metadata[ex] = len(seq_keep & to_exclude)\n seq_keep = seq_keep - to_exclude\n\n # exclude strains by metadata, using Pandas querying\n num_excluded_by_query = 0\n if args.query:\n filtered = set(filter_by_query(list(seq_keep), args.metadata, args.query))\n num_excluded_by_query = len(seq_keep - filtered)\n seq_keep = filtered\n\n # filter by sequence length\n num_excluded_by_length = 0\n if args.min_length:\n if is_vcf: #doesn't make sense for VCF, ignore.\n print(\"WARNING: Cannot use min_length for VCF files. Ignoring...\")\n else:\n is_in_seq_keep = sequence_index[\"strain\"].isin(seq_keep)\n is_gte_min_length = sequence_index[\"ACGT\"] >= args.min_length\n\n seq_keep_by_length = set(\n sequence_index[\n (is_in_seq_keep) & (is_gte_min_length)\n ][\"strain\"].tolist()\n )\n\n num_excluded_by_length = len(seq_keep) - len(seq_keep_by_length)\n seq_keep = seq_keep_by_length\n\n # filter by ambiguous dates\n num_excluded_by_ambiguous_date = 0\n if args.exclude_ambiguous_dates_by and 'date' in meta_columns:\n seq_keep_by_date = set()\n for seq_name in seq_keep:\n if not is_date_ambiguous(meta_dict[seq_name]['date'], args.exclude_ambiguous_dates_by):\n seq_keep_by_date.add(seq_name)\n\n num_excluded_by_ambiguous_date = len(seq_keep) - len(seq_keep_by_date)\n seq_keep = seq_keep_by_date\n\n # filter by date\n num_excluded_by_date = 0\n if (args.min_date or args.max_date) and 'date' in meta_columns:\n dates = get_numerical_dates(meta_dict, fmt=\"%Y-%m-%d\")\n tmp = {s for s in seq_keep if dates[s] is not None}\n if args.min_date:\n tmp = {s for s in tmp if (np.isscalar(dates[s]) or all(dates[s])) and np.max(dates[s])>args.min_date}\n if args.max_date:\n tmp = {s for s in tmp if (np.isscalar(dates[s]) or all(dates[s])) and np.min(dates[s])<args.max_date}\n num_excluded_by_date = len(seq_keep - tmp)\n seq_keep = tmp\n\n\ndef numeric_date(date):\n \"\"\"\n Parse *date* either as an Augur-style numeric date (a float whose integer part is the year) or as YYYY-MM-DD.\n\n >>> numeric_date(\"2020.42\")\n 2020.42\n >>> numeric_date(\"2020-06-04\")\n 2020.42486...\n \"\"\"\n try:\n return float(date)\n except ValueError:\n return treetime.utils.numeric_date(datetime.date(*map(int, date.split(\"-\", 2))))\n\n\nclass TooManyGroupsError(ValueError):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return str(self.msg)\n\n\ndef _calculate_total_sequences(\n hypothetical_spg: float, sequence_lengths: Collection[int],\n) -> float:\n # calculate how many sequences we'd keep given a hypothetical spg.\n return sum(\n min(hypothetical_spg, sequence_length)\n for sequence_length in sequence_lengths\n )\n\n\ndef _calculate_sequences_per_group(\n target_max_value: int,\n sequence_lengths: Collection[int]\n) -> int:\n \"\"\"This is partially inspired by\n https://github.com/python/cpython/blob/3.8/Lib/bisect.py\n\n This should return the spg such that we don't exceed the requested\n number of samples.\n\n Parameters\n ----------\n target_max_value : int\n the total number of sequences allowed across all groups\n sequence_lengths : Collection[int]\n the number of sequences in each group\n\n Returns\n -------\n int\n maximum number of sequences allowed per group to meet the required maximum total\n sequences allowed\n\n >>> _calculate_sequences_per_group(4, [4, 2])\n 2\n >>> _calculate_sequences_per_group(2, [4, 2])\n 1\n >>> _calculate_sequences_per_group(1, [4, 2])\n Traceback (most recent call last):\n ...\n augur.filter.TooManyGroupsError: Asked to 
provide at most 1 sequences, but there are 2 groups.\n \"\"\"\n\n if len(sequence_lengths) > target_max_value:\n # we have more groups than sequences we are allowed, which is an\n # error.\n\n raise TooManyGroupsError(\n \"Asked to provide at most {} sequences, but there are {} \"\n \"groups.\".format(target_max_value, len(sequence_lengths)))\n\n lo = 1\n hi = target_max_value\n\n while hi - lo > 2:\n mid = (hi + lo) // 2\n if _calculate_total_sequences(mid, sequence_lengths) <= target_max_value:\n lo = mid\n else:\n hi = mid\n\n if _calculate_total_sequences(hi, sequence_lengths) <= target_max_value:\n return int(hi)\n else:\n return int(lo)\n\n\ndef _calculate_fractional_sequences_per_group(\n target_max_value: int,\n sequence_lengths: Collection[int]\n) -> float:\n \"\"\"Returns the fractional sequences per group for the given list of group\n sequences such that the total doesn't exceed the requested number of\n samples.\n\n Parameters\n ----------\n target_max_value : int\n the total number of sequences allowed across all groups\n sequence_lengths : Collection[int]\n the number of sequences in each group\n\n Returns\n -------\n float\n fractional maximum number of sequences allowed per group to meet the\n required maximum total sequences allowed\n\n >>> np.around(_calculate_fractional_sequences_per_group(4, [4, 2]), 4)\n 1.9375\n >>> np.around(_calculate_fractional_sequences_per_group(2, [4, 2]), 4)\n 0.9688\n\n Unlike the integer-based version of this function, the fractional version\n can accept a maximum number of sequences that exceeds the number of groups.\n In this case, the function returns a fraction that can be used downstream,\n for example with Poisson sampling.\n\n >>> np.around(_calculate_fractional_sequences_per_group(1, [4, 2]), 4)\n 0.4844\n \"\"\"\n lo = 1e-5\n hi = target_max_value\n\n while (hi / lo) > 1.1:\n mid = (lo + hi) / 2\n if _calculate_total_sequences(mid, sequence_lengths) <= target_max_value:\n lo = mid\n else:\n hi = mid\n\n return (lo + hi) / 2\n","repo_name":"dandye/augur","sub_path":"augur/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":35442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"34960466063","text":"from collections import deque\n\n\nn = int(input())\n\ndata = []\n\nfor _ in range(n):\n data.append(list(map(int, input().split())))\n\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\n\ndef bfs(x, y, size, count, result):\n visited = [[False] * n for _ in range(n)]\n\n queue = deque()\n queue.append((x, y, 0))\n visited[x][y] = True\n\n change = False\n saveDist = -2\n ans = (20, 20)\n\n while queue:\n mx, my, dist = queue.popleft()\n if dist == saveDist + 1:\n break\n for i in range(4):\n nx, ny = mx + dx[i], my + dy[i]\n if 0 <= nx <= n - 1 and 0 <= ny <= n - 1 and visited[nx][ny] == False:\n if data[nx][ny] == 0 or data[nx][ny] == size:\n queue.append((nx, ny, dist + 1))\n visited[nx][ny] = True\n elif data[nx][ny] < size:\n change = True\n if saveDist == -2:\n saveDist = dist\n if ans[0] > nx or ans[0] == nx and ans[1] > ny:\n ans = (nx, ny)\n\n if change == True:\n count += 1\n if size == count:\n size += 1\n count = 0\n result += saveDist + 1\n nx, ny = ans\n data[nx][ny] = 9\n data[x][y] = 0\n return bfs(nx, ny, size, count, result)\n else:\n return result\n\n\nOK = True\nfor i in range(n):\n if OK:\n for j in range(n):\n if data[i][j] == 9:\n print(bfs(i, j, 2, 0, 0))\n OK = False\n 
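The `_calculate_sequences_per_group` helper in the augur record above is a bounded binary search; a compact restatement of the same idea (my own names, not augur's API) that satisfies the doctest values shown:

```python
def seqs_per_group(max_total, group_sizes):
    # Largest integer cap N such that sum(min(N, size)) stays within max_total.
    if len(group_sizes) > max_total:
        raise ValueError("more groups than allowed sequences")
    total = lambda cap: sum(min(cap, size) for size in group_sizes)
    lo, hi = 1, max_total
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if total(mid) <= max_total:
            lo = mid       # mid is feasible, try a larger cap
        else:
            hi = mid - 1   # mid overshoots the budget
    return lo

assert seqs_per_group(4, [4, 2]) == 2
assert seqs_per_group(2, [4, 2]) == 1
```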
break\n","repo_name":"dbwhdtjr0457/study","sub_path":"CodeTest/python/baekjoon/16236.py","file_name":"16236.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22635652646","text":"import os\nimport base64\nimport boto3\nimport gzip\nimport json\nimport logging\nimport os\n\nfrom botocore.exceptions import ClientError\nfrom template import Template\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef getBatchId(ERROR_MESSAGE):\n ERROR_MESSAGE = ERROR_MESSAGE.replace(\"\\n\",\"\")\n\n if ERROR_MESSAGE.find(\"BatchId\") != -1:\n searchWord = ERROR_MESSAGE.find(\"BatchId\")\n lengthWord = len(ERROR_MESSAGE)\n\n ERROR_MESSAGE = ERROR_MESSAGE[searchWord:lengthWord]\n ERROR_MESSAGE = ERROR_MESSAGE.replace(\"'\", \"\")\n ERROR_MESSAGE = ERROR_MESSAGE.replace(\",\", \"\")\n ERROR_MESSAGE = ERROR_MESSAGE.replace(\"]\", \"\")\n ERROR_MESSAGE = ERROR_MESSAGE.replace(\"BatchId:\", \"\")\n return ERROR_MESSAGE\n else:\n return ''\n\ndef logpayload(event):\n logger.setLevel(logging.DEBUG)\n logger.debug(event['awslogs']['data'])\n compressed_payload = base64.b64decode(event['awslogs']['data'])\n uncompressed_payload = gzip.decompress(compressed_payload)\n log_payload = json.loads(uncompressed_payload)\n return log_payload\n\n\ndef error_details(payload):\n error_msg = \"\"\n log_events = payload['logEvents']\n logger.debug(payload)\n loggroup = payload['logGroup']\n logstream = payload['logStream']\n lambda_func_name = loggroup.split('/')\n logger.debug(f'LogGroup: {loggroup}')\n logger.debug(f'Logstream: {logstream}')\n logger.debug(f'Function name: {lambda_func_name[3]}')\n logger.debug(log_events)\n for log_event in log_events:\n error_msg += log_event['message']\n logger.debug('Message: %s' % error_msg.split(\"\\n\"))\n return loggroup, logstream, error_msg, lambda_func_name\n\n\ndef publish_message_sns(loggroup, logstream, error_msg, lambda_func_name):\n sns_arn = os.environ['snsARN'] # Getting the SNS Topic ARN passed in by the environment variables.\n\n snsclient = boto3.client('sns')\n try:\n message = \"\"\n message += \"\\nLambda error summary\" + \"\\n\\n\"\n message += \"##########################################################\\n\"\n message += \"# LogGroup Name:- \" + str(loggroup) + \"\\n\"\n message += \"# LogStream:- \" + str(logstream) + \"\\n\"\n message += \"# Log Message:- \" + \"\\n\"\n message += \"# \\t\\t\" + str(error_msg.split(\"\\n\")) + \"\\n\"\n message += \"##########################################################\\n\"\n\n # Sending the notification...\n snsclient.publish(\n TargetArn=sns_arn,\n Subject=f'Execution error for Lambda - {lambda_func_name[3]}',\n Message=message\n )\n except ClientError as e:\n logger.error(\"An error occured: %s\" % e)\n\ndef publish_message_ses(loggroup, logstream, error_msg, lambda_func_name, templateType):\n # The character encoding for the email.\n CHARSET = \"UTF-8\"\n RECIPIENT = os.environ['RECIPIENT_ALERT']\n SENDER = os.environ['SENDER_ALERT']\n SUBJECT = f'Execution error for Lambda - {lambda_func_name[3]}'\n # The email body for recipients with non-HTML email clients.\n BODY_TEXT = (\"Lambda error \\r\\n\"\n \" error \"\n \" summary \"\n )\n\n logger.info(\"Message-Error-22: %s\" % str(error_msg.split(\"\\n\")))\n\n ERROR_MESSAGE = str(error_msg.split(\"\\n\"))\n # BATCH_ID = getBatchId(ERROR_MESSAGE)\n\n BODY_HTML_TEMPLATE = \"\"\n templateMail = Template(templateType)\n if templateType == 
'Price': \n BODY_HTML_TEMPLATE = templateMail.getHtmlMailBrandliveRed(ERROR_MESSAGE)\n elif templateType == 'Inventory':\n BODY_HTML_TEMPLATE = templateMail.getHtmlMailBrandliveGray(ERROR_MESSAGE)\n else :\n BODY_HTML_TEMPLATE = templateMail.getHtmlMailInfracommerceCherry(ERROR_MESSAGE)\n\n # BODY_HTML_TEMPLATE = Template. (123459789, ERROR_MESSAGE)\n\n sesclient = boto3.client('ses')\n try:\n # Sending the notification...\n sesclient.send_email(\n Source=SENDER,\n Destination={\n 'ToAddresses': [\n RECIPIENT\n ],\n },\n Message={\n 'Body': {\n 'Html': {\n 'Charset': CHARSET,\n 'Data': BODY_HTML_TEMPLATE,\n },\n 'Text': {\n 'Charset': CHARSET,\n 'Data': BODY_TEXT,\n },\n },\n 'Subject': {\n 'Charset': CHARSET,\n 'Data': SUBJECT,\n },\n },\n )\n\n except ClientError as e:\n logger.error(\"An error occured: %s\" % e)\n else:\n logger.info(\"Email sent!\")\n\n\ndef user_scan_log(event, context):\n logger.info(\"user_scan_log event\")\n pload = logpayload(event)\n lgroup, lstream, errmessage, lambdaname = error_details(pload)\n publish_message_ses(lgroup, lstream, errmessage, lambdaname, \"Inventory\")\n\ndef profile_scan_log(event, context):\n logger.info(\"profile_scan_log event\")\n pload = logpayload(event)\n lgroup, lstream, errmessage, lambdaname = error_details(pload)\n publish_message_ses(lgroup, lstream, errmessage, lambdaname, \"Price\")\n\n# def orders_hook_scan_log(event, context):\n# logger.info(\"orders_hook_scan_log event\")\n# pload = logpayload(event)\n# lgroup, lstream, errmessage, lambdaname = error_details(pload)\n# publish_message_ses(lgroup, lstream, errmessage, lambdaname, \"Order\")","repo_name":"avaslegend/aws-nest-app","sub_path":"notification/alert_handler.py","file_name":"alert_handler.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34535688159","text":"\"\"\"\nDP\n개미 전사\n제한 : 1초 / 128MB\n\"\"\"\n\nimport sys\n\nN = int(input())\nK = list(map(int, sys.stdin.readline().rstrip().split()))\nresult = [0] * N\nresult[0] = K[0]\nresult[1] = max(K[0], K[1])\n\nfor i in range(2, N):\n result[i] = max(result[i-1], result[i-2] + K[i])\n\nprint(result[N-1])\n","repo_name":"PeopleAndService/AlgorithmStudy","sub_path":"suho/withpython/DP/8_3_ant.py","file_name":"8_3_ant.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"ko","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"72744403451","text":"from __future__ import annotations\n\nimport argparse\n\nimport gdb\nfrom pwnlib.util.cyclic import cyclic\n\nimport pwndbg.color.message as M\nimport pwndbg.commands\n\nparser = argparse.ArgumentParser(description=\"Spray memory with cyclic() generated values\")\nparser.add_argument(\"addr\", help=\"Address to spray\")\nparser.add_argument(\n \"length\",\n help=\"Length of byte sequence, when unspecified sprays until the end of vmmap which address belongs to\",\n type=int,\n nargs=\"?\",\n default=0,\n)\nparser.add_argument(\n \"--value\",\n help=\"Value to spray memory with, when prefixed with '0x' treated as hex string encoded big-endian\",\n type=str,\n required=False,\n)\nparser.add_argument(\n \"-x\",\n \"--only-funcptrs\",\n help=\"Spray only addresses whose values points to executable pages\",\n action=\"store_true\",\n)\n\n\n@pwndbg.commands.ArgparsedCommand(parser)\n@pwndbg.commands.OnlyWhenRunning\ndef spray(addr, length, value, only_funcptrs) -> None:\n if length == 0:\n page = pwndbg.gdblib.vmmap.find(addr)\n if page is 
None:\n print(\n M.error(\n f\"Invalid address {addr}: can't find vmmap containing it to determine the spray length\"\n )\n )\n return\n length = page.end - int(addr)\n\n value_bytes = b\"\"\n\n if value:\n if value.startswith(\"0x\"):\n value_bytes = int(value, 16).to_bytes((len(value[2:]) + 1) // 2, byteorder=\"big\")\n else:\n value_bytes = bytes(value, \"utf-8\")\n\n value_length = len(value_bytes)\n value_bytes = value_bytes * (int(length) // value_length)\n\n if length % value_length != 0:\n value_bytes += value_bytes[: (length % value_length)]\n else:\n value_bytes = cyclic(length, n=pwndbg.gdblib.arch.ptrsize)\n\n try:\n if only_funcptrs:\n mem = pwndbg.gdblib.memory.read(addr, length)\n\n addresses_written = 0\n ptrsize = pwndbg.gdblib.arch.ptrsize\n for i in range(0, len(mem) - (length % ptrsize), ptrsize):\n ptr_candidate = pwndbg.gdblib.arch.unpack(mem[i : i + ptrsize])\n page = pwndbg.gdblib.vmmap.find(ptr_candidate)\n if page is not None and page.execute:\n pwndbg.gdblib.memory.write(addr + i, value_bytes[i : i + ptrsize])\n addresses_written += 1\n print(M.notice(f\"Overwritten {addresses_written} function pointers\"))\n else:\n pwndbg.gdblib.memory.write(addr, value_bytes)\n except gdb.MemoryError as e:\n print(M.error(e))\n","repo_name":"pwndbg/pwndbg","sub_path":"pwndbg/commands/spray.py","file_name":"spray.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":6166,"dataset":"github-code","pt":"78"} +{"seq_id":"35478032759","text":"from pathlib import Path\nimport json\nimport codecs\nimport hashlib\n\n\ndef to_sha1(input):\n h = hashlib.sha1(input.encode('utf-8'))\n output = h.hexdigest().upper()\n print(\"output length \", len(output))\n return output\n\n\ndef to_hex(input_string):\n output_hex = input_string.encode('utf-8').hex().upper()\n return output_hex\n\n\ndef file_to_hex(filename):\n with Path(filename).open(\"rb\") as file:\n input = json.load(file)\n text = json.dumps(input, sort_keys=True,\n indent=4, separators=(',', ': '))\n hex_value = to_hex(text)\n return hex_value\n\n\ndef memo_to_hex(file_url, meta_url):\n data = {\n 'file_url': file_url,\n 'metadata_url': meta_url\n }\n\n return to_hex(str(data))\n\n\ndef uri_hex(file_url):\n data = {\n 'file_url': file_url\n }\n\n return to_hex(str(data))\n\n\ndef memo_json_to_hex(data_json):\n data = {\n 'enc_data': data_json\n }\n\n return to_hex(str(data))\n\n\ndef hex_to_ascii(hex_string):\n binary_str = codecs.decode(hex_string, \"hex\")\n string_value = str(binary_str, 'utf-8')\n return string_value\n\n\ndef get_explorer_addr(account):\n return f\"https://test.bithomp.com/explorer/{account}\"\n","repo_name":"EdienAS/Hubsecure_storage","sub_path":"xrpl_api/helper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30122581626","text":"# import necessary libraries\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# load the MNIST dataset\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# preprocess the data\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n# convert class vectors to binary class matrices\ny_train = 
to_categorical(y_train, 10)\ny_test = to_categorical(y_test, 10)\n\n# reshape the data for the convolutional layers\nx_train = x_train.reshape((x_train.shape[0], 28, 28, 1))\nx_test = x_test.reshape((x_test.shape[0], 28, 28, 1))\n\n# define the model\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\n# compile and fit the model\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# create an instance of the ImageDataGenerator class\ndatagen = ImageDataGenerator(\n rotation_range=30,\n width_shift_range=0.2,\n height_shift_range=0.1,\n zoom_range=0.3\n)\n# fit the data generator to the training data\ndatagen.fit(x_train)\n\n# use the flow() method to generate augmented data\n# specify the number of epochs to run and the batch size\nmodel.fit_generator(datagen.flow(x_train, y_train, batch_size=128), epochs=10, verbose=1, validation_data=(x_test, y_test))\n\nmodel.save('./model.h5')\n\n","repo_name":"sh1nj1/classifier-dl-handwritten-digits","sub_path":"src/train-classification-of-handwritten-digits.py","file_name":"train-classification-of-handwritten-digits.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2366935743","text":"name = input(\"Enter name: \")\nhours = int(input(\"Enter number of hours worked weekly: \"))\nrate = float(input(\"Enter hourly pay rate: \"))\ncpfrate = float(input(\"Enter CPF contribution rate(%): \"))\n\ngross = hours * rate\ncpf = cpfrate / 100 * gross\nnet = gross - cpf\n\npayroll = \"\"\"Payroll statement for {0:}\nNumber of hours worked in week: {1:}\nHourly pay rate: ${2:.2f}\nGross pay = ${3:.2f}\nCPF contribution at {4:}% = ${5:.2f}\nNet pay = ${6:.2f} \"\"\".format(name, hours, rate, gross, cpfrate, cpf, net)\n \nprint(payroll)\n","repo_name":"Airiinnn/cp2019","sub_path":"p01/q7_generate_payroll.py","file_name":"q7_generate_payroll.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74612484091","text":"# time O(n), space O(n)\n# travel from left and right to calculate the product of nums[:i]\n# excluding nums[i], saved into left[], and then right to left \n# to calcualte the produce of nums[i+1:], saved into right[]\n# then the answer will be left[i]*right[i]\n\nclass Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(nums)\n left = [0]*n # left[i] means the product of nums[:i], excluding nums[i]\n right = [0]*n\n left[0] = 1\n right[n-1] = 1\n for i in range(1, n):\n left[i] = left[i-1]*nums[i-1]\n for i in range(n-2, -1, -1):\n right[i] = right[i+1]*nums[i+1]\n \n res=[]\n for i in range(n):\n res.append(left[i]*right[i])\n \n return res\n \n","repo_name":"CaizhiXu/LeetCode-Solutions-Python-Weimin","sub_path":"0238. Product of Array Except Self.py","file_name":"0238. 
Product of Array Except Self.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19855849687","text":"import os\n\nnumbers = []\nnumClusters = 0\nnumBits = 0\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nwith open(os.path.join(__location__, 'clusteringBig.txt')) as file:\n\tnumbers = file.readlines()\n\tfirstLine = numbers[0].rstrip().split(\" \")\n\tnumClusters = int(firstLine[0])\n\tnumBits = int(firstLine[1])\n\tnumbers.pop(0)\n\tfor i in range(len(numbers)):\n\t\tnumbers[i] = int(numbers[i].rstrip().replace(\" \", \"\"), 2)\n\tnumbers.sort()\n\tfor i in range(len(numbers)):\n\t\tnumbers[i] = [numbers[i], 0, i]\n\nnumClusters2 = numClusters\n\ndef find(index):\n\tif numbers[index][2] != index:\n\t\tnumbers[index][2] = find(numbers[index][2])\n\treturn numbers[index][2]\ndef union(index1, index2):\n\trootIndex1 = find(index1)\n\trootIndex2 = find(index2)\n\tif rootIndex1 == rootIndex2:\n\t\treturn\n\telse:\n\t\tnumbers[rootIndex1][2] = rootIndex2\n\t\tglobal numClusters\n\t\tnumClusters -= 1\n\nxors1 = []\nfor i in range(numBits):\n\txors1.append(pow(2, i))\n\nxors2 = []\nfor i in range(numBits - 1):\n\tfor j in range(i + 1, numBits):\n\t\txors2.append(pow(2, i) + pow(2, j))\n\ndef findPossibleDistance1(index):\n\tresults = []\n\tfor i in xors1:\n\t\tresults.append(numbers[index][0] ^ i)\n\treturn results\ndef findPossibleDistance2(index):\n\tresults = []\n\tfor i in xors2:\n\t\tresults.append(numbers[index][0] ^ i)\n\treturn results\n\ndef findIndex(number):\n\thiIndex = numClusters2 - 1\n\tloIndex = 0\n\tmidIndex = (hiIndex + loIndex) // 2\n\twhile (hiIndex >= loIndex):\n\t\tif number > numbers[midIndex][0]:\n\t\t\tloIndex = midIndex + 1\n\t\telif number < numbers[midIndex][0]:\n\t\t\thiIndex = midIndex - 1\n\t\telse:\n\t\t\treturn midIndex\n\t\tmidIndex = (hiIndex + loIndex) // 2\n\treturn -1\n\nfor i in range(len(numbers) - 1):\n\tif numbers[i][0] == numbers[i + 1][0]:\n\t\tunion(i, i + 1)\nprint(numClusters)\nfor i in range(len(numbers)):\n\tpossibles = findPossibleDistance1(i)\n\tfor j in possibles:\n\t\tpossibleIndex = findIndex(j)\n\t\tif possibleIndex != -1:\n\t\t\tunion(possibleIndex, i)\nprint(numClusters)\n\nfor i in range(len(numbers)):\n\tif i % (numClusters2 // 100) == 0:\n\t\tprint(str(100.0 * i / numClusters2) + \"%\")\n\tpossibles = findPossibleDistance2(i)\n\tfor j in possibles:\n\t\tpossibleIndex = findIndex(j)\n\t\tif possibleIndex != -1:\n\t\t\tunion(possibleIndex, i)\nprint(numClusters)","repo_name":"adbforlife/Algorithms","sub_path":"unionFindBig.py","file_name":"unionFindBig.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4276889468","text":"\"\"\"\nSimilarity functions take a pair of tensors with the same shape, and compute a similarity function\non the vectors in the last dimension. For example, the tensors might both have shape\n`(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two\nvectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a\ntensor of shape `(batch_size, sentence_length)`.\n\nThe similarity function could be as simple as a dot product, or it could be a more complex,\nparameterized function. 
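The Hamming-distance clustering record above rests on one trick: every code within distance 2 of `x` equals `x ^ m` for some 1- or 2-bit mask `m`, so neighbours can be enumerated and looked up rather than compared pairwise. A small sketch of that enumeration (illustrative; it uses a set lookup where the record binary-searches a sorted list):

```python
from itertools import combinations

def close_pairs(codes, num_bits):
    # Masks that flip exactly one or exactly two of the num_bits positions.
    masks = [1 << i for i in range(num_bits)]
    masks += [(1 << i) | (1 << j) for i, j in combinations(range(num_bits), 2)]
    present = set(codes)
    pairs = set()
    for c in codes:
        for m in masks:
            other = c ^ m
            if other in present:
                pairs.add((min(c, other), max(c, other)))
    return pairs

print(close_pairs([0b0000, 0b0001, 0b0011, 0b1111], 4))
# expect {(0, 1), (0, 3), (1, 3), (3, 15)} - set ordering may vary
```

With b bits that is b + b*(b-1)/2 masks per code, which is why the record can afford to union everything within distance 2 without an all-pairs scan.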
The SimilarityFunction class exposes an API for a Layer that wants to\nallow for multiple similarity functions, such as for initializing and returning weights.\n\nIf you want to compute a similarity between tensors of different sizes, you need to first tile them\nin the appropriate dimensions to make them the same before you can use these functions. The\nAttention and MatrixAttention layers do this.\n\"\"\"\n\nfrom typing import List\n\nfrom keras import backend as K\nfrom overrides import overrides\n\nfrom typing import List\n\nfrom keras import activations, initializers\nimport tensorflow as tf\ndef switch(cond, then_tensor, else_tensor):\n \"\"\"\n Keras' implementation of K.switch currently uses tensorflow's switch function, which only\n accepts scalar value conditions, rather than boolean tensors which are treated in an\n elementwise function. This doesn't match with Theano's implementation of switch, but using\n tensorflow's where, we can exactly retrieve this functionality.\n \"\"\"\n\n cond_shape = cond.get_shape()\n input_shape = then_tensor.get_shape()\n if cond_shape[-1] != input_shape[-1] and cond_shape[-1] == 1:\n # This happens when the last dim in the input is an embedding dimension. Keras usually does not\n # mask the values along that dimension. Theano broadcasts the value passed along this dimension,\n # but TF does not. Using K.dot() since cond can be a tensor.\n cond = K.dot(tf.cast(cond, tf.float32), tf.ones((1, input_shape[-1])))\n return tf.where(tf.cast(cond, dtype=tf.bool), then_tensor, else_tensor)\n\nclass SimilarityFunction:\n def __init__(self, name: str, initialization: str='glorot_uniform', activation: str='linear'):\n self.name = name\n self.init = initializers.get(initialization)\n self.activation = activations.get(activation)\n\n def initialize_weights(self, tensor_1_dim: int, tensor_2_dim: int) -> List['K.variable']:\n \"\"\"\n Called in a `Layer.build()` method that uses this SimilarityFunction, here we both\n initialize whatever weights are necessary for this similarity function, and return them so\n they can be included in `Layer.trainable_weights`.\n\n\n Parameters\n ----------\n tensor_1_dim : int\n The last dimension (typically ``embedding_dim``) of the first input tensor. We need\n this so we can initialize weights appropriately.\n tensor_2_dim : int\n The last dimension (typically ``embedding_dim``) of the second input tensor. We need\n this so we can initialize weights appropriately.\n \"\"\"\n raise NotImplementedError\n\n def compute_similarity(self, tensor_1, tensor_2):\n \"\"\"\n Takes two tensors of the same shape, such as (batch_size, length_1, length_2,\n embedding_dim). Computes a (possibly parameterized) similarity on the final dimension and\n returns a tensor with one less dimension, such as (batch_size, length_1, length_2).\n \"\"\"\n raise NotImplementedError\n\nclass CosineSimilarity(SimilarityFunction):\n \"\"\"\n This similarity function simply computes the cosine similarity between each pair of vectors. 
It has\n    no parameters.\n    \"\"\"\n    def __init__(self, **kwargs):\n        super(CosineSimilarity, self).__init__(**kwargs)\n\n    @overrides\n    def initialize_weights(self, tensor_1_dim: int, tensor_2_dim: int) -> List['K.variable']:\n        if tensor_1_dim != tensor_2_dim:\n            raise ValueError(\"Tensor dims must match for cosine product similarity, but \"\n                             \"were {} and {}\".format(tensor_1_dim, tensor_2_dim))\n        return []\n\n    @overrides\n    def compute_similarity(self, tensor_1, tensor_2):\n        return K.sum(K.l2_normalize(tensor_1, axis=-1) * K.l2_normalize(tensor_2, axis=-1),\n                     axis=-1)\n\ndef masked_softmax(vector, mask):\n    \"\"\"\n    `K.softmax(vector)` does not work if some elements of `vector` should be masked. This performs\n    a softmax on just the non-masked portions of `vector` (passing None in for the mask is also\n    acceptable; you'll just get a regular softmax).\n\n    We assume that both `vector` and `mask` (if given) have shape (batch_size, vector_dim).\n\n    In the case that the input vector is completely masked, this function returns an array\n    of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model\n    that uses categorial cross-entropy loss.\n    \"\"\"\n    # We calculate masked softmax in a numerically stable fashion, as done\n    # in https://github.com/rkadlec/asreader/blob/master/asreader/custombricks/softmax_mask_bricks.py\n    if mask is not None:\n        # Here we get normalized log probabilities for\n        # enhanced numerical stability.\n        mask = K.cast(mask, \"float32\")\n        input_masked = mask * vector\n        shifted = mask * (input_masked - K.max(input_masked, axis=1,\n                                               keepdims=True))\n        # We add epsilon to avoid numerical instability when\n        # the sum in the log yields 0.\n        normalization_constant = K.log(K.sum(mask * K.exp(shifted), axis=1,\n                                             keepdims=True) + K.epsilon())\n        normalized_log_probabilities = mask * (shifted - normalization_constant)\n        unmasked_probabilities = K.exp(normalized_log_probabilities)\n        return switch(mask, unmasked_probabilities, K.zeros_like(unmasked_probabilities))\n    else:\n        # There is no mask, so we use the provided ``K.softmax`` function.\n        return K.softmax(vector)\n","repo_name":"aneesh-joshi/Similarity-Learning-Evaluation-Scripts","sub_path":"sl_eval/models/utils/tensor_ops.py","file_name":"tensor_ops.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"}
+{"seq_id":"9214900951","text":"# Time :2022-6-20 17:04\n# Author:Houtaroy\nimport cv2\nimport numpy as np\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom paddleocr import PaddleOCR\n\nocr = PaddleOCR(use_angle_cls=True, lang=\"ch\")\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/', methods=['POST'])\ndef hello_world():\n    if request.method == 'POST':\n        return {\n            \"code\": 200,\n            \"message\": \"识别成功\",\n            \"content\": content(request.files['file'])\n        }\n\n\ndef content(file):\n    ocr_result = ocr.ocr(cv2.imdecode(np.frombuffer(file.read(), dtype=np.uint8), cv2.IMREAD_COLOR))\n    return \" \".join([line[1][0] for line in ocr_result])\n","repo_name":"houtaroy/python-samples","sub_path":"ocr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"26058917124","text":"import cv2 as cv\nimport os\nimport numpy as np\n\nimport sys\nimport darknet as darknet_module\n\nimport openpose as openpose_module\nimport liftnet as liftnet_module\n\nimport time as time\nfrom crop import Crop, Basic_Crop\nfrom helpers import bones_mpi\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm, colors\n\nif __name__ == \"__main__\":\n    img_path = (\"/cvlabdata2/home/kicirogl/ActiveDrone/simulation_results/2019-10-30-10-50/02_01/0/images/img_0.png\")\n    im = cv.imread(img_path)\n\n    start1 = time.time()\n    predictions = darknet_module.detect(img_path)\n    end1 = time.time()\n\n    max_confidence = -1\n    bounding_box = None\n    for prediction in predictions:\n        confidence = prediction[1]\n        if (prediction[0] == b'person') and confidence>max_confidence:\n            max_confidence = confidence\n            bounding_box = prediction[2]\n\n    cropping_tool=Basic_Crop (margin=0.2)\n    cropping_tool.update_bbox(bounding_box)\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.imshow(im)\n    bbox_corners_x, bbox_corners_y = cropping_tool.return_bbox_coord()\n    plt.plot(bbox_corners_x, bbox_corners_y)\n    plt.savefig(\"/cvlabdata2/home/kicirogl/ActiveDrone/my_scripts/yolo.png\", bbox_inches='tight', pad_inches=0)\n    plt.close(fig)\n\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    cropped_im = cropping_tool.crop_image(im)\n    ax.imshow(cropped_im)\n    plt.savefig(\"/cvlabdata2/home/kicirogl/ActiveDrone/my_scripts/basic_crop.png\", bbox_inches='tight', pad_inches=0)\n    plt.close(fig)\n\n    poses, heatmaps, heatmaps_scales, poses_scales = openpose_module.run_only_model(cropped_im, [0.5,1,1.5])\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n\n    ax.imshow(cropped_im)\n    for i, bone in enumerate(bones_mpi):\n        p0, = ax.plot(poses[0, bone], poses[1,bone], color = \"r\", linewidth=2)\n    plt.savefig(\"/cvlabdata2/home/kicirogl/ActiveDrone/my_scripts/basic_crop_openpose.png\", bbox_inches='tight', pad_inches=0)\n    plt.close(fig)\n\n\n    uncropped_pose = cropping_tool.uncrop_pose(poses)\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.imshow(im)\n    for i, bone in enumerate(bones_mpi):\n        p0, = ax.plot(uncropped_pose[0, bone], uncropped_pose[1,bone], color = \"r\", linewidth=2)\n    plt.savefig(\"/cvlabdata2/home/kicirogl/ActiveDrone/my_scripts/openpose.png\", bbox_inches='tight', pad_inches=0)\n    plt.close(fig)","repo_name":"senakicir/ActiveMoCap","sub_path":"my_scripts/test_yolo.py","file_name":"test_yolo.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"}
+{"seq_id":"3734835705","text":"from django.urls import path, re_path, include\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom rest_framework import routers\nfrom .views import EvaluadoViewset, EvaluadorSerializer, EvaluadorViewset\n\nrouter = routers.DefaultRouter()\nrouter.register('Evaluado', EvaluadoViewset)\nrouter.register('Evaluador', EvaluadorViewset)\n\nurlpatterns = [\n    #------Usuario-------\n    path('correoEvaluado/', views.correoEvaluado, name='correoEvaluado'),\n    path('login/', views.inicio, name='login'),\n    path('loginA/', views.loginA, name='loginA'),\n    path('loginE/', views.loginE, name='loginE'),\n    path('index/', views.index, name='index'),\n    path('caso1/', views.caso1, name=\"caso1\"),\n    path('vistaA/', views.vistaA, name='vistaA'),\n    path('vistaE/', views.vistaE, name='vistaE'),\n    path('evaluacionesPendientes/', views.evaluacionesPendientes, name='evaluacionesPendientes'),\n    path('notas/', views.notas, name='notas'),\n    path('prueba/', views.prueba, name='prueba'),\n    path('', views.perfil, name=\"perfil\"),\n    path('seleccion/', views.seleccion, name='seleccion'),\n    path('video/', views.video, name='video'),\n    path('foto/', views.foto, name='foto'),\n    path('cuestionario/', views.cuestionario, name='cuestionario'),\n    path('final/', views.final, name='final'),\n    path('creaEvaluado/', views.creaEvaluado, name='creaEvaluado'),\n    path('creaEvaluador/', views.creaEvaluador, name='creaEvaluador'),\n    path('creaActividad/', views.creaActividad, name='creaActividad'),\n    path('asignarEvaluacion/', views.asignarEvaluacion, name='asignarEvaluacion'),\n    path('actividadPendiente/', views.actividadPendiente, name='actividadPendiente'),\n    path('revisionPendiente/', views.revisionPendiente, name='revisionPendiente'),\n    path('actividadRealizada/', views.actividadRealizada, name='actividadRealizada'),\n    path('editarEvaluado/', views.editarEvaluado, name='editarEvaluado'),\n    path('actualizarEvaluado/', views.actualizarEvaluado, name='actualizarEvaluado'),\n    path('eliminarEvaluado/', views.eliminarEvaluado, name='eliminarEvaluado'),\n    path('editarEvaluador/', views.editarEvaluador, name='editarEvaluador'),\n    path('actualizarEvaluador/', views.actualizarEvaluador, name='actualizarEvaluador'),\n    path('eliminarEvaluador/', views.eliminarEvaluador, name='eliminarEvaluador'),\n    path('evaluar/', views.evaluar, name='evaluar'),\n    path('api/', include(router.urls)),\n    path('probar_rut/', views.probar_rut, name='probar_rut')\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"Piper616/TestGo4","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"1704283762","text":"import praw, sys, os\n\nuser_agent = \"Tournament Reminder by /u/0ffkilter\"\n\nconfig_file = open('%s%s' %(os.getcwd(), '/Config.txt'), 'r')\nconfig = config_file.read().split('\\n')\nconfig_file.close()\n\nreddit = praw.Reddit(user_agent = user_agent)\nreddit.login('stunfiskhelperbot', config[1])\n\npart_file = open('%s%s' %(os.getcwd(), '/Participants.txt'), 'r')\nparts = part_file.read().split('\\n')\npart_file.close()\n\nmessage = (\"Hello! You are receiving this message because you are \"\n           \"signed up for /r/stunfisk's Bucket O' Mons Tournament! If you \"\n           \"did not sign up for the tournament, that means that /u/0ffkilter \"\n           \"typed in someone's name wrong. You should probably let him know. \\n\\n\"\n\n           \"In Any case, this is a reminder that Round 1 of the Tournament is out! 
\\n\\n\"\n\n           \"The Theme is 'power pokes', and the post can be found \"\n           \"[here](http://www.reddit.com/r/stunfisk/comments/2cejgl/tournament_bucket_o_mons_round_1_announcement/)\\n\\n \"\n\n           \"You have until Tuesday, August 5th 12:00 PST to complete your match! \\n\\n\"\n\n           \"Additional rules and regulations can be found on the aforementioned post. \\n\\n\"\n\n           \"Send Questions or comments to /u/0ffkilter!\")\n\nsubject = \"Reminder for Bucket O' Mons Tournament!\"\n\nparts = ['bigyeIIowtaxi']\n\nfor participant in parts:\n    try:\n\n        reddit.send_message(participant, subject, message)\n        print('Sent -> %s' %participant)\n    except:\n        print('Failed -> %s' %participant)\n\n","repo_name":"0ffkilter/StunfiskBot","sub_path":"TournamentReminder.py","file_name":"TournamentReminder.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"39222480410","text":"import random\nimport shutil\nfrom tzlocal import get_localzone\nfrom pathlib import Path\n\nfrom fastapi import APIRouter, Depends, HTTPException, UploadFile # Importing necessary modules\nfrom sqlalchemy.orm import Session\n\nfrom ...model.crud import get_posts_quantity, get_user_quantity, get_comments_quantity\nfrom ... import config\nfrom ...dependencies.db import get_db\nfrom ...dependencies.oauth2scheme import oauth2Scheme\nfrom ...utilities.dir_tool import save_user_avatar\nfrom ...utilities.token_tools import get_user_name_by_token\nfrom ...utilities.userdata_tool import auth_user_by_name\n\n# Creating a new APIRouter object with a prefix, tags, dependencies and responses defined.\nuserdata_router = APIRouter(\n    prefix=\"/userdata\",\n    tags=['User data'],\n    dependencies=[Depends(get_db)],\n    responses={\n        404: {\n            \"Description\": \"Not Found\"\n        }\n    }\n)\n\n\n# Endpoint for getting user name using token\n@userdata_router.put(\"/get/username\")\ndef get_user_name(token: str = Depends(oauth2Scheme)):\n    user_name = get_user_name_by_token(token=token)\n    return {'username': user_name}\n\n\n# Endpoint for setting user profile background image\n@userdata_router.put(\"/set/background\")\ndef create_user_profile_background(background: UploadFile,\n                                   token: str = Depends(oauth2Scheme),\n                                   db: Session = Depends(get_db)):\n    file_suffix: str = background.filename.split(\".\")[-1] # Extracting file extension from filename\n    user_uuid = auth_user_by_name(db=db, token=token) # Authenticating user using token\n    background_path: Path = Path(config.BACKGROUND_DIR + \"/\" + user_uuid) # Defining the path to store background image\n    if background_path.exists(): # Removing existing background image\n        shutil.rmtree(background_path)\n    background_path.mkdir(exist_ok=True) # Creating directory to store the background image\n    try:\n        with open(str(background_path.joinpath(user_uuid + str(random.randint(0, 9999)) + \".\" + file_suffix)), \"wb\") \\\n                as f: # Saving the background image with a random number added to its filename.\n            content = background.file.read()\n            f.write(content)\n    except IOError:\n        print(IOError)\n\n    return {\"status\": \"success\"}\n\n\n# Endpoint for deleting user profile background image\n@userdata_router.delete(\"/delete/background\")\ndef delete_user_profile_background(token: str = Depends(oauth2Scheme),\n                                   db: Session = Depends(get_db)):\n    pass\n\n\n# Endpoint for setting user avatar image\n@userdata_router.put(\"/set/avatar\")\ndef create_user_avatar(avatar: UploadFile,\n                       db: Session = Depends(get_db),\n                       token: str = Depends(oauth2Scheme)):\n    avatar_path: Path = Path(config.AVATAR_DIR) # Defining the path to store avatar image\n    user_uuid = auth_user_by_name(db=db, token=token) # Authenticating user using token\n    file_suffix = avatar.filename.split(\".\")[-1] # Extracting file extension from filename\n    if not save_user_avatar(avatar=avatar, user_uuid=user_uuid, file_suffix=file_suffix, avatar_path=avatar_path):\n        # If the avatar image cannot be saved on the server, return an HTTPException with status code and details.\n        raise HTTPException(\n            status_code=500,\n            detail=\"Cannot save file one server.\"\n        )\n    else:\n        return {\"status\": \"success\"} # On success, return status message\n\n\n# Endpoint for deleting user avatar image\n@userdata_router.delete(\"/delete/avatar/{user_name}\")\ndef delete_user_avatar(user_name: str):\n    pass # Placeholder code - function yet to be implemented\n\n\n@userdata_router.get('/get/user_num')\ndef get_users_num(db: Session = Depends(get_db)):\n    users_num = get_user_quantity(db=db)\n    return users_num\n\n\n@userdata_router.get('/get/posts_num')\ndef get_posts_num(db: Session = Depends(get_db)):\n    posts_num = get_posts_quantity(db=db)\n    return posts_num\n\n\n@userdata_router.get('/get/comments_num')\ndef get_comments_num(db: Session = Depends(get_db)):\n    comments_num = get_comments_quantity(db=db)\n    return comments_num\n\n\n@userdata_router.get(\"/get/timezone\")\ndef get_server_timezone():\n    tz = str(get_localzone())\n    return tz\n","repo_name":"WeepingDogel/tinygallery-backend","sub_path":"app/routers/userdata/userdata.py","file_name":"userdata.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"}
+{"seq_id":"23226161509","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 14 13:22:18 2020\r\n\r\n\"\"\"\r\n\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.datasets import load_digits\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.decomposition import PCA, NMF\r\nfrom sklearn.feature_selection import SelectKBest,chi2, f_classif, mutual_info_classif, RFE, SelectFromModel,SelectFdr\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler\r\n\r\nprint(__doc__)\r\n\r\npipe = Pipeline([\r\n # the reduce_dim stage is populated by the param_grid\r\n ('reduce_dim', 'passthrough'),\r\n ('classify', SVC(kernel='rbf', gamma='auto', max_iter=10000))\r\n])\r\n\r\nN_FEATURES_OPTIONS = [50, 100, 150, 200]\r\nC_OPTIONS = [1, 10, 100, 1000]\r\nparam_grid = [\r\n {\r\n 'reduce_dim': [PCA(iterated_power=7), NMF()],\r\n 'reduce_dim__n_components': N_FEATURES_OPTIONS,\r\n 'classify__C': C_OPTIONS\r\n },\r\n {\r\n 'reduce_dim': [SelectKBest(chi2)],\r\n 'reduce_dim__k': N_FEATURES_OPTIONS,\r\n 'classify__C': C_OPTIONS\r\n },\r\n]\r\nreducer_labels = ['PCA', 'NMF', 'KBest(chi2)']\r\n\r\ngrid = GridSearchCV(pipe, n_jobs=1, param_grid=param_grid)\r\n#X, y = load_digits(return_X_y=True)\r\ndf=pd.read_csv('nusrat_all_dataset.csv')\r\n\r\nfeature_set = list(df.columns.values) \r\nfeature_set.remove('Class')\r\nX=(df.drop(columns=['Class'])).values\r\nY=(df['Class'])\r\n\r\n# drop rows with nan\r\nY=Y[~np.isnan(X).any(axis=1)]\r\nX=X[~np.isnan(X).any(axis=1)]\r\n\r\nX = MinMaxScaler().fit_transform(X)\r\ny= np.zeros(Y.shape)\r\nclass_names=list(np.unique(Y))\r\nclass_num=0\r\nnumber_of_classes=np.unique(Y).shape[0]\r\nfor classes in np.unique(Y):\r\n y[Y==classes]=int(class_num)\r\n print('Class '+ classes + ': ' + str(class_num))\r\n class_num=class_num+1\r\n\r\n\r\ngrid.fit(X, y)\r\n\r\nmean_scores = np.array(grid.cv_results_['mean_test_score'])\r\n# scores are in the order of param_grid iteration, which is alphabetical\r\nmean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))\r\n# select score for best C\r\nmean_scores = mean_scores.max(axis=0)\r\nbar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *\r\n (len(reducer_labels) + 1) + .5)\r\n\r\nplt.figure()\r\nCOLORS = 'bgrcmyk'\r\nfor i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):\r\n plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])\r\n\r\nplt.title(\"Comparing feature reduction techniques\")\r\nplt.xlabel('Reduced number of features')\r\nplt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)\r\nplt.ylabel('Digit classification accuracy')\r\nplt.ylim((0, 1))\r\nplt.legend(loc='upper left')\r\n\r\nplt.show()","repo_name":"NusratAsrafi/Malware-Family-Classificaton","sub_path":"malware_multi_class_feature_select_bar_plot.py","file_name":"malware_multi_class_feature_select_bar_plot.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37949238846","text":"#!/usr/bin/env python3\n#\n#\n# Data generation for KShivendu/dbpedia-entities-openai-1M dataset on huggingface.\n\nimport shutil\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nimport lance\nimport pyarrow as pa\nfrom datasets import DownloadConfig, load_dataset\n\nschema = pa.schema(\n [\n pa.field(\"_id\", pa.string()),\n pa.field(\"title\", pa.string()),\n pa.field(\"text\", pa.string()),\n pa.field(\"openai\", pa.list_(pa.float32(), 1536)),\n 
]\n)\n\n\ndef to_fixed_size_array(array, dim):\n return pa.FixedSizeListArray.from_arrays(array.values, dim)\n\n\ndef convert_dataset():\n for batch in load_dataset(\n \"KShivendu/dbpedia-entities-openai-1M\",\n download_config=DownloadConfig(num_proc=8, resume_download=True),\n split=\"train\",\n ).data.to_batches():\n yield pa.RecordBatch.from_arrays(\n [\n batch[\"_id\"],\n batch[\"title\"],\n batch[\"text\"],\n to_fixed_size_array(batch[\"openai\"], 1536),\n ],\n schema=schema,\n )\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"-o\", \"--output\", type=str, default=\"dbpedia.lance\")\n parser.add_argument(\n \"-g\",\n \"--max_rows_per_group\",\n type=int,\n default=10240,\n metavar=\"ROWS\",\n help=\"set max rows per group\",\n )\n parser.add_argument(\n \"-f\",\n \"--max_rows_per_file\",\n type=int,\n default=2048 * 100,\n metavar=\"ROWS\",\n help=\"set max rows per file\",\n )\n args = parser.parse_args()\n\n if Path(args.output).exists():\n shutil.rmtree(args.output)\n\n lance.write_dataset(\n convert_dataset(),\n args.output,\n schema=schema,\n max_rows_per_group=args.max_rows_per_group,\n max_rows_per_file=args.max_rows_per_file,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lancedb/lance","sub_path":"benchmarks/dbpedia-openai/datagen.py","file_name":"datagen.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":2819,"dataset":"github-code","pt":"78"} +{"seq_id":"43335374872","text":"import cv2 as cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import path\nfrom random import randrange\nimport os\nfrom math import sqrt\nfrom shapely.geometry import Polygon,LineString,Point\n\ndef find_metric(point,poly,dists):\n p = Point(point)\n poly_coords = poly.exterior.coords\n\n # points = \n metrics = []\n pt1 = Point(poly_coords[0])\n dis1 = p.distance(pt1)\n for point,dist in zip(poly_coords[1:],dists):\n dis2 = p.distance(point)\n metrics.append(abs(dis2+dis1-dist))\n dis1 = dis2\n return min(metrics)\n\ndef calculate_dists(poly):\n poly_coords = poly.exterior.coords\n dists = []\n pt1 = Point(poly_coords[0])\n for point in poly_coords[1:]:\n dists.append(pt1.distance(point))\n pt1 = Point(point)\n return dists\n\ndef calculate_image_metric(w,h,poly):\n dists = calculate_dists(poly)\n image_metrics = np.ndarray([h,w],dtype=np.float32)\n for i in range(w):\n for j in range(h):\n image_metrics[j][i] = find_metric(Point(i,j),poly,dists)\n return image_metrics\n\ndef get_mask(w,h,quad):\n mask = np.ndarray([h,w],dtype = np.bool)\n for i in range(w):\n for j in range(h):\n mask[j][i] = quad.contains(Point(i,j))\n return mask\n\ndef get_quad(width,height,H):\n four_points = [0,0,0,0]\n four_points[0] = np.array([0,0,1])\n four_points[1] = np.array([width-1,0,1])\n four_points[2] = np.array([width-1,height-1,1])\n four_points[3] = np.array([0,height-1,1])\n for i in range(4):\n x = np.matmul(H,four_points[i])\n x = np.array([x[0]/x[2],x[1]/x[2]],dtype= np.float32)\n four_points[i] = x\n return Polygon(four_points)\n\ndef get_new_mask(mask1,tx,ty,h,w):\n mask1_new = np.ndarray((h,w),dtype=np.bool)\n for i in range(h):\n for j in range(w):\n if (i>=ty and j >=tx):\n mask1_new[i][j] = mask1[i-ty][j-tx]\n else:\n mask1_new[i][j] = 0\n return mask1_new\ndef shift_poly(poly,tx,ty):\n points = poly.exterior.coords\n newpoints = []\n for point in points:\n newpoints.append((point[0]+tx,point[1]+ty))\n return Polygon(newpoints)\n\ndef blend(img1,mask1,metric1,img2,mask2,metric2):\n dst 
= np.array(img1)\n assert(img1.shape == img2.shape)\n for i in img1.shape[0]:\n for j in img1.shape[1]:\n if (mask1[i][j] and mask2[i][j]):\n for k in range(3):\n dst[i][j][k] = np.uint8(min((metric1[i][j]*img1[i][j][k] + metric2[i][j]*img2[i][j][k])/(metric1[i][j] + metric2[i][j]),255))\n # dst[i][j] = np.array((metric1[i][j]*img1[i][j] + metric2[i][j]*img2[i][j])/(metric1[i][j] + metric2[i][j]),\n elif (mask1[i][j]):\n dst[i][j] = img1[i][j].copy()\n elif (mask2[i][j]):\n dst[i][j] = img2[i][j].copy()\n else:\n dst[i][j] = np.array([0,0,0],dtype=np.uint8)\n return dst\n\ndef combine(img1,H1,img2,H2,w,h,mask1,poly):\n img_1 = cv2.warpPerspective(img1,H1,(w,h))\n img_2 = cv2.warpPersepctive(img2,H2,(w,h))\n tx = H1[0][2]\n ty = H1[1][2]\n mask1_new = get_new_mask(mask1,tx,ty,h,w)\n quad = get_quad(img2.shape[1],img2.shape[0],H2)\n mask2 = get_mask(w,h,quad)\n mask_new = mask1_new + mask2\n poly1_new = shift_poly(poly)\n poly_new = poly1_new.union(quad)\n img1_metric = calculate_image_metric(w,h,poly1_new)\n img2_metric = calculate_image_metric(w,h,quad)\n dst = blend(img1,mask1_new,img1_metric,img2,mask2,img2_metric)\n return (dst,mask_new,poly_new)\n \ndef panaroma(imglist,Hpairs,wh):\n img1 = imglist[0]\n mask1 = np.ones([img1.shape[0],img1.shape[1]],dtype=np.bool)\n poly1 = Polygon((0,0),(img1.shape[1],0),(img1.shape[1],img1.shape[0]),(0,img1.shape[0]))\n\n for i in range(Hpairs):\n H1,H2 = Hpairs[i]\n w,h = wh[i]\n img2 = imglist[i+1]\n img1,mask1,poly1 = combine(img1,H1,img2,H2,w,h,mask1,poly1)\n \n return img1\n\n","repo_name":"rahuljain1310/Stitch-Panaroma","sub_path":"panaro.py","file_name":"panaro.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23016341451","text":"from pathlib import Path\n\nfrom skbuild import setup\n\ncmake_args = [\n \"-DBUILD_TESTING:BOOL=OFF\",\n \"-Dconan_deps=ON\",\n \"-DCMAKE_POSITION_INDEPENDENT_CODE=ON\",\n]\n\nsetup(\n name=\"so3\",\n version=\"1.3.6\",\n author=\"Jason McEwen\",\n install_requires=[\"numpy\", \"scipy\"],\n extras_require={\n \"dev\": [\n \"setuptools\",\n \"wheel\",\n \"scikit-build\",\n \"cmake\",\n \"ninja\",\n \"cython\",\n \"conan\",\n \"black\",\n \"pytest\",\n ]\n },\n description=\"Fast and exact Wigner Transforms\",\n url=\"http://astro-informatics.github.io/so3/\",\n package_dir={\"so3\": \"src/so3\"},\n cmake_args=cmake_args,\n cmake_languages=(\"C\",),\n license=\"GPL-3\",\n packages=[\"so3\"],\n long_description=Path(__file__).with_name(\"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n)\n","repo_name":"astro-informatics/so3","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"74812337212","text":"#!/usr/bin/env python\nimport pandas as pd\nimport numpy as np\nimport multiprocessing\n\n\ndef calc_tanimoto(pairs, mols, fps, proc_id, ret):\n tanimoto = []\n for i, j in pairs:\n fp1 = fps[i]\n fp2 = fps[j]\n\n i_mol = mols[i]\n j_mol = mols[j]\n\n n1 = np.count_nonzero(fp1)\n n2 = np.count_nonzero(fp2)\n comm = np.count_nonzero(np.logical_and(fp1,fp2))\n t = comm / (n1 + n2 - comm)\n tanimoto.append((i, j, i_mol, j_mol, t))\n ret[proc_id] = tanimoto\n\n\nif __name__ == '__main__':\n NPROC = 4\n\n df = pd.read_csv('fingerprint.csv')\n fps = df['fingerprint'].to_list() \n fps = [ np.array(list(map(int,list(x)))) for x in fps ]\n mols = 
df['mol'].tolist()\n\n    manager = multiprocessing.Manager()\n    ret = manager.dict()\n\n    # pair 나누기\n    split_pairs = [ [] for x in range(NPROC) ]\n    count = 0\n    for i in range(len(fps)-1):\n        for j in range(i+1,len(fps)):\n            ind = count % NPROC\n            split_pairs[ind].append((i,j))\n            count += 1\n\n    procs = []\n    for proc_id in range(NPROC):\n        pairs = split_pairs[proc_id]\n        proc = multiprocessing.Process(target=calc_tanimoto,\n                args=(pairs, mols, fps, proc_id, ret))\n        procs.append(proc)\n        proc.start()\n\n    for proc in procs:\n        proc.join()\n\n    tanimoto = []\n    for key in sorted(ret.keys()):\n        tanimoto_split = ret[key]\n        for t in tanimoto_split:\n            tanimoto.append(t)\n    \n    df = pd.DataFrame(tanimoto)\n    df.columns = ['ind1', 'ind2', 'i','j','tanimoto']\n    df = df.sort_values(by=['ind1','ind2'])[['i','j','tanimoto']]\n    df.to_csv('tanimoto.csv', index=False)\n\n","repo_name":"LAIDD-DB/Large-scale-Computation-using-Parallel-Processing","sub_path":"tanimoto_multiprocessing/calc_tanimoto.py","file_name":"calc_tanimoto.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"9350700284","text":"import json\nfrom sklearn.externals import joblib\n\nmodel_name = 'Model_1566096478.5759013.joblib'\nmodel = joblib.load(model_name)\n\ndef predict(event, context):\n    body = {\n        \"message\": \"OK\",\n    }\n\n    params = event['queryStringParameters']\n\n    medInc = float(params['medInc']) / 100000\n    houseAge = float(params['houseAge'])\n    aveRooms = float(params['aveRooms'])\n    aveBedrms = float(params['aveBedrms'])\n    population = float(params['population'])\n    aveOccup = float(params['aveOccup'])\n    latitude = float(params['latitude'])\n    longitude = float(params['longitude'])\n\n    inputVector = [medInc, houseAge, aveRooms, aveBedrms, population, aveOccup,\n        latitude, longitude]\n    data = [inputVector]\n\n    predictedPrice = model.predict(data)[0] * 100000 #1 usd\n    predictedPrice = round(predictedPrice, 2)\n    body['predictedPrice'] = predictedPrice\n\n    response = {\n        \"statusCode\": 200,\n        \"body\": json.dumps(body),\n        \"headers\" : {\n            \"Access-Control-Allow-Origin\": '*'\n        }\n    }\n\n    return response\n\ndef do_main():\n    event = {\n        'queryStringParameters': {\n            'medInc' : 2000000,\n            'houseAge' : 10,\n            'aveRooms' : 4,\n            'aveBedrms' : 1,\n            'population' : 800,\n            'aveOccup' : 3,\n            'latitude' : 37.54,\n            'longitude' : -121.72\n        }\n    }\n\n    response = predict(event, None)\n    body = json.loads(response['body'])\n    print('Price:', body['predictedPrice'])\n\n    with open('event.json','w') as event_file:\n        event_file.write(json.dumps(event))\n\n#do_main()\n\n\n","repo_name":"kevsestrella/UdemyAWSServerless","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"27893518510","text":"import nltk\nfrom nltk.sentiment import SentimentIntensityAnalyzer\n\nclass SentimentAnalyzer:\n    def __init__(self):\n        nltk.download('vader_lexicon') # Download the required lexicon\n        self.sia = SentimentIntensityAnalyzer()\n\n    def analyze_sentiment(self, text):\n        sentiment_score = self.sia.polarity_scores(text)\n        sentiment = self.get_sentiment_label(sentiment_score)\n        return sentiment, sentiment_score\n\n    def get_sentiment_label(self, sentiment_score):\n        compound_score = sentiment_score['compound']\n        if compound_score >= 0.05:\n            return \"Positive\"\n        elif compound_score <= -0.05:\n            return \"Negative\"\n        else:\n            return \"Neutral\"\n\nif __name__ == \"__main__\":\n    text = input(\"Enter the text to analyze sentiment: \")\n    analyzer = SentimentAnalyzer()\n    sentiment, sentiment_score = analyzer.analyze_sentiment(text)\n    \n    print(\"Sentiment:\", sentiment)\n    print(\"Sentiment Scores:\", sentiment_score)\n\n","repo_name":"Cvader42/My-Jarvis","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"1288353399","text":"from typing import List\n\n\nRNASEQ = {\n    \"AUG\": \"Methionine\", \n    \"UUU\": \"Phenylalanine\", \n    \"UUC\": \"Phenylalanine\", \n    \"UUA\": \"Leucine\", \n    \"UUG\": \"Leucine\", \n    \"UCU\": \"Serine\", \n    \"UCC\": \"Serine\", \n    \"UCA\": \"Serine\", \n    \"UCG\": \"Serine\", \n    \"UAU\": \"Tyrosine\", \n    \"UAC\": \"Tyrosine\", \n    \"UGU\": \"Cysteine\", \n    \"UGC\": \"Cysteine\", \n    \"UGG\": \"Tryptophan\", \n    \"UAA\": \"STOP\", \n    \"UAG\": \"STOP\", \n    \"UGA\": \"STOP\"\n    }\n\ndef proteins(strand: str) -> List[str]:\n    protein = []\n    for x in range(0, len(strand), 3):\n        value = RNASEQ[strand[x : x + 3]]\n        if value == \"STOP\":\n            break\n        protein.append(value)\n    return protein","repo_name":"svlmandava/exercism","sub_path":"protein-translation/protein_translation.py","file_name":"protein_translation.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"2430882418","text":"import os\nimport time\nimport json\nimport random\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom openai.error import InvalidRequestError\n\nfrom langchain.llms import OpenAI\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import (\n\tSystemMessage,\n    HumanMessage,\n    AIMessage\n)\n\nfrom config import *\nfrom utils import formatting\n\n\n\nclass LawAgent:\n\n    rechtsfrage: str\n    summary: dict\n    gesetze_durchsucht: list\n\n    bundesrecht_index: dict\n    prompts: dict\n    messages: list\n    conversation_history: list\n\n    chat: ChatOpenAI\n    chat_16k: ChatOpenAI\n\n\n    def __init__(self) -> None:\n        # Load Bundesrecht Index Filled\n        with open(os.path.join(\"ris\", \"bundesrecht_index_filled.json\"), \"r\") as f:\n            self.bundesrecht_index = json.load(f)\n\n        # Load Prompts\n        self.prompts = {\n            \"system\": self.init_prompt(\"01_get_gesetze\", \"01_system.txt\"),\n            \"kategorie_waehlen\": self.init_prompt(\"01_get_gesetze\", \"02_kategorie_waehlen.txt\"),\n            \"gesetz_waehlen\": self.init_prompt(\"01_get_gesetze\", \"03_gesetz_waehlen.txt\"),\n            \"zusammenfassung_erstellen\": self.init_prompt(\"01_get_gesetze\", \"04_zusammenfassung_erstellen.txt\"),\n            \"gesetzestext_teil_waehlen\": self.init_prompt(\"01_get_gesetze\", \"05_gesetzestext_teil_waehlen.txt\"),\n            \"gesetzestext_teil_zeigen\": self.init_prompt(\"01_get_gesetze\", \"06_gesetzestext_teil_zeigen.txt\"),\n            \"gesetzestext_gesamt\": self.init_prompt(\"01_get_gesetze\", \"07_gesetzestext_gesamt.txt\"),\n            \"finaler_report\": self.init_prompt(\"01_get_gesetze\", \"08_finalen_report_erstellen.txt\"),\n\n            \"extrahiere_fachbegriffe\": self.init_prompt(\"02_erklaere_final_report\", \"01_analysiere_finalen_report.txt\"),\n            \"fragen_generieren\": self.init_prompt(\"02_erklaere_final_report\", \"02_generiere_frage_fuer_fachbegriff.txt\"),\n        }\n\n        self.chat = ChatOpenAI(\n            model=\"gpt-3.5-turbo\",\n            temperature=0,\n            max_tokens=2048\n        )\n        self.chat_16k = ChatOpenAI(\n            model=\"gpt-3.5-turbo-16k\",\n            temperature=0,\n            max_tokens=4096\n        )\n\n        self.llm_curie = 
OpenAI(\n model=\"text-curie-001\",\n temperature=0,\n max_tokens=1024\n )\n\n self.summary = dict()\n self.gesetze_durchsucht = list()\n self.messages = list()\n self.conversation_history = list()\n\n\n def init_prompt(self, dir, prompt_name):\n with open(os.path.join(CHAIN_DIR, dir, prompt_name), \"r\") as f: prompt = f.read()\n return prompt\n \n\n def run(self, question, max_interations=5):\n # main function to answer a question\n\n ## INIT MAIN VARIABLES FOR AGENT\n if isinstance(question, str):\n self.rechtsfrage = question\n self.add_message(\n SystemMessage(\n content=self.prompts[\"system\"]\\\n .replace(\"{rechtsfrage}\", self.rechtsfrage)\\\n .replace(\"{gesetze_durchsucht}\", str(self.gesetze_durchsucht)) \\\n .replace(\"{summary}\", str(self.summary))\n )\n )\n # TODO: Question interpretation and refinement -> alignment between agent and user on interpretation of question\n\n\n \n elif isinstance(question, dict):\n # TODO: init variables from previous conversation state\n raise NotImplementedError\n \n try:\n analysis = None\n final_report = None\n fachbegriffe = None\n explained_fachbegriffe = dict()\n for i in range(max_interations):\n\n ## CHOOSE GESETZ\n # define layers, choose gesetz, erstelle zusammenfassung und reset messages\n layers = self.define_layers()\n gesetz = self.choose_gesetz(layers)\n self.summarize_progress()\n self.reset_messages(out=True)\n if \"nichts gefunden\" in gesetz.lower(): continue\n\n ## SCRAPE GESETZ\n gesetz_id = gesetz.split(\" - \")[0]\n gesetz_structure = self.get_gesetz_structure(gesetz_id)\n gesetz_is_long = len(str(gesetz_structure)) > 8096\n\n if gesetz_is_long:\n ## CHOOSE SEKTION VON GESETZ \n # choose sektion and analyse\n geltende_fassung = None\n chosen_section = self.choose_section_from_gesetz(gesetz, gesetz_structure)\n while geltende_fassung is None:\n\n while chosen_section not in gesetz_structure.keys():\n self.retry_completion()\n print(\"retrying completion\")\n\n if isinstance(gesetz_structure[chosen_section], list):\n t = gesetz_structure[chosen_section]\n while len(t) == 1: t = t[0]\n geltende_fassung = \"\\n\".join(t)\n \n elif isinstance(gesetz_structure[chosen_section], dict):\n gesetz_structure = gesetz_structure[chosen_section]\n chosen_section = self.choose_section_from_gesetz(gesetz, gesetz_structure)\n \n # TODO: prevent infinite loop\n\n analysis = self.analyze_section_from_gesetz(gesetz, geltende_fassung)\n # add gesetz to gesetze_durchsucht\n analyzed_section = analysis[\"analysierte_sektion\"]\n self.gesetze_durchsucht.append(f\"{gesetz} - {analyzed_section}\")\n \n else:\n ## ANALYZE FULL GESETZ\n # analyse\n analysis = self.analyze_full_gesetz(gesetz, gesetz_structure)\n # add gesetz to gesetze_durchsucht\n self.gesetze_durchsucht.append(gesetz)\n\n\n ## DECISION\n naechster_schritt = analysis[\"naechster_schritt\"]\n if naechster_schritt.lower() != \"done\":\n # create summary and start over\n self.summarize_progress()\n self.reset_messages()\n continue\n else:\n # create summary and final report\n self.summarize_progress()\n final_report = self.create_final_report()\n\n # reset messages\n self.reset_messages()\n\n ## ERKLAERE FACHBEGRIFFE\n # extract fachbegriffe\n fachbegriffe = self.extract_fachbegriffe(final_report)\n\n # explain fachbegriffe\n if len(fachbegriffe) > 0:\n fragen_for_fachbegriffe = self.generate_questions_for_fachbegriffe()\n assert isinstance(fragen_for_fachbegriffe, dict)\n\n for fachbegriff in fragen_for_fachbegriffe.keys():\n \n # Create new law agent to answer question\n la = 
LawAgent()\n answer = la.run(fragen_for_fachbegriffe[fachbegriff][\"frage\"])\n\n # Add answer to explained_fachbegriffe\n explained_fachbegriffe[fachbegriff] = {\n \"frage\": fragen_for_fachbegriffe[fachbegriff][\"frage\"],\n \"antwort\": answer\n }\n\n # TODO: update final report with answer\n # TODO: update gesetze_durchsucht with answer \n\n break\n\n\n\n except KeyboardInterrupt:\n # TODO: stop and summarize conversation\n # TODO: save whole conversation status\n pass\n \n\n\n # TODO: handle if final_report is None\n\n # save whole conversation\n save_dict = {\n \"rechtsfrage\": self.rechtsfrage,\n \"gesetze_durchsucht\": self.gesetze_durchsucht,\n \"summary\": self.summary,\n \"last_analysis\": analysis,\n \"final_report\": final_report,\n \"fachbegriffe\": explained_fachbegriffe if len(explained_fachbegriffe.keys()) == 0 else fachbegriffe,\n \"conversation_history\": [f\"{m.type}: {m.content}\" for m in self.conversation_history]\n }\n # save conversation history\n frage_str = self.rechtsfrage.replace(\" \", \"_\").replace(\"?\", \"\").replace(\"!\", \"\").replace(\".\", \"\").strip()\n with open(os.path.join(\"answered\", f\"conversation_history_{frage_str}.json\"), \"w\") as f:\n json.dump(save_dict, f, indent=4, ensure_ascii=False)\n\n # reset all variables after run\n self.rechtsfrage = None\n self.messages = list()\n self.conversation_history = list()\n self.gesetze_durchsucht = list()\n self.summary = dict()\n\n return save_dict\n\n\n def extract_fachbegriffe(self, finaler_report):\n\n # only keep the einfache_antwort\n assert \"einfache_antwort\" in finaler_report.keys()\n finaler_report = {\"antwort\": finaler_report[\"einfache_antwort\"]}\n\n output_format = {\n \"extrahierte_fachbegriffe\": [\"fachbegriff_1\", \"...\"]\n }\n none_found_format = {\n \"extrahierte_fachbegriffe\": []\n }\n current_human_message = HumanMessage(\n content=self.prompts[\"extrahiere_fachbegriffe\"].format(\n rechtsfrage=self.rechtsfrage,\n finaler_report=formatting.dict_to_string(finaler_report),\n output_format=\n f\"{formatting.dict_to_string(output_format)}\\n\"\n \"XOR wenn keine Fachbegriffe in der Antwort enhalten sind:\\n\"\n f\"{formatting.dict_to_string(none_found_format)}\"\n )\n )\n\n fachbegriffe = self.get_chat_completion(current_human_message)\n assert \"extrahierte_fachbegriffe\" in fachbegriffe.keys()\n assert isinstance(fachbegriffe[\"extrahierte_fachbegriffe\"], list)\n return fachbegriffe[\"extrahierte_fachbegriffe\"]\n\n\n def generate_questions_for_fachbegriffe(self):\n \n output_format = {\n \"fragen\": [\n {\n \"fachbegriff\": \"fachbegriff_1\",\n \"frage\": \"eine rechtsfrage die du dir selbst stellen würdest um diesen fachbegriff zu erklaeren\"\n }\n ]\n }\n \n current_human_message = HumanMessage(\n content=self.prompts[\"fragen_generieren\"].format(\n output_format=formatting.dict_to_string(output_format)\n )\n )\n\n questions = self.get_chat_completion(current_human_message)\n assert \"fragen\" in questions.keys()\n assert isinstance(questions[\"fragen\"], list)\n return {\n e[\"fachbegriff\"]: {\n \"frage\": e[\"frage\"]\n }\n for e in questions[\"fragen\"]\n }\n\n\n def lookup_bundesrecht(self, layers):\n if len(layers) == 0:\n return self.bundesrecht_index.keys()\n elif len(layers) == 1:\n l1 = layers[0]\n return [ k for k in self.bundesrecht_index[l1].keys() if not k.endswith(\" FREI\") ]\n elif len(layers) == 2:\n l1, l2 = layers\n try: return [ k for k in self.bundesrecht_index[l1][l2].keys() if not k.endswith(\" FREI\") ]\n except KeyboardInterrupt: raise 
KeyboardInterrupt\n except: return None\n else:\n raise ValueError(\"Too many layers.\")\n\n\n def bundesrecht_gesetze_for_category(self, layers):\n assert len(layers) <= 3\n \n first, second, third = layers\n\n if third is None:\n gesetze = self.bundesrecht_index[first][second]\n assert isinstance(gesetze, list)\n return gesetze \n else:\n gesetze = self.bundesrecht_index[first][second][third]\n assert isinstance(gesetze, list)\n return gesetze\n \n\n\n def define_layers(self):\n \n # Define initial variables\n layers = list()\n\n ## Set layers\n layers = self.define_layer(layers, f\"Zu beantwortende Rechtsfrage: {self.rechtsfrage}\") # first layer\n assert len(layers) == 1\n assert layers[0] in self.bundesrecht_index.keys()\n layers = self.define_layer(layers, f\"Du hast {layers[0]} gewaehlt.\") # second layer\n assert len(layers) == 2\n assert layers[1] in self.bundesrecht_index[layers[0]].keys()\n layers = self.define_layer(layers, f\"Du hast {layers[1]} gewaehlt.\") # third layer\n assert 2 <= len(layers) <= 3\n\n if layers[-1] is not None:\n assert isinstance(self.bundesrecht_index[layers[0]][layers[1]], dict)\n assert isinstance(self.bundesrecht_index[layers[0]][layers[1]][layers[2]], list)\n\n return layers\n\n\n def define_layer(self, layers, context):\n\n next_layer = self.lookup_bundesrecht(layers)\n if next_layer is None: return layers + [None]\n elif len(next_layer) == 0: return layers + [None]\n assert len(next_layer) > 0\n\n # get 2 random choices if possible\n if len(next_layer) < 2: rand = next_layer[1]\n else:\n random_choices = random.sample(next_layer, 2)\n rand = f\"{random_choices[0]}, {random_choices[1]}, ...\"\n \n output_format = {\"kategorie\": f\"gewaehlte Kategorie inklusive voranstehende Zahl. z.B. {rand}\"}\n \n # Define human message\n current_human_message = HumanMessage(\n content=self.prompts[\"kategorie_waehlen\"].format(\n context=context,\n categories=\"\\n\".join(next_layer),\n output_format=formatting.dict_to_string(output_format)\n )\n )\n response = self.get_chat_completion(current_human_message)\n\n # Define chosen layers and return\n chosen_category = response[\"kategorie\"]\n if len(layers) == 0: layers = [chosen_category]\n else: layers.append(chosen_category)\n return layers\n\n\n def summarize_progress(self):\n output_format = {\n \"zusammenfassung\": \"eine kurze, aber detailierte zusammenfassung ueber deinen bisherigen Fortschritt\",\n \"frage_beantwortet\": \"hast du die frage schon beantwortet? 
waehle aus folgender liste: 'ja' | 'noch nicht' \",\n \"begruendung\": \"begruende deine Entscheidung\",\n }\n current_human_message = HumanMessage(\n content=self.prompts[\"zusammenfassung_erstellen\"].format(\n output_format=formatting.dict_to_string(output_format)\n )\n )\n\n summary = self.get_chat_completion(current_human_message)\n self.summary = summary\n\n \n def choose_gesetz(self, layers):\n # Choose gesetz to look through\n\n # zusammenfassung = self.summary[\"zusammenfassung\"]\n context = f\"Zu beantwortende Rechtsfrage: {self.rechtsfrage}\" #\\n\\nZusammenfassung des bisherigen Fortschritts: {zusammenfassung}\"\n\n gesetze = [\n g[\"gesetzesnummer\"] + \" - \" + g[\"kurztitel\"].replace(\" - \", \"; \")\n for g in self.bundesrecht_gesetze_for_category(layers)\n if len(str(g[\"gesetzesnummer\"]).strip()) > 0\n ]\n output_format = {\"nummer\": \"die davorstehende nummer des gesetzes oder 'nichts gefunden'\", \"titel\": \"der titel des gewählten gesetzes oder 'nichts gefunden'\"}\n current_human_message = HumanMessage(\n content=self.prompts[\"gesetz_waehlen\"].format(\n context=context,\n laws=\"\\n\".join(gesetze),\n gesetze_durchsucht=\"\\n\".join(self.gesetze_durchsucht),\n output_format=formatting.dict_to_string(output_format)\n )\n )\n response = self.get_chat_completion(current_human_message)\n if \"nichts gefunden\" in response[\"nummer\"].lower() or \"nichts gefunden\" in response[\"titel\"].lower():\n return \"nichts gefunden\"\n else:\n gesetz = response[\"nummer\"] + \" - \" + response[\"titel\"]\n return gesetz\n\n\n\n\n def choose_section_from_gesetz(self, gesetz, gesetz_structure):\n context = f\"Zu beantwortende Rechtsfrage: {self.rechtsfrage}\\n\\nZusammenfassung des bisherigen Fortschritts: {self.summary['zusammenfassung']}\"\n output_format = {\n \"gewaehlte_sektionen\": [\"sektion (ganze zeile zitiert!!)\", \"...\" ]\n }\n choose_section_message = HumanMessage(\n content=self.prompts[\"gesetzestext_teil_waehlen\"].format(\n context=context,\n gesetz=gesetz,\n struktur=\"\\n\".join([s for s in gesetz_structure.keys()]),\n output_format=formatting.dict_to_string(output_format)\n ) + \"\\n\\nAchte darauf, dass du immer die gesamte Zeile zitierst und nicht nur die Nummer der Sektion!\"\n )\n response = self.get_chat_completion(choose_section_message, model=\"16k\")\n chosen_sections = response[\"gewaehlte_sektionen\"]\n\n # chosen_sections = [s[\"paragraph\"] + \" - \" + s[\"name\"] for s in chosen_sections]\n\n return chosen_sections[0] # TODO: return all chosen sections\n \n\n\n\n def analyze_full_gesetz(self, gesetz, gesetz_structure):\n geltende_fassung = str()\n for k, v in gesetz_structure.items():\n content = \" \".join(v)\n geltende_fassung += f\"{k}\\n\"\n geltende_fassung += f\"{content}\\n\\n\"\n geltende_fassung = geltende_fassung.strip()\n\n output_format = {\n \"vermutung\": \"stelle eine Vermutungen an ob dieses Gesetz ausreichend ist um die Frage zu beantworten? waehle aus folgender liste: 'ja' | 'nein'\",\n \"begruendung\": \"eine kurze begruendung warum\",\n \"loesungsansatz\": \"wie koennte die frage beantwortet werden?\",\n \"naechster_schritt\": \"was sollte als naechstes getan werden? 
waehle aus folgender liste: 'neues gesetz waehlen' | 'done' \"\n }\n show_chosen_section_message = HumanMessage(\n content=self.prompts[\"gesetzestext_gesamt\"].format(\n gesetz=gesetz,\n geltende_fassung=geltende_fassung,\n output_format=formatting.dict_to_string(output_format)\n )\n )\n\n # get chat completion and return analysis of gesetz\n analysis = self.get_chat_completion(show_chosen_section_message, model=\"16k\")\n return analysis\n\n \n\n\n def analyze_section_from_gesetz(self, gesetz, geltende_fassung):\n\n output_format = {\n \"vermutung\": \"stelle eine Vermutungen an ob der gebene Teil ausreichend ist um die Frage zu beantworten? waehle aus folgender liste: 'ja' | 'nein'\",\n \"begruendung\": \"eine kurze begruendung warum\",\n \"analysierte_sektion\": \"der name oder id der sektion die analysiert wurde\",\n \"loesungsansatz\": \"wie koennte die frage beantwortet werden?\",\n \"naechster_schritt\": \"was sollte als naechstes getan werden? waehle aus folgender liste: 'neues gesetz waehlen' | 'done' \"\n }\n show_chosen_section_message = HumanMessage(\n content=self.prompts[\"gesetzestext_teil_zeigen\"].format(\n gesetz=gesetz,\n geltende_fassung=geltende_fassung,\n output_format=formatting.dict_to_string(output_format)\n )\n )\n\n # get chat completion and return analysis of gesetz\n analysis = self.get_chat_completion(show_chosen_section_message, model=\"16k\")\n return analysis\n \n\n \n def create_final_report(self):\n output_format = {\n \"zusammenfassung\": \"fasse noch einmal zusammen wie du beim beantworten der Frage vorgegangen bist\",\n \"komplexe_antwort\": \"gib eine möglichst genaue und komplexe antwort und erklaerung; zusätzliche informationen sind gerne gesehen; gerichtet an einen juristischen Experten\",\n \"einfache_antwort\": \"gib eine einfache und kurze antwort; vermeide informationen nach welchen nicht explizit gefragt wird, sowie Fachjargon; gerichtet an einen juristischen Laien\",\n \"begruendung\": \"begruende deine antwort\",\n }\n\n current_human_message = HumanMessage(\n content=self.prompts[\"finaler_report\"].format(\n output_format=formatting.dict_to_string(output_format)\n )\n )\n\n # get chat completion and return the final report\n final_report = self.get_chat_completion(current_human_message, model=\"16k\")\n return final_report\n \n\n def retry_completion(self):\n \n # specify human message so the law agent tries again\n current_human_message = HumanMessage(\n content=\\\n \"Thanks a lot! But your output does not follow the specified format. \"\\\n \"Please try again. Do not explain yourself and do not give excuses. 
\"\\\n            \"Make sure that your answer matches the previously specified output format exactly!\"\n        )\n\n        # get chat completion and return the response\n        response = self.get_chat_completion(current_human_message, model=\"16k\")\n        return response\n\n\n    \n    def get_chat_completion(self, human_message, model=\"4k\"):\n        assert model in [\"4k\", \"16k\"]\n\n        # clean human message\n        human_message = HumanMessage(\n            content=formatting.clean_text_for_prompt(human_message.content) + f\"\\n\\nDeine JSON-Antwort:\"\n        )\n\n        # append human message to conversation history and agent memory\n        self.add_message(human_message)\n        \n        # get chat completion\n        try:\n            if model == \"4k\": response = self.chat(self.messages)\n            if model == \"16k\": response = self.chat_16k(self.messages)\n        except KeyboardInterrupt:\n            raise KeyboardInterrupt\n        except InvalidRequestError:\n            response = self.chat_16k(self.messages)\n\n        # append response message to conversation history and agent memory\n        self.add_message(response)\n\n        # do checks and return \n        time.sleep(1)\n        assert isinstance(response, AIMessage)\n        return json.loads(response.content)\n        # TODO: handle this better! -> i.e. return a message that the agent did not understand the human message\n\n\n    def reset_messages(self, out=False):\n\n        if out: \n            for m in self.messages:\n                assert isinstance(m, SystemMessage) or isinstance(m, HumanMessage) or isinstance(m, AIMessage)\n                if isinstance(m, SystemMessage): u = \"System\"\n                if isinstance(m, HumanMessage): u = \"Human\"\n                if isinstance(m, AIMessage): u = \"AI\"\n                print(u, m.content)\n\n        self.messages = self.messages[:1]\n\n\n    def add_message(self, message):\n        self.messages.append(message)\n        self.conversation_history.append(message)\n    \n\n    def get_gesetz_structure(self, gesetz_id):\n\n        # Get Geltende Fassung von Gesetz\n        gesetz_structure_path = os.path.join(\"ris\", \"bundesrecht\", f\"gesetz_structure_{gesetz_id}.json\")\n        if not os.path.exists(gesetz_structure_path):\n            url = f\"https://www.ris.bka.gv.at/GeltendeFassung.wxe?Abfrage=Bundesnormen&Gesetzesnummer={gesetz_id}\"\n            html = requests.get(url).text\n            soup = BeautifulSoup(html, \"html.parser\")\n            \n            pagebase = soup.find(\"div\", {\"id\": \"pagebase\"})\n            content = pagebase.find(\"div\", {\"id\": \"content\"})\n            document_contents = content.find_all(\"div\", {\"class\": \"documentContent\"})\n            \n            gesetz_structure = {}\n            curr_ueberschr_g1 = None\n            curr_ueberschr_para = None\n            curr_gld_symbol = None\n            for doc_content in document_contents:\n                \n                # old version\n                # text_nodes = doc_content.find_all(string=True)\n                # text_nodes = [tn.strip() for tn in text_nodes if len(tn.strip()) > 0]\n                # text_nodes = [tn for tn in text_nodes if tn.lower() != \"text\"]\n                # text_nodes = [tn for tn in text_nodes if not tn.startswith(\"Art. \")]\n                # text_nodes = [formatting.clean_text_for_prompt(tn) for tn in text_nodes]\n                # section_name, section_content = \" - \".join(text_nodes[:2]), text_nodes[2:]\n                # section_content = [c for c in section_content if not c.startswith(\"§\")]\n                # section_content = [c for c in section_content if not c.startswith(\"Paragraph \")]\n                # section_content = [formatting.clean_text_for_prompt(c) for c in section_content]\n                # gesetz_structure[section_name.replace(\" \", \"\")] = text_nodes[2:]\n\n                # new version\n                # gesetz_structure = {\n                #     \"ueberschr_g1\": {\n                #         \"ueberschr_para\": {\n                #             \"gld_symbol\": [ (title), absatz_text, absatz_text, ... ]\n                #         },\n                #         \"gld_symbol\": [ (title), absatz_text, absatz_text, ... ]\n                #     }\n                # }\n\n                # <h4 class=\"UeberschrG1\"> ... </h4>\n                ueberschr_g1 = doc_content.find_all(\"h4\", {\"class\": \"UeberschrG1\"})\n                if len(ueberschr_g1) > 0:\n                    assert len(ueberschr_g1) == 1\n                    curr_ueberschr_g1 = ueberschr_g1[0].text.strip()\n                    if curr_ueberschr_g1 not in gesetz_structure.keys(): \n                        gesetz_structure[curr_ueberschr_g1] = {}\n                    \n                    curr_ueberschr_para = None\n                    curr_gld_symbol = None\n\n\n                # <h4 class=\"UeberschrPara\">Abstammung</h4>\n                ueberschr_para = doc_content.find_all(\"h4\", {\"class\": \"UeberschrPara\"})\n                if len(ueberschr_para) > 0:\n                    assert len(ueberschr_para) == 1\n                    curr_ueberschr_para = formatting.key_formatting_for_dict(ueberschr_para[0].text.strip())\n                    if curr_ueberschr_para not in gesetz_structure[str(curr_ueberschr_g1)].keys():\n                        gesetz_structure[curr_ueberschr_g1][curr_ueberschr_para] = {}\n\n                    curr_gld_symbol = None\n\n                # # <div class=\"ParagraphMitAbsatzzahl\"> ... </div>\n                # para_mit_abs = doc_content.find_all(\"div\", {\"class\": \"ParagraphMitAbsatzzahl\"})\n                # if len(para_mit_abs) > 0:\n                #     assert len(para_mit_abs) == 1\n                #     curr_para_mit_abs = para_mit_abs[0]\n\n                \n                # <div class=\"MarginTop4\"> ... </div>\n                gld_symbol = doc_content.find_all(\"div\", {\"class\": \"MarginTop4\"})\n                if len(gld_symbol) > 0:\n                    # Paragraph 8,\n                    text = gld_symbol[0].find_all(\"span\", {\"class\": \"sr-only\"})\n                    assert len(text) > 0\n                    curr_gld_symbol = formatting.key_formatting_for_dict(text[0].text)\n\n                else:\n                    # <h5 class=\"GldSymbol\"> ... </h5>\n                    gld_symbol = doc_content.find_all(\"h5\", {\"class\": \"GldSymbol\"})\n                    if len(gld_symbol) > 0:\n                        curr_gld_symbol = formatting.key_formatting_for_dict(gld_symbol[0].find_all(\"span\", {\"class\": \"sr-only\"})[0].text)\n                \n                \n\n                wai_absatz_list = doc_content.find_all(\"ol\", {\"class\": \"wai-absatz-list\"})\n                wai_list = doc_content.find_all(\"ol\", {\"class\": \"wai-list\"})\n                top = doc_content.find_all(\"div\", {\"class\": \"MarginTop4\"})\n                if len(wai_absatz_list) > 0:\n                    law_text = []\n                    for wal in wai_absatz_list:\n                        lis = wai_absatz_list[0].find_all(\"li\")\n                        \n                        for li in lis:\n                            absatz_zahl = li.find_all(\"span\", {\"class\": \"sr-only\"})[0].text\n                            law_text.append(li.text)\n                    \n                elif len(wai_list) > 0:\n                    law_text = []\n                    for wl in wai_list:\n                        lis = wl.find_all(\"li\")\n                        # try to find title for list\n                        # <div class=\"MarginTop4\"> ... </div>\n                        if len(top) > 0:\n                            law_text.append([top[0].text] + [ li.text for li in lis])\n                        else:\n                            law_text.append([ li.text for li in lis])\n\n                # elif len(top) > 0:\n                #     law_text = top[0].text.strip()\n                #     law_text = [law_text]\n                else:\n                    law_text = [doc_content.text.strip()]\n\n                # TODO: add to gesetz_structure\n                # ...\n                if curr_ueberschr_g1 is not None:\n                    if curr_ueberschr_para is not None:\n                        if isinstance(gesetz_structure[curr_ueberschr_g1][curr_ueberschr_para], dict):\n                            if curr_gld_symbol not in gesetz_structure[curr_ueberschr_g1][curr_ueberschr_para].keys():\n                                gesetz_structure[curr_ueberschr_g1][curr_ueberschr_para][curr_gld_symbol] = [law_text]\n                            else:\n                                gesetz_structure[curr_ueberschr_g1][curr_ueberschr_para][curr_gld_symbol].append(law_text)\n                        else:\n                            if curr_gld_symbol not in gesetz_structure[curr_ueberschr_g1].keys():\n                                gesetz_structure[curr_ueberschr_g1][curr_gld_symbol] = [law_text]\n                            else:\n                                gesetz_structure[curr_ueberschr_g1][curr_gld_symbol].append(law_text)\n                    else:\n                        if curr_gld_symbol not in gesetz_structure[curr_ueberschr_g1].keys():\n                            gesetz_structure[curr_ueberschr_g1][curr_gld_symbol] = [law_text]\n                        else:\n                            gesetz_structure[curr_ueberschr_g1][curr_gld_symbol].append(law_text)\n                else:\n                    continue\n\n\n            # # create cleaner gesetz structure\n            # gesetz_structure = self.structure_gesetz_helper(gesetz_structure)\n\n            # save as json\n            with open(gesetz_structure_path, \"w\") as f:\n                json.dump(gesetz_structure, f, indent=4, ensure_ascii=False)\n\n        else:\n            with open(gesetz_structure_path, \"r\") as f:\n                gesetz_structure = json.load(f, strict=False)\n\n        return gesetz_structure\n    \n\n\n    def structure_gesetz_helper(self, gesetz_structure):\n\n        # load prompt\n        prompt = None\n        with open(os.path.join(\"chains\", \"00_helper\", \"clean_gesetz_section_title.txt\"), \"r\") as f:\n            prompt = f.read()\n        assert prompt is not None\n\n        # initialize variables for loop\n        cleaned_section_names = []\n        section_names = list(gesetz_structure.keys())\n\n        # loop over all section names\n        for i, section_name in enumerate(section_names):\n\n            # format prompt and get response\n            formatted_prompt = prompt.format(eingabe=section_name)\n            response = self.llm_curie.generate([formatted_prompt]).generations[0][0].text\n            \n            # clean response\n            response = response.split(\"\\n\")[0]\n            response = response.strip()\n\n            # append cleaned response to list\n            cleaned_section_names.append(response)\n\n        # Create new gesetz structure and return it\n        new_gesetz_structure = {}\n        assert len(cleaned_section_names) == len(gesetz_structure.keys())\n        for old, new in zip(gesetz_structure.keys(), cleaned_section_names):\n            new_gesetz_structure[new] = gesetz_structure[old.replace(\" \", \"\")]\n        \n\n        return new_gesetz_structure\n    \n\n\n    def format_gesetz_structure(self):\n        pass\n\n\n    def format_gesetz_section_content(self):\n        pass\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    \n    la = LawAgent()\n    fragen = [\n        \"Welche Voraussetzungen müssen erfüllt sein, damit eine Person in Österreich die Staatsbürgerschaft erlangen kann?\",\n        # \"Welche Behörde ist in Österreich für die Registrierung von Unternehmen zuständig und welche Schritte sind erforderlich, um ein Unternehmen rechtlich anzumelden?\",\n        # \"Wie schnell darf ich auf der Autobahn mit einem Fahrrad fahren?\",\n        # \"Was sind die rechtlichen Bestimmungen für die Kündigung eines Arbeitsvertrags in Österreich und welche Rechte haben Arbeitnehmer und Arbeitgeber in diesem Zusammenhang?\",\n        # \"Welche gesetzlichen Regelungen gelten in Österreich für den Schutz des geistigen Eigentums, insbesondere für Markenrechte und Urheberrechte?\",\n        # \"Wie lange darf ein sich ein 15 jähriger in der Nacht draußen aufhalten?\",\n        # \"Welche steuerrechtlichen Regelungen gelten in Österreich für die Besteuerung von Einkommen aus dem Verkauf von Immobilien und wie hoch ist der Steuersatz?\"\n    ]\n    for frage in fragen:\n        la.run(frage)\n\n\n    print(\"...done\")\n\n\n\n","repo_name":"beocca/lawAgent","sub_path":"law_agent.py","file_name":"law_agent.py","file_ext":"py","file_size_in_byte":33618,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"8240827744","text":"# 3 - Задайте список из n чисел последовательности (1+1/n)**n и выведите на экран их сумму.\n\n# *Пример:*\n# - Для n = 6: {1: 2.0, 2: 2.25, 3: 2.37037037037037, 4: 2.44140625, 5: 2.4883199999999994, 6: 2.5216263717421135}\n\nfrom Funct import input_pos_num\n\nnum_n = input_pos_num(\"Введите N: \")\n\ndict_of_num = {}\n\nfor i in range(1,num_n+1):\n    dict_of_num[i] = (1+1/i)**i\nprint(dict_of_num)\n\nres = 0\n\nfor i in dict_of_num:\n    res += float(dict_of_num[i])\nprint(f\"Сумма чисел последовательности = {res:.2f}\")","repo_name":"grt2143/PY-HW2","sub_path":"Task_3.py","file_name":"Task_3.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"18129343127","text":"class Description:\n    \"\"\"Statement Implementation Description\n\n    A summary of how the containing control statement is implemented by\nthe component or capability.\n\n    Attributes:\n        prose (str):Default value holder for raw data in texts\n\n    \"\"\"\n\n    contexts = [\n        \"oscal-component-definition\",\n        \"oscal-implementation-common\",\n    ]\n    parameters = [\n    ]\n    subcomponents = [\n        \"prose\",\n    ]\n\n    def __init__(\n        self,\n        use_name='description',\n        prose=None,\n    ):\n        self._prose = None\n        self.prose = \\\n            prose\n        self.use_name = use_name\n\n    def __str__(self):\n\n        return str(self.prose)\n\n    @classmethod\n    def fromDict(cls, obj):\n        newcls = cls(\n            prose=obj.get(\n                'prose',\n                None),\n        )\n        return newcls\n\n    @property\n    def prose(self):\n        \"\"\"Default value holder for raw data in texts\n        \"\"\"\n        return self._prose\n\n    @prose.setter\n    def prose(self, x):\n        self._prose = x\n","repo_name":"SHRGroup/pyoscal","sub_path":"pyoscal/core/oscal_component_definition/Description.py","file_name":"Description.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"39623056","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef risk(theta1, theta2):\n    return theta1**2 + theta2**2 - 400*(theta1 + theta2)\n\n\nminv, maxv = 0, 400\neval_nums = 400\n\ntheta1, theta2 = np.meshgrid(np.linspace(minv, maxv, eval_nums),\n                             np.linspace(minv, maxv, eval_nums))\n\nvalues = risk(theta1, theta2)\n\nfig, ax = plt.subplots(figsize=(4, 4))\nax.grid(False)\nCS = ax.contour(theta1, theta2, values, levels=10, linewidths=0.5)\nax.set_title(\"Contour for the Risk Function\")\nax.set_xlabel(r\"$\\theta_1$ / m\")\nax.set_ylabel(r\"$\\theta_2$ / m\")\nax.clabel(CS)\nfig.tight_layout()\nfig.savefig(\"hw1_1b.pdf\", dpi=500)\n","repo_name":"li-ju666/SLDM_Homework","sub_path":"hw1/hw1_1b.py","file_name":"hw1_1b.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"37260617679","text":"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom 
__future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import *\nfrom builtins import object\n\nclass Interface(object):\n \"\"\"base class for interfaces\"\"\"\n def is_implemented_by(cls, instance):\n return implements(instance, cls)\n is_implemented_by = classmethod(is_implemented_by)\n\n \ndef implements(obj, interface):\n \"\"\"return true if the given object (maybe an instance or class) implements\n the interface\n \"\"\"\n kimplements = getattr(obj, '__implements__', ())\n if not isinstance(kimplements, (list, tuple)):\n kimplements = (kimplements,)\n for implementedinterface in kimplements:\n if issubclass(implementedinterface, interface):\n return True\n return False\n\n\ndef extend(klass, interface, _recurs=False):\n \"\"\"add interface to klass'__implements__ if not already implemented in.\n\n if klass is subclassed, ensure subclasses __implements__ it as well.\n \n NOTE: klass should be a new class.\n \"\"\"\n if not implements(klass, interface):\n try:\n kimplements = klass.__implements__\n kimplementsklass = type(kimplements)\n kimplements = list(kimplements)\n except AttributeError:\n kimplementsklass = tuple\n kimplements = []\n kimplements.append(interface)\n klass.__implements__ = kimplementsklass(kimplements)\n for subklass in klass.__subclasses__():\n extend(subklass, interface, _recurs=True)\n elif _recurs:\n for subklass in klass.__subclasses__():\n extend(subklass, interface, _recurs=True)\n","repo_name":"jlachowski/clonedigger","sub_path":"clonedigger/logilab/common/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"15939755755","text":"# ------------------------------------------------------\n#\n# TestSimpleKalmanFilter.py\n# By: Fred Stakem\n# Created: 4.15.13\n#\n# ------------------------------------------------------\n\n# Libs\nimport unittest\nimport random\nimport matplotlib.pyplot as plt\nimport math\nimport numpy\nimport Globals as globals\nfrom Utilities import *\nfrom SimpleKalmanFilter import SimpleKalmanModel\nfrom SimpleKalmanFilter import SimpleKalmanFilter\n\nclass SimpleKalmanFilterTest(unittest.TestCase):\n \n # Setup logging\n logger = getLogger('SimpleKalmanFilterTest')\n single_filter_graph_file = '../output/SimpleKalmanFilterSingle.png'\n multiple_filter_graph_file = '../output/SimpleKalmanFilterDouble.png'\n error_graph_file = '../output/SimpleKalmanFilterError.png'\n kalman_gain_graph_file = '../output/SimpleKalmanFilterGain.png'\n \n \n def setUp(self):\n pass\n \n def tearDown(self):\n pass\n \n @log_test(logger, globals.log_separator)\n def testFilter(self):\n init_x = 5.0\n init_p = 0.35\n test_data = [4.5, 4.8, 5.1, 4.75, 4.4]\n expected_estimates = [4.96, 4.95, 4.96, 4.95, 4.92]\n model = SimpleKalmanModel(A=1.0, H=1.0, Q=0.0, R=5.0)\n filter = SimpleKalmanFilter(model, init_x, init_p)\n \n SimpleKalmanFilterTest.logger.debug('Initial filter:\\n%s' % (filter))\n for i, x in enumerate(test_data):\n estimate = filter(x)\n output = 'Additional data: %s Expected estimate: %s Actual estimate: %s' % (str(x), str(expected_estimates[i]), str(estimate))\n SimpleKalmanFilterTest.logger.debug(output)\n assert numpy.allclose([estimate], [expected_estimates[i]], 0.1) , 'SimpleKalmanFilter class filtered incorrectly.'\n \n @log_test(logger, globals.log_separator)\n def 
testFilterOneCurveCurveGraphically(self):\n test_data = self.generateSignal(5, 100)\n time = range(0, 100, 1)\n filtered_data = []\n init_x = 5.0\n init_p = 0.35\n model = SimpleKalmanModel(A=1.0, H=1.0, Q=0.0, R=5.0)\n filter = SimpleKalmanFilter(model, init_x, init_p)\n \n for x in test_data:\n filtered_data.append( filter(x) )\n \n fig = plt.figure()\n subplot = fig.add_subplot(111)\n subplot.plot(time, test_data, 'o-')\n subplot.plot(time, filtered_data, 'ko-')\n subplot.set_xlabel('Time (s)')\n subplot.set_ylabel('Voltage (V)')\n subplot.set_title('SimpleKalmanFilterTest: Voltage vs Time')\n \n plt.savefig(SimpleKalmanFilterTest.single_filter_graph_file)\n \n @log_test(logger, globals.log_separator)\n def testFilterTwoCurvesGraphically(self):\n test_data = self.generateSignal(5, 100)\n time = range(0, 100, 1)\n filtered_data_a = []\n init_x = 5.0\n init_p = 0.35\n model = SimpleKalmanModel(A=1.0, H=1.0, Q=0.0, R=5.0)\n filter_a = SimpleKalmanFilter(model, init_x, init_p)\n \n filtered_data_b = []\n init_x = 5.5\n init_p = 0.35\n model = SimpleKalmanModel(A=1.0, H=1.0, Q=0.0, R=6.0)\n filter_b = SimpleKalmanFilter(model, init_x, init_p)\n \n for x in test_data:\n filtered_data_a.append( filter_a(x) )\n filtered_data_b.append( filter_b(x) )\n \n fig = plt.figure()\n subplot = fig.add_subplot(111)\n subplot.plot(time, test_data, 'o-')\n subplot.plot(time, filtered_data_a, 'ko-')\n subplot.plot(time, filtered_data_b, 'go-')\n subplot.set_xlabel('Time (s)')\n subplot.set_ylabel('Voltage (V)')\n subplot.set_title('SimpleKalmanFilterTest: Voltage vs Time')\n \n plt.savefig(SimpleKalmanFilterTest.multiple_filter_graph_file)\n \n @log_test(logger, globals.log_separator)\n def testFilterErrorGraphically(self):\n test_data = self.generateSignal(5, 100)\n time = range(0, 100, 1)\n error_data = []\n init_x = 5.0\n init_p = 0.35\n model = SimpleKalmanModel(A=1.0, H=1.0, Q=0.0, R=5.0)\n filter = SimpleKalmanFilter(model, init_x, init_p)\n \n for x in test_data:\n filter(x)\n error_data.append(filter.P)\n \n fig = plt.figure()\n subplot = fig.add_subplot(111)\n subplot.plot(time, error_data, 'ko-')\n subplot.set_xlabel('Time (s)')\n subplot.set_ylabel('Error (P)')\n subplot.set_title('SimpleKalmanFilterTest: Error Covariance vs Time')\n \n plt.savefig(SimpleKalmanFilterTest.error_graph_file)\n \n @log_test(logger, globals.log_separator)\n def testFilterGainGraphically(self):\n test_data = self.generateSignal(5, 100)\n time = range(0, 100, 1)\n gain_data = []\n init_x = 5.0\n init_p = 0.35\n model = SimpleKalmanModel(A=1.0, H=1.0, Q=0.0, R=5.0)\n filter = SimpleKalmanFilter(model, init_x, init_p)\n \n for x in test_data:\n filter(x)\n gain_data.append(filter.K)\n \n fig = plt.figure()\n subplot = fig.add_subplot(111)\n subplot.plot(time, gain_data, 'ko-')\n subplot.set_xlabel('Time (s)')\n subplot.set_ylabel('Kalman Gain (K)')\n subplot.set_title('SimpleKalmanFilterTest: Kalman Gain vs Time')\n \n plt.savefig(SimpleKalmanFilterTest.kalman_gain_graph_file)\n \n def generateSignal(self, value, num_samples):\n variation = value * 0.1\n signal = [value] * num_samples\n \n y = []\n for i in signal:\n signal_n_noise = random.uniform(value + variation, value - variation)\n y.append(signal_n_noise)\n \n return y\n \n \n \n ","repo_name":"fstakem/KalmanLearnin","sub_path":"BeginnerBook/TestSimpleKalmanFilter.py","file_name":"TestSimpleKalmanFilter.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
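The TestSimpleKalmanFilter record above drives a scalar filter that is imported but never shown. As a hedged reference, here is a minimal sketch of the predict/update recursion such a filter would have to implement; only the constructor signatures, the callable interface, and the .P and .K attributes are taken from the test itself, while the update equations are the standard textbook scalar Kalman filter, not code confirmed from the source repository.

class SimpleKalmanModel:
    # Hypothetical container matching SimpleKalmanModel(A=..., H=..., Q=..., R=...)
    def __init__(self, A, H, Q, R):
        self.A = A  # state transition coefficient
        self.H = H  # measurement coefficient
        self.Q = Q  # process noise variance
        self.R = R  # measurement noise variance

class SimpleKalmanFilter:
    # Hypothetical scalar filter: filter(z) returns the new estimate and
    # exposes .P (error covariance) and .K (Kalman gain), as the test reads them.
    def __init__(self, model, init_x, init_p):
        self.model = model
        self.x = init_x
        self.P = init_p
        self.K = 0.0

    def __call__(self, z):
        m = self.model
        x_pred = m.A * self.x              # predict state
        p_pred = m.A * self.P * m.A + m.Q  # predict covariance
        self.K = p_pred * m.H / (m.H * p_pred * m.H + m.R)  # Kalman gain
        self.x = x_pred + self.K * (z - m.H * x_pred)       # correct with measurement
        self.P = (1.0 - self.K * m.H) * p_pred              # update covariance
        return self.x

With A=1, H=1, Q=0, R=5, init_x=5.0, init_p=0.35 this recursion yields roughly 4.967, 4.957, 4.965, 4.954, 4.925 on the test data, which passes the numpy.allclose(..., 0.1) assertion against expected_estimates in testFilter.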
+{"seq_id":"17301683858","text":"# Given four integers n, a, b, and c, return the nth ugly number.\n\n# Ugly numbers are positive integers that are divisible by a, b, or c.\n\n \n\n# Example 1:\n\n# Input: n = 3, a = 2, b = 3, c = 5\n# Output: 4\n# Explanation: The ugly numbers are 2, 3, 4, 5, 6, 8, 9, 10... The 3rd is 4.\n# Example 2:\n\n# Input: n = 4, a = 2, b = 3, c = 4\n# Output: 6\n# Explanation: The ugly numbers are 2, 3, 4, 6, 8, 9, 10, 12... The 4th is 6.\n# Example 3:\n\n# Input: n = 5, a = 2, b = 11, c = 13\n# Output: 10\n# Explanation: The ugly numbers are 2, 4, 6, 8, 10, 11, 12, 13... The 5th is 10.\n# Example 4:\n\n# Input: n = 1000000000, a = 2, b = 217983653, c = 336916467\n# Output: 1999999984\n\nclass Solution:\n def nthUglyNumber(self, n: int, a: int, b: int, c: int) -> int:\n # least common multiple\n def lcm(a, b):\n return a * b // math.gcd(a, b)\n nums = sorted([a, b, c])\n nums2 = [lcm(a, b), lcm(b, c), lcm(a, c)]\n nums3 = lcm(nums2[0], c)\n lo, hi = n, nums[-1] * n\n while lo < hi:\n mid = (lo + hi) // 2\n rank = (sum(mid // n for n in nums) -\n sum(mid // n for n in nums2) + \n mid // nums3 )\n if rank < n:\n lo = mid + 1\n else:\n hi = mid\n return lo","repo_name":"FelixAlvarado/notes","sub_path":"february 2020/ugly_numbers.py","file_name":"ugly_numbers.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70185560892","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow.python.keras.api._v1.keras as keras\n\n\ndef plot_series(time, series, format=\"-\", start=0, end=None):\n plt.plot(time[start:end], series[start:end], format)\n plt.xlabel(\"Time\")\n plt.ylabel(\"Value\")\n plt.grid(True)\n\n\ndef trend(time, slope=0):\n return slope * time\n\n\ndef seasonal_pattern(season_time):\n \"\"\"Just an arbitrary pattern\"\"\"\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))\n\n\ndef seasonality(time, period, amplitude=1, phase=0):\n \"\"\"Repeats the same pattern at each period\"\"\"\n season_time = ((time + phase) % period) / period\n return amplitude * seasonal_pattern(season_time)\n\n\ndef noise(time, noise_level=1, seed=None):\n rnd = np.random.RandomState(seed)\n return rnd.randn(len(time)) * noise_level\n\n\ndef windowed_dataset(series, window_size, batch_size, shuffle_buffer):\n dataset = tf.data.Dataset.from_tensor_slices(series)\n dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)\n dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))\n dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1]))\n dataset = dataset.batch(batch_size=batch_size).prefetch(1)\n return dataset\n\n\nif __name__ == '__main__':\n time = np.arange(4 * 365 + 1, dtype=\"float32\")\n\n baseline = 10\n amplitude = 40\n slope = 0.05\n noise_level = 5\n\n # Build time series\n series = (baseline +\n trend(time=time, slope=slope) +\n seasonality(time=time, period=365, amplitude=amplitude))\n series += noise(time=time, noise_level=noise_level, seed=1)\n\n # Data for train and eval\n split_time = 1000\n time_train = time[:split_time]\n x_train = series[:split_time]\n\n time_val = time[split_time:]\n x_val = series[split_time:]\n\n window_size = 20\n batch_size = 32\n shuffle_buffer_size = 1000\n\n dataset = windowed_dataset(series=x_train,\n window_size=window_size,\n batch_size=batch_size,\n 
shuffle_buffer=shuffle_buffer_size)\n print(dataset)\n layer0 = keras.layers.Dense(units=1, input_shape=[window_size])\n model = keras.models.Sequential([layer0])\n model.compile(loss='mse', optimizer=keras.optimizers.SGD(learning_rate=1e-6, momentum=0.9))\n model.fit(dataset, epochs=5, verbose=0)\n\n print(f\"Layer Weights {layer0.get_weights()}\")\n\n forecast = []\n for time in range(len(series) - window_size):\n forecast.append(model.predict(series[time:time + window_size][np.newaxis]))\n\n forecast = forecast[split_time - window_size:]\n results = np.array(forecast)[:, 0, 0]\n\n plt.figure(figsize=(10, 6))\n plot_series(time_val, x_val)\n plot_series(time_val, results)\n plt.show()\n\n print(keras.metrics.mean_absolute_error(x_val, results).numpy())\n","repo_name":"brokencranium/neural-nets","sub_path":"series/LinearRegressionNN.py","file_name":"LinearRegressionNN.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71050099451","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\n\n\nimport os\n\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n\n for filename in filenames:\n\n print(os.path.join(dirname, filename))\n\n\n\n# Any results you write to the current directory are saved as output.\n# Carregando os dados\n\ndf = pd.read_csv('/kaggle/input/costa-rican-household-poverty-prediction/train.csv')\n\ntest = pd.read_csv('/kaggle/input/costa-rican-household-poverty-prediction/test.csv')\n\n\n\ndf.shape, test.shape\n## Separando as features:\n\n\n\nx_train = df['Id']\n\nx_test = test['Id']\n\ny_train = df['Target'] \n\n## y_test = test['Target'] Não existe\n\n\n\n# Juntando os dataframes\n\ndf_all = df.append(test)\n\n\n\ndf_all.shape\n## Gráficos\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\n\n\n## Modelos\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.model_selection import train_test_split,cross_val_score, GridSearchCV\n\nfrom sklearn.metrics import classification_report,multilabel_confusion_matrix\n\ndef grafico_pizza(labels,var,titulo,legenda):\n\n sizes = [df[var].value_counts()[0],df[var].value_counts()[1]]\n\n explode = (0, 0.1) \n\n\n\n fig1, ax1 = plt.subplots()\n\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n\n shadow=True, startangle=90)\n\n ax1.axis('equal') \n\n ax1.set_title(titulo)\n\n ax1.legend(title=legenda,\n\n loc=\"center left\",\n\n bbox_to_anchor=(1, 0, 0.5, 1))\n\n plt.show()\ndef grafico_barras(variaveis,eixoX,titulo):\n\n eixoY = []\n\n for v in variaveis: \n\n eixoY.append(df[v].value_counts()[1])\n\n \n\n plt.figure(figsize=(20,5))\n\n sns.barplot(x = eixoX,y = eixoY).set_title(titulo)\n\n plt.show()\n### Super lotação de quartos\n\nlabels = 'Não','Sim'\n\nvar = 'hacdor'\n\ntitulo = 'Superlotação de quartos'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n### Super lotação de espaços\n\nlabels = 'Não','Sim'\n\nvar = 'hacapo'\n\ntitulo = 'Superlotação de 
espaços'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n### Possui geladeira\n\nlabels = 'Não','Sim'\n\nvar = 'refrig'\n\ntitulo = 'Possui geladeira?'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n### Possui tablet\n\nlabels = 'Não','Sim'\n\nvar = 'v18q'\n\ntitulo = 'Possui tablet?'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n## Material predominante na parte de fora da casa \n\nvariaveis = 'paredblolad','paredzocalo','paredpreb','pareddes','paredmad','paredzinc','paredfibras','paredother'\n\neixoX = ['Bloco ou tijolo','Encaixe','Pré moldado ou Cimento','Resíduo','Madeira','Zinco','Fibras Naturais','Outro']\n\ntitulo = 'Material predominante na parte de fora da casa'\n\ngrafico_barras(variaveis,eixoX,titulo)\n## Material predominante no piso\n\nvariaveis = 'pisomoscer','pisocemento','pisoother','pisonatur','pisonotiene','pisomadera'\n\neixoX = ['Mosaico, Cerâmica ou Terrazo','Cimento','Outro','Natural','Não há piso','Madeira']\n\ntitulo = 'Material predominante no piso'\n\ngrafico_barras(variaveis,eixoX,titulo)\n## Material predominante no teto\n\nvariaveis = 'techozinc','techoentrepiso','techocane','techootro'\n\neixoX = ['Folha de metal ou zinco','Fibro Cimentou ou Mezanino','Fibras naturais','Outro']\n\ntitulo = 'Material predominante no teto'\n\ngrafico_barras(variaveis,eixoX,titulo)\n### Possui Teto\n\nlabels = 'Não','Sim'\n\nvar = 'cielorazo'\n\ntitulo = 'Possui teto?'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n## Abastecimento de água\n\nvariaveis = 'abastaguadentro','abastaguafuera','abastaguano'\n\neixoX = ['Interior da casa','Fora da casa','Não há abastecimento']\n\ntitulo = 'Abastecimento de água'\n\ngrafico_barras(variaveis,eixoX,titulo)\n## Abastecimento de eletricidade\n\nvariaveis = 'public','planpri','noelec','coopele'\n\neixoX = ['CNFL, ICE, ESPH / JASEC','Privada','Sem eletricidade','Cooperativa']\n\ntitulo = 'Abastecimento de Eletricidade'\n\ngrafico_barras(variaveis,eixoX,titulo)\n## Banheiros\n\nvariaveis = 'sanitario1','sanitario2','sanitario3','sanitario5','sanitario6'\n\neixoX = ['Sem banheiro','Banheiro com esgoto','Banheiro com fossa','Banheiro conectado a buraco','Banheiro conectado a outro sistema']\n\ntitulo = 'Banheiros'\n\ngrafico_barras(variaveis,eixoX,titulo)\n# Principal fonte de energia para cozinhar\n\nvariaveis = 'energcocinar1','energcocinar2','energcocinar3','energcocinar4'\n\neixoX = ['Sem cozinha','Elétrica','Gás','Carvão']\n\ntitulo = 'Principal fonte de energia para cozinhar'\n\ngrafico_barras(variaveis,eixoX,titulo)\n# Descarte de lixo\n\n### elimbasu5 sempre 0!\n\nvariaveis = 'elimbasu1','elimbasu2','elimbasu3','elimbasu4','elimbasu6'\n\neixoX = ['Caminhão Tanque','Botânica ou Enterrada','Queima','Terreno Baldio','Outros']\n\ntitulo = 'Descarte de lixo'\n\ngrafico_barras(variaveis,eixoX,titulo)\n# Situação das paredes\n\nvariaveis = 'epared1','epared2','epared3'\n\neixoX = ['Parede ruim','Parede regular','Parede boa']\n\ntitulo = 'Situação das paredes'\n\ngrafico_barras(variaveis,eixoX,titulo)\n# Situação do teto\n\nvariaveis = 'etecho1','etecho2','etecho3'\n\neixoX = ['Teto ruim','Teto regular','Teto bom']\n\ntitulo = 'Situacao do teto'\n\ngrafico_barras(variaveis,eixoX,titulo)\n## Situacao do chão\n\nvariaveis = 'eviv1','eviv2','eviv3'\n\neixoX = ['Chão ruim','Chão regular','Chão bom']\n\ntitulo = 'Situação do chão'\n\ngrafico_barras(variaveis,eixoX,titulo)\n### Pessoa incacitada\n\nlabels = 'Não','Sim'\n\nvar = 'dis'\n\ntitulo = 'Pessoa 
incapacitada?'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n### Distribuição do sexo\n\nlabels = 'Não','Sim'\n\nvar = 'male'\n\ntitulo = 'Distribuição do sexo'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\ntitulo = 'Estado civil'\n\nvariaveis = 'estadocivil1','estadocivil2','estadocivil3','estadocivil4','estadocivil5','estadocivil6','estadocivil7'\n\neixoX = ['< 10 anos ','Free','Casado','Divorciado','Separado','viúvo','Solteiro']\n\ngrafico_barras(variaveis,eixoX,titulo)\ntitulo = 'Parentesco'\n\nvariaveis = 'parentesco1','parentesco2','parentesco3','parentesco4','parentesco5','parentesco6','parentesco7','parentesco8','parentesco9','parentesco10','parentesco11','parentesco12'\n\neixoX = ['Chefe de família','Cônjugue','Filho','Divorciado','Genro/Nora','Neto','Pai','Sogro','Irmão','Cunhada','Outro Familiar','Outro Não Familiar']\n\ngrafico_barras(variaveis,eixoX,titulo)\ntitulo = 'Nível de educação'\n\nvariaveis = 'instlevel1','instlevel2','instlevel3','instlevel4','instlevel5','instlevel6','instlevel7','instlevel8','instlevel9'\n\neixoX = ['Sem nível de educação','Primário Incompleto','Primário Completo','Secundário Incompleto','Secundário Completo','Técnico Incompleto','Técnico Completo','Graduação','Ensino Superior']\n\ngrafico_barras(variaveis,eixoX,titulo)\ntitulo = 'Tipo de Moradia'\n\nvariaveis = 'tipovivi1','tipovivi2','tipovivi3','tipovivi4','tipovivi5'\n\neixoX = ['Casa própria e quitada','Própria e parcelada','Alugada','Precária','Outro (Atribuído / Empresatado)']\n\ngrafico_barras(variaveis,eixoX,titulo)\n### Possui Computador ?\n\nlabels = 'Não','Sim'\n\nvar = 'computer'\n\ntitulo = 'Possui Computador ?'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n### Possui Televisão ?\n\nlabels = 'Não','Sim'\n\nvar = 'television'\n\ntitulo = 'Possui Televisão ?'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n### Possui telefone Celular ?\n\nlabels = 'Não','Sim'\n\nvar = 'mobilephone'\n\ntitulo = 'Possui telefone Celular ?'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\ntitulo = 'Região'\n\nvariaveis = 'lugar1','lugar2','lugar3','lugar4','lugar5','lugar6'\n\neixoX = ['Central','Chorotega','Pacífico central','Brunca','Huetar Atlântica','Huetar Norte']\n\ngrafico_barras(variaveis,eixoX,titulo)\n### Zonas\n\nlabels = 'Rural','Urbana'\n\nvar = 'area1'\n\ntitulo = 'Zona Urbana / Rural'\n\nlegenda = 'Legenda'\n\ngrafico_pizza(labels,var,titulo,legenda)\n## Separando as variáveis\n\nvarNumericas = ['v2a1','rooms','v18q','v18q1','r4h1','r4h2','r4h3','r4m1','r4m2','r4m3','r4t1','r4t2','r4t3','tamhog','tamviv','escolari','rez_esc','hhsize','hogar_nin','hogar_adul','hogar_mayor','hogar_total','dependency','edjefe','edjefa','meaneduc','bedrooms','overcrowding','qmobilephone','age','SQBescolari','SQBage','SQBhogar_total','SQBedjefe','SQBhogar_nin','SQBovercrowding','SQBdependency','SQBmeaned','agesq']\n## Correlações\n\nplt.figure(figsize=(20,20));\n\nsns.heatmap(df[varNumericas].corr(), square=True ,annot=True, linewidths=1,vmin=-1,vmax=1,cmap='RdYlGn')\nnaoUsar = ['idhogar','Id','Target'] # ID\n\nnaoUsarNumericas = ['tamhog','hogar_total','agesq','hhsize'] ## Correlação 1\n## Tirando as variáveis\n\nvarNumericas = np.setdiff1d(varNumericas,naoUsarNumericas)\n\n## Correlações\n\nplt.figure(figsize=(20,20));\n\nsns.heatmap(df[varNumericas].corr(), square=True ,annot=True, linewidths=1,vmin=-1,vmax=1,cmap='RdYlGn')\ndf[varNumericas].describe().transpose()\n## Análise da Variável 
Alvo\n\ndf['Target'].value_counts().sort_values()\neixoX = ['Pobreza Extrema','Pobreza Moderada','Famílias Vulneráveis','Famílias Não Vulneráveis']\n\nplt.figure(figsize=(20,5))\n\nsns.barplot(x = eixoX,y = df['Target'].value_counts().sort_values()).set_title(titulo)\n\nplt.show()\n# Quais colunas do dataframe são do tipo object\n\ndf_all.select_dtypes('object').head()\n# Analisando os dados da coluna edjefa\n\ndf_all['edjefa'].value_counts()\n# Analisando os dados da coluna edjefe\n\ndf_all['edjefe'].value_counts()\n## Analisando a coluna dependency\n\ndf_all['dependency'].value_counts()\n# Transformar 'yes' em 1 e 'no' em 0\n\n\n\nmapeamento = {'yes': 1, 'no': 0}\n\ndf_all['edjefa'] = df_all['edjefa'].replace(mapeamento).astype(int)\n\ndf_all['edjefe'] = df_all['edjefe'].replace(mapeamento).astype(int)\n\ndf_all['dependency'] = df_all['dependency'].replace(mapeamento).astype(float)\n# Quais colunas do dataframe são do tipo object\n\ndf_all.select_dtypes('object').head()\n# Visualizando do comando info\n\ndf_all.info()\n# Verificando os valores nulos\n\ndf_all.isnull().sum()\n# Prenchendo com -1 os valores nulos de v2a1\n\ndf_all['v2a1'].fillna(-1, inplace=True)\n\n# Prenchendo com 0 os valores nulos de v18q1\n\ndf_all['v18q1'].fillna(0, inplace=True)\n\n# Prenchendo com -1 os valores nulos de SQBmeaned, meaneduc e rez_esc\n\ndf_all['SQBmeaned'].fillna(-1, inplace=True)\n\ndf_all['meaneduc'].fillna(-1, inplace=True)\n\ndf_all['rez_esc'].fillna(-1, inplace=True)\n# Verificando os valores nulos novamente\n\ndf_all.isnull().sum()\n# Separando as colunas para treinamento\n\nfeats = [c for c in df_all.columns if c not in ['Id', 'idhogar', 'Target']]\n# Separar os dataframes\n\ntrain, test = df_all[~df_all['Target'].isnull()], df_all[df_all['Target'].isnull()]\n\n\n\ntrain.shape, test.shape\n# Instanciando o random forest classifier\n\nrf = RandomForestClassifier(n_jobs=-1, n_estimators=200, random_state=42)\n# Treinando o modelo\n\nrf.fit(train[feats], train['Target'])\n# Prever o Target de teste usando o modelo treinado\n\ntest['Target'] = rf.predict(test[feats]).astype(int)\n# Vamos verificar as previsões\n\ntest['Target'].value_counts(normalize=True)\n# Criando o arquivo para submissão\n\ntest[['Id', 'Target']].to_csv('submission_1.csv', index=False)\nfig=plt.figure(figsize=(15, 20))\n\n\n\n# Avaliando a importancia de cada coluna (cada variável de entrada)\n\npd.Series(rf.feature_importances_, index=feats).sort_values().plot.barh()\nvarNaoUtilizadas = ['Id', 'idhogar', 'Target'] ## Ids e alvo\n\nvarNaoUtilizadasCat = ['female','area2'] ## Duplicadas\n\nvarNaoUtilizadasNum = ['tamhog','hogar_total','hhsize'] ## Correlação = 1\n\nvarNaoUtilizadasSQ = ['SQBescolari','SQBage','SQBhogar_total','SQBedjefe','SQBhogar_nin','SQBovercrowding','SQBdependency','SQBmeaned','agesq']\n\nvarNaoUtilizadas = varNaoUtilizadas + varNaoUtilizadasCat + varNaoUtilizadasNum + varNaoUtilizadasSQ\n\nvarNaoUtilizadas\n# Separando as colunas para treinamento\n\nfeats = [c for c in df_all.columns if c not in varNaoUtilizadas]\n# Treinando o modelo\n\nrf.fit(train[feats], train['Target'])\n# Prever o Target de teste usando o modelo treinado\n\ntest['Target'] = rf.predict(test[feats]).astype(int)\n# Vamos verificar as previsões\n\ntest['Target'].value_counts(normalize=True)\n# Criando o arquivo para submissão\n\ntest[['Id', 'Target']].to_csv('submission.csv', index=False)\n\n# 0.36832 contra 0.36781 da primeira\n## Usar || Nao Usar \n\n## 'techozinc' || 'techoentrepiso','techocane','techootro'\n\n## 'abastaguadentro' || 
'abastaguafuera','abastaguano'\n\n## 'public' || 'planpri','noelec','coopele'\n\n## 'sanitario3' || 'sanitario1','sanitario2', 'sanitario5','sanitario6'\n\n## 'energcocinar2' || 'energcocinar1', 'energcocinar3','energcocinar4'\n\n## 'elimbasu1' || 'elimbasu2','elimbasu3','elimbasu4','elimbasu6'\n\n## 'tipovivi1' || 'tipovivi2','tipovivi3','tipovivi4','tipovivi5'\n\nvarNaoUtilizadas = ['Id', 'idhogar', 'Target'] ## Ids e alvo\n\nvarNaoUtilizadasCat = ['female','area2'] ## Duplicadas\n\nvarNaoUtilizadasNum = ['tamhog','hogar_total','hhsize'] ## Correlação = 1\n\nvarNaoUtilizadasSQ = ['SQBescolari','SQBage','SQBhogar_total','SQBedjefe','SQBhogar_nin','SQBovercrowding','SQBdependency','SQBmeaned','agesq']\n\nvarPoucosRegistros = ['techoentrepiso','techocane','techootro','abastaguafuera','abastaguano','sanitario1','sanitario2', 'sanitario5','sanitario6','energcocinar1', 'energcocinar3','energcocinar4','elimbasu2','elimbasu3','elimbasu4','elimbasu6','tipovivi2','tipovivi3','tipovivi4','tipovivi5']\n\nvarNaoUtilizadas = varNaoUtilizadas + varNaoUtilizadasCat + varNaoUtilizadasNum + varNaoUtilizadasSQ + varPoucosRegistros\n\nvarNaoUtilizadas\n# Separando as colunas para treinamento\n\nfeats = [c for c in df_all.columns if c not in varNaoUtilizadas]\n# Treinando o modelo\n\nrf.fit(train[feats], train['Target'])\n# Prever o Target de teste usando o modelo treinado\n\ntest['Target'] = rf.predict(test[feats]).astype(int)\n# Vamos verificar as previsões\n\ntest['Target'].value_counts(normalize=True)\n# Criando o arquivo para submissão\n\ntest[['Id', 'Target']].to_csv('submission.csv', index=False)\n\n# 0.35910\nrf = RandomForestClassifier(max_depth=None, random_state=42, n_jobs=4, n_estimators=700,\n\n min_impurity_decrease=1e-3, min_samples_leaf=2,\n\n verbose=0, class_weight='balanced')\n# Separando as colunas para treinamento\n\nfeats = [c for c in df_all.columns if c not in varNaoUtilizadas]\n# Treinando o modelo\n\nrf.fit(train[feats], train['Target'])\n# Prever o Target de teste usando o modelo treinado\n\ntest['Target'] = rf.predict(test[feats]).astype(int)\n# Vamos verificar as previsões\n\ntest['Target'].value_counts(normalize=True)\n# Criando o arquivo para submissão\n\ntest[['Id', 'Target']].to_csv('submission.csv', index=False)\n\n# 0.42693\nrf.get_params().keys()\nparam_grid = {'max_depth': [None,5,10],\n\n 'max_leaf_nodes': [None,2,6],\n\n 'min_impurity_decrease' : [1,1e-3],\n\n 'n_jobs': [-1],\n\n 'min_samples_leaf': [2,4],\n\n 'n_estimators': [100,300,700],\n\n 'class_weight' : [None,'balanced']}\n\n\n\ngrid = GridSearchCV(rf,param_grid=param_grid,cv=4,scoring='f1_macro')\ngrid.fit(train[feats], train['Target'])\ngrid_df = pd.DataFrame(grid.cv_results_)\n\ngrid_df\n## Modelo com melhores parâmetros\n\ngrid_df.sort_values('rank_test_score',ascending=True).iloc[0,:]\n## acessando os melhores parametros\n\ngrid.best_params_\nrf = RandomForestClassifier(max_depth=None, random_state=42, n_jobs=4, n_estimators=100,max_leaf_nodes=None,\n\n min_impurity_decrease=0.001, min_samples_leaf=4,\n\n verbose=0, class_weight='balanced')\n# Separando as colunas para treinamento\n\nfeats = [c for c in df_all.columns if c not in varNaoUtilizadas]\n# Treinando o modelo\n\nrf.fit(train[feats], train['Target'])\n# Prever o Target de teste usando o modelo treinado\n\ntest['Target'] = rf.predict(test[feats]).astype(int)\n# Vamos verificar as previsões\n\ntest['Target'].value_counts(normalize=True)\n# Criando o arquivo para submissão\n\ntest[['Id', 'Target']].to_csv('submission.csv', index=False)\n\n# 
0.42100\nfig=plt.figure(figsize=(15, 20))\n\n\n\n# Avaliando a importancia de cada coluna (cada variável de entrada)\n\npd.Series(rf.feature_importances_, index=feats).sort_values().plot.barh()","repo_name":"aorursy/new-nb-1","sub_path":"asgvitor_1931133123-trabalho-1-vitor-alves.py","file_name":"asgvitor_1931133123-trabalho-1-vitor-alves.py","file_ext":"py","file_size_in_byte":16585,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2141239362","text":"import pandas as pd\nfrom os.path import getsize, join, dirname, abspath, exists\nfrom qiita_db.util import get_filepath_information, compute_checksum\nfrom qiita_db.sql_connection import TRN\n\n\nwith TRN:\n sql = \"\"\"SELECT filepath_id\n FROM qiita.filepath\"\"\"\n TRN.add(sql)\n fids = TRN.execute_fetchflatten()\n\n\nfpath = join(dirname(abspath(__file__)), 'support_files', 'patches',\n 'python_patches', '74.py.cache.tsv')\ncache = dict()\nif exists(fpath):\n df = pd.read_csv(fpath, sep='\\t', index_col=0, dtype=str,\n names=['filepath_id', 'checksum', 'fp_size'])\n cache = df.to_dict('index')\n\nfor fid in fids:\n if fid not in cache:\n finfo = get_filepath_information(fid)\n try:\n size = getsize(finfo['fullpath'])\n except FileNotFoundError:\n size = 0\n\n try:\n checksum = compute_checksum(finfo['fullpath'])\n except FileNotFoundError:\n checksum = ''\n else:\n checksum = cache[fid]['checksum']\n size = cache[fid]['fp_size']\n\n with TRN:\n sql = \"\"\"UPDATE qiita.filepath\n SET fp_size = %s, checksum = %s\n WHERE filepath_id = %s\"\"\"\n TRN.add(sql, tuple([size, checksum, fid]))\n TRN.execute()\n","repo_name":"qiita-spots/qiita","sub_path":"qiita_db/support_files/patches/python_patches/74.py","file_name":"74.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"78"} +{"seq_id":"40922764915","text":"import logging\nimport sqlite3\n\nimport pandas as pd\n\n\ndef init_db(db: str):\n # Create a connection to the database\n conn = create_connection(db)\n\n # Create the \"pulse_data\" table if it doesn't exist\n if conn is not None:\n create_table_file = \"\"\"CREATE TABLE IF NOT EXISTS pulse_data (\n date timestamp,\n systolic integer,\n diastolic integer,\n pulse integer,\n notes text,\n measurement_method text,\n row_hash text,\n last_update timestamp\n );\"\"\"\n create_table(conn, create_table_file)\n\n return conn\n\n\ndef create_connection(db_file: str):\n \"\"\"create a database connection to the SQLite database\n specified by db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except sqlite3.Error as e:\n logging.error(e)\n return conn\n\n\ndef create_table(conn, create_table_sql: str):\n \"\"\"create a table from the create_table_sql statement\n :param conn: Connection object\n :param create_table_sql: a CREATE TABLE statement\n :return:\n \"\"\"\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except sqlite3.Error as e:\n logging.error(e)\n\n\ndef get_critical_values(df: pd.DataFrame) -> list:\n critical_list = []\n # Get the columns that we want to check\n systolic = df[\"systolic\"]\n diastolic = df[\"diastolic\"]\n pulse = df[\"pulse\"]\n date = df[\"date\"]\n\n # Iterate over the rows of the DataFrame\n for i in range(len(df)):\n # Check if any of the values are critical\n if (\n systolic.iloc[i] > 140\n or diastolic.iloc[i] > 90\n or pulse.iloc[i] > 100\n or 
pulse.iloc[i] < 60\n ):\n critical_list.append(date.iloc[i])\n\n return critical_list\n\n\ndef transform_df_date(df: pd.DataFrame) -> pd.DataFrame:\n\n # Create a new \"date\" column by combining the \"Datum\" and \"Zeit\" columns\n df[\"date\"] = pd.to_datetime(df[\"Datum\"] + df[\"Zeit\"], format=\"%d. %B %Y%H:%M\")\n\n # Drop the \"Datum\" and \"Zeit\" columns\n df.drop(columns=[\"Zeit\"], inplace=True)\n return df\n","repo_name":"chinne/blood-pressure-app","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31908569538","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom functools import partial\n\nDefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, strides=1,\n padding=\"SAME\", use_bias=False)\n\nclass ResidualUnit(keras.layers.Layer):\n def __init__(self, filters, strides=1, activation=\"relu\", **kwargs):\n super().__init__(**kwargs)\n self.activation = keras.activations.get(activation)\n self.main_layers = [\n DefaultConv2D(filters, strides=strides),\n keras.layers.BatchNormalization(),\n self.activation,\n DefaultConv2D(filters),\n keras.layers.BatchNormalization()]\n self.skip_layers = []\n if strides > 1:\n self.skip_layers = [\n DefaultConv2D(filters, kernel_size=1, strides=strides),\n keras.layers.BatchNormalization()]\n\n def call(self, inputs):\n Z = inputs\n for layer in self.main_layers:\n Z = layer(Z)\n skip_Z = inputs\n for layer in self.skip_layers:\n skip_Z = layer(skip_Z)\n return self.activation(Z + skip_Z)\n\nclass Resnet34(keras.Model):\n def __init__(\n self,\n output_dim,\n input_shape=[224, 224, 3],\n **kwargs \n ):\n super().__init__(**kwargs)\n self.low_conv = [\n DefaultConv2D(64, 7, strides=2, padding=\"same\", use_bias=False,\n input_shape=input_shape),\n keras.layers.BatchNormalization(),\n keras.layers.Activation(\"relu\"),\n keras.layers.MaxPool2D(pool_size=3, strides=2, padding=\"same\")\n ]\n \n prev_filters = 64\n self.rus = []\n for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:\n strides = 1 if filters == prev_filters else 2\n self.rus.append(ResidualUnit(filters, strides))\n prev_filters = filters\n \n self.avg_pool = keras.layers.GlobalAvgPool2D()\n self.out = [\n keras.layers.Flatten(),\n keras.layers.Dense(output_dim, activation=\"softmax\")\n ]\n \n def call(self, inputs):\n for unit in self.low_conv:\n z = unit(inputs)\n for ru in self.rus:\n z = ru(z)\n \n z = self.avg_pool(z)\n for unit in self.out:\n z = unit(z)\n return z \n\n\n","repo_name":"minhngt62/dslab-training","sub_path":"session_cnn/models/resnet/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"35396396699","text":"import asyncio\nfrom asyncio import sleep\nfrom random import choice\nfrom userbot.events import register\n\nT_R_D = [\n \"@PrajjuS\",\n \"@Vin02vin\",\n \"@Iamsaisharan\",\n \"@venomsamurai\",\n]\n\n@register(outgoing=True, pattern=\"^.trd$\")\nasync def truthrdare(trd):\n \"\"\"Truth or Dare\"\"\"\n await trd.edit(\"`Choosing Name...`\")\n await sleep(1.5)\n await trd.edit(\"`..............`\")\n await sleep(1.5)\n msg = await trd.edit(\"`Name is.....`\")\n await sleep(3)\n await trd.delete()\n await msg.reply(\"**∆ Truth or Dare ∆**\\n\\n__Name:__ \" + choice(T_R_D))\n 
\n","repo_name":"LUCKYRAJPUTOP/VibeXUserbot","sub_path":"userbot/modules/trd.py","file_name":"trd.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41413644188","text":"import torch\nfrom torch import nn\n\n# 通道注意力的一个实现\nclass SENet(nn.Module):\n def __init__(self, channel, ratio = 16):\n super(SENet, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // 16, False),\n nn.ReLU(),\n nn.Linear(channel // 16, channel, False),\n nn.Sigmoid()\n )\n\n def forward(self, x ):\n b, c, h, w = x.size()\n\n # b, c, 1, 1\n avg = self.avg_pool(x).view([b, c]) # 去掉最后两个1,1\n fc = self.fc(avg).view([b, c, 1, 1])\n return x * fc # 得到了权值之后再乘以原来的变量\n\n","repo_name":"zzzwind/unet","sub_path":"models/senet.py","file_name":"senet.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37724839182","text":"\"\"\"\n\n\"\"\"\n\nfrom plot_helper import plot_current_source\nimport pyNN.neuron as sim\n\nsim.setup()\n\npopulation = sim.Population(30, sim.IF_cond_exp(tau_m=10.0))\npopulation[27:28].record_v()\n\nsteps = sim.StepCurrentSource(times=[50.0, 110.0, 150.0, 210.0],\n amplitudes=[0.4, 0.6, -0.2, 0.2])\nsteps.inject_into(population[(6, 11, 27)])\nsteps._record()\n\nsim.run(250.0)\n\nt, i_inj = steps._get_data()\nv = population.get_data().segments[0].analogsignals[0]\n\nplot_current_source(t, i_inj, v,\n #v_range=(-66, -49),\n v_ticks=(-66, -64, -62, -60),\n i_range=(-0.3, 0.7),\n i_ticks=(-0.2, 0.0, 0.2, 0.4, 0.6),\n t_range=(0, 250))\n","repo_name":"NeuralEnsemble/PyNN","sub_path":"doc/pyplots/step_source.py","file_name":"step_source.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":256,"dataset":"github-code","pt":"78"} +{"seq_id":"27500749056","text":"import os\nimport numpy as np\n\nTRAIN_FILE = \"/mnt/workspace/users/leekt/HighlyAccurate/dataLoader/kitti_360_train.txt\"\n# TRAIN_START = 0\n# TRAIN_END = 99\nTRAIN_INTERVALS = [(0, 99), (5000, 5099)]\n\n\nTEST_FILE = \"/mnt/workspace/users/leekt/HighlyAccurate/dataLoader/kitti_360_test.txt\"\n# TEST_START = 100\n# TEST_END = 199\nTEST_INTERVALS = [(100, 199), (5100, 5199)]\nTEST_NOISE_SCALE = 1\n\nDRIVE_DIR = \"2013_05_28_drive_0000_sync\"\nCAMERA_DIRS = [\"image_01/data_rect\"]\n\nif __name__ == '__main__':\n np.random.seed(2023)\n\n # Generate train data\n with open(TRAIN_FILE, 'w') as f:\n for camera_dir in CAMERA_DIRS:\n # for i in range(TRAIN_START, TRAIN_END + 1):\n for start, end in TRAIN_INTERVALS:\n for i in range(start, end + 1):\n img_index = f\"{i:010}\" + \".png\\n\"\n content = os.path.join(DRIVE_DIR, camera_dir, img_index)\n f.writelines(content)\n\n with open(TEST_FILE, 'w') as f:\n for camera_dir in CAMERA_DIRS:\n # for i in range(TEST_START, TEST_END + 1):\n for start, end in TEST_INTERVALS:\n for i in range(start, end + 1):\n gt_shift_x = np.random.uniform(-TEST_NOISE_SCALE,\n TEST_NOISE_SCALE)\n gt_shift_y = np.random.uniform(-TEST_NOISE_SCALE,\n TEST_NOISE_SCALE)\n gt_shift_theta = np.random.uniform(\n -TEST_NOISE_SCALE, TEST_NOISE_SCALE)\n img_index = f\"{i:010}\" + \".png\"\n shift_data = f\" {gt_shift_x:.4f} {gt_shift_y:.4f} {gt_shift_theta:.4f}\\n\"\n content = os.path.join(DRIVE_DIR, camera_dir, img_index) + shift_data \n 
f.writelines(content)\n","repo_name":"Cross-view-localization-ROB-590/cross-view-localization","sub_path":"dataLoader/generate_kitti_360_data.py","file_name":"generate_kitti_360_data.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24943742521","text":"import os, glob, tempfile, shutil\n\nfrom . import Config, router, request, send_from_directory, send_file, json_response, APIError, require_auth\n\ndef bool_arg(name, request):\n\t''' Convert query string arguments to boolean values.\n\n\t\tIf key is present in the request args dict and is\n\t\tany of the string in the match array, returns True\n\t\totherwise False.\n\t'''\n\tkey = request.args.get(name)\n\treturn str(key).lower() in [ '1', 'true', 't', 'yes', 'y' ]\n\n@router.route('/logs/')\n@router.route('/logs/')\n@require_auth\ndef logs(filename=None):\n\t''' Returns the requested log file and optionally clears it.\n\n\t\tIf request made without parameters, just returns a list of\n\t\tthe log files available as list of { filename, size }.\n\n\t\tquery string parameters:\n\t\t\tdownload (bool) - download identified log file (as opposed to stream content)\n\t\t\tclear (bool) - clear the identified log file (content returned before clear)\n\t'''\n\tlogdir = Config.PATHS.LOGDIR\n\n\tif not filename:\n\t\t# return the list of logs available if no name requested\n\t\n\t\tlogs = [] # array of { filename, file size }\n\n\t\tfor f in glob.glob(os.path.join(logdir, '*.log*' )):\n\t\t\tpath = os.path.join(logdir, f)\n\n\t\t\tif os.path.isfile(path):\n\t\t\t\tlogs.append({ os.path.basename(f): os.path.getsize(path) })\n\n\t\treturn json_response({ \"logs\": logs })\n\n\t# see if requested log is available\n\tpath = os.path.join(logdir, filename)\n\n\tif os.path.isfile(path) and path in glob.glob(os.path.join(logdir, '*.log*')):\n\t\t# check that file exists and matches the log file globbing pattern\n\t\t# then check the query params for processing options\n\n\t\tdownload = bool_arg('download', request)\n\t\tclear = bool_arg('clear', request)\n\n\t\t# if clearing, use a temporary file to return current log\n\t\tif clear:\n\n\t\t\ttf = tempfile.NamedTemporaryFile(mode=\"r+b\")\n\t\t\twith open(path, 'r+b') as f:\n\t\t\t\tshutil.copyfileobj(f, tf)\t\t\t\n\t\t\ttf.seek(0)\n\t\t\tresponse = send_file(tf, mimetype='text/plain', as_attachment=download, attachment_filename=filename)\n\t\t\ttf.seek(0, os.SEEK_END)\n\t\t\tsize = tf.tell()\n\t\t\ttf.seek(0)\n\n\t\t\t# If we're clearing a rotated log file, it's name will\n\t\t\t# end in a number (e.g., \"cme.log.1, cme.log.2, ...\").\n\t\t\t# We'll delete those file completely, else we'll just\n\t\t\t# clear the file content.\n\t\t\tif os.path.splitext(path)[1] == '.log':\n\t\t\t\t# clear log content \n\t\t\t\topen(path, \"w\").close()\n\n\t\t\telse:\n\t\t\t\t# delete log file\n\t\t\t\tos.remove(path)\n\n\t\t\tresponse.headers.extend({\n\t\t\t\t'Content-Length': size\n\t\t\t})\n\n\t\telse:\n\t\t\t# set response header to indicate file download\n\t\t\tresponse = send_from_directory(logdir, filename, mimetype='text/plain', \n\t\t\t\tattachment_filename=filename, as_attachment=download)\n\n\t\tresponse.headers.extend({\n\t\t\t'Cache-Control': 'no-cache'\n\t\t})\n\n\t\treturn response\n\n\traise APIError('Invalid log file request', 
404)\n","repo_name":"Transtector/Avalanche-Cme-api","sub_path":"cmeapi/api_routes/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37411050682","text":"import os\n\nimport h5py\nfrom torchvision import transforms\nfrom torchvision.transforms import RandomResizedCrop\n\nfrom nupic.research.frameworks.pytorch.dataset_utils import (\n CachedDatasetFolder,\n HDF5Dataset,\n ImageNetPolicy,\n)\n\nIMAGENET_NUM_CLASSES = {\n 10: [\n \"n01440764\", \"n02102040\", \"n02979186\", \"n03000684\", \"n03028079\",\n \"n03394916\", \"n03417042\", \"n03425413\", \"n03445777\", \"n03888257\"\n ],\n 100: [\n \"n01440764\", \"n01592084\", \"n01601694\", \"n01630670\", \"n01631663\",\n \"n01664065\", \"n01677366\", \"n01693334\", \"n01734418\", \"n01751748\",\n \"n01755581\", \"n01855672\", \"n01877812\", \"n01978287\", \"n01981276\",\n \"n02025239\", \"n02027492\", \"n02033041\", \"n02056570\", \"n02089867\",\n \"n02091244\", \"n02091635\", \"n02093428\", \"n02094258\", \"n02104365\",\n \"n02105251\", \"n02106662\", \"n02107312\", \"n02108422\", \"n02112350\",\n \"n02129165\", \"n02174001\", \"n02268443\", \"n02317335\", \"n02410509\",\n \"n02423022\", \"n02454379\", \"n02457408\", \"n02488291\", \"n02497673\",\n \"n02536864\", \"n02640242\", \"n02655020\", \"n02727426\", \"n02783161\",\n \"n02808304\", \"n02841315\", \"n02871525\", \"n02892201\", \"n02971356\",\n \"n02979186\", \"n02981792\", \"n03018349\", \"n03125729\", \"n03133878\",\n \"n03207941\", \"n03250847\", \"n03272010\", \"n03372029\", \"n03400231\",\n \"n03457902\", \"n03481172\", \"n03482405\", \"n03602883\", \"n03680355\",\n \"n03697007\", \"n03763968\", \"n03791053\", \"n03804744\", \"n03837869\",\n \"n03854065\", \"n03891332\", \"n03954731\", \"n03956157\", \"n03970156\",\n \"n03976657\", \"n04004767\", \"n04065272\", \"n04120489\", \"n04149813\",\n \"n04192698\", \"n04200800\", \"n04252225\", \"n04259630\", \"n04332243\",\n \"n04335435\", \"n04346328\", \"n04350905\", \"n04404412\", \"n04461696\",\n \"n04462240\", \"n04509417\", \"n04550184\", \"n04606251\", \"n07716358\",\n \"n07718472\", \"n07836838\", \"n09428293\", \"n13040303\", \"n15075141\"\n ],\n}\n\n\ndef imagenet(\n data_path, train_dir=\"train\", val_dir=\"val\", num_classes=1000,\n use_auto_augment=False, sample_transform=None, target_transform=None,\n replicas_per_sample=1, train=True\n):\n \"\"\"\n Create train and val set of Imagenet dataset.\n \"\"\"\n\n if train:\n dataset = create_train_dataset(\n data_path=data_path,\n train_dir=train_dir,\n num_classes=num_classes,\n use_auto_augment=use_auto_augment,\n sample_transform=sample_transform,\n target_transform=target_transform,\n replicas_per_sample=replicas_per_sample,\n )\n else:\n dataset = create_validation_dataset(\n data_path=data_path,\n val_dir=val_dir,\n num_classes=num_classes,\n )\n\n return dataset\n\n\ndef create_train_dataset(\n data_path, train_dir, num_classes=1000, use_auto_augment=False,\n sample_transform=None, target_transform=None, replicas_per_sample=1\n):\n \"\"\"\n Configure Imagenet training dataset\n\n Creates :class:`CachedDatasetFolder` :class:`HDF5Dataset` pre-configured\n for the training cycle\n\n :param data_path: The directory or hdf5 file containing the dataset\n :param train_dir: The directory or hdf5 group containing the training data\n :param num_classes: Limit the dataset size to the given number of classes\n :param sample_transform: List of transforms 
acting on the samples\n to be added to the defaults below\n :param target_transform: List of transforms acting on the targets\n :param replicas_per_sample: Number of replicas to create per sample\n in the batch (each replica is transformed\n independently). Used in maxup.\n\n :return: CachedDatasetFolder or HDF5Dataset\n \"\"\"\n if use_auto_augment:\n transform = transforms.Compose(\n transforms=[\n RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n ImageNetPolicy(),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],\n inplace=True\n ),\n ],\n )\n else:\n transform = transforms.Compose(\n transforms=[\n RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],\n inplace=True\n ),\n ],\n )\n\n transform = transforms.Compose(\n transforms=[transform] + (sample_transform or []))\n\n if h5py.is_hdf5(data_path):\n # Use fixed Imagenet classes if mapping is available\n if num_classes in IMAGENET_NUM_CLASSES:\n classes = IMAGENET_NUM_CLASSES[num_classes]\n dataset = HDF5Dataset(hdf5_file=data_path, root=train_dir,\n classes=classes, transform=transform,\n target_transform=target_transform,\n replicas_per_sample=replicas_per_sample)\n else:\n dataset = HDF5Dataset(hdf5_file=data_path, root=train_dir,\n num_classes=num_classes, transform=transform,\n target_transform=target_transform,\n replicas_per_sample=replicas_per_sample)\n else:\n dataset = CachedDatasetFolder(root=os.path.join(data_path, train_dir),\n num_classes=num_classes, transform=transform,\n target_transform=target_transform)\n return dataset\n\n\ndef create_validation_dataset(data_path, val_dir, num_classes=1000):\n \"\"\"\n Configure Imagenet validation dataloader\n\n Creates :class:`CachedDatasetFolder` or :class:`HDF5Dataset` pre-configured\n for the validation cycle.\n\n :param data_path: The directory or hdf5 file containing the dataset\n :param val_dir: The directory containing or hdf5 group the validation data\n :param num_classes: Limit the dataset size to the given number of classes\n :return: CachedDatasetFolder or HDF5Dataset\n \"\"\"\n\n transform = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],\n inplace=True\n ),\n ]\n )\n if h5py.is_hdf5(data_path):\n if num_classes in IMAGENET_NUM_CLASSES:\n classes = IMAGENET_NUM_CLASSES[num_classes]\n dataset = HDF5Dataset(hdf5_file=data_path, root=val_dir,\n classes=classes, transform=transform)\n else:\n dataset = HDF5Dataset(hdf5_file=data_path, root=val_dir,\n num_classes=num_classes, transform=transform)\n else:\n dataset = CachedDatasetFolder(root=os.path.join(data_path, val_dir),\n num_classes=num_classes, transform=transform)\n return dataset\n","repo_name":"numenta/nupic.research","sub_path":"src/nupic/research/frameworks/pytorch/datasets/imagenet_factory.py","file_name":"imagenet_factory.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"78"} +{"seq_id":"23275105934","text":"#!/usr/bin/python3\nimport requests\nimport nltk\nimport pickle\nimport numpy as np\nimport json\nimport random\nimport tensorflow as tf\n\nfrom keras.models import load_model\nfrom keras.applications import inception_v3\n\nAPI_URL = 
\"http://localhost:5500/v1/models/msg_classifier/versions/1:predict\"\n\nclass ChatLocal:\n def __init__(self):\n self.model = load_model('chatbot_model.h5')\n self.intents = json.loads(open('intents.json').read())\n self.words = pickle.load(open('words.pkl','rb'))\n self.classes = pickle.load(open('classes.pkl','rb'))\n\n def _clean_up_sentence(self, sentence):\n sentence_words = nltk.word_tokenize(sentence)\n return sentence_words\n\n def _bow(self, sentence, words, show_details=True):\n # tokenize the pattern\n sentence_words = self._clean_up_sentence(sentence)\n # bag of words - matrix of N words, vocabulary matrix\n bag = [0]*len(words)\n for s in sentence_words:\n for i,w in enumerate(self.words):\n if w == s:\n # assign 1 if current word is in the vocabulary position\n bag[i] = 1\n if show_details:\n print (\"found in bag: %s\" % w)\n return(np.array(bag))\n def _predict_class_local(self, sentence, model):\n # filter out predictions below a threshold\n p = self._bow(sentence, self.words,show_details=False)\n res = model.predict(np.array([p]))[0]\n print(res)\n ERROR_THRESHOLD = 0.25\n results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]\n # sort by strength of probability\n results.sort(key=lambda x: x[1], reverse=True)\n return_list = []\n for r in results:\n return_list.append({\"intent\": self.classes[r[0]], \"probability\": str(r[1])})\n print(return_list)\n return return_list\n\n def _getResponse(self, ints, intents_json):\n tag = ints[0]['intent']\n list_of_intents = intents_json['intents']\n for i in list_of_intents:\n if(i['tag']== tag):\n result = random.choice(i['responses'])\n break\n return result\n\n def chatbot_response(self, msg):\n ints = self._predict_class_local(msg, self.model)\n res = self._getResponse(ints, self.intents)\n return res\n","repo_name":"adilGhaffarDev/PyChatBot","sub_path":"chatlocal.py","file_name":"chatlocal.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33775340699","text":"import mock\nimport io\nimport os\nimport shutil\nimport stat\nimport subprocess\nimport tempfile\nimport unittest2 as unittest\n\nfrom scality_manila_utils import utils\nfrom scality_manila_utils.exceptions import EnvironmentException\n\n\nclass TestUtils(unittest.TestCase):\n def setUp(self):\n self.test_directories = []\n\n def tearDown(self):\n for directory in self.test_directories:\n shutil.rmtree(directory)\n\n @mock.patch('os.geteuid', return_value=1000)\n @mock.patch('os.getegid', return_value=1000)\n @mock.patch('os.seteuid')\n @mock.patch('os.setegid')\n def test_elevated_privileges(self, setegid, seteuid, getegid, geteuid):\n unprivileged_uid = os.geteuid()\n unprivileged_gid = os.getegid()\n\n with utils.elevated_privileges():\n # Privileges should be elevated inside the context manager\n seteuid.assert_called_once_with(0)\n setegid.assert_called_once_with(0)\n seteuid.reset_mock()\n setegid.reset_mock()\n\n # Privileges should be reset once outside the context manager\n seteuid.assert_called_once_with(unprivileged_uid)\n setegid.assert_called_once_with(unprivileged_gid)\n\n def test_find_pids(self):\n # Setup a directory to serve as /proc\n proc_path = tempfile.mkdtemp()\n self.test_directories.append(proc_path)\n processes = ((10, 'p1'), (20, 'p2'), (30, 'p3'), (40, 'p4'))\n for pid, process_name in processes:\n self._create_proc_entry(proc_path, pid, process_name)\n\n # Add some other directories under proc\n non_processes = ('otherdir', 
'noprocess', 'sys')\n for directory in non_processes:\n os.mkdir(os.path.join(proc_path, directory))\n\n # Mocks for `os.listdir` and `os.path.join`\n def listdir_mock(*args, **kwargs):\n proc_listing = [str(pid) for pid, _ in processes]\n proc_listing.extend(non_processes)\n return proc_listing\n\n def join_mock(procdir, pid, status):\n return \"/{proc:s}/{pid:s}/{status:s}\".format(proc=proc_path,\n pid=pid,\n status=status)\n\n with mock.patch('os.listdir', side_effect=listdir_mock):\n with mock.patch('os.path.join', side_effect=join_mock):\n for not_a_process in non_processes:\n self.assertEqual(utils.find_pids(not_a_process), [])\n\n for pid, process_name in processes:\n self.assertEqual(utils.find_pids(process_name), [pid])\n\n def _create_proc_entry(self, proc_path, pid, process_name):\n pid_path = os.path.join(proc_path, str(pid))\n os.mkdir(pid_path)\n with io.open(os.path.join(pid_path, 'status'), 'wt') as f:\n f.write(u'Name: {0:s}'.format(process_name))\n\n def test_binary_check(self):\n self.test_directories = [tempfile.mkdtemp(), tempfile.mkdtemp()]\n binary_name = 'bin'\n\n with self.assertRaises(EnvironmentException):\n utils.binary_check(binary_name, [])\n utils.binary_check('', self.test_directories)\n utils.binary_check(binary_name, self.test_directories)\n\n # Put the expected binary in a test directory\n binary_path = os.path.join(self.test_directories[-1], binary_name)\n io.open(binary_path, 'wb').close()\n # Should be ok\n utils.binary_check(binary_name, self.test_directories)\n\n @mock.patch('scality_manila_utils.utils.find_pids')\n def test_process_check(self, find_pids):\n find_pids.return_value = []\n process_name = 'sfused'\n\n with self.assertRaises(EnvironmentException):\n utils.process_check(process_name)\n\n find_pids.assert_called_once_with(process_name)\n find_pids.reset_mock()\n find_pids.return_value = [100]\n utils.process_check(process_name)\n find_pids.assert_called_once_with(process_name)\n\n def test_safe_write(self):\n testdir = tempfile.mkdtemp()\n self.test_directories.append(testdir)\n test_file = os.path.join(testdir, 'testfile')\n sometext = 'abc123'\n mode = 0o444\n\n utils.safe_write(sometext, test_file, mode)\n\n # Check for expected permission bitmask\n self.assertEqual(stat.S_IMODE(os.stat(test_file).st_mode), mode)\n\n # Check contents\n with io.open(test_file, 'rt') as f:\n self.assertEqual(f.read(), sometext)\n\n @mock.patch('subprocess.check_call')\n def test_nfs_mount(self, check_call):\n export_path = '127.0.0.1:/'\n with utils.nfs_mount(export_path) as root:\n self.assertTrue(os.path.exists(root))\n check_call.assert_called_once_with(['mount', export_path, root])\n check_call.reset_mock()\n\n self.assertFalse(os.path.exists(root))\n check_call.assert_called_once_with(['umount', root])\n\n # Check that cleanup is made when an exception is raised\n class TestException(Exception):\n \"\"\"Cleanup test exception\"\"\"\n\n check_call.reset_mock()\n try:\n with utils.nfs_mount(export_path) as root:\n self.assertTrue(os.path.exists(root))\n check_call.assert_called_once_with(['mount', export_path,\n root])\n check_call.reset_mock()\n raise TestException\n except TestException:\n self.assertFalse(os.path.exists(root))\n check_call.assert_called_once_with(['umount', root])\n\n def test_fsync_path(self):\n fd = 10\n path = '/'\n with mock.patch('os.open', return_value=fd) as osopen:\n with mock.patch('os.fsync') as fsync:\n with mock.patch('os.close') as osclose:\n utils.fsync_path(path)\n osopen.assert_called_once_with(\n path,\n 
os.O_RDONLY | os.O_DIRECTORY\n )\n fsync.assert_called_once_with(fd)\n osclose.assert_called_once_with(fd)\n\n @mock.patch('os.seteuid', mock.Mock())\n @mock.patch('os.setegid', mock.Mock())\n def test_is_stored_on_sofs(self):\n header = (\n 'Filesystem 1024-blocks Used Available Capacity Mounted on'\n )\n fuse_line = '/dev/fuse 4088408 0 4088408 0% /r'\n bad_line = '/dev/sda1 20608636 1119716 18619320 6% /'\n\n on_sofs = header + '\\n' + fuse_line\n with mock.patch('subprocess.check_output', return_value=on_sofs,\n autospec=True) as df:\n path = '/r/some/share'\n self.assertTrue(utils.is_stored_on_sofs(path))\n df.assert_called_once_with(['df', '-P', path])\n\n no_sofs = header + '\\n' + bad_line\n with mock.patch('subprocess.check_output', return_value=no_sofs,\n autospec=True) as df:\n path = '/var/log'\n self.assertFalse(utils.is_stored_on_sofs(path))\n df.assert_called_once_with(['df', '-P', path])\n\n side_effect = subprocess.CalledProcessError(None, None, None)\n with mock.patch('subprocess.check_output',\n autospec=True, side_effect=side_effect) as df:\n self.assertRaises(subprocess.CalledProcessError,\n utils.is_stored_on_sofs, path)\n df.assert_called_once_with(['df', '-P', path])\n\n @mock.patch('subprocess.Popen', autospec=True, spec_set=True)\n def test_execute_when_cmd_failed(self, mock_popen):\n type(mock_popen.return_value).returncode = mock.PropertyMock(\n return_value=1)\n mock_popen.return_value.communicate.return_value = (b'out', b'err')\n\n cmd = ['cmd', 'arg1']\n try:\n utils.execute(cmd, \"error: {stdout}, {stderr}\")\n except EnvironmentError as exc:\n self.assertEqual('error: out, err', exc.args[0])\n else:\n self.fail(\"Should have raised an EnvironmentError\")\n\n mock_popen.assert_called_once_with(cmd, stdout=-1, stderr=-1)\n\n @mock.patch('subprocess.Popen', autospec=True, spec_set=True)\n def test_execute_when_cmd_succeeded(self, mock_popen):\n type(mock_popen.return_value).returncode = mock.PropertyMock(\n return_value=0)\n mock_popen.return_value.communicate.return_value = (b'out', b'err')\n\n cmd = ['cmd', 'arg1']\n self.assertEqual((u'out', u'err'), utils.execute(cmd, \"\"))\n\n mock_popen.assert_called_once_with(cmd, stdout=-1, stderr=-1)\n","repo_name":"scality/scality-manila-utils","sub_path":"test/unit/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24755232836","text":"# ||===========================================================================||\n# || Source https://www.useblackbox.io/search : how to do fractals with python ||\n# ||===========================================================================||\n\nfrom turtle import Screen, Turtle\n\n\ndef fractal(level, turtle, length, direction=90):\n for _ in range(3):\n turtle.forward(length)\n\n if level > 1:\n fractal(level - 1, turtle, int(length / 2), -direction)\n\n turtle.right(direction)\n\n turtle.forward(length)\n turtle.right(direction)\n\n\ndef main():\n screen = Screen()\n turtle = Turtle()\n turtle.speed('fastest') # because I have no patience\n\n fractal(8, turtle, 100, 90)\n\n turtle.hideturtle()\n screen.exitonclick()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Malamdg/fractal-generator","sub_path":"src/Snippets/TurtleFractal1.py","file_name":"TurtleFractal1.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"3624634545","text":"import numpy as np\nimport torch\n\n\nclass OUNoise(object):\n def __init__(self, action_dim, mu=0.0, theta=0.15, max_sigma=0.4, min_sigma=0.4, decay_period=100000):\n self.state = None\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n self.action_dim = action_dim\n self.reset()\n\n def reset(self):\n self.state = np.ones(self.action_dim) * self.mu\n\n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)\n self.state = x + dx\n return self.state\n\n def get_action(self, action, t=0):\n ou_state = self.evolve_state()\n self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)\n return torch.tensor([action + ou_state]).float()\n\n\nclass NormalNoiseStrategy:\n def __init__(self, action_dim, std=10, exploration_noise_ratio=0.1):\n self.action_dim = action_dim\n self.std = std\n self.exploration_noise_ratio = exploration_noise_ratio\n\n def reset(self):\n pass\n\n def get_action(self, action, t, max_exploration=False):\n if max_exploration:\n noise_scale = self.std\n else:\n noise_scale = self.exploration_noise_ratio * self.std\n\n noise = np.random.normal(loc=0, scale=noise_scale, size=self.action_dim)\n noisy_action = action + noise\n return torch.tensor([noisy_action]).float()\n\n\nclass NormalNoiseDecayStrategy:\n def __init__(self, action_dim, std=10, init_noise_ratio=0.5, min_noise_ratio=0.1, decay_steps=20):\n self.t = 0\n self.action_dim = action_dim\n self.std = std\n self.noise_ratio = init_noise_ratio\n self.init_noise_ratio = init_noise_ratio\n self.min_noise_ratio = min_noise_ratio\n self.decay_steps = decay_steps\n\n def _noise_ratio_update(self):\n noise_ratio = 1 - self.t / self.decay_steps\n noise_ratio = (self.init_noise_ratio - self.min_noise_ratio) * noise_ratio + self.min_noise_ratio\n noise_ratio = np.clip(noise_ratio, self.min_noise_ratio, self.init_noise_ratio)\n self.t += 1\n return noise_ratio\n\n def reset(self):\n self.t = 0\n\n def get_action(self, action, t, max_exploration=False):\n if max_exploration:\n noise_scale = self.std\n else:\n noise_scale = self.noise_ratio * self.std\n\n noise = np.random.normal(loc=0, scale=noise_scale, size=self.action_dim)\n noisy_action = action + noise\n\n self.noise_ratio = self._noise_ratio_update()\n return torch.tensor([noisy_action]).float()\n","repo_name":"srosy2/recsys","sub_path":"project/infrastructure/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23007534384","text":"import random\nimport pygame\nimport math\nfrom time import time\n\nfrom globalconst import *\nfrom gameobjects import *\nfrom sound import *\n\nBALL__SPEED_DIV=8*4 # slows down all ball movement\nBALL__SPEED_COLLISION_MULT_GROUND=80 # in % per impact\nBALL__WALL_HEIGHT=12\nBALL_SHADOW_COLOR=(0,96,0)\n\nclass Ball(GameObject):\n\n def __init__(self, gamestate):\n super(Ball, self).__init__(0, 0, 'o')\n\n #self.tile=tile # set by super()\n self.width=5 # to match sprite\n self.height=4 # to match sprite\n\n #self.x=0 # set by respawn()\n #self.y=0 # set by respawn()\n #self.z=0 # set by respawn(), ground at 0, positive up\n #self.xdir=0 # set by respawn(), velocity in pixels per frame\n #self.ydir=0 # set by respawn(), velocity in pixels per frame\n #self.zdir=0 # set by 
respawn(), velocity in pixels per frame\n self.respawn(gamestate)\n\n def stop(self):\n\n self.z=0\n self.xdir=0\n self.ydir=0\n self.zdir=0\n\n def respawn(self, gamestate):\n\n # default\n self.x=self.spawnx\n self.y=self.spawny\n self.z=8\n self.xdir=0\n self.ydir=0\n self.zdir=0\n\n # find and use random 'o' in level\n spawnPoints=[]\n for y in range(LEV_H):\n for x in range(LEV_W):\n if gamestate.getLevel()[y][x]=='o':\n spawnPoints.append([x*8+2,y*8+2])\n spawnPoint=random.choice(spawnPoints)\n self.x=spawnPoint[0]\n self.y=spawnPoint[1]\n\n self.state = \"ALIVE\"\n playSound('whistle')\n\n def __getLevelTile(self, gamestate): # at current x y\n\n tileX=round(self.x/TILE_W)\n if tileX<0:\n tileX=0\n if tileX>LEV_W-1:\n tileX=LEV_W-1\n\n tileY=round(self.y/TILE_H)\n if tileY<0:\n tileY=0\n if tileY>LEV_H-1:\n tileY=LEV_H-1\n\n return gamestate.getLevel()[tileY][tileX]\n\n def kick(self, xdir, ydir, zdir):\n\n self.xdir=xdir\n self.ydir=ydir\n self.zdir=zdir\n\n playSound('kick')\n\n def update(self, gamestate):\n\n if self.state == \"DEAD\":\n if self.time_of_death + BALL_RESPAWN_TIME < time():\n self.respawn(gamestate)\n else:\n return\n if self.state == \"GOAL\":\n if self.time_of_death + BALL_RESPAWN_TIME_AFTER_GOAL < time():\n self.respawn(gamestate)\n else:\n return\n\n # move z\n oldZ=self.z\n self.zdir-=1 # gravity\n self.z+=self.zdir/BALL__SPEED_DIV\n\n # collide z\n levelTile=self.__getLevelTile(gamestate)\n if levelTile==\"#\":\n if self.z-5: # snap to 0?\n self.zdir=0\n self.xdir=0\n self.ydir=0\n self.zdir=math.fabs(self.zdir * BALL__SPEED_COLLISION_MULT_GROUND/100)\n self.xdir= self.xdir * BALL__SPEED_COLLISION_MULT_GROUND/100\n self.ydir= self.ydir * BALL__SPEED_COLLISION_MULT_GROUND/100\n self.z=oldZ\n\n # move x\n oldX=self.x\n oldLevelTile=self.__getLevelTile(gamestate)\n self.x+=self.xdir/BALL__SPEED_DIV\n levelTile=self.__getLevelTile(gamestate)\n\n # collide x\n hit=False\n if self.x<0:\n hit=True\n if self.x>SCR_W:\n hit=True\n if levelTile==\"#\" and self.zSCR_H:\n hit=True\n if levelTile==\"#\" and self.zshadowShrinkageXMax:\n shadowShrinkageX=shadowShrinkageXMax\n shadowShrinkageY=self.z/8\n shadowShrinkageYMax=self.height/2-1\n if shadowShrinkageY>shadowShrinkageYMax:\n shadowShrinkageY=shadowShrinkageYMax\n pygame.draw.rect(screen,BALL_SHADOW_COLOR,pygame.Rect(self.x+shadowShrinkageX,self.y+1+shadowShrinkageY,self.width-2*shadowShrinkageX,self.height-2*shadowShrinkageY))\n\n def draw(self, screen, tiles, gamestate):\n\n if self.state in ['DEAD', 'GOAL']:\n return\n\n ## above wall indicator\n ## replaced by shadow disappearing\n #if self.z>BALL__WALL_HEIGHT:\n # x=round(self.x+self.width/2)-1\n # y=self.y+self.height/2\n # pygame.draw.line(screen,(255,64,32),(x-2,y-BALL__WALL_HEIGHT),(x+2,y-BALL__WALL_HEIGHT))\n # pygame.draw.line(screen,(255,64,32),(x,y),(x,y-BALL__WALL_HEIGHT))\n\n # ball sprite\n # offset to match bounding box\n screen.blit(tiles[self.tile],(self.x-1,self.y-3-self.z))\n","repo_name":"SpieleentwicklungBodensee/snake_soccer","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2146640809","text":"from setuptools import setup\n\npackage_name = 'yolo_trt_ros2'\nutils = 'yolo_trt_ros2/utils'\nplugins = 'yolo_trt_ros2/plugins'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name, utils, plugins],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + 
package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='jetson',\n maintainer_email='jetson@todo.todo',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'yolo_trt_node = yolo_trt_ros2.yolo_trt_node:main'\n ],\n },\n)\n","repo_name":"elmexx/eWolf_ROS2","sub_path":"yolo_trt_ros2/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41235076606","text":"n = int(input())\np = list(map(int, input().split()))\np.sort()\ncount = [0] * (n)\n\n\nfor i in range(n):\n if i == 0:\n count[0] += p[0]\n continue\n count[i] = count[i - 1] + p[i]\n\nprint(sum(count))\n","repo_name":"ocxh/std_algorithm","sub_path":"BOJ/11399.py","file_name":"11399.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35166923432","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time, sys, json\n\nimport tf_cart_pole_agent\nfrom plot_eval import colors\n\ndef read_trajectories(filename, test_denom=4):\n '''reads cartpole trajectories and returns them as (xs, actions, nxs) tuples for training set and test set each.\n 1 / `test_denom` of data points is returned as test set, so default is 25% test, 75% train'''\n train = [[], [], []]\n test = [[], [], []]\n with open(filename) as f:\n for line_nb, line in enumerate(f):\n trajectory = json.loads(line)\n for i in range(0, len(trajectory) - 2, 2):\n if line_nb % test_denom == 0:\n test[0].append(trajectory[i])\n test[1].append(trajectory[i + 1])\n test[2].append(trajectory[i + 2])\n else:\n train[0].append(trajectory[i])\n train[1].append(trajectory[i + 1])\n train[2].append(trajectory[i + 2])\n return train, test\n\ndef train_model(agent, train_set, test_set, batch_size=100, episodes=1000):\n '''samples `batch_size` data points each to do `episodes` model training steps.\n returns model error over time.'''\n agent.experience['xs'], agent.experience['actions'], agent.experience['nxs'] = train_set\n\n errors = [agent.get_model_error(*test_set)]\n\n for i in range(episodes):\n xs, actions, dxs = agent.sample_experience(batch_size, agent.model_training_noise)\n agent.net_session.run(agent.train_model, feed_dict={\n agent.net_xs: xs,\n agent.net_actions: actions,\n agent.net_dxs: dxs})\n\n agent.model_training_noise *= agent.model_training_noise_decay\n\n err = agent.get_model_error(*test_set)\n errors.append(err)\n\n if i % 100 == 0:\n print('\\r%i / %i' % (i, episodes), end='', flush=True)\n\n print()\n\n return errors\n\n\nif __name__ == '__main__':\n # param_sets = [{'model_training_noise': 0},\n # {'model_training_noise': 1.0},\n # {'model_training_noise': 2.0},\n # {'model_training_noise': 3.0}]\n\n # param_sets = [{'model_training_noise': 0},\n # {'model_training_noise': 3.0},\n # {'model_training_noise': 5.0},\n # {'model_training_noise': 10., 'model_training_noise_decay': 0.99}]\n\n # lrelu = lambda x: tf.nn.leaky_relu(x, alpha=0.01)\n # param_sets = [{'model_training_noise': 2.0, 'model_training_noise_decay': 1.0, 'model_afunc': tf.nn.relu, 'learn_model': 'delta'},\n # {'model_training_noise': 2.0, 'model_training_noise_decay': 1.0, 'model_afunc': tf.nn.relu, 'learn_model': 'absolute'}]\n\n # param_sets = [{'learn_model': 
'delta', 'rect_leakiness': 0.0},\n # {'learn_model': 'delta', 'rect_leakiness': 0.2},\n # {'learn_model': 'delta', 'rect_leakiness': 1.0},\n # {'learn_model': 'delta', 'rect_leakiness': 1.5}]\n\n param_sets = [{'model_afunc': tf.nn.relu},\n {'model_afunc': tf.nn.sigmoid},\n {'model_afunc': tf.nn.tanh},\n {'model_afunc': None}]\n\n try:\n trajectories_filename = sys.argv[1]\n except Exception as e:\n trajectories_filename = 'cartpole-trajectories.txt'\n\n # over how many runs to average per param set\n iterations = 10\n\n train, test = read_trajectories(trajectories_filename, 5)\n print('train set: %i, test set: %i' % (len(train[0]), len(test[0])))\n\n for i, params in enumerate(param_sets):\n print('parameters:', params)\n errors = []\n for _ in range(iterations):\n agent = tf_cart_pole_agent.Agent(**params, random_seed=31415926+iterations)\n errors.append(train_model(agent, train, test))\n\n errors = np.asarray(errors)\n stderr = errors.std(axis=0, ddof=1) / np.sqrt(errors.shape[0])\n mean_error = np.mean(errors, axis=0)\n\n label = ' '.join([str(k) + '=' + str(v) for k, v in sorted(params.items())])\n #label = 'η=%.1f' % params['model_training_noise'] + ((' γ=%f' % params['model_training_noise_decay']).rstrip('0') if 'model_training_noise_decay' in params else '')\n #label = params['learn_model']\n #label = ('α=%.2f' % params['rect_leakiness']).rstrip('0')\n plt.plot(mean_error, label=label, color=colors[i])\n #plt.errorbar(range(mean_error.shape[0]), mean_error, stderr, color=colors[i])\n\n plt.legend()\n plt.xlabel('number of trainings')\n plt.ylabel('model error')\n plt.show()\n","repo_name":"maltesie/ddpg-reinforcement-learning","sub_path":"code/pg/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31711984342","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Discover which environments belong to several named organizations.\n\nThis script talks to the server listed in ``BASE_URL``. ``BASE_URL`` should\n*not* have a trailing slash. In other words, \".com\" is good, but \".com/\" is\nbad.\n\n\"\"\"\nimport json\nimport pprint\nimport requests\n\n\nBASE_URL = 'https://example.com'\nORG_NAMES = ('TestOrg1', 'TestOrg2')\n\n\ndef main():\n \"\"\"Search for organizations, then print out the environments in each.\n\n Organizations are searched for by name. 
Exit if more or less than one\n    organization is returned when searching for a given organization name.\n\n    \"\"\"\n    # Get the IDs of several organizations.\n    organizations = {}  # ID → name\n    for org_name in ORG_NAMES:\n        response = requests.get(\n            BASE_URL + '/katello/api/v2/organizations',\n            data=json.dumps({'search': 'name={}'.format(org_name)}),\n            auth=('admin', 'changeme'),\n            headers={'content-type': 'application/json'},\n            verify=False,\n        )\n        response.raise_for_status()\n        results = response.json()['results']\n        if len(results) != 1:\n            print(\n                'Expected to find one organization, but instead found {0}'\n                .format(results)\n            )\n            exit(1)\n        organizations[results[0]['id']] = org_name\n\n    # Discover which environments belong to those organizations.\n    for org_id, org_name in organizations.items():\n        response = requests.get(\n            BASE_URL + '/katello/api/v2/environments',\n            data=json.dumps({'organization_id': org_id}),\n            auth=('admin', 'changeme'),\n            headers={'content-type': 'application/json'},\n            verify=False,\n        )\n        response.raise_for_status()\n        results = response.json()['results']\n        print(\n            'There are {} environments in organization {} (ID {}): '\n            .format(len(results), org_name, org_id)\n        )\n        pprint.PrettyPrinter(indent=4).pprint(results)\n        print()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"pombredanne/nailgun","sub_path":"docs/get_org_envs.py","file_name":"get_org_envs.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"21867541976","text":"from app import db\nfrom app.api import bp\nfrom flask import jsonify, request, abort\n\nfrom app.models import Question, QuestionSchema, Answer, AnswerSchema, QuesAnswerSchema\n\nquestion_schema = QuestionSchema()\nquestions_schema = QuestionSchema(many=True)\nanswer_schema = AnswerSchema()\nanswers_schema = AnswerSchema(many=True)\n\nquestion_ans_schema = QuesAnswerSchema()  # Nested Schema of Question Schema with Answer Schema\nques_ans_schema = QuesAnswerSchema(many=True)\n\n\n@bp.route(\"/questions\", methods=['GET'])\n@bp.route(\"/questions/<int:page>\", methods=['GET'])\ndef get_questions(page=1):\n    per_page = 15\n    orderby = request.args.get('orderby')\n    questions = []\n    paginate_obj = None\n    if orderby == \"views\":\n        paginate_obj = Question.query.order_by(Question.ViewCount.desc()).paginate(page, per_page,\n                                                                                   error_out=False)\n        questions = paginate_obj.items\n    if orderby == \"score\":\n        paginate_obj = Question.query.order_by(Question.Score.desc()).paginate(page, per_page,\n                                                                               error_out=False)\n        questions = paginate_obj.items\n    if orderby not in (\"views\", \"score\"):\n        paginate_obj = Question.query.order_by(Question.CreationDate.desc()).paginate(page, per_page,\n                                                                                      error_out=False)\n        questions = paginate_obj.items\n    if len(questions) == 0:\n        abort(404, description='Page not found')\n    result = questions_schema.dump(questions)\n    total_pages = paginate_obj.pages\n    total_pages = {\n        'total_pages': total_pages\n    }\n    result.insert(0, total_pages)\n    return jsonify(result)\n\n\n@bp.route(\"/question/<int:question_id>\", methods=['GET'])\ndef question_details(question_id):\n    question = Question.query.filter_by(Id=question_id).first()\n    if question is None:\n        abort(404, description=\"Resource not found\")\n    return jsonify(question_ans_schema.dump(question))\n\n\n@bp.route(\"/search_q\", methods=['GET'])\ndef search_q():\n    search_term = request.args.get('q')\n    search_term = f\"%{search_term}%\"\n    questions = Question.query.filter((Question.Body.like(search_term)) | 
Question.Title.like(search_term)).all()\n    print(questions)\n    return jsonify(ques_ans_schema.dump(questions))\n\n\n@bp.route(\"/search_a\", methods=['GET'])\ndef search_a():\n    search_term = request.args.get('q')\n    search_term = f\"%{search_term}%\"\n    answers = Answer.query.filter(Answer.Body.like(search_term)).all()\n    print(answers)\n    response = []\n    for answer in answers:\n        if answer.question is not None:  # some answers in the xml file didn't have a question for them\n            dump = question_ans_schema.dump(answer.question)\n            response.append(dump)\n    return jsonify(response)\n\n\n@bp.errorhandler(404)\ndef resource_not_found(error):\n    return jsonify(error=str(error)), 404\n\n\n@bp.errorhandler(500)\ndef internal_error(error):\n    db.session.rollback()\n    return jsonify(error=str(error)), 500\n","repo_name":"Akshay090/bioinformatics-api","sub_path":"app/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4024121060","text":"from datetime import date\r\n\r\nano = int(input(' what year were you born? '))\r\natual = date.today().year\r\nidade = atual - ano\r\n\r\nprint(' whoever was born in {} is {} years old in {}'.format(ano,idade,atual))\r\nif idade == 18:\r\n    print('enlist ')\r\n\r\n","repo_name":"lidianehonorato/lidianehonorato","sub_path":"python 2/aula12alistamentomilitar.py","file_name":"aula12alistamentomilitar.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11077333277","text":"from werkzeug import serving, exceptions as wzexc\nimport sys\nif sys.version_info < (3,):\n    import httplib as httplibclient\n    import urlparse\nelse:\n    from http import client as httplibclient\n    import urllib.parse as urlparse\n\nfrom baltrad.bdbserver import backend\nfrom baltrad.bdbserver.web import auth, routing, util as webutil\n\nimport logging\nlogger = logging.getLogger(\"baltard.bdbserver.app\")\n\nclass Application(object):\n    def __init__(self, backend):\n        self.backend = backend\n        self.url_map = routing.URL_MAP\n    \n    def dispatch_request(self, request):\n        adapter = self.url_map.bind_to_environ(request.environ)\n        ctx = webutil.RequestContext(request, self.backend)\n        ctx.enable_remove_all_files = self.enable_remove_all_files\n        try:\n            endpoint, values = adapter.match()\n            handler = routing.get_handler(endpoint)\n            return handler(ctx, **values)\n        except wzexc.HTTPException as e:\n            logger.warning(\"HTTPException occurred: %s\", e)\n            return e\n        except Exception as e:\n            logger.exception(\"Unknown exception\")\n            raise\n    \n    def __call__(self, env, start_response):\n        request = webutil.Request(env)\n        response = self.dispatch_request(request)\n        return response(env, start_response)\n    \n    @classmethod\n    def from_conf(cls, backend, conf):\n        \"\"\"create instance from configuration.\n\n        :param conf: a :class:`~.config.Properties` instance to configure\n        from\n        \"\"\"\n        result = Application(backend)\n        result.enable_remove_all_files = conf.get_boolean(\n            \"baltrad.bdb.server.enable_remove_all_files\", False\n        )\n        return result\n\ndef from_conf(conf):\n    \"\"\"create the entire WSGI application from conf\n\n    this will wrap the application with necessary middleware\n    \"\"\"\n    be = backend.from_conf(conf)\n    if not be.is_operational():\n        raise SystemExit(\"backend is not operational\")\n    app = Application.from_conf(be, conf)\n    authmw = auth.AuthMiddleware.from_conf(app, conf)\n    return authmw\n    \ndef serve(uri, app):\n    
\"\"\"serve the application using werkzeug\n \"\"\"\n\n uri = urlparse.urlparse(uri)\n host = uri.hostname\n port = uri.port or 80\n\n serving.run_simple(host, port, app) \n","repo_name":"baltrad/baltrad-db","sub_path":"server/src/baltrad/bdbserver/web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7864130965","text":"#DO NOT TOUCH\nfrom os import system\nsystem('cls')\n#DO NOT TOUCH\n\n#Variable name convenctions\n\n# UpperCamelCase:\n# \t(The first Letter of every word in capital letters)\n# \tMyVariableName = \"This is a UpperCamelCase Variable\"\nMyVar = \"Hello World\"\nprint(MyVar)\n\n# lowerCamelCase\n# \t(The first letter is in lowercase, the rest of first letters go in upper case)\n# \tmyVariableName = \"this is a lowerCamelCase variable\"\nmyVar = \"Goodbye World\"\nprint(myVar)\n\n# snake_case:\n# \t(convention of words that are separed by underscore instead of spaces)\n# \tmy_variable_name = \"This is a a snake_case variable\"\nmy_var = \"How r you\"\nprint(my_var)\n","repo_name":"crypto-degenerate/PythonBasics","sub_path":"03-Variable-Name-Convenctions/VariableNameConvenctions.py","file_name":"VariableNameConvenctions.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25796459135","text":"# from Calibration_Util import DataHandler as dh \n# from Calibration_Util import FileIO as io\nimport keras.optimizers\nfrom keras.layers import Input, Dense, merge, Activation, add\nfrom keras.models import Model\nfrom keras import callbacks as cb\nimport numpy as np\nimport matplotlib\nfrom keras.layers.normalization import BatchNormalization\nfrom .. import CostFunctions as cf\n# import MultiMMD as m\nfrom .. 
import Monitoring as mn\nfrom keras.regularizers import l2\nfrom sklearn import decomposition\nfrom keras.callbacks import LearningRateScheduler\nimport math\nfrom ..visual import scatterHist as sh\nfrom keras import initializers\nimport tensorflow as tf\nimport keras.backend as K\n\nimport pandas as pd\n# from ggplot import *\n\nMMD = 'MMD'\nMULTI_MMD = 'MULTI_MMD'\n\n\nclass ResNet():\n    def __init__(self, layer_sizes=[20, 20], l2_penalty=1e-2):\n        self.layer_sizes = layer_sizes\n        self.l2_penalty = l2_penalty\n\n    def load_data(self, source_path, target_path):\n        self.source_df = pd.read_csv(source_path, sep=',', header=0, index_col=0)\n        self.target_df = pd.read_csv(target_path, sep=',', header=0, index_col=0)\n\n        self.source = self.source_df.loc[:, \"PC1\":].values\n        self.target = self.target_df.loc[:, \"PC1\":].values\n\n        self.inputDim = self.target.shape[1]\n\n    def init_res_net(self, target_sample_size=100, n_neighbors=10, val_split=0.1, cost=MMD):\n        # input\n        calibInput = Input(shape=(self.inputDim, ))\n\n        # block 1\n        block1_bn1 = BatchNormalization()(calibInput)\n        block1_a1 = Activation('relu')(block1_bn1)\n        block1_w1 = Dense(self.layer_sizes[1], activation='linear', kernel_regularizer=l2(self.l2_penalty),\n                          kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block1_a1)\n        block1_bn2 = BatchNormalization()(block1_w1)\n        block1_a2 = Activation('relu')(block1_bn2)\n        block1_w2 = Dense(self.layer_sizes[0], activation='linear', kernel_regularizer=l2(self.l2_penalty),\n                          kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block1_a2)\n        block1_output = add([block1_w2, calibInput])\n\n        # block 2\n        block2_bn1 = BatchNormalization()(block1_output)\n        block2_a1 = Activation('relu')(block2_bn1)\n        block2_w1 = Dense(self.layer_sizes[1], activation='linear', kernel_regularizer=l2(self.l2_penalty),\n                          kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block2_a1)\n        block2_bn2 = BatchNormalization()(block2_w1)\n        block2_a2 = Activation('relu')(block2_bn2)\n        block2_w2 = Dense(self.layer_sizes[0], activation='linear', kernel_regularizer=l2(self.l2_penalty),\n                          kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block2_a2)\n        block2_output = add([block2_w2, block1_output])\n\n        # block 3\n        block3_bn1 = BatchNormalization()(block2_output)\n        block3_a1 = Activation('relu')(block3_bn1)\n        block3_w1 = Dense(self.layer_sizes[1], activation='linear', kernel_regularizer=l2(self.l2_penalty),\n                          kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block3_a1)\n        block3_bn2 = BatchNormalization()(block3_w1)\n        block3_a2 = Activation('relu')(block3_bn2)\n        block3_w2 = Dense(self.layer_sizes[0], activation='linear', kernel_regularizer=l2(self.l2_penalty),\n                          kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block3_a2)\n        self.block3_output = add([block3_w2, block2_output])\n\n        self.calibMMDNet = Model(inputs=calibInput, outputs=self.block3_output)\n\n        def step_decay(epoch):\n            initial_lrate = 0.1\n            drop = 0.1\n            epochs_drop = 250.0\n            lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))\n            return lrate\n\n        self.lrate = LearningRateScheduler(step_decay)\n        optimizer = keras.optimizers.Adam()\n\n        if cost == MMD:\n            cost = cf.MMD(self.block3_output, self.target, MMDTargetValidation_split=val_split,\n                          MMDTargetSampleSize=target_sample_size, n_neighbors=n_neighbors)\n            source_labels = np.zeros(self.source.shape)\n        elif cost == MULTI_MMD:\n            tissue_map = {'breast': 0, 'thyroid': 1, 'prostate': 2}\n            tm = lambda t: tissue_map[t]\n            source_labels = self.source_df['tissue'].map(tm).values\n            
source_labels = np.repeat(source_labels, self.source.shape[1]).reshape(self.source.shape)\n\n            self.target_labels = self.target_df['tissue'].map(tm).values\n            cost = cf.MultiMMD(self.block3_output, self.target, self.target_labels, target_val_split=val_split, target_sample_size=target_sample_size, n_neighbors=n_neighbors)\n        else:\n            print(\"ERROR: you must specify a cost function\")\n            return\n\n        self.source_labels = source_labels\n        self.cost = cost\n\n        self.calibMMDNet.compile(optimizer=optimizer, loss=lambda y_true, y_pred: self.cost.KerasCost(y_true, y_pred))\n\n        K.get_session().run(tf.global_variables_initializer())\n\n    def train(self, epochs=2000, batch_size=20, validation_split=0.1, verbose=1, callbacks=[]):\n        # self.lrate, cb.EarlyStopping(monitor='val_loss', patience=50, mode='auto')\n        self.calibMMDNet.fit(self.source, self.source_labels, epochs=epochs, batch_size=batch_size, validation_split=validation_split, verbose=verbose, callbacks=callbacks)\n\n\n    def predict(self, data=None):\n        if data is None:\n            print(\"predicting on self.source\")\n            self.calibrated_source = self.calibMMDNet.predict(self.source)\n        else:\n            print(\"predicting on provided data\")\n            self.calibrated_source = self.calibMMDNet.predict(data)\n\n        self.calibrated_source_df = pd.DataFrame(self.calibrated_source, index=self.source_df.index, columns=self.source_df.columns[2:])\n\n        self.calibrated_source_df.insert(0, 'study', self.source_df['study'])\n        self.calibrated_source_df.insert(1, 'tissue', self.source_df['tissue'])\n\n    def pca(self):\n        pca = decomposition.PCA()\n\n        # data = np.append(self.target, self.source, axis=0)\n\n        pca.fit(self.target)\n        # pca.fit(data)\n\n        self.target_sample_pca = pca.transform(self.target)\n        self.projection_before = pca.transform(self.source)\n        self.projection_after = pca.transform(self.calibrated_source)\n\n        self.target_pca_df = pd.DataFrame(self.target_sample_pca, index=self.target_df.index, columns=self.target_df.columns[2:])\n        self.target_pca_df.insert(0, 'study', self.target_df['study'])\n        self.target_pca_df.insert(1, 'tissue', self.target_df['tissue'])\n\n        self.source_pca_df = pd.DataFrame(self.projection_before, index=self.source_df.index, columns=self.source_df.columns[2:])\n        self.source_pca_df.insert(0, 'study', self.source_df['study'])\n        self.source_pca_df.insert(1, 'tissue', self.source_df['tissue'])\n\n        self.calibrated_source_pca_df = pd.DataFrame(self.projection_after, index=self.source_df.index, columns=self.source_df.columns[2:])\n        self.calibrated_source_pca_df.insert(0, 'study', self.source_df['study'])\n        self.calibrated_source_pca_df.insert(1, 'tissue', self.source_df['tissue'])\n\n    def save_calibrated(self, path=''):\n        self.calibrated_source_df.to_csv(path)\n","repo_name":"tyrvi/rnaResNet","sub_path":"src/models/rna_resnet.py","file_name":"rna_resnet.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39656175732","text":"from sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom scipy.stats import norm\nimport statistics\n\ndef Years_comparation(climber_clean):\n    '''\n    This function compares the mean grade between men and women who have been climbing X years, using the Aspin-Welch t-test method \n    inputs:\n    climber_clean -> climbers dataframe\n    output:\n    A 
DataFrame of the comparison\n    '''\n    Z_list = []\n    y_list = []\n    Zc_list = []\n    Accept_list = []\n    alfa = 0.05\n\n    for i in range(1,23):\n        climber_clean2 = climber_clean[climber_clean.years_cl > i]\n        climber_men = climber_clean2[climber_clean2.sex == 0]\n        climber_women = climber_clean2[climber_clean2.sex == 1]\n        Z,df = Compare_means(climber_men.grades_mean,climber_women.grades_mean,show = False)\n        Zc = round(norm.ppf(1-alfa/2),2)\n        Z_list.append(Z)\n        y_list.append(i)\n        Zc_list.append(Zc)\n        Accept_list.append(Z < Zc)\n\n    years_comp = pd.DataFrame({\"Z\":Z_list,\"Zc\":Zc_list,\"Accept\":Accept_list,\"years\":y_list}) \n    return years_comp\n\n\ndef Compare_means(x1,x2,alfa = 0.05, show = True):\n    '''\n    This function returns the result of a Student's t calculation for 2 means\n    \n    Inputs:\n    x1 -> pandas Series\n    x2 -> another pandas Series\n    alfa -> significance level \n    show -> if true it plots the results\n    \n    Output:\n    t -> the t value of the comparison\n    values_df -> the df with the data analyzed\n    '''\n    def Get_params(x):\n        mean = round(np.mean(x),2)\n        sdev = round(np.std(x),2)\n        num = round(x.shape[0],2)\n        return mean,sdev,num\n\n    def Values_df(v_men,v_women):\n        met = {'metrics':['mean','sdev','num'],'men':v_men,'women':v_women}\n        return pd.DataFrame(met)\n\n    def t_calculation(mean1,mean2,sdv1,sdv2,n1,n2):\n        t = (mean1-mean2)/np.sqrt((sdv1**2/n1)+(sdv2**2/n2))\n        return t\n\n    values_df = Values_df(Get_params(x1),Get_params(x2))\n\n    t = np.round(t_calculation(values_df.men[0],values_df.women[0],\n                               values_df.men[1],values_df.women[1],\n                               values_df.men[2],values_df.women[2]),2)\n    \n    Zc = round(norm.ppf(1-alfa/2),2)\n\n    if (show == True):\n        display(values_df)\n        \n        if (t > Zc):\n            print(\"\\nt: \",t,\" > Zc:\",Zc)\n            print(\"We reject the Null Hypothesis Ho -> There are differences between them\")\n        elif (t < Zc):\n            print(\"\\nt: \",t,\" < Zc:\",Zc)\n            print(\"We accept the Null Hypothesis Ho -> There are no differences between them\")\n\n        x_axis = np.arange(20, 90, 0.01)\n        plt.plot(x_axis, norm.pdf(x_axis, values_df.men[0], values_df.men[1]))\n        plt.plot(x_axis, norm.pdf(x_axis, values_df.women[0], values_df.women[1]))\n        plt.xlabel('Grades')\n        plt.legend('mw')\n        plt.title('Mean Comparative')\n        plt.show()\n\n    return t,values_df\n\n\ndef MetricsResults (y_train, y_pred_train,y_test,y_pred_test):\n    '''\n    This function returns a DataFrame with all the metrics computed from the train and test true and predicted values \n    '''\n    \n    def mean_absolute_percentage_error(y_true, y_pred): \n        y_true, y_pred = np.array(y_true), np.array(y_pred)\n        return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n    \n    def Metrics_df(R_train,R_test):\n        met = {'metrics':['R2','MSE','RMSE','MAE','MAPE'],'Train':R_train,'Test':R_test}\n        return pd.DataFrame(met)\n    \n    def Metrics(y_true, y_pred):\n        R2 = round(r2_score(y_true, y_pred),2)\n        MSE = round(mean_squared_error(y_true, y_pred, squared=True),2)\n        RMSE = round(mean_squared_error(y_true, y_pred, squared=False),2)\n        MAE = round(mean_absolute_error(y_true, y_pred),2)\n        MAPE = round(mean_absolute_percentage_error(y_true, y_pred),2)\n        return [R2,MSE,RMSE,MAE,MAPE]\n\n    return Metrics_df(Metrics(y_train, y_pred_train),Metrics(y_test, y_pred_test))\n\n\ndef r2_train_test(X_normalized_train_age,X_normalized_test_age,y_train_age, y_test_age,k_max = 3,weights = 'uniform'):\n    '''\n    This function is used to check how the R2 score behaves on the train and test sets for the KNN model \n    '''\n    \n    \n    r2_train = []\n    r2_test = []\n    n_neighbors = list(range(1,k_max))\n    for k in n_neighbors:\n        knn = 
KNeighborsRegressor(n_neighbors=k, weights = weights) \n knn.fit(X_normalized_train_age,y_train_age)\n\n pred = knn.predict(X_normalized_train_age) \n r2_train.append(r2_score(y_train_age, pred))\n\n pred = knn.predict(X_normalized_test_age) \n r2_test.append(r2_score(y_test_age, pred))\n\n plt.scatter(n_neighbors,r2_train)\n plt.scatter(n_neighbors,r2_test)\n plt.xlabel(\"K number\")\n plt.ylabel(\"R2\")\n plt.legend(['train','test'])\n plt.show()","repo_name":"jordi-zaragoza/Climbing-Data-Analysis","sub_path":"src/metrics_jor.py","file_name":"metrics_jor.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"12769410463","text":"# pylint: disable=E1101, E0611\n#! /usr/bin/python\n\"\"\"QtDesigner test\"\"\"\nfrom __future__ import print_function\nimport sys\nimport time\nimport threading\nimport socket\nimport pickle\nfrom PyQt4 import Qt, QtCore\nfrom PyQt4.QtGui import QWidget\nfrom temperature_controller_gui_2 import Ui_temp_control\nfrom PyExpLabSys.common.plotters import DataPlotter\nfrom PyExpLabSys.common.supported_versions import python2_only\nimport temperature_controller_config as config\npython2_only(__file__)\n\n\nclass TemperatureControllerComm(threading.Thread):\n \"\"\" Communicates with temperature controller over network \"\"\"\n def __init__(self):\n threading.Thread.__init__(self)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.settimeout(0.5)\n self.running = True\n self.status = {}\n self.status['temperature'] = 0\n self.status['setpoint'] = 0\n self.status['dutycycle'] = 0\n self.status['connected'] = False\n self.status['temp_connected'] = False\n\n def read_param(self, param):\n \"\"\" Read a parameter from the controller \"\"\"\n data = param + '#raw'\n error = 1\n # TODO: Investigate the reason for these network errors\n while (error < 50) and (error > 0):\n time.sleep(0.1)\n self.sock.sendto(data, (config.controller_hostname, config.controller_pull_port))\n received = self.sock.recv(1024)\n try:\n value = float(received[received.find(',') + 1:])\n error = 0\n #print 'Error: ' + str(error)\n except ValueError:\n error = error + 1\n #print 'Error: ' + str(error)\n value = -1\n return value\n\n def run(self):\n while self.running is True:\n try:\n self.status['temperature'] = self.read_param('temperature')\n self.status['temp_connected'] = True\n except socket.error:\n self.status['temp_connected'] = False\n try:\n self.status['dutycycle'] = self.read_param('dutycycle')\n print(self.status['dutycycle'])\n self.status['setpoint'] = self.read_param('setpoint')\n self.status['connected'] = True\n except socket.error:\n self.status['connected'] = False\n if not self.status['temp_connected']:\n self.status['connected'] = False\n time.sleep(0.2)\n\n\nclass SimplePlot(QWidget):\n \"\"\"Simple example with a Qwt plot in a Qt GUI\"\"\"\n def __init__(self, temp_control_comp):\n super(SimplePlot, self).__init__()\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.settimeout(0.5)\n\n self.tcc = temp_control_comp\n\n # Set up the user interface from Designer.\n self.gui = Ui_temp_control()\n self.gui.setupUi(self)\n\n # Init local variables\n self.scale = 1E-8\n self.active = False\n self.start = None\n self.ramp_start = 0\n self.ramp = {}\n self.ramp['time'] = {}\n self.ramp['temp'] = {}\n self.ramp['step'] = {}\n # Set up plot (using pretty much all the possible options)\n self.plots_l = ['temperature', 'setpoint']\n 
self.plots_r = ['dutycycle']\n self.plotter = DataPlotter(\n self.plots_l, right_plotlist=self.plots_r, parent=self,\n left_log=False, title='Temperature control',\n yaxis_left_label='Temperature', yaxis_right_label='Dutycycle',\n xaxis_label='Time since start [s]',\n legend='right', left_thickness=[2, 3], right_thickness=2,\n left_colors=['firebrick', 'darkolivegreen'],\n right_colors=['darksalmon'])\n self.gui.horizontalLayout.removeWidget(self.gui.place_holder_qwt)\n self.gui.place_holder_qwt.setParent(None)\n self.gui.horizontalLayout.addWidget(self.plotter.plot)\n\n # Connect signals\n QtCore.QObject.connect(self.gui.start_ramp_button,\n QtCore.SIGNAL('clicked()'),\n self.on_start_ramp)\n QtCore.QObject.connect(self.gui.stop_ramp_button,\n QtCore.SIGNAL('clicked()'),\n self.on_stop_ramp)\n QtCore.QObject.connect(self.gui.start_button,\n QtCore.SIGNAL('clicked()'),\n self.on_start)\n QtCore.QObject.connect(self.gui.stop_button,\n QtCore.SIGNAL('clicked()'),\n self.on_stop)\n QtCore.QObject.connect(self.gui.quit_button,\n QtCore.SIGNAL('clicked()'),\n QtCore.QCoreApplication.instance().quit)\n QtCore.QObject.connect(self.gui.new_setpoint,\n QtCore.SIGNAL('returnPressed()'),\n self.update_setpoint)\n def on_start(self):\n \"\"\"Start button method\"\"\"\n print('<< start pressed >>')\n if not self.active:\n self.start = time.time()\n self.active = True\n # Reset plot\n for key in self.plotter.data.keys():\n self.plotter.data[key] = []\n QtCore.QTimer.singleShot(0, self.plot_iteration)\n else:\n print('...already running!')\n\n def update_setpoint(self):\n \"\"\"Update setpoint button method\"\"\"\n print('<< Updating setpoint >>')\n new_setpoint = self.gui.new_setpoint.text()\n try:\n float(new_setpoint)\n except ValueError:\n message = '...ValueError: {}\\nOriginal setpoint used instead.'.format(repr(new_setpoint))\n new_setpoint = str(self.tcc.status['setpoint'])\n print(message)\n self.gui.new_setpoint.setProperty(\"text\", new_setpoint)\n data = 'raw_wn#setpoint:float:' + str(new_setpoint)\n self.sock.sendto(data, (config.controller_hostname, config.controller_push_port))\n received = self.sock.recv(1024)\n print(received)\n\n\n def on_start_ramp(self):\n \"\"\"Start temperature ramp\"\"\"\n print('<< Start ramp pressed >>')\n print('Current ramp settings:')\n print(self.ramp)\n self.ramp_start = time.time()\n for i in range(0, 11):\n self.ramp['time'][i] = int(self.gui.temperature_ramp.item(i, 0).text())\n self.ramp['temp'][i] = int(self.gui.temperature_ramp.item(i, 1).text())\n self.ramp['step'][i] = int(self.gui.temperature_ramp.item(i, 2).checkState()) == 2\n data = 'raw_wn#ramp:str:' + pickle.dumps(self.ramp)\n print(data)\n self.sock.sendto(data, (config.controller_hostname, config.controller_push_port))\n received = self.sock.recv(1024)\n print(received)\n print('New ramp settings:')\n print(self.ramp)\n\n def on_stop_ramp(self):\n \"\"\"Stop temperature ramp\"\"\"\n print('<< Stop ramp pressed >>')\n data = 'raw_wn#ramp:str:stop'\n self.sock.sendto(data, (config.controller_hostname, config.controller_push_port))\n received = self.sock.recv(1024)\n print(received)\n\n def on_stop(self):\n \"\"\"Stop button method\"\"\"\n print('<< Stop pressed >>')\n self.active = False\n\n def plot_iteration(self):\n \"\"\"method that emulates a single data gathering and plot update\"\"\"\n elapsed = time.time() - self.start\n if self.tcc.status['connected'] is True:\n self.gui.temperature.setProperty(\"text\", str(self.tcc.status['temperature']) + 'C')\n self.gui.power.setProperty(\"text\", 
str(self.tcc.status['dutycycle']) + 'W')\n            self.gui.setpoint.setProperty(\"text\", str(self.tcc.status['setpoint']) + 'C')\n\n        else:\n            self.gui.current.setProperty(\"text\", '-')\n            self.gui.voltage.setProperty(\"text\", '-')\n            self.gui.temperature.setProperty(\"text\", '-')\n            self.gui.power.setProperty(\"text\", '-')\n            self.gui.resistance.setProperty(\"text\", '-')\n            self.gui.setpoint.setProperty(\"text\", '-')\n        try:\n            if self.tcc.status['temp_connected'] is True:\n                self.plotter.add_point('temperature',\n                                       (elapsed, self.tcc.status['temperature']))\n            if self.tcc.status['connected'] is True:\n                self.plotter.add_point('setpoint', (elapsed, self.tcc.status['setpoint']))\n                self.plotter.add_point('dutycycle', (elapsed, self.tcc.status['dutycycle']))\n        except TypeError:\n            pass\n\n        if self.active:\n            # Under normal circumstances we would not add a delay\n            QtCore.QTimer.singleShot(500, self.plot_iteration)\n\n\ndef main():\n    \"\"\"Main method\"\"\"\n    tcc = TemperatureControllerComm()\n    tcc.start()\n\n    app = Qt.QApplication(sys.argv)\n    testapp = SimplePlot(tcc)\n    testapp.show()\n    app.exec_()\n    tcc.running = False\n\nif __name__ == '__main__':\n\n    main()\n","repo_name":"PenelopeJones/batteries","sub_path":"PyExpLabSys/machines/furnaceroom/temp_control.py","file_name":"temp_control.py","file_ext":"py","file_size_in_byte":9077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22129585634","text":"import time\nfrom typing import Any, Dict, List, Optional, Union\n\nimport lightning\nimport torch\n\nfrom lit_llms.moving_average import MovingAverage\n\n\nclass GPUMonitoringCallback(lightning.pytorch.callbacks.Callback):\n    \"\"\"Monitors the GPU utilization and memory usage per rank, together with the processing time per batch, to be\n    consumed by other callbacks.\"\"\"\n\n    def __init__(\n        self,\n        gpu_memory_logname: str = \"gpu_stats/max_memory\",\n        gpu_util_logname: str = \"gpu_stats/utilization\",\n        time_per_batch_logname: str = \"time/seconds_per_iter\",\n    ):\n        super().__init__()\n        self.last_batch_start_time: Optional[float] = None\n        self.gpu_utilizations10: List[MovingAverage] = []\n        self.gpu_utilizations100: List[MovingAverage] = []\n        self.running_utilizations_per_batch: List[Union[torch.Tensor, float]] = []\n\n        self.seconds_per_iter10 = MovingAverage(window_size=10, sync_on_compute=False)\n        self.seconds_per_iter100 = MovingAverage(window_size=100, sync_on_compute=False)\n\n        self.gpu_memory_logname = gpu_memory_logname\n        self.gpu_util_logname = gpu_util_logname\n        self.time_per_batch_logname = time_per_batch_logname\n\n    def _reset_running_utilizations(self) -> None:\n        self.running_utilizations_per_batch = []\n\n    def _init_gpu_util_trackers(self, world_size: int) -> None:\n        if not self.gpu_utilizations10:\n            for _ in range(world_size):\n                self.gpu_utilizations10.append(MovingAverage(window_size=10, sync_on_compute=False))\n        if not self.gpu_utilizations100:\n            for _ in range(world_size):\n                self.gpu_utilizations100.append(MovingAverage(window_size=100, sync_on_compute=False))\n\n    @torch.no_grad()\n    def on_train_batch_start(\n        self,\n        trainer: lightning.pytorch.Trainer,\n        pl_module: lightning.pytorch.LightningModule,\n        batch: Any,\n        batch_idx: int,\n    ) -> None:\n        self._init_gpu_util_trackers(trainer.world_size)\n\n        metrics = {}\n\n        # only calc time after first batch\n        if batch_idx:\n            curr_time = time.time()\n            assert self.last_batch_start_time is not None\n            time_delta = curr_time - self.last_batch_start_time\n            avg_time_delta = 
torch.tensor(trainer.strategy.reduce(time_delta), dtype=torch.float)\n self.seconds_per_iter10.update(avg_time_delta)\n self.seconds_per_iter100.update(avg_time_delta)\n self.last_batch_start_time = curr_time\n\n metrics[self.time_per_batch_logname] = avg_time_delta\n metrics[f\"{self.time_per_batch_logname}{self._average_postfix(10)}\"] = self.seconds_per_iter10.compute()\n metrics[f\"{self.time_per_batch_logname}{self._average_postfix(100)}\"] = self.seconds_per_iter100.compute()\n\n # collect the metrics on the current rank\n device = trainer.strategy.root_device\n\n max_memory = torch.tensor(torch.cuda.max_memory_allocated(), device=device, dtype=torch.float) / (\n 1024**3\n ) # in GB\n torch.cuda.reset_max_memory_allocated()\n\n # gather the metrics from all processes\n max_memory_total_rank = trainer.strategy.all_gather(max_memory)\n\n if self.running_utilizations_per_batch:\n curr_utils = sum(self.running_utilizations_per_batch) / len(self.running_utilizations_per_batch)\n curr_utils_total_rank = trainer.strategy.all_gather(curr_utils)\n self._reset_running_utilizations()\n\n # the metrics are in an N x 1 tensor where N is the total number of processes\n assert curr_utils_total_rank.size(0) == trainer.world_size\n else:\n curr_utils_total_rank = None\n\n # bookkeeping and compute statistics for each rank\n for i in range(trainer.world_size):\n metrics[f\"{self.gpu_memory_logname}_rank{i}\"] = max_memory_total_rank[i]\n if curr_utils_total_rank is not None:\n metrics[f\"{self.gpu_util_logname}_rank{i}\"] = curr_utils_total_rank[i]\n self.gpu_utilizations10[i].update(curr_utils_total_rank[i])\n self.gpu_utilizations100[i].update(curr_utils_total_rank[i])\n\n # update counts have to be the same for 10 and 100 metrics\n # check for protected and public because of https://github.com/Lightning-AI/metrics/pull/1370\n curr_update_count = getattr(\n self.gpu_utilizations10[i],\n \"_update_count\",\n getattr(self.gpu_utilizations10[i], \"update_count\", 1),\n )\n if curr_update_count > 10:\n metrics[f\"{self.gpu_util_logname}_rank{i}{self._average_postfix(10)}\"] = self.gpu_utilizations10[\n i\n ].compute()\n if curr_update_count > 100:\n metrics[f\"{self.gpu_util_logname}_rank{i}{self._average_postfix(100)}\"] = self.gpu_utilizations100[\n i\n ].compute()\n\n pl_module.log_dict(metrics, sync_dist=False, on_step=True, on_epoch=False, rank_zero_only=True)\n\n trainer.strategy.barrier()\n self._get_current_utilisation(trainer)\n self.last_batch_start_time = time.time()\n\n @torch.no_grad()\n def on_train_batch_end(\n self,\n trainer: lightning.pytorch.Trainer,\n pl_module: lightning.pytorch.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n ) -> None:\n self._get_current_utilisation(trainer)\n\n @torch.no_grad()\n def on_before_backward(\n self, trainer: lightning.pytorch.Trainer, pl_module: lightning.pytorch.LightningModule, loss: torch.Tensor\n ) -> None:\n self._get_current_utilisation(trainer)\n\n @torch.no_grad()\n def on_after_backward(\n self, trainer: lightning.pytorch.Trainer, pl_module: lightning.pytorch.LightningModule\n ) -> None:\n self._get_current_utilisation(trainer)\n\n @torch.no_grad()\n def on_before_optimizer_step(\n self,\n trainer: lightning.pytorch.Trainer,\n pl_module: lightning.pytorch.LightningModule,\n optimizer: torch.optim.Optimizer,\n opt_idx: int = 0,\n ) -> None:\n self._get_current_utilisation(trainer)\n\n @torch.no_grad()\n def on_before_zero_grad(\n self,\n trainer: lightning.pytorch.Trainer,\n pl_module: lightning.pytorch.LightningModule,\n 
optimizer: torch.optim.Optimizer,\n    ) -> None:\n        self._get_current_utilisation(trainer)\n\n    def _get_current_utilisation(self, trainer: lightning.pytorch.Trainer) -> None:\n        self.running_utilizations_per_batch.append(\n            torch.tensor(\n                torch.cuda.utilization(),\n                device=trainer.strategy.root_device,\n                dtype=torch.float,\n            )\n        )\n\n    def on_save_checkpoint(\n        self,\n        trainer: lightning.pytorch.Trainer,\n        pl_module: lightning.pytorch.LightningModule,\n        checkpoint: Dict[str, Any],\n    ) -> None:\n        for name_str in (\"gpu_utilizations10\", \"gpu_utilizations100\"):\n            checkpoint[name_str] = [metric.state_dict() for metric in getattr(self, name_str)]\n\n        for name_str in (\"seconds_per_iter10\", \"seconds_per_iter100\"):\n            checkpoint[name_str] = getattr(self, name_str).state_dict()\n\n    def on_load_checkpoint(\n        self,\n        trainer: lightning.pytorch.Trainer,\n        pl_module: lightning.pytorch.LightningModule,\n        checkpoint: Dict[str, Any],\n    ) -> None:\n        self._init_gpu_util_trackers(trainer.world_size)\n        for name_str in (\"gpu_utilizations10\", \"gpu_utilizations100\"):\n            for metric, state in zip(getattr(self, name_str), checkpoint.pop(name_str, [])):\n                metric.load_state_dict(state)\n\n        for name_str in (\"seconds_per_iter10\", \"seconds_per_iter100\"):\n            getattr(self, name_str).load_state_dict(checkpoint.pop(name_str, {}))\n\n    @staticmethod\n    def _average_postfix(average_window: int) -> str:\n        return f\"_averaged{average_window}\"\n","repo_name":"Lightning-Universe/lightning-LLMs","sub_path":"lit_llms/callbacks/monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":8093,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"70187648573","text":"from gfootball.env import football_action_set\nfrom gfootball.env import football_env\nfrom gfootball.env import config\n\nfrom absl import app\nfrom absl import flags\nimport copy\nimport six.moves.cPickle\nimport tempfile\nimport tensorflow as tf\nimport os\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('trace_file', '', 'Trace file to replay')\nflags.DEFINE_integer('fps', 10, 'How many frames per second to render')\n\n\ndef modify_trace(replay):\n  \"\"\"Adapt replay to the new framerate and add additional steps at the end.\"\"\"\n  trace = []\n  min_fps = replay[0]['debug']['config']['physics_steps_per_frame']\n  assert FLAGS.fps % min_fps == 0, (\n      'Trace has to be rendered at a framerate that is a multiple of {}'.format(\n          min_fps))\n  assert FLAGS.fps <= 100, ('Framerates of up to 100 are supported')\n  empty_steps = int(FLAGS.fps / min_fps) - 1\n  for f in replay:\n    trace.append(f)\n    idle_step = copy.deepcopy(f)\n    idle_step['debug']['action'] = [football_action_set.action_idle\n                                   ] * len(f['debug']['action'])\n    for _ in range(empty_steps):\n      trace.append(idle_step)\n  # Add some empty steps at the end, so that we can record videos.\n  for _ in range(10):\n    trace.append(idle_step)\n  return trace\n\ndef build_players(dump_file, spec):\n  players = []\n  for player in spec:\n    player_type = 'replay:path={},players=1'.format(dump_file)\n    for _ in range(config.parse_number_of_players(player)):\n      players.append(player_type)\n  return players\n\ndef replay(directory, dump, config_update={}):\n  with open(dump, 'rb') as f:\n    replay = six.moves.cPickle.load(f)\n  trace = modify_trace(replay)\n  fd, temp_path = tempfile.mkstemp(suffix='.dump')\n  with tf.gfile.Open(temp_path, 'wb') as f:\n    six.moves.cPickle.dump(trace, f)\n  assert replay[0]['debug']['frame_cnt'] == 1, (\n      'Trace does not start from the beginning of 
the episode, can not replay')\n cfg = config.Config(replay[0]['debug']['config'])\n cfg['left_players'] = build_players(temp_path, cfg['left_players'])\n cfg['right_players'] = build_players(temp_path, cfg['right_players'])\n config_update['physics_steps_per_frame'] = int(100 / FLAGS.fps)\n config_update['real_time'] = False\n if 'render' not in config_update:\n config_update['render'] = True\n config_update['tracesdir'] = directory\n config_update['write_video'] = True\n cfg.update(config_update)\n env = football_env.FootballEnv(cfg)\n env.reset()\n done = False\n try:\n while not done:\n _, _, done, _ = env.step(None)\n except KeyboardInterrupt:\n env.write_dump('shutdown')\n exit(1)\n os.close(fd)\n\ndef main(_):\n replay('/tmp/dumps', FLAGS.trace_file)\n\nif __name__ == '__main__':\n app.run(main)\n","repo_name":"jgromero/football","sub_path":"gfootball/replay.py","file_name":"replay.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12163063006","text":"# This script creates all the agents\n\nimport os\nimport sys\nimport optparse\nimport re\n\ndef run():\n setUp()\n\ndef setUp():\n # Get network file to parse\n fileName = input(\"Please enter the name of the desired network file: \")\n #ADD error checking for input (ensure it's a valid network file)\n\n # Open desired file\n f = open(fileName, \"r\")\n \n tlAgentPools = []\n trafficLightDict = {}\n edges = []\n # Parse file to gather information\n for x in f:\n # Determine agent pools\n if \"0)*(lmbdas[:,1]>0)*(lmbdas[:,2]>0)\n valid = positives*(FA < 1.0)*(FA > 0.0)\n valid = valid.reshape((M1, M2, M3))\n\n # Find all voxels with invalid tensors within the mask\n ii, jj, kk = numpy.where((~valid)*mask)\n print(\"Number of invalid tensor voxels within the mask ROI: \", len(ii)) \n\n # Reshape D from N x 3 x 3 to M1 x M2 x M3 x 9\n D = D.reshape((M1,M2,M3,9))\n\n return valid, mask, D\n\nif __name__ =='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dti',type=str) \n parser.add_argument('--mask', type=str) \n parser.add_argument('--order', default=0, type=int) \n\n Z = parser.parse_args()\n\n check_dti_data(Z.dti, Z.mask, Z.order)\n\n\n\n\n\n","repo_name":"kent-and/mri2fem","sub_path":"mri2fem/mri2fem/chp5/check_dti.py","file_name":"check_dti.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"26194256404","text":"import discord\nimport asyncio\nimport random\n\ntoken = \"\"\n\ndiscordClient = discord.Client()\n\n@discordClient.event\nasync def on_ready():\n print('Logged in as')\n print(discordClient.user.name)\n print(discordClient.user.id)\n print('------')\n\n@discordClient.event\nasync def on_message(message):\n if message.content.startswith(\".c \", 0, 3):\n messageString = message\n choices = message.content[3:].split(',')\n randomChoice = random.choice(choices)\n\n if (randomChoice.startswith(' ') or randomChoice.endswith(' ')):\n randomChoice = randomChoice.strip(' ')\n #print ('\\'' + randomChoice + '\\'')\n\n await discordClient.send_message(message.channel, randomChoice)\n\ndiscordClient.run(token)\n","repo_name":"Smoothtalk/Smooth-Discord-Bots","sub_path":"choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2384391153","text":"import scrapy\nfrom lxml import 
etree\nfrom Design.items import MemberSpiderSpiderItem\n\nclass MemberSpiderSpider(scrapy.Spider):\n name = 'member_spider'\n allowed_domains = ['ac.nowcoder.com']\n custom_settings = {\n 'ITEM_PIPELINES': {'Design.pipelines.MemberSpiderSpiderPipeline': 300},\n }\n suffix = '/practice-coding'\n count = 0\n all = 0\n\n def start_requests(self):\n with open('search_page.txt', 'r', encoding='utf-8') as fp:\n self.k = fp.read()\n ins = set()\n with open(r'url_member\\url_member{}.txt'.format(self.k),'r',encoding='utf-8') as fp:\n for item in fp:\n if item in ins: continue\n ins.add(item)\n self.all = self.all + 2\n yield scrapy.Request(\n url = item,\n callback = self.parse,\n meta = {'id':item[44:].strip()}\n )\n yield scrapy.Request(\n url = item+self.suffix,\n callback = self.parse,\n meta = {'id':item[44:].strip()}\n )\n\n def parse(self, response):\n self.count = self.count + 1\n print('member_now:',format(self.count/self.all*100,'.3f'),'%')\n id = response.meta['id']\n response = etree.HTML(response.text)\n item = MemberSpiderSpiderItem()\n try:\n item['id'] = id\n if not response.xpath('/html/body/div[1]/div[2]/div[2]/div/ul/li/a/text()'):\n item['name'] = ' ' + response.xpath('/html/body/div/div[2]/div[1]/div[1]/div/div[2]/div[1]/a[1]/text()')[0]\n item['rating'] = ' ' + response.xpath('/html/body/div/div[2]/div[2]/section/div[1]/div[1]/div/text()')[0]\n item['rank'] = ' ' + response.xpath('/html/body/div/div[2]/div[2]/section/div[1]/div[2]/div/text()')[0]\n if item['rank'] == ' 暂无' or item['rank'] == ' 9999+': item['rank'] = ' 10000000'\n item['rating_contest'] = ' ' + response.xpath('/html/body/div/div[2]/div[2]/section/div[1]/div[3]/div/text()')[0]\n item['contest'] = ' ' + response.xpath('/html/body/div/div[2]/div[2]/section/div[1]/div[4]/div/text()')[0]\n item['attention'] = ' ' + response.xpath('/html/body/div/div[2]/div[1]/div[1]/div/div[2]/div[2]/div[3]/div/a[1]/text()')[0]\n item['fans'] = ' ' + response.xpath('/html/body/div/div[2]/div[1]/div[1]/div/div[2]/div[2]/div[3]/div/a[2]/text()')[0]\n item['challenge'] = False\n item['accept'] = False\n item['summit'] = False\n item['ac_rate'] = False\n else:\n item['name'] = False\n item['rating'] = False\n item['rank'] = False\n item['rating_contest'] = False\n item['contest'] = False\n item['attention'] = False\n item['fans'] = False\n item['challenge'] = ' ' + response.xpath('/html/body/div[1]/div[2]/div[2]/section/div[1]/div[1]/div/text()')[0]\n item['accept'] = ' ' + response.xpath('/html/body/div[1]/div[2]/div[2]/section/div[1]/div[2]/div/text()')[0]\n item['summit'] = ' ' + response.xpath('/html/body/div[1]/div[2]/div[2]/section/div[1]/div[3]/div/text()')[0]\n item['ac_rate'] = ' ' + response.xpath('/html/body/div[1]/div[2]/div[2]/section/div[1]/div[4]/div/text()')[0]\n except Exception:\n item['name'] = False\n item['rating'] = False\n item['rank'] = False\n item['rating_contest'] = False\n item['contest'] = False\n item['attention'] = False\n item['fans'] = False\n item['challenge'] = False\n item['accept'] = False\n item['summit'] = False\n item['ac_rate'] = False\n\n yield item\n pass\n","repo_name":"KRK11/NowcoderScrapy","sub_path":"Design/spiders/member_spider.py","file_name":"member_spider.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23995350806","text":"\"\"\"Tools for working with NIDM-Experiment files\"\"\"\n\nimport click\nfrom rdflib import Graph, util\nfrom nidm.core import Constants\nfrom 
nidm.experiment.Query import GetParticipantIDs\nfrom nidm.experiment.tools.click_base import cli\n\n\n# adding click argument parsing\n@cli.command()\n@click.option(\n \"--nidm_file_list\",\n \"-nl\",\n required=True,\n help=\"A comma separated list of NIDM files with full path\",\n)\n@click.option(\n \"--s\",\n \"-s\",\n required=False,\n is_flag=True,\n help=\"If parameter set then files will be merged by ndar:src_subject_id of prov:agents\",\n)\n@click.option(\n \"--out_file\", \"-o\", required=True, help=\"File to write concatenated NIDM files\"\n)\ndef merge(nidm_file_list, s, out_file):\n \"\"\"\n This function will merge NIDM files. See command line parameters for supported merge operations.\n \"\"\"\n\n # graph = Graph()\n # for nidm_file in nidm_file_list.split(','):\n # graph.parse(nidm_file,format=util.guess_format(nidm_file))\n\n # create empty graph\n graph = Graph()\n # start with the first NIDM file and merge the rest into the first\n first = True\n for nidm_file in nidm_file_list.split(\",\"):\n # if merging by subject:\n if s:\n if first:\n # get list of all subject IDs\n first_file_subjids = GetParticipantIDs([nidm_file])\n first = False\n first_graph = Graph()\n first_graph.parse(nidm_file, format=util.guess_format(nidm_file))\n else:\n # load second graph\n graph.parse(nidm_file, format=util.guess_format(nidm_file))\n\n # get list of second file subject IDs\n GetParticipantIDs([nidm_file])\n\n # for each UUID / subject ID look in graph and see if you can find the same ID. If so get the UUID of\n # that prov:agent and change all the UUIDs in nidm_file to match then concatenate the two graphs.\n query = f\"\"\"\n\n PREFIX prov: <http://www.w3.org/ns/prov#>\n PREFIX sio: <http://semanticscience.org/resource/>\n PREFIX ndar: <https://ndar.nih.gov/api/datadictionary/v2/dataelement/>\n PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n\n SELECT DISTINCT ?uuid ?ID\n WHERE {{\n\n ?uuid a prov:Agent ;\n {Constants.NIDM_SUBJECTID} ?ID .\n FILTER(?ID =\n \"\"\"\n\n # add filters to above query to only look for subject IDs which are in the first file to merge into\n temp = True\n for ID in first_file_subjids[\"ID\"]:\n if temp:\n query = query + '\"' + ID + '\"'\n temp = False\n else:\n query = query + '|| ?ID= \"' + ID + '\"'\n\n query = query + \") }\"\n\n qres = graph.query(query)\n\n # if len(qres) > 0 then we have matches so load the nidm_file into a temporary graph so we can\n # make changes to it then concatenate it.\n if len(qres) > 0:\n # tmp = Graph()\n # tmp.parse(nidm_file,format=util.guess_format(nidm_file))\n\n # for each ID in the merged graph that matches an ID in the nidm_file graph\n for row in qres:\n # find ID from first file that matches ID in this file\n t = first_file_subjids[\"ID\"].str.match(row[\"ID\"])\n # then get uuid for that match from first file\n uuid_replacement = first_file_subjids.iloc[\n [*filter(t.get, t.index)][0], 0\n ]\n\n for s, p, o in graph.triples((None, None, None)):\n if s == row[\"uuid\"]:\n # print(f\"replacing subject in triple {s} {p} {o} with {uuid_to_replace}\")\n graph.add((uuid_replacement, p, o))\n graph.remove((row[\"uuid\"], p, o))\n elif o == row[\"uuid\"]:\n # print(f\"replacing object in triple {s} {p} {o} with {uuid_to_replace}\")\n graph.add((s, p, uuid_replacement))\n graph.remove((s, p, row[\"uuid\"]))\n elif p == row[\"uuid\"]:\n # print(f\"replacing predicate in triple {s} {p} {o} with {uuid_to_replace}\")\n graph.add((s, uuid_replacement, o))\n graph.remove((s, row[\"uuid\"], o))\n\n # merge updated graph\n\n graph = first_graph + graph\n\n graph.serialize(out_file, format=\"turtle\")\n\n\nif __name__ == \"__main__\":\n 
merge()\n","repo_name":"incf-nidash/PyNIDM","sub_path":"src/nidm/experiment/tools/nidm_merge.py","file_name":"nidm_merge.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"2054400846","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.setFixedSize(887, 611)\n Form.setStyleSheet(\"\")\n self.AddBookButton = QtWidgets.QPushButton(Form)\n self.AddBookButton.setGeometry(QtCore.QRect(770, 570, 101, 23))\n self.AddBookButton.setStyleSheet(\"background: rgb(255, 255, 255); border-radius: 5px;\")\n self.AddBookButton.setObjectName(\"AddBookButton\")\n self.SwipeLeftButton = QtWidgets.QPushButton(Form)\n self.SwipeLeftButton.setGeometry(QtCore.QRect(10, 570, 51, 20))\n self.SwipeLeftButton.setStyleSheet(\"background: rgb(255, 255, 255); border-radius: 5px;\")\n self.SwipeLeftButton.setObjectName(\"SwipeLeftButton\")\n self.SwipeRightButton = QtWidgets.QPushButton(Form)\n self.SwipeRightButton.setGeometry(QtCore.QRect(70, 570, 51, 20))\n self.SwipeRightButton.setStyleSheet(\"background: rgb(255, 255, 255); border-radius: 5px;\")\n self.SwipeRightButton.setObjectName(\"SwipeRightButton\")\n self.SortedAutorButton = QtWidgets.QPushButton(Form)\n self.SortedAutorButton.setGeometry(QtCore.QRect(800, 10, 21, 20))\n self.SortedAutorButton.setStyleSheet(\"background: rgb(255, 255, 255); border-radius: 10px;\")\n self.SortedAutorButton.setObjectName(\"SortedAutorButton\")\n self.SortedNameBookButton = QtWidgets.QPushButton(Form)\n self.SortedNameBookButton.setGeometry(QtCore.QRect(830, 10, 21, 20))\n self.SortedNameBookButton.setStyleSheet(\"background: rgb(255, 255, 255); border-radius: 10px;\")\n self.SortedNameBookButton.setObjectName(\"SortedNameBookButton\")\n self.SortedIDButton = QtWidgets.QPushButton(Form)\n self.SortedIDButton.setGeometry(QtCore.QRect(860, 10, 21, 20))\n self.SortedIDButton.setStyleSheet(\"background: rgb(255, 255, 255); border-radius: 10px;\")\n self.SortedIDButton.setObjectName(\"SortedIDButton\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Визуальный список книг\"))\n self.AddBookButton.setText(_translate(\"Form\", \"Добавить книгу\"))\n self.SwipeLeftButton.setText(_translate(\"Form\", \"←\"))\n self.SwipeRightButton.setText(_translate(\"Form\", \"→\"))\n self.SortedAutorButton.setText(_translate(\"Form\", \"А\"))\n self.SortedNameBookButton.setText(_translate(\"Form\", \"Н\"))\n self.SortedIDButton.setText(_translate(\"Form\", \"П\"))\n","repo_name":"Merrcurys/Visual-list-of-books-app","sub_path":"designer/main_interface.py","file_name":"main_interface.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43974790562","text":"from __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"core\", \"0018_delete_userproxy\")]\n\n operations = [\n migrations.AlterField(\n model_name=\"user\",\n name=\"language\",\n field=models.CharField(\n default=\"nb\",\n max_length=3,\n verbose_name=\"Language\",\n choices=[(\"nb\", \"Norsk\"), (\"en\", \"English\")],\n ),\n ),\n migrations.AlterField(\n model_name=\"user\",\n name=\"photo\",\n 
field=models.ImageField(\n upload_to=\"photos/users/\", null=True, verbose_name=\"Photo\", blank=True\n ),\n ),\n migrations.AlterField(\n model_name=\"user\",\n name=\"year\",\n field=models.PositiveIntegerField(\n default=2015,\n max_length=3000,\n blank=True,\n help_text=\"Year the user was active.\",\n null=True,\n verbose_name=\"Active Year\",\n ),\n ),\n ]\n","repo_name":"itdagene-ntnu/itdagene","sub_path":"itdagene/core/migrations/0019_auto_20150130_1925.py","file_name":"0019_auto_20150130_1925.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"26087003285","text":"import numpy as np\n\n\ndef create_color_map(N=256, normalized=False):\n def bitget(byteval, idx):\n return (byteval & (1 << idx)) != 0\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap/255 if normalized else cmap\n return cmap\n\n\ndef overlay_mask_on_image(image, mask, mask_opacity=0.6, mask_color=(0, 255, 0)):\n if mask.ndim == 3:\n assert mask.shape[2] == 1\n _mask = mask.squeeze(axis=2)\n else:\n _mask = mask\n mask_bgr = np.stack((_mask, _mask, _mask), axis=2)\n masked_image = np.where(mask_bgr > 0, mask_color, image)\n return ((mask_opacity * masked_image) + ((1. - mask_opacity) * image)).astype(np.uint8)\n","repo_name":"sabarim/STEm-Seg","sub_path":"stemseg/utils/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"78"} +{"seq_id":"13522215034","text":"class Grid ():\n def __init__ (self, table):\n self.weight_table = []\n self.x_size = len(table)\n self.y_size = len(table[0])\n # Build independent rows; [[None] * y] * x would alias one shared row.\n self.sum_array = [[None] * self.y_size for _ in range(self.x_size)]\n\n # Copy the weight table\n for index_1 in range(self.x_size):\n self.weight_table.append([])\n for index_2 in range(self.y_size):\n self.weight_table[index_1].append(table[index_1][index_2])\n\n def lower_path_sum (self):\n return self.max_sum_at(self.x_size - 1, self.y_size - 1)\n\n def max_sum_at (self, i, j):\n if self.sum_array[i][j] != None:\n return self.sum_array[i][j]\n else:\n max_candidates = [0]\n if i != 0:\n max_candidates.append(self.max_sum_at(i-1, j))\n if j != 0:\n max_candidates.append(self.max_sum_at(i, j-1))\n # Memoize the result so the cache check above actually takes effect.\n self.sum_array[i][j] = max(max_candidates) + self.weight_table[i][j]\n return self.sum_array[i][j]\n\n","repo_name":"r-tellechea/DSA-Training","sub_path":"Chapter 7. Dynamic programming/paths_in_a_grid.py","file_name":"paths_in_a_grid.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
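Aside: the Grid class above computes the best path sum top-down with recursion plus a memo table. A minimal bottom-up sketch of the same recurrence (illustrative only; the function name and sample grid are invented, not part of the record):

def max_path_sum(weights):
    # Bottom-up dynamic programming over a grid where moves go right or down.
    rows, cols = len(weights), len(weights[0])
    best = [[0] * cols for _ in range(rows)]  # fresh row lists, no aliasing
    for i in range(rows):
        for j in range(cols):
            candidates = [0]
            if i > 0:
                candidates.append(best[i - 1][j])
            if j > 0:
                candidates.append(best[i][j - 1])
            best[i][j] = max(candidates) + weights[i][j]
    return best[rows - 1][cols - 1]

print(max_path_sum([[1, 2, 3], [4, 5, 6]]))  # 1 + 4 + 5 + 6 = 16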
{"seq_id":"2031635240","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 22 13:07:17 2018\n\n@author: 124578\n\"\"\"\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\n\n#Initialize logging\nimport logging\nLOG_FILENAME = 'logfile.log'\nlogging.basicConfig(filename='logfile.log',format='%(asctime)s %(levelname)s %(message)s',level=logging.DEBUG,filemode='w')\n\n\nlogging.info('Starting Tensorflow Diabetes Predictive Analyzer')\n\n\ndf = pd.read_csv('diabetes.csv')\nlogging.info('Sample Training data')\nlogging.info(df.head(2))\n\nX = df[\n [\"Pregnancies\", 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']]\n\nY = df[['Outcome']]\n\ndf['NotDiabetes'] = 1 - df['Outcome']\ny = df[['Outcome', 'NotDiabetes']]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)\n\n#learning_rate variable\n\nlearningRate = tf.train.exponential_decay(learning_rate=0.01,global_step=1,decay_steps=X_train.shape[0],decay_rate=0.95,staircase=True)\n\n# Neural Network Parameters\n\nn_hidden_1 = 64\nn_hidden_2 = 32\nn_hidden_3 = 8\nn_input = X_train.shape[1]\nn_classes = y_train.shape[1]\ndropout = 0.5\nbeta=0.01\n\n#Training parameters\ntraining_epochs = 50\nbatch_size = 32\ndisplay_step = 10\n\nlog_vars='{}:{}:{}:{}:{}:{}:{}:{}'.format(n_input,n_hidden_1,n_hidden_2,n_hidden_3,str(int(dropout)*100)+'%',n_classes,batch_size,beta)\nlogging.info('Configurable Parameters:n_input,n_hidden_1,n_hidden_2,n_hidden_3,dropout,n_classes,batch_size,regularization:'+log_vars)\n\n# TensorFlow Graph input\nx = tf.placeholder(\"float\", [None, n_input])\ny = tf.placeholder(\"float\")\nkeep_prob = tf.placeholder(tf.float32)\n#regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)\n\n# Store layer weights and biases\n\nweights = {\n #'h1': tf.Variable(tf.random_uniform(shape=(n_input, n_hidden_1),minval=0,maxval=0.005,dtype=tf.float32, seed=0)), #maxval=0.005 , def =1\n 'h1': tf.Variable(tf.random_uniform(shape=(n_input, n_hidden_1), dtype=tf.float32)),\n 'h2': tf.Variable(tf.random_uniform(shape=(n_hidden_1, n_hidden_2),dtype=tf.float32)),\n 'h3': tf.Variable(tf.random_uniform(shape=(n_hidden_2, n_hidden_3),dtype=tf.float32)),\n 'out': tf.Variable(tf.random_uniform(shape=(n_hidden_3, n_classes), dtype=tf.float32))\n}\n\nbiases = {\n 'b1': tf.Variable(tf.random_uniform([n_hidden_1])),\n 'b2': tf.Variable(tf.random_uniform([n_hidden_2])),\n 'b3': tf.Variable(tf.random_uniform([n_hidden_3])),\n 'out1': tf.Variable(tf.random_uniform([n_classes]))\n}\n\n# Create Neural Network model with 3 hidden layers\ndef neural_network(x, weights, biases,keep_prob):\n # Hidden layer with relu activation\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n #layer_1 = tf.nn.dropout(layer_1, keep_prob)\n # Hidden layer with relu activation\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n #layer_2 = tf.nn.dropout(layer_2, keep_prob)\n # Hidden layer with sigmoid activation\n layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])\n layer_3 = tf.nn.sigmoid(layer_3)\n # Output Layer with neurons = number of output classes\n out_layer = tf.matmul(layer_3, weights['out']) + 
biases['out1']\n\n return out_layer\n\ndef calculate_cost(nn_model,weights):\n # Defining loss fucntion\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=nn_model, labels=y))\n # Loss function using L2 Regularization\n regL2 = tf.nn.l2_loss(weights['h1']) + tf.nn.l2_loss(weights['h2']) + tf.nn.l2_loss(weights['h3']) +tf.nn.l2_loss(weights['out'])\n# Adding regularization to cost fn avoides overfitting of training data\n cost = tf.reduce_mean(cost + beta*regL2) \n return cost\n\n# Constructing model from given weights,bias and inputs\npred = neural_network(x, weights, biases,dropout)\n\n#calculate cost fucntion for the model\ncost=calculate_cost(pred,weights)\n \n#Optimization adam- adaptive moment estimation.Optimizer should minimize the cost fucntion\noptimizer = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(cost)\n\n\n# Evaluate model\ncorrect_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n\n# Calculate accuracy\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n#Saver for the model\nsaver = tf.train.Saver()\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n# Start training\nwith tf.Session() as sess:\n sess.run(init)\n # Training cycle\n for epoch in range(1,training_epochs+1):\n avg_cost = 0.\n total_batch = int(len(X_train) / batch_size)\n\n X_batches = np.array_split(X_train, total_batch)\n Y_batches = np.array_split(y_train, total_batch)\n\n # Loop over all batches\n for i in range(total_batch):\n batch_x, batch_y = X_batches[i], Y_batches[i]\n # Run optimization operation (backprop) and cost operation(to get loss value)\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if epoch % display_step == 0 or epoch ==1:\n batch_cost,acc_train = sess.run([cost,accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})\n print(\"Step:\", '%d' % (epoch), \"avg cost=\", \"{:.4f}\".format(avg_cost),\" accuracy= \",\"{:.3f}\".format(acc_train))\n debugstr= 'epoch: '+ str(epoch+1) + ' ,cost= '+ format(avg_cost)+\" , accuracy= \"+format(acc_train)\n logging.debug(debugstr)\n\n \n acc_eval = accuracy.eval({x: X_test, y: y_test, keep_prob: 1})\n print(\"Test Accuracy:\", acc_eval)\n logging.info(\"Test Accuracy:\"+ str(acc_eval))\n #saver.save(sess, \"diabtest2-model2\")\n #saver.export_meta_graph('diabtest2-model.meta')\n \n#Export Meta Graph for calling via API\n\n#meta_graph_def = tf.train.export_meta_graph(filename='diabtest2-model.meta')\n\nprint(\"Done. 
Please see log files for details\")","repo_name":"Project75/snow","sub_path":"Practice/tensorflow_diabetes_classifier.py","file_name":"tensorflow_diabetes_classifier.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28309002465","text":"from Rewards import SalaryDirector, SalaryHeadTeacher, SalaryTeacher, Grades\nfrom Staff import Director, HeadTeacher, Teacher, Student\nfrom School import School, ClassRoom\n\nif __name__ == '__main__':\n school = School('Hillel IT School', 'High')\n\n director = Director('Test', 'Director', 65)\n headteacher = HeadTeacher('Test', 'Headteacher', 63)\n teacher1 = Teacher('Test', 'Teacher1', 60)\n teacher2 = Teacher('Test', 'Teacher2', 45)\n teacher3 = Teacher('Test', 'Teacher3', 35)\n\n salary_director = SalaryDirector(director)\n salary_head_teacher = SalaryHeadTeacher(headteacher)\n salary_teacher1 = SalaryTeacher(teacher1)\n salary_teacher2 = SalaryTeacher(teacher2)\n salary_teacher3 = SalaryTeacher(teacher3)\n\n salary_teacher3.print_fond_required()\n director.print_human_list()\n\n student1 = Student('Test', 'Student1', 16)\n student2 = Student('Test', 'Student2', 15)\n student3 = Student('Test', 'Student3', 14)\n student4 = Student('Test', 'Student4', 17)\n student5 = Student('Test', 'Student5', 17)\n student6 = Student('Test', 'Student6', 15)\n student7 = Student('Test', 'Student7', 14)\n student8 = Student('Test', 'Student8', 16)\n student9 = Student('Test', 'Student9', 14)\n student10 = Student('Test', 'Student10', 15)\n\n grade_student1 = Grades(student1)\n grade_student2 = Grades(student2)\n grade_student3 = Grades(student3)\n grade_student4 = Grades(student4)\n grade_student5 = Grades(student5)\n grade_student6 = Grades(student6)\n grade_student7 = Grades(student7)\n grade_student8 = Grades(student8)\n grade_student9 = Grades(student9)\n grade_student10 = Grades(student10)\n\n grade_student10.report_reward(10)\n grade_student10.report_reward(12)\n grade_student10.report_reward(9)\n print(grade_student10.get_all_rewards())\n grade_student10.personal_reward()\n\n director.print_human_list()\n salary_director.print_financial_fond()\n salary_director.print_fond_required()\n\n class_b10 = ClassRoom('b10', teacher1)\n class_a10 = ClassRoom('a10', teacher2)\n\n class_b10.print_classroom_list()\n class_a10.print_classroom_list()\n\n class_b10.new_student(student1)\n class_b10.new_student(student2)\n class_b10.new_student(student3)\n class_b10.new_student(student4)\n class_b10.new_student(student5)\n\n class_a10.new_student(student6)\n class_a10.new_student(student7)\n class_a10.new_student(student8)\n class_a10.new_student(student9)\n class_a10.new_student(student10)\n\n class_b10.print_classroom_list()\n class_a10.print_classroom_list()\n\n class_b10.delete_student(student5)\n class_b10.print_classroom_list()\n\n class_a10.print_all()\n del class_b10\n class_a10.print_all()\n\n\n\n\n\n\n\n","repo_name":"Regato/AQAHillel","sub_path":"HW11/HW11.py","file_name":"HW11.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15238819767","text":"import re\nimport os\nimport weakref\nimport numpy as np\n\nfrom collections import \\\n defaultdict\nfrom stat import \\\n ST_CTIME\n\nfrom .definitions import \\\n pluto2enzoDict, \\\n yt2plutoFieldsDict, \\\n parameterDict \\\n\nfrom yt.funcs import *\nfrom yt.data_objects.grid_patch import \\\n 
AMRGridPatch\nfrom yt.geometry.grid_geometry_handler import \\\n GridIndex\nfrom yt.data_objects.static_output import \\\n Dataset\nfrom yt.utilities.definitions import \\\n mpc_conversion, sec_conversion\nfrom yt.utilities.file_handler import \\\n HDF5FileHandler\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import \\\n parallel_root_only\nfrom yt.utilities.io_handler import \\\n io_registry\n\nfrom yt.fields.field_info_container import \\\n FieldInfoContainer, NullFunc\nfrom .fields import PlutoFieldInfo, KnownPlutoFields\n\nclass PlutoGrid(AMRGridPatch):\n _id_offset = 0\n __slots__ = [\"_level_id\", \"stop_index\"]\n def __init__(self, id, index, level, start, stop):\n AMRGridPatch.__init__(self, id, filename = index.index_filename,\n index = index)\n self.Parent = []\n self.Children = []\n self.Level = level\n self.ActiveDimensions = stop - start + 1\n\n def get_global_startindex(self):\n \"\"\"\n Return the integer starting index for each dimension at the current\n level.\n\n \"\"\"\n if self.start_index != None:\n return self.start_index\n if self.Parent == []:\n iLE = self.LeftEdge - self.ds.domain_left_edge\n start_index = iLE / self.dds\n return np.rint(start_index).astype('int64').ravel()\n pdx = self.Parent[0].dds\n start_index = (self.Parent[0].get_global_startindex()) + \\\n np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)\n self.start_index = (start_index*self.ds.refine_by).astype('int64').ravel()\n return self.start_index\n\n def _setup_dx(self):\n # has already been read in and stored in index\n self.dds = self.index.dds_list[self.Level]\n self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds\n\nclass PlutoHierarchy(GridIndex):\n\n grid = PlutoGrid\n\n def __init__(self,ds,dataset_type='pluto_hdf5'):\n self.domain_left_edge = ds.domain_left_edge\n self.domain_right_edge = ds.domain_right_edge\n self.dataset_type = dataset_type\n self.field_indexes = {}\n self.dataset = weakref.proxy(ds)\n self.index_filename = os.path.abspath(\n self.dataset.parameter_filename)\n self.directory = ds.fullpath\n self._handle = ds._handle\n\n self.float_type = self._handle['/level_0']['data:datatype=0'].dtype.name\n self._levels = self._handle.keys()[2:]\n GridIndex.__init__(self,ds,dataset_type)\n\n def _detect_output_fields(self):\n ncomp = int(self._handle['/'].attrs['num_components'])\n self.field_list = [c[1] for c in self._handle['/'].attrs.items()[-ncomp:]]\n \n def _count_grids(self):\n self.num_grids = 0\n for lev in self._levels:\n self.num_grids += self._handle[lev]['Processors'].len()\n\n def _parse_index(self):\n f = self._handle # shortcut\n\n # this relies on the first Group in the H5 file being\n # 'Chombo_global' and the second 'Expressions'\n levels = f.keys()[2:]\n grids = []\n self.dds_list = []\n i = 0\n for lev in levels:\n level_number = int(re.match('level_(\\d+)',lev).groups()[0])\n boxes = f[lev]['boxes'].value\n dx = f[lev].attrs['dx']\n self.dds_list.append(dx * np.ones(3))\n for level_id, box in enumerate(boxes):\n si = np.array([box['lo_%s' % ax] for ax in 'ijk'])\n ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])\n pg = self.grid(len(grids),self,level=level_number,\n start = si, stop = ei)\n grids.append(pg)\n grids[-1]._level_id = level_id\n self.grid_left_edge[i] = dx*si.astype(self.float_type) + self.domain_left_edge\n self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1) + self.domain_left_edge\n self.grid_particle_count[i] = 0\n self.grid_dimensions[i] = ei - si + 1\n i += 1\n self.grids = 
np.empty(len(grids), dtype='object')\n for gi, g in enumerate(grids): self.grids[gi] = g\n# self.grids = np.array(self.grids, dtype='object')\n\n def _populate_grid_objects(self):\n for g in self.grids:\n g._prepare_grid()\n g._setup_dx()\n\n for g in self.grids:\n g.Children = self._get_grid_children(g)\n for g1 in g.Children:\n g1.Parent.append(g)\n self.max_level = self.grid_levels.max()\n\n def _setup_derived_fields(self):\n self.derived_field_list = []\n\n def _get_grid_children(self, grid):\n mask = np.zeros(self.num_grids, dtype='bool')\n grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)\n mask[grid_ind] = True\n return [g for g in self.grids[mask] if g.Level == grid.Level + 1]\n\nclass PlutoDataset(Dataset):\n _index_class = PlutoHierarchy\n _fieldinfo_fallback = PlutoFieldInfo\n _fieldinfo_known = KnownPlutoFields\n\n def __init__(self, filename, dataset_type='pluto_hdf5',\n storage_filename = None, ini_filename = None):\n self._handle = HDF5FileHandler(filename)\n self.current_time = self._handle.attrs['time']\n self.ini_filename = ini_filename\n self.fullplotdir = os.path.abspath(filename)\n Dataset.__init__(self,filename,dataset_type)\n self.storage_filename = storage_filename\n self.cosmological_simulation = False\n\n # These are parameters that I very much wish to get rid of.\n self.parameters[\"HydroMethod\"] = 'chombo' # always PPM DE\n self.parameters[\"DualEnergyFormalism\"] = 0 \n self.parameters[\"EOSType\"] = -1 # default\n\n def _set_units(self):\n \"\"\"\n Generates the conversion to various physical _units based on the parameter file\n \"\"\"\n self.units = {}\n self.time_units = {}\n if len(self.parameters) == 0:\n self._parse_parameter_file()\n self._setup_nounits_units()\n self.conversion_factors = defaultdict(lambda: 1.0)\n self.time_units['1'] = 1\n self.units['1'] = 1.0\n self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()\n seconds = 1 #self[\"Time\"]\n for unit in sec_conversion.keys():\n self.time_units[unit] = seconds / sec_conversion[unit]\n for key in yt2plutoFieldsDict:\n self.conversion_factors[key] = 1.0\n\n def _setup_nounits_units(self):\n z = 0\n mylog.warning(\"Setting 1.0 in code units to be 1.0 cm\")\n if not self.has_key(\"TimeUnits\"):\n mylog.warning(\"No time units. Setting 1.0 = 1 second.\")\n self.conversion_factors[\"Time\"] = 1.0\n for unit in mpc_conversion.keys():\n self.units[unit] = mpc_conversion[unit] / mpc_conversion[\"cm\"]\n\n\n def _localize(self, f, default):\n if f is None:\n return os.path.join(self.directory, default)\n return f\n\n def _parse_parameter_file(self):\n \"\"\"\n Reads in an inputs file in the 'pluto.ini' format. 
Probably not\n especially robust at the moment.\n \"\"\"\n\n ini_filename = 'pluto.ini'\n self.fullplotdir = os.path.abspath(self.parameter_filename)\n self.ini_filename = self._localize( \\\n self.ini_filename, ini_filename)\n self.unique_identifier = \\\n int(os.stat(self.parameter_filename)[ST_CTIME])\n lines = open(self.ini_filename).readlines()\n # read the file line by line, storing important parameters\n for lineI, line in enumerate(lines):\n try:\n param, sep, vals = [v.rstrip() for v in line.partition(' ')]\n #param, sep, vals = map(rstrip,line.partition(' '))\n except ValueError:\n mylog.error(\"ValueError: '%s'\", line)\n if pluto2enzoDict.has_key(param):\n paramName = pluto2enzoDict[param]\n t = map(parameterDict[paramName], vals.split())\n if len(t) == 1:\n self.parameters[paramName] = t[0]\n else:\n if paramName == \"RefineBy\":\n self.parameters[paramName] = t[0]\n else:\n self.parameters[paramName] = t\n\n # assumes 3D for now\n elif param.startswith(\"X1-grid\"):\n t = vals.split()\n low1 = float(t[1])\n high1 = float(t[4])\n N1 = int(t[2])\n elif param.startswith(\"X2-grid\"):\n t = vals.split()\n low2 = float(t[1])\n high2 = float(t[4])\n N2 = int(t[2])\n elif param.startswith(\"X3-grid\"):\n t = vals.split()\n low3 = float(t[1])\n high3 = float(t[4])\n N3 = int(t[2])\n \n self.dimensionality = 3\n self.domain_left_edge = np.array([low1,low2,low3])\n self.domain_right_edge = np.array([high1,high2,high3])\n self.domain_dimensions = np.array([N1,N2,N3])\n self.refine_by = self.parameters[\"RefineBy\"]\n \n @classmethod\n def _is_valid(self, *args, **kwargs):\n return os.path.isfile('pluto.ini')\n\n @parallel_root_only\n def print_key_parameters(self):\n for a in [\"current_time\", \"domain_dimensions\", \"domain_left_edge\",\n \"domain_right_edge\"]:\n if not hasattr(self, a):\n mylog.error(\"Missing %s in parameter file definition!\", a)\n continue\n v = getattr(self, a)\n mylog.info(\"Parameters: %-25s = %s\", a, v)\n","repo_name":"Xarthisius/yt-drone","sub_path":"yt/frontends/pluto/data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":10093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30084674286","text":"from tkinter import *\n\ntop = Tk()\ncanv = Canvas(top)\nvar = IntVar()\ndef sel():\n selection = \"You selected the option \" + str(var.get())\n label.config(text = selection)\nBITMAP = \"\"\"\n#define im_width 1\n#define im_height 1\nstatic char im_bits[] = { 0x54 };\n\"\"\"\n \nbmp = BitmapImage(data=BITMAP)\nclass Gate_Image(object):\n def __init__(self,xpix,ypix,main_image):\n self.B_In = Button(canv,image=bmp,command=self.input_click)\n self.B_Out = Button(canv,image=bmp,command=self.output_click)\n self.B_Main = Button(canv,image=main_image,command=self.main_click)\n self.x = xpix\n self.y = ypix\n self.is_selected = False\n self.draw()\n def input_click(self):\n asdas = 0x54\n def output_click(self):\n asdasd=34\n def main_click(self):\n line = canv.create_line(10,10,500,500,fill=\"black\")\n canv.scale(line, 0, 0, 1.5, 2)\n print(\"outch!\")\n def draw(self):\n sm = 7\n bg = 50\n y_adj = (bg-sm) // 2\n self.B_In.place(height=sm,width=sm,x=self.x-sm,y=self.y+y_adj)\n self.B_Out.place(height=sm,width=sm,x=self.x+bg,y=self.y+y_adj)\n self.B_Main.place(height=bg,width=bg,x=self.x,y=self.y)\n\n \nor_bitmap = PhotoImage(file=\"or.gif\")\ngi = Gate_Image(10,10,or_bitmap)\ngi = 
Gate_Image(70,70,or_bitmap)\ncanv.create_rectangle(100,200,200,300,fill=\"black\")\n\ncanv.place(x=0,y=0,width=5000,height=5000)\ntop.mainloop()","repo_name":"benblack769/psm_interp","sub_path":"logic_maker/old/before_jim_changes/tkprac.py","file_name":"tkprac.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29412940800","text":"\"\"\"\n Start -> Village -> Mine -> Village -> Finish\n\"\"\"\n\n\n# Return the total money, water and food consumed for the given weather-day counts\ndef Consume(gx, qx, sx):\n water = gx * 8 + qx * 5 + sx * 10 # boxes (gx hot days, qx clear days, sx sandstorm days)\n food = gx * 6 + qx * 7 + sx * 10 # boxes\n money = water * 5 + food * 10 # cost in yuan\n return [money, water, food]\n\n\n# Consumption for the first eight days; reaching the village takes eight days\neightConsumeMoney = 2 * Consume(3, 3, 0)[0] + Consume(0, 0, 2)[0]\neightConsumeWater = 2 * Consume(3, 3, 0)[1] + Consume(0, 0, 2)[1]\neightConsumeFood = 2 * Consume(3, 3, 0)[2] + Consume(0, 0, 2)[2]\nprint(\"Water consumed in the eight days from start to village: \" + str(eightConsumeWater) + \" boxes, \"\"food consumed: \" + str(eightConsumeFood) + \" boxes, money spent: \" + str(\n eightConsumeMoney) + \" yuan\")\ntenMoneyAfter = 10000 - eightConsumeMoney\nprint(\"Money left on reaching the village: \" + str(tenMoneyAfter) + \" yuan\")\nFoodStart = (1200 - (98 * 3)) / 2\nprint(\"Food bought at the start: \" + str(FoodStart) + \" boxes\")\nWaterStart = 98\nprint(\"Water bought at the start: \" + str(WaterStart) + \" boxes\")\nFoodValage = 453 - eightConsumeFood\nprint(\"Food left on reaching the village: \" + str(FoodValage) + \" boxes\")\nwaterValage = 98 - eightConsumeWater\nprint(\"Water left on reaching the village: \" + str(waterValage) + \" boxes\")\nValageAfter = 1200 - FoodValage * 2\nprint(\"Carrying capacity left on reaching the village: \" + str(ValageAfter) + \" kg\")\nwaterAdd = ValageAfter / 3\nprint(\"If only water is restocked at the village, it takes \" + str(round(waterAdd)) + \" boxes\" + \", costing \" + str(163 * 5 * 2) + \" yuan\")\nprint(\"Setting out from the village with \" + str(153) + \" boxes of water\" + \", \" + str(345) + \" boxes of food\" + \" and \" + str(8530 - 1630) + \" yuan left\")\nprint(\"====================================================================\")\n# Water left after going from the start via the village to the mine\nmRemainWater = 153 - (2 * 5 + 2 * 8)\n# Food left after going from the start via the village to the mine\nmRemainFood = 355 - (2 * 6 + 2 * 7)\n# Money left after going from the start via the village to the mine\nmRemainMoney = 6900\nprint(\"Water left on reaching the mine: \" + str(mRemainWater) + \" boxes, \"\"food left: \" + str(mRemainFood) + \" boxes, money left: \" + str(mRemainMoney) + \" yuan\")\nprint(\"====================================================================\")\n# Buying as much water as possible gives the largest benefit\n# Weather array: clear->1; hot->2; sandstorm->3;\nweather = [2, 2, 1, 3, 1, 2, 3, 1, 2, 2,\n 3, 2, 1, 2, 2, 2, 3, 3, 2, 2,\n 1, 1, 2, 1, 3, 2, 1, 1, 2, 2]\n# Final results\nmoney2 = []\nfor n in range(1, 16):\n nweather = weather[10:10 + n]\n # Count each weather type during the mining period\n sunNum = 0\n for i in nweather:\n if 1 == i:\n sunNum += 1\n hotNum = 0\n for i in nweather:\n if 2 == i:\n hotNum += 1\n stromNum = 0\n for i in nweather:\n if 3 == i:\n stromNum += 1\n\n # Water consumed at the mine\n MineConsumeWater = 3 * Consume(hotNum, sunNum, stromNum)[1] + Consume(1, 0, 0)[1]\n # Food consumed at the mine\n MineConsumeFood = 3 * Consume(hotNum, sunNum, stromNum)[2] + Consume(1, 0, 0)[2]\n\n # Total money\n allMoney = 6900\n # Money earned at the mine\n earn = n * 1000\n resMoney = allMoney + earn\n money1 = [n, resMoney]\n # Break if overloaded\n # if MineConsumeWater > 127 or MineConsumeFood > 329:\n # break\n money2.append(money1)\n print(\"Water left after day \" + str(n) + \" at the mine: \" + str(127 - MineConsumeWater) + \" boxes\")\n print(\"Food left after day \" + str(n) + \" at the mine: \" + str(329 - MineConsumeFood) + \" boxes\")\nprint(money2)\nprint(\"====================================================================\")\nprint(\"Water left when back at the village on day 3: \" + str(24) + \" boxes\")\nprint(\"Food left when back at the village on day 3: \" + str(242) + \" boxes\")\nprint(\"Money left when back at the village on day 3: \" + str(9900) + \" yuan\")\nprint(\"=====================================================================\")\n\n
# If we head straight back to the finish now, we first need to restock water, reaching the finish on day 17\nprint(\"Heading straight back to the finish, after restocking water there are \" + str(32) + \" boxes left\")\nprint(\"Money left then: \" + str(9900 - 8 * 10) + \" yuan\")\nprint(\"Food left at the finish: \" + str(242 - 16) + \" boxes\")\n# Money left if we go straight to the finish\nprint(\"Returning straight from the village to the finish leaves \" + str(9700 + 226 * 5))\n\n# If we instead return to the mine after fully restocking water\nprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\nprint(\"Back at the village, after fully restocking water there are \" + str(round((1200 - 24 - 242 * 2) / 3 + 24)) + \" boxes of water left\")\nprint(\"No more food is bought at the village; food left: \" + str(242) + \" boxes\")\nprint(\"Money left after restocking at the village: \" + str(9900 - 231 * 5) + \" yuan\")\nprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n# Back at the mine on day 19\nprint(\"Water left on returning to the mine: \" + str(223) + \" boxes\")\nprint(\"Food left on returning to the mine: \" + str(218) + \" boxes\")\n# Final results\nmoney3 = []\nfor n in range(1, 9):\n nweather = weather[17:17 + n]\n # Count each weather type during the mining period\n sunNum = 0\n for i in nweather:\n if 1 == i:\n sunNum += 1\n hotNum = 0\n for i in nweather:\n if 2 == i:\n hotNum += 1\n stromNum = 0\n for i in nweather:\n if 3 == i:\n stromNum += 1\n\n # Water consumed while mining\n MineConsumeWater = 3 * Consume(hotNum, sunNum, stromNum)[1] + Consume(0,0,1)[1]\n # Food consumed while mining\n MineConsumeFood = 3 * Consume(hotNum, sunNum, stromNum)[2] + Consume(0,0,1)[2]\n\n # Total money\n allMoney = 8745\n # Money earned at the mine\n earn = n * 1000\n resultMoney = allMoney + earn\n money1 = [n, resultMoney]\n # Break if food or water runs out\n if MineConsumeWater > 223 or MineConsumeFood > 218:\n break\n money3.append(money1)\n print(\"Water left after working \" + str(n) + \" days at the mine: \" + str(223 - MineConsumeWater) + \" boxes\")\n print(\"Food left after working \" + str(n) + \" days at the mine: \" + str(218 - MineConsumeFood) + \" boxes\")\nprint(money3)\n# Return to the finish over the last five days, restocking water at the village on the way\nprint(\"Water left back at the village: \" + str(10) + \" boxes\")\nprint(\"Water to restock: \" + str(32) + \" boxes\")\nprint(\"Food left on reaching the finish: \" + str(5) + \" boxes\")\nprint(\"Food to restock: \" + str(33) + \" boxes\")\nprint(\"Final amount left: \" + str(16745 - 32 * 10 - 33 * 20) + \" yuan\")\n","repo_name":"IronmanJay/Python_Project","sub_path":"MathematicalModeling/QuestionOne/partThree.py","file_name":"partThree.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"zh","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"} +
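Aside: the script above hard-codes the per-weather water/food usage and prices inside Consume. A minimal table-driven sketch of the same arithmetic (illustrative only; the dictionary names and the sample day sequence are invented, not part of the record):

# Prices and per-day usage mirror the constants used in the script above.
WATER_PER_DAY = {"sunny": 5, "hot": 8, "sandstorm": 10}   # boxes per day
FOOD_PER_DAY = {"sunny": 7, "hot": 6, "sandstorm": 10}    # boxes per day
WATER_PRICE, FOOD_PRICE = 5, 10                           # yuan per box

def consumption(days):
    """Total [money, water, food] consumed for a sequence of weather labels."""
    water = sum(WATER_PER_DAY[d] for d in days)
    food = sum(FOOD_PER_DAY[d] for d in days)
    return [water * WATER_PRICE + food * FOOD_PRICE, water, food]

print(consumption(["hot", "hot", "sunny", "sandstorm"]))  # -> [445, 31, 29]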
{"seq_id":"73292982011","text":"\"\"\"\n Exercise 13\n\n Write a program that asks for the initial value of a debt and the monthly interest rate. Also ask for the monthly amount that will be paid.\nPrint the number of months until the debt is paid off, the total paid and the total interest paid.\n\"\"\"\n\nvalor_inicial_divida = float(input('Initial debt value: R$ '))\njuros_mensal = float(input('Monthly interest (%): '))\nvalor_mensal_pago = float(input('Monthly amount paid: R$ '))\n\njuros = meses = 0\ntotal_pago = valor_inicial_divida\n\nwhile total_pago > 0:\n juros = (total_pago * (juros_mensal / 100)) + juros\n meses = meses + 1\n total_pago = total_pago + (total_pago * (juros_mensal / 100)) - valor_mensal_pago\n\nprint(f'\\nTotal paid: R$ {valor_inicial_divida + juros:.2f}')\nprint(f'Months: {meses}')\nprint(f'Interest paid: R$ {juros:.2f}')","repo_name":"fabriciovale20/Livro-Introducao-a-Python3","sub_path":"Capítulo 5 (Repetições)/ex013.py","file_name":"ex013.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38135811869","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow_probability as tfp\nfrom tensorflow.keras.layers import Dense\n\n\n# Replay Buffer of previously performed actions.\n# This is to allow for delayed learning.\nclass ReplayBuffer:\n def __init__(self, max_size, input_shape, n_actions):\n # max size.\n self.mem_size = max_size\n # first available memory position.\n self.mem_cntr = 0\n # Agent memory.\n self.state_memory = np.zeros((self.mem_size, *input_shape))\n # Memory of the new states seen as a result of actions taken.\n self.new_state_memory = np.zeros((self.mem_size, *input_shape))\n # Action memory\n self.action_memory = np.zeros((self.mem_size, n_actions))\n # Reward memory\n self.reward_memory = np.zeros(self.mem_size)\n # Array to keep track of the terminal flags received from the environment.\n # this is because the value of a terminal state is zero, so no reward should follow this.\n self.terminal_memory = np.zeros(self.mem_size, dtype=bool)\n\n def store_transition(self, state, action, reward, state_, done ):\n index = self.mem_cntr % self.mem_size\n\n self.state_memory[index] = state\n self.new_state_memory[index] = state_\n self.action_memory[index] = action\n self.reward_memory[index] = reward\n self.terminal_memory[index] = done\n\n self.mem_cntr += 1\n\n def sample_buffer(self, batch_size):\n max_mem = min(self.mem_cntr, self.mem_size)\n\n batch = np.random.choice(max_mem, batch_size)\n\n states = self.state_memory[batch]\n states_ = self.new_state_memory[batch]\n actions = self.action_memory[batch]\n rewards = self.reward_memory[batch]\n dones = self.terminal_memory[batch]\n\n return states, actions, rewards, states_, dones\n\n","repo_name":"Jiynto/TrackManiaRL","sub_path":"TrackmaniaActorCritic/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15094657218","text":"import onnxruntime\nimport onnx\nimport numpy as np\nimport tensorrt as trt\n\n\nmodel_path = \"../weights/model701_checkpoint.onnx\"\nonnx_model = onnx.load(model_path)\nonnx.checker.check_model(onnx_model)\n\nbatch_size = 1\nimage_shape = [256, 128]\nimage_channel = 3\ninput_shape = [batch_size, image_channel, *image_shape]\nfake_input = np.random.random(input_shape).astype(np.float32)\n\nprint(\"||||====Test onnx====||||\")\nsession = onnxruntime.InferenceSession(model_path)\nprint(\"====INPUT====\")\nfor i in session.get_inputs():\n 
print(\"Name: {}, Shape: {}, Dtype: {}\".format(i.name, i.shape, i.type))\nprint(\"====OUTPUT====\")\nfor i in session.get_outputs():\n print(\"Name: {}, Shape: {}, Dtype: {}\".format(i.name, i.shape, i.type))\nprint(\"====INFER====\")\noutputs = session.run(None, {'input_0': fake_input})[0]\nprint(\"Shape: {}\".format(outputs.shape))","repo_name":"ewigspace1910/DAPRH","sub_path":"_misc/deepstream/scripts/test_onnx.py","file_name":"test_onnx.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"41252220045","text":"''' DADOS ESTATÍSTICOS DOS AUTÔMATOS\nBeatriz de Camargo Castex Ferreira - 10728077 - USP São Carlos - IFSC\n05/2020\n\nEste programa tem como objetivo calcular dados como a média, desvio padrão,\nevenness, entropia, etc. de features que retiramos de padrões formados por\nnossos autômatos, como número de bursts num split signal, por exemplo.\n\nSe houver algum conjunto de valores que não seja necessário ou possível\ncalcular, apenas desativálos com comentários.\n\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import log\nimport csv\n\ni = 0\n\n# Função gaussiana\n\n\ndef gaussian(x, mu, sig):\n return 1 / (np.sqrt(2 * np.pi) * sig) * np.exp(-(((x - mu) / sig)**2) / 2)\n\n\n# Bins que serão usados para plotar a gaussiana:\nbins = np.linspace(-0.05, 0.05, 1000)\n\n# Vetores onde iremos guardar os dados para fazer gráficos:\ngaus = [] # Dados das gaussianas\n\n# Primeiro abrimos o arquivo e lemos os dados coletados para cada automato:\n\nfile_name = 'inter_dist.csv' # !!! INSIRA AQUI NOME DO ARQUIVO !!!\nprint('\\nDistâncias intersinais') # Dados sendo análisados\n\n# Os arquivos estão organizados de forma que cada linha tem os dados de\n# um dos nossos autômatos.\nwith open(file_name, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n # Depois organizamos os padrões em arrays\n for row in reader:\n # Salvamos os dados\n dados = np.array([float(s) for s in row])\n\n # Identificamos o autômato em que estamos trabalhando\n if (i == 0):\n aut = 'a:'\n elif (i == 1):\n aut = 'b:'\n elif (i == 2):\n aut = 'c:'\n elif (i == 3):\n aut = 'e:'\n else:\n aut = 'do CDT-23:'\n\n print('\\nDados do autômato ' + aut)\n\n ''' MÉDIA, DESVIO E GAUSSIANA '''\n\n # Calculando a média e o desvio padrão dos dados\n mean = np.mean(dados)\n sdev = np.std(dados)\n\n # Mapeando a densidade de probabilidade normal (gaussiana):\n gaus.append(gaussian(bins, mean, sdev))\n\n # Imprimindo valores:\n print('Média: ', mean)\n print('Desvio Padrão: ', sdev)\n\n ''' ENTROPIA E EVENNESS '''\n\n # Calculando a entropia dos dados:\n entp = 0\n for x in range(len(dados)):\n if (dados[x] != 0):\n entp -= dados[x] * log(dados[x], 2)\n\n # Calculando a envenness dos dados:\n evns = 2**entp\n\n # Imprimindo valores:\n print('Entropia: ', entp)\n print('Evenness: ', evns)\n\n i = i + 1\n\n\n# Montando o gráfico da gaussiana:\nj = 0\nfor y in gaus:\n # Identificando o autômato:\n if (j == 0):\n colors, labels = 'orange', 'Autômato A'\n elif (j == 1):\n colors, labels = 'limegreen', 'Autômato B'\n elif (j == 2):\n colors, labels = 'blue', 'Autômato C'\n elif (j == 3):\n colors, labels = 'yellow', 'Autômato E'\n else:\n colors, labels = 'red', 'Autômato CDT-23'\n\n plt.plot(bins, y, label=labels, color=colors)\n\n j = j + 1\n\n# Plotando o gráfico\nplt.xlabel('Distâncias intersinais') # NOME DOS DADOS ANALISADOS\nplt.ylabel('Densidade de probabilidade 
normal')\nplt.legend()\nplt.show()\n","repo_name":"BeatrizCastex/data-treatment","sub_path":"6-statistics/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43697871200","text":"__author__ = 'ahmedlawi92@gmail.com'\n\nimport json\nimport string\nfrom enum import Enum\nfrom bs4 import BeautifulSoup\nimport requests\n\nclass BBRefScraper:\n\n __base_url = 'http://www.basketball-reference.com{s}'\n __url_key = 'info_page'\n\n\n def __init__(self, json_file):\n self.players = json.load(file(json_file))\n\n def get_table(self, player, table_type):\n player_url = self.players[player][self.__url_key]\n page = requests.get(self.__base_url.format(s=player_url))\n soup = BeautifulSoup(page.content, 'html.parser')\n return self.__scrape_table(soup.find('table', id=table_type.value))\n\n def __scrape_table(self, table):\n columns = [col.string for col in table.find_all('th')]\n stats = [{columns[i]: self.__format_line(cell) for i, cell in enumerate(row.find_all(\"td\"))} for row in table.tbody.find_all('tr')]\n return stats\n\n def __format_line(self, v):\n t = v.a.string if v.a is not None else v.string\n if t is None:\n return t\n try:\n t = float(t)\n return t\n except ValueError:\n return t\n\nclass TableTypes(Enum):\n TOTALS = 'totals'\n ADVANCED = 'advanced'\n SHOOTING = 'shooting'\n POSSESSION = 'per_poss'\n PER_GAME = 'per_game'\n PER_36 = 'per_minute'\n\n\ndef create_players_info_json():\n base_url = \"http://www.basketball-reference.com/players/{s}\"\n players = {}\n letters = string.ascii_lowercase\n\n for letter in letters:\n page = requests.get(base_url.format(s=letter))\n soup = BeautifulSoup(page.content, 'html.parser')\n player_table = soup.find(id=\"players\")\n if player_table is None:\n continue\n columns = [col.string for col in player_table.find_all('th')]\n for player_data in player_table.tbody.find_all('tr'):\n name = player_data.td.a.string\n players[name] = {columns[i]: cell.string for i, cell in enumerate(player_data.find_all(\"td\"))}\n players[name][\"info_page\"] = player_data.td.a['href']\n\n f = open('players_info.json', 'w')\n f.write(json.dumps(players, sort_keys=True, indent=4))\n f.close\n","repo_name":"aabulawi/basketball-stats","sub_path":"bballstats/bballreference/bbref_scraper.py","file_name":"bbref_scraper.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"41352812532","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom wdcuration import run_multiple_searches\nimport asyncio\nfrom pathlib import Path\nimport json\n\nHERE = Path(__file__).parent.resolve()\nRESULTS = HERE.joinpath(\"results\").resolve()\n\nurl = \"https://fapesp.br/15888/tabela-de-diarias-de-viagem\"\nhtml = requests.get(url).text\nsoup = BeautifulSoup(html, \"lxml\")\n\n# HTML locator identified with help of https://webscraper.io/\nentries = soup.find_all(\"tr\")\n\ninternational_flag = 0\nnational_flag = 0\ncountry_value_dict = {}\nnational_dict = {}\nfor entry in entries:\n if entry.text.strip() == \"\":\n continue\n if len(entry.find_all(\"strong\")) > 0:\n current_strong = entry.select(\"td\")[0].text.strip()\n\n if (\n current_strong\n == \"FAPESP: Tabela de Diárias Nacionais - Vigente a partir de 01/03/2023\"\n ):\n national_flag = 1\n\n if international_flag:\n country_value_dict[current_strong] = {}\n\n if national_flag:\n 
national_dict[current_strong] = {}\n\n if national_flag != 0 and international_flag == 0:\n print(entry)\n print(entry.find_all(\"strong\"))\n current_category = entry.select(\"td\")[0].text.strip()\n try:\n current_value = entry.select(\"td\")[1].text.strip()\n except IndexError:\n continue\n\n if current_strong not in national_dict:\n national_dict[current_strong] = {}\n\n national_dict[current_strong][current_category] = current_value\n\n if international_flag:\n try:\n current_subplace = entry.select(\"td\")[1].text.strip()\n current_strong = entry.select(\"td\")[0].text.strip()\n current_value = entry.select(\"td\")[2].text.strip()\n if current_strong not in country_value_dict:\n country_value_dict[current_strong] = {}\n\n country_value_dict[current_strong][current_subplace] = current_value\n except:\n continue\n if current_strong == \"País\":\n international_flag = 1\n national_flag = 0\n if current_strong == \"Diárias Nacionais em Auxílios\":\n national_flag = 1\n\nRESULTS.joinpath(\"fapesp_international_values.json\").write_text(\n json.dumps(country_value_dict, indent=4, sort_keys=True, ensure_ascii=False)\n)\n\n\nRESULTS.joinpath(\"fapesp_national_values.json\").write_text(\n json.dumps(national_dict, indent=4, sort_keys=True, ensure_ascii=False)\n)\n","repo_name":"lubianat/calculadora_diarias_fapesp","sub_path":"fapesp_calculator/extract_dict.py","file_name":"extract_dict.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"3142863544","text":"def get_power(n, s):\n if n // 10 == 0:\n return s\n else:\n s += 1\n return get_power(n // 10, s)\n\ns = 0\n# print(get_power(1004, 0))\n\ndef get_reverse_number(n):\n if n // 10 == 0:\n return n\n else:\n new_n = (n % 10) * (10 ** get_power(n, 0)) + get_reverse_number(n // 10)\n return new_n\n\n# print(get_reverse_number(1004))\n\n###################################################################################\n\ndef num_equal(arr, x, c):\n if len(arr) == 1:\n if arr[0] == x:\n c += 1\n return c\n else:\n mid = len(arr) // 2\n left, right = num_equal(arr[:mid], x, c), num_equal(arr[mid:], x, c)\n return left + right\n\n# print(num_equal([1, 2, 4, 4, 5, 4, 2, 4, 4], 4, 0))\n\nseq = 0\ndef binary_search(arr, n):\n global seq\n if len(arr) <= 1:\n if arr[0] == n:\n return True\n else:\n return False\n else:\n seq += 1\n print(seq)\n mid = len(arr) // 2\n if n < arr[mid]:\n return binary_search(arr[:mid], n)\n elif n > arr[mid]:\n return binary_search(arr[mid:], n)\n else:\n return True\n\n# print(binary_search([1, 2, 3, 4, 5, 6, 7, 8], 5))\n\ndef sum_rec(n):\n if n == 0:\n return 0\n else:\n return n + sum_rec(n - 1)\n\ndef sum_loop(n):\n res = 0\n for i in range(n + 1):\n res += i\n return res\n\n# print(sum_loop(5), sum_rec(5))\n\ndef seq1(n):\n # if n == 1:\n # return 12\n if n == 0:\n return 0\n else:\n return n * 10 + 2 + seq1(n - 1)\n\n# print(seq1(2))\n\ndef nested_rec(n, m):\n if n == 0:\n return m + 1\n elif n > 0 and m == 0:\n return nested_rec(n - 1, 1)\n else:\n nested_rec(n - 1, nested_rec(n , m - 1))\n\n\ndef print2n(n):\n if n > 3600:\n return\n else:\n print2n(2 * n)\n print(n)\n\ndef print2n_r(n):\n if n < 1:\n return\n else:\n print2n_r(n // 2)\n if n <= 3600:\n print(n)\n \n\n# print2n_r(3600)\n\ndef tree(n):\n if n == 1:\n print(1)\n else:\n tree(n - 1)\n print(n)\n tree(n - 1)\n\n# tree(4)\n\ndef color(target, pattern, y, x):\n global color_arr\n global flag\n if (y < 0 or y >= len(color_arr)) or ((x < 0 or x >= 
len(color_arr[0]))):\n return\n # print(color_arr[y][x])\n # if (color_arr[y][x] != target and color_arr[y][x] != pattern) or color_arr[y][x] == 0:\n # return\n else:\n color_arr[y][x] = pattern\n flag[y][x] = True\n print(color_arr)\n if not flag[y][x - 1]:\n color(target, pattern, y, x - 1)\n if not flag[y][x + 1]:\n color(target, pattern, y, x + 1)\n if not flag[y - 1][x]:\n color(target, pattern, y - 1, x)\n if not flag[y + 1][x]:\n color(target, pattern, y + 1, x)\n flag[y][x] = False\n\ncolor_arr = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 2, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n]\n\nflag = []\nfor i in range(len(color_arr)):\n temp = []\n for j in range(len(color_arr[0])):\n temp.append(True) if color_arr[i][j] == 0 else temp.append(False)\n flag.append(temp)\n\n# print(flag)\nx = 5\ny = 3\ntarget = 1\npattern = 2\ncolor(target, pattern, y, x)\nprint(color_arr)","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"swea/stack2/recursion_p3.py","file_name":"recursion_p3.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"44728942823","text":"from sofia.step import Step\n\n\nclass GetMajorTranscriptFromGenomicFeature(Step):\n \"\"\"\n Get the major transcript of a genomic feature. Defined as the longest transcript (ie. most complete).\n \"\"\"\n\n IN = ['genomic_feature']\n OUT = ['major_transcript']\n\n def run(self, genomic_feature):\n for feature in genomic_feature:\n if feature is not None and len(feature.children) > 0:\n transcripts = sorted((child for child in feature.children if child.data['type'] in {'transcript', 'mRNA'}), key=self.get_transcript_length)\n if len(transcripts) > 0:\n feature = transcripts[-1]\n else:\n feature = None\n else:\n feature = None\n yield feature\n\n @staticmethod\n def get_transcript_length(transcript):\n return sum(len(child) for child in transcript.children if child.data['type'] == 'CDS')\n\n\n#class GetMajorTranscriptFromGenomicInterval(Step):\n# \"\"\"\n# Get the major transcript of a genomic feature. Defined as the longest transcript (ie. most complete).\n# \"\"\"\n\n# IN = ['genomic_interval']\n# OUT = ['major_transcript']\n\n# def run(self, genomic_interval):\n# return genomic_interval\n","repo_name":"childsish/sofia","sub_path":"templates/genomics/steps/genomic_feature.py","file_name":"genomic_feature.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"39782595028","text":"#! 
/usr/bin/env python\nfrom setuptools import setup, find_packages\n\nversion = __import__('pymongo_pubsub').get_version()\n\nCLASSIFIERS = [\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Database',\n]\n\nPACKAGE_DATA = {}\n\nREQUIREMENTS = [\n 'pymongo >= 1.5',\n]\n\nEXTRAS = {}\n\nsetup(name='pymongo-pubsub',\n author='Patryk Zawadzki',\n author_email='patrys@gmail.com',\n description='A publish-subscribe pattern implementation for pymongo',\n version = version,\n packages = find_packages(),\n package_data=PACKAGE_DATA,\n classifiers=CLASSIFIERS,\n install_requires=REQUIREMENTS,\n extras_require=EXTRAS,\n platforms=['any'],\n zip_safe=True)\n","repo_name":"patrys/pymongo-pubsub","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"15655441655","text":"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport difflib\r\nfrom fuzzywuzzy import process\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.manifold import TSNE\r\n\r\n# Load the dataset\r\ndf = pd.read_csv('song_data.csv')\r\n\r\n# Remove duplicates based on song_name\r\ndf = df.drop_duplicates(subset='song_name')\r\n\r\n# Define the input features\r\nX = df[['acousticness', 'danceability', 'energy', 'liveness', 'loudness', 'tempo']]\r\n\r\n# Initialize the scaler and fit_transform the input features\r\nscaler = StandardScaler()\r\nX_scaled = scaler.fit_transform(X)\r\n\r\n# Create a new DataFrame with the scaled data and the original index and column names\r\nX_scaled_df = pd.DataFrame(X_scaled, index=X.index, columns=X.columns)\r\n\r\n# Reset the index of both DataFrames\r\nX_scaled_df.reset_index(drop=True, inplace=True)\r\ndf.reset_index(drop=True, inplace=True)\r\n\r\n# Update y to match the updated data DataFrame\r\ny = df['song_name']\r\n\r\n# Split the data into training and test sets\r\nX_train, X_test, y_train, y_test = train_test_split(X_scaled_df, y, test_size=0.2, random_state=42)\r\n\r\n# Create the k-NN model\r\nknn = NearestNeighbors(n_neighbors=5, metric='euclidean', algorithm='brute')\r\n\r\n# Train the model\r\nknn.fit(X_scaled_df)\r\n\r\n# Get the nearest neighbors for each point in the test set\r\nneighbors = knn.kneighbors(X_test)\r\n# Prints the song list\r\n# print(y)\r\n\r\n# Function for generating feature distributions via plot histograms for each audio feature\r\n\"\"\"\r\nfor feature in X.columns:\r\n sns.histplot(data=df, x=feature, kde=True)\r\n plt.title(f'{feature} distribution')\r\n plt.show()\r\n\"\"\"\r\n\r\n# Below code is used to generate a correlation matrix\r\n# calculates the correlation matrix for the features\r\n\"\"\"corr_matrix = X.corr()\r\n\r\n# plots the heatmap\r\nplt.figure(figsize=(8, 6))\r\nsns.heatmap(corr_matrix, annot=True, cmap='coolwarm', vmin=-1, vmax=1)\r\nplt.title('Correlation Matrix of Audio Features')\r\nplt.show()\r\n\"\"\"\r\n\r\n\r\ndef recommend_songs(songs_name, data=df, X_data=X_scaled_df, knn_model=knn, n_recommendations=5):\r\n # Find the index of the input song in the dataset\r\n song_index = 
data.index[data['song_name'] == songs_name].tolist()[0]\r\n\r\n # Get the song features from X_data\r\n song_features = X_data.loc[song_index]\r\n\r\n # Convert the song_features to a DataFrame with feature names\r\n song_features_df = pd.DataFrame([song_features], columns=X_data.columns)\r\n\r\n # Find the nearest neighbors\r\n distances, indices = knn_model.kneighbors(song_features_df, n_neighbors=n_recommendations + 1)\r\n\r\n # Get the indices of the nearest neighbors in the original dataset\r\n neighbor_indices = indices.flatten()\r\n\r\n # Remove the index of the input song if it's present in neighbor_indices\r\n neighbor_indices = [index for index in neighbor_indices if index != song_index][:n_recommendations]\r\n\r\n # Get the song names of the nearest neighbors and return them\r\n recommended_songs = data.loc[neighbor_indices]['song_name']\r\n return recommended_songs\r\n\r\n\r\ndef find_closest_song(query, data=df, threshold=70):\r\n song_names = data['song_name'].tolist()\r\n best_match, best_match_score = process.extractOne(query, song_names)\r\n\r\n if best_match_score >= threshold:\r\n return best_match\r\n else:\r\n return None\r\n\r\n\r\nprint(\"Welcome to our music recommendation system!\\nWhen you enter a song, our system will generate recommendations \"\r\n \"based on that song! Our model works different from others as Genre is not taken into account, \"\r\n \"allowing you to discover music in a new way!\\n\\n \")\r\n# Get the user's input and run the script in a loop\r\nwhile True:\r\n user_query = input(\"Please enter a song name or type 'quit' to exit: \")\r\n\r\n # Break the loop if the user types 'quit'\r\n if user_query.lower() == 'quit':\r\n break\r\n\r\n # Find the closest matching song name in the dataset\r\n closest_song = find_closest_song(user_query)\r\n\r\n # Check if a match was found\r\n if closest_song is not None:\r\n print(f\"Found a matching song: {closest_song}\")\r\n # Call the recommend_songs() function with the closest matching song\r\n recommendations = recommend_songs(closest_song)\r\n print(\"Recommended songs:\")\r\n print(recommendations)\r\n else:\r\n print(\"No matching song found.\")\r\n","repo_name":"adamcomo1/WGUCapstone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28678874840","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pprint\nimport math\nfrom collections import defaultdict\nimport gzip\nimport os\nimport pickle\nimport urllib.request\n \n \n#Datasets\ndef load_XOR():\n \"\"\"\n Loads training data for XOR function. The outputs are encoded using one-hot encoding, so that I can check softmax and\n cross-entropy loss function.\n :return: Pair of numpy arrays: (4, 2) training inputs and (4, 2) training labels\n \"\"\"\n X = np.asarray([\n [0.0, 0.0],\n [0.0, 1.0],\n [1.0, 0.0],\n [1.0, 1.0]], dtype=np.float32)\n T = np.asarray([\n [0.0, 1.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [0.0, 1.0]], dtype=np.float32)\n return X, T\n \n \ndef load_spirals():\n '''\n Loads training and testing data of the spiral dataset. 
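Three interleaving spiral arms are generated, one per class. 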
The inputs are standardized and the output labels are one-hot encoded.\n Source based on http://cs231n.github.io/\n :return: Quadruple of numpy arrays (100, 2) training inputs, (100, 3) one-hot encoded training labels,\n (100, 2) testing inputs and (100, 3) one-hot encoded testing labels\n '''\n \n def generate_points(N):\n K = 3\n X = np.zeros((N * K, 2), dtype=np.float32)\n T = np.zeros((N * K, K), dtype=np.float32)\n for i in range(K):\n r = np.linspace(0.0, 2.5, N)\n t = np.linspace(i * 4, (i + 1) * 4, N) + rng.randn(N) * 0.2\n ix = range(N * i, N * (i + 1))\n X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]\n T[ix, i] = 1.0 # one-hot encoding\n return X, T\n \n rng = np.random.RandomState(1234)\n X_train, T_train = generate_points(100)\n X_test, T_test = generate_points(100)\n return X_train, T_train, X_test, T_test\n \n \ndef plot_2D_classification(X, T, net):\n \"\"\"\n Plots a classification for 2D inputs. \n :param X: Input of shape (n_samples, 2)\n :param T: One-hot encoded target labels of shape (n_samples, n_classes)\n :param net: trained network, instance of MLP class\n \"\"\"\n h = 0.02\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n Z = net.propagate(np.c_[xx.ravel(), yy.ravel()])\n Z = np.argmax(Z, axis=1)\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)\n plt.scatter(X[:, 0], X[:, 1], c=np.argmax(T, axis=1), s=40, cmap=plt.cm.Spectral)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n \n \ndef load_MNIST():\n \"\"\"\n Loads MNIST dataset.\n The dataset consists of 60k training and 10k testing samples of 28x28 grayscale images. The inputs are standardized\n and the output labels are one-hot encoded.\n Inspired by https://gist.github.com/ischlag/41d15424e7989b936c1609b53edd1390\n :return: Quadruple of numpy arrays (60000, 784) training inputs, (60000, 10) one-hot encoded training labels,\n (10000, 784) testing inputs and (10000, 10) one-hot encoded testing labels\n \"\"\"\n IMAGE_SIZE = 28\n N_CLASSES = 10\n files = {\n 'X_train': ('train-images-idx3-ubyte.gz', 60000),\n 'T_train': ('train-labels-idx1-ubyte.gz', 60000),\n 'X_test': ('t10k-images-idx3-ubyte.gz', 10000),\n 'T_test': ('t10k-labels-idx1-ubyte.gz', 10000),\n }\n data = {}\n for label, (name, n_images) in files.items():\n if not os.path.exists(name):\n print('downloading: {}'.format(name))\n urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/{}'.format(name), name)\n with gzip.open(name) as bytestream:\n if label.startswith('X'):\n \n bytestream.read(16) # header\n data[label] = (np.frombuffer(bytestream.read(IMAGE_SIZE * IMAGE_SIZE * n_images),\n dtype=np.uint8).astype(np.float32) / 255.0).reshape(n_images, -1)\n else:\n bytestream.read(8) # header\n classes = np.frombuffer(bytestream.read(n_images), dtype=np.uint8).astype(np.int64)\n onehot = np.zeros((len(classes), N_CLASSES), dtype=np.float32)\n onehot[np.arange(len(classes)), classes] = 1\n data[label] = onehot\n \n # standardisation\n X_train, T_train, X_test, T_test = [data[label] for label in ['X_train', 'T_train', 'X_test', 'T_test']]\n m, s = X_train.mean(axis=0), X_train.std(axis=0)\n mask = s > 0.0\n X_train[:, mask] = (X_train[:, mask] - m[mask]) / s[mask]\n X_test[:, mask] = (X_test[:, mask] - m[mask]) / s[mask]\n \n return X_train, T_train, X_test, T_test\n \n \ndef plot_MNIST(array, n_cols=10):\n \"\"\"\n Plots table of MNIST characters with defined 
number of columns. The number of characters divided by the number of\n columns(i.e. the number of rows), must be integer. \n :param array: input array of shape (number of characters, 784)\n :param n_cols: number of table columns\n \"\"\"\n n, height, width = array.shape[0], 28, 28\n n_rows = n // n_cols\n assert n == n_rows * n_cols, [n, n_rows * n_cols]\n result = (array.reshape(n_rows, n_cols, height, width)\n .swapaxes(1, 2)\n .reshape(height * n_rows, width * n_cols))\n plt.imshow(result, cmap='gray')\n \n \n#Layers\n \nclass LinearLayer(object):\n def __init__(self, n_inputs, n_units, rng, name):\n \"\"\"\n Linear (dense, fully-connected) layer.\n :param n_inputs:\n :param n_units:\n :param rng: random number generator I use for initialization\n :param name:\n \"\"\"\n super(LinearLayer, self).__init__()\n self.n_inputs = n_inputs\n self.n_units = n_units\n self.rng = rng\n self.name = name\n self.initialize()\n \n def has_params(self):\n return True\n \n def forward(self, X):\n \"\"\"\n Forward message.\n :param X: layer inputs, shape (n_samples, n_inputs)\n :return: layer output, shape (n_samples, n_units)\n \"\"\"\n \n res = np.matmul( X, self.W)+self.b\n assert res.shape[0] == X.shape[0]\n assert res.shape[1] == self.n_units\n return res\n \n def delta(self, Y, delta_next):\n \"\"\"\n delta (dl/d(layer inputs)), based on delta from the following layer. The computations involve backward\n message.\n :param Y: output of this layer (i.e., input of the next), shape (n_samples, n_units)\n :param delta_next: delta vector backpropagated from the following layer, shape (n_samples, n_units)\n :return: delta vector from this layer, shape (n_samples, n_inputs)\n \"\"\"\n \n res = np.matmul(delta_next, self.W.T)\n assert res.shape == (Y.shape[0], self.n_inputs)\n return res\n \n def grad(self, X, delta_next):\n \"\"\"\n Gradient averaged over all samples. The computations involve parameter message.\n :param X: layer input, shape (n_samples, n_inputs)\n :param delta_next: delta vector backpropagated from the following layer, shape (n_samples, n_units)\n :return: a list of two arrays [dW, db] corresponding to gradients of loss w.r.t. weights and biases, the shapes\n of dW and db are the same as the shapes of the actual parameters (self.W, self.b)\n \"\"\"\n db = delta_next.sum(axis=0)/len(delta_next) # u becka ma byt suma?\n dW = np.matmul(X.T, delta_next) / len(delta_next)\n assert dW.shape == self.W.shape\n return [dW,db]\n \n def initialize(self):\n \"\"\"\n He's initialization (https://arxiv.org/pdf/1502.01852.pdf). This method is tuned for ReLU activation\n function. 
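Weights are drawn from a zero-mean normal distribution with standard deviation sqrt(2 / n_inputs). 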
Biases are initialized to 1 increasing probability that ReLU is not initially turned off.\n \"\"\"\n scale = np.sqrt(2.0 / self.n_inputs)\n self.W = self.rng.normal(loc=0.0, scale=scale, size=(self.n_inputs, self.n_units))\n self.b = np.ones(self.n_units)\n \n def update_params(self, dtheta):\n \"\"\"\n Updates weighs and biases.\n :param dtheta: contains a two element list of weight and bias updates the shapes of which corresponds to self.W\n and self.b\n \"\"\"\n assert len(dtheta) == 2, len(dtheta)\n dW, db = dtheta\n assert dW.shape == self.W.shape, dW.shape\n assert db.shape == self.b.shape, db.shape\n self.W += dW\n self.b += db\n \n \nclass ReLULayer(object):\n def __init__(self, name):\n super(ReLULayer, self).__init__()\n self.name = name\n \n def has_params(self):\n return False\n \n def forward(self, X):\n return np.where(X>=0, X, 0) #dava nuly to co je negative\n \n def delta(self, Y, delta_next):\n # elementwise nasobeni dnext xkem >0\n return np.multiply(np.where(Y>0, 1, 0), delta_next)\n \n \nclass SoftmaxLayer(object):\n def __init__(self, name):\n super(SoftmaxLayer, self).__init__()\n self.name = name\n \n def has_params(self):\n return False\n \n def forward(self, X):\n return np.exp(X)/np.sum(np.exp(X), axis =1)[:,None]\n \n def delta(self, Y, delta_next):\n my_delta = delta_next\n for sample in range(len(Y)):\n # 1. nejprve udelam jacobian pro kazdy sample\n temp_jcb = -np.outer(Y[sample], Y[sample])+np.diag(Y[sample])\n # 2. pote updatuju deltu\n my_delta[sample] = np.dot(delta_next[sample], temp_jcb)\n \n assert my_delta.shape == delta_next.shape\n return my_delta\n \n \nclass LossCrossEntropy(object):\n def __init__(self, name):\n super(LossCrossEntropy, self).__init__()\n self.name = name\n \n def forward(self, X, T):\n \"\"\"\n Forward message.\n :param X: loss inputs (outputs of the previous layer), shape (n_samples, n_inputs), n_inputs is the same as\n the number of classes\n :param T: one-hot encoded targets, shape (n_samples, n_inputs)\n :return: layer output, shape (n_samples, 1)\n \"\"\"\n output = np.zeros(len(X))\n for sample in range(len(X)):\n for category in range(len(X[0])):\n if T[sample][category] == 1:\n output[sample]-=math.log(X[sample][category])\n assert output.shape[0] == X.shape[0]\n return output\n \n def delta(self, X, T):\n \"\"\"\n Computes delta vector for the output layer.\n :param X: loss inputs (outputs of the previous layer), shape (n_samples, n_inputs), n_inputs is the same as\n the number of classes\n :param T: one-hot encoded targets, shape (n_samples, n_inputs)\n :return: delta vector from the loss layer, shape (n_samples, n_inputs)\n \"\"\"\n res = -np.divide(T, X)\n return res\n \n \nclass LossCrossEntropyForSoftmaxLogits(object):\n def __init__(self, name):\n super(LossCrossEntropyForSoftmaxLogits, self).__init__()\n self.name = name\n \n def forward(self, X, T):\n return (np.log(np.sum(np.exp(X), axis=1))-X[T.astype(bool)])[:, np.newaxis]\n \n def delta(self, X, T):\n numerator = np.exp(X-np.max(X))\n return -T+(numerator/np.sum(numerator, axis=1)[:, None])\n \n \n \n#################################################################### MLP\n \nclass MLP(object):\n def __init__(self, n_inputs, layers, loss, output_layers=[]):\n \"\"\"\n MLP\n :param n_inputs:\n :param layers: list of layers\n :param loss: loss function layer\n :param output_layers: list of layers appended to \"layers\" in evaluation \n \"\"\"\n self.n_inputs = n_inputs\n self.layers = layers\n self.output_layers = output_layers\n self.loss = loss\n 
self.first_param_layer = layers[-1]\n for l in layers:\n if l.has_params():\n self.first_param_layer = l\n break\n \n def propagate(self, X, output_layers=True, last_layer=None):\n \"\"\"\n feedforwad network propagation\n :param X: input data, shape (n_samples, n_inputs)\n :param output_layers: controls whether the self.output_layers are appended to the self.layers in evaluation\n :param last_layer: if not None, the propagation will stop at layer with this name\n :return: propagated inputs, shape (n_samples, n_units_of_the_last_layer)\n \"\"\"\n layers = self.layers + (self.output_layers if output_layers else [])\n if last_layer is not None:\n assert isinstance(last_layer, str)\n layer_names = [layer.name for layer in layers]\n layers = layers[0: layer_names.index(last_layer) + 1]\n for layer in layers:\n X = layer.forward(X)\n return X\n \n def evaluate(self, X, T):\n \"\"\"\n Computes loss.\n :param X: input data, shape (n_samples, n_inputs)\n :param T: target labels, shape (n_samples, n_outputs)\n :return:\n \"\"\"\n return self.loss.forward(self.propagate(X, output_layers=False), T)\n \n def gradient(self, X, T):\n \"\"\"\n Computes gradient of loss w.r.t. all network parameters.\n :param X: input data, shape (n_samples, n_inputs)\n :param T: target labels, shape (n_samples, n_outputs)\n :return: a dict of records in which key is the layer.name and value the output of grad function\n \"\"\"\n gradient_dict = {}\n delta_dict = {}\n z = {}\n input_X = X\n # 1. Forward pass\n for layer in self.layers:\n z[layer.name] = layer.forward(X)\n X = z[layer.name]\n # 2. get the first delta\n delta_next = self.loss.delta(X, T)\n # 3. Backpropagation\n reversed_layers = self.layers[::-1]\n for layer in range(len(reversed_layers)):\n delta_dict[reversed_layers[layer].name] = reversed_layers[layer].delta(z[reversed_layers[layer].name], delta_next)\n if isinstance(reversed_layers[layer], LinearLayer):\n if layer< len(reversed_layers)-1:\n gradient_dict[reversed_layers[layer].name] = reversed_layers[layer].grad(z[reversed_layers[layer+1].name], delta_next)\n else:\n gradient_dict[reversed_layers[layer].name] = reversed_layers[layer].grad(\n input_X, delta_next)\n delta_next = delta_dict[reversed_layers[layer].name]\n return gradient_dict\n \n \n##################################################### Training\n \ndef accuracy(Y, T):\n p = np.argmax(Y, axis=1)\n t = np.argmax(T, axis=1)\n return np.mean(p == t)\n \n \ndef plot_weights(weights):\n for layer_name in weights.keys():\n my_amplitudes = weights[layer_name]\n normalised_amplitudes = my_amplitudes/my_amplitudes[0]\n x = range(0, len(my_amplitudes))\n plt.plot(x, normalised_amplitudes, label =layer_name)\n plt.ylabel('Normalised Amplitude')\n plt.xlabel('Epochs')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.title(\"Weight Normalised Amplitude of every linear layer over epochs\")\n plt.show()\n \n for layer_name in weights.keys():\n my_amplitudes = weights[layer_name]\n normalised_amplitudes = my_amplitudes\n epochs = range(0, len(normalised_amplitudes))\n plt.plot(epochs, normalised_amplitudes, label =layer_name)\n plt.ylabel(' Amplitude')\n plt.xlabel('Epochs')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.title(\"Weight Amplitude of every linear layer over epochs\")\n plt.show()\n \n \n \n \ndef xplot_weights(weights):\n \n # previous:\n for layer, values in weights.items():\n plitudes = weights[layer]/weights[layer][0]\n x = range(1, len(plitudes)+1)\n plt.plot(x, plitudes, label = layer)\n 
plt.xlabel('Epoch')\n plt.ylabel('Normalised Amplitude')\n plt.legend()\n plt.title(\"Weight Normalised Amplitude of every linear layer over epochs\")\n plt.show()\n \n \n \n keys = sorted(weights.keys())\n for key in keys:\n plt.plot(*zip(*weights[key]/weights[key][0][1]), label = key)\n plt.xlabel('Epoch')\n plt.ylabel('Amplitude')\n plt.legend()\n plt.title(\"Weight Amplitude of every linear layer over epochs\")\n plt.show()\n base = weights['Linear_1']\n \n \n \n \ndef train(net, X_train, T_train, batch_size=1, n_epochs=2, eta=0.1, X_test=None, T_test=None, verbose=False):\n \"\"\"\n Trains a network using trivial gradient descent.\n :param net:\n :param X_train:\n :param T_train:\n :param batch_size:\n :param n_epochs:\n :param eta: learning rate\n :param X_test:\n :param T_test:\n :param verbose: prints evaluation for each epoch if True\n :return:\n \"\"\"\n n_samples = X_train.shape[0]\n assert T_train.shape[0] == n_samples\n assert batch_size <= n_samples\n run_info = defaultdict(list)\n \n def process_info(epoch):\n loss_test, acc_test = np.nan, np.nan\n Y = net.propagate(X_train, output_layers=False)\n loss_train = net.loss.forward(Y, T_train)\n acc_train = accuracy(Y, T_train)\n run_info['loss_train'].append(loss_train)\n run_info['acc_train'].append(acc_train)\n if X_test is not None:\n Y = net.propagate(X_test, output_layers=False)\n loss_test = net.loss.forward(Y, T_test)\n acc_test = accuracy(Y, T_test)\n run_info['loss_test'].append(loss_test)\n run_info['acc_test'].append(acc_test)\n if verbose:\n print('epoch: {}, loss: {}/{} accuracy: {}/{}'.format(epoch, np.mean(loss_train), np.nanmean(loss_test),\n np.nanmean(acc_train), np.nanmean(acc_test)))\n \n plotting = {}\n for epoch in range(1, n_epochs + 1):\n offset = 0\n for layer in net.layers:\n if layer.has_params():\n if layer.name in plotting.keys():\n plotting[layer.name].append(np.abs(layer.W).mean())\n else:\n plotting[layer.name] = [np.abs(layer.W).mean()]\n while offset < n_samples:\n last = min(offset + batch_size, n_samples)\n if verbose:\n print('.', end='')\n grads = net.gradient(np.asarray(X_train[offset:last]), np.asarray(T_train[offset:last]))\n \n for layer in net.layers:\n if layer.has_params():\n gs = grads[layer.name]\n dtheta = [-eta * g for g in gs]\n layer.update_params(dtheta)\n offset += batch_size\n if verbose:\n print()\n process_info(epoch)\n plot_weights(plotting)\n return run_info\n \n \n################################################################# Experiments\n \ndef plot_convergence(run_info):\n plt.plot(run_info['acc_train'], label='train')\n plt.plot(run_info['acc_test'], label='test')\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.legend()\n \n \ndef plot_test_accuracy_comparison(run_info_dict):\n keys = sorted(run_info_dict.keys())\n for key in keys:\n plt.plot(run_info_dict[key]['acc_test'], label=key)\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.legend()\n \n \ndef experiment_XOR():\n X, T = load_XOR()\n rng = np.random.RandomState(1234)\n \n net = MLP(n_inputs=2,\n layers=[\n LinearLayer(n_inputs=2, n_units=4, rng=rng, name='Linear_1'),\n ReLULayer(name='ReLU_1'),\n LinearLayer(n_inputs=4, n_units=2, rng=rng, name='Linear_OUT'),\n SoftmaxLayer(name='Softmax_OUT')\n ],\n loss=LossCrossEntropy(name='CE'),\n )\n run_info = train(net, X, T, batch_size=4, eta=0.1, n_epochs=100, verbose=False)\n plot_convergence(run_info)\n plt.show()\n print(net.propagate(X))\n plot_2D_classification(X, T, net)\n plt.show()\n \n \ndef experiment_spirals():\n X_train, T_train, X_test, 
T_test = load_spirals()\n experiments = (\n ('eta = 0.2', 0.2),\n ('eta = 1', 1.0),\n ('eta = 5', 5.0),\n )\n run_info_dict = {}\n for name, eta in experiments:\n rng = np.random.RandomState(1234)\n net = MLP(n_inputs=2,\n layers=[\n LinearLayer(n_inputs=2, n_units=10, rng=rng, name='Linear_1'),\n ReLULayer(name='ReLU_1'),\n LinearLayer(n_inputs=10, n_units=3, rng=rng, name='Linear_OUT'),\n SoftmaxLayer(name='Softmax_OUT')\n ],\n loss=LossCrossEntropy(name='CE'),\n )\n \n run_info = train(net, X_train, T_train, batch_size=len(X_train), eta=eta, X_test=X_test, T_test=T_test,\n n_epochs=1000, verbose=True)\n run_info_dict[name] = run_info\n # plot_spirals(X_train, T_train, net)\n # plt.show()\n # plot_convergence(run_info)\n # plt.show()\n plot_test_accuracy_comparison(run_info_dict)\n plt.show()\n # plt.savefig('spiral.pdf') # you can instead save figure to file\n \ndef experiment_MNIST_unstable():\n X_train, T_train, X_test, T_test = load_MNIST()\n np.seterr(all='raise', under='warn', over='warn')\n rng = np.random.RandomState(1234)\n net = MLP(n_inputs=28 * 28,\n layers=[\n LinearLayer(n_inputs=28 * 28, n_units=64, rng=rng, name='Linear_1'),\n ReLULayer(name='ReLU_1'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_2'),\n ReLULayer(name='ReLU_2'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_3'),\n ReLULayer(name='ReLU_3'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_4'),\n ReLULayer(name='ReLU_4'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_5'),\n ReLULayer(name='ReLU_5'),\n LinearLayer(n_inputs=64, n_units=10, rng=rng, name='Linear_OUT'),\n SoftmaxLayer(name='Softmax_OUT')\n ],\n loss=LossCrossEntropy(name='CE'),\n )\n \n run_info = train(net, X_train, T_train, batch_size=3000, eta=1e-1,\n X_test=X_test, T_test=T_test, n_epochs=10, verbose=True)\n \ndef experiment_MNIST():\n X_train, T_train, X_test, T_test = load_MNIST()\n np.seterr(all='raise', under='warn', over='warn')\n rng = np.random.RandomState(1234)\n net = MLP(n_inputs=28 * 28,\n layers=[\n LinearLayer(n_inputs=28 * 28, n_units=64, rng=rng, name='Linear_1'),\n ReLULayer(name='ReLU_1'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_2'),\n ReLULayer(name='ReLU_2'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_3'),\n ReLULayer(name='ReLU_3'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_4'),\n ReLULayer(name='ReLU_4'),\n LinearLayer(n_inputs=64, n_units=64, rng=rng, name='Linear_5'),\n ReLULayer(name='ReLU_5'),\n LinearLayer(n_inputs=64, n_units=10, rng=rng, name='Linear_OUT'),\n ],\n loss=LossCrossEntropyForSoftmaxLogits(name='CE'),\n output_layers=[SoftmaxLayer(name='Softmax_OUT')]\n )\n \n run_info = train(net, X_train, T_train, batch_size=3000, eta=1e-1, X_test=X_test, T_test=T_test, n_epochs=100,\n verbose=True)\n #plot_convergence(run_info)\n #plt.show()\n print(\"run info:\", run_info)\n with open('MNIST_run_info.p', 'wb') as f:\n \n pickle.dump(run_info, f)\n \n \nif __name__ == '__main__':\n \n #experiment_XOR()\n \n #experiment_spirals()\n \n #experiment_MNIST_unstable()\n \n experiment_MNIST()\n","repo_name":"cerdwin/SSU","sub_path":"Backprop/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":23787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25767525431","text":"\nfrom flask_restx import Api\nfrom flask import Blueprint\n\ndocuments = Blueprint('api',__name__,url_prefix='/doc')\n\n\nauthorizations = {\n 'apikey': {\n 'type': 
'apiKey',\n 'in': 'header',\n 'name': 'X-API-KEY',\n 'username':'username'\n }\n}\n\n\n\napi = Api(documents,\n version='2.0',\n title='User Api',\n description='This is the user page',\n authorizations=authorizations, \n security='apikey')\n\n","repo_name":"nithesh-lghive/project","sub_path":"apis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1995270","text":"#!/usr/bin/env python\n\n\"\"\" TUI based Python Chess Program \"\"\"\n\nfrom __future__ import print_function\nimport os\n\nCLEAR='clear'\n# CLEAR='cls' # For Windows\n\nch = [[' '] * 8 for i in range(8)]\n\n\ndef init():\n ch[0][0] = 'R '\n ch[0][1] = 'N '\n ch[0][2] = 'B '\n ch[0][3] = 'Q '\n ch[0][4] = 'K '\n ch[0][5] = 'B '\n ch[0][6] = 'N '\n ch[0][7] = 'R '\n for i in range(8):\n ch[1][i] = 'P '\n\n ch[7][0] = 'r '\n ch[7][1] = 'n '\n ch[7][2] = 'b '\n ch[7][3] = 'q '\n ch[7][4] = 'k '\n ch[7][5] = 'b '\n ch[7][6] = 'n '\n ch[7][7] = 'r '\n for i in range(8):\n ch[6][i] = 'p '\n\n\ninit()\n\n\"\"\"An important note must be made that 'P '.isupper() and 'p '.islower(), both would give True.\n Whereas, ' ', '* ' would give False for both islower() and isupper() methods.\"\"\"\n\n\ndef disp(turn_to_play):\n os.system(CLEAR)\n print(\" +\" + \"-----+\" * 8)\n if turn_to_play == 'w':\n for i in range(7, -1, -1):\n print(i + 1, \"| \", str(ch[i]).lstrip(\"['\").rstrip(\"]'\").replace(\"', '\", \" | \"), \"|\")\n print(\" |\" + \" |\" * 8)\n print(\" +\" + \"-----+\" * 8)\n print(\" a b c d e f g h\")\n\n elif turn_to_play == 'b':\n for i in range(8):\n print(i + 1, \"| \", str(ch[i][-1::-1]).lstrip(\"['\").rstrip(\"]'\").replace(\"', '\", \" | \"), \"|\")\n print(\" |\" + \" |\" * 8)\n print(\" +\" + \"-----+\" * 8)\n print(\" h g f e d c b a\")\n\n\ndef c(x, y):\n \"\"\"Returns 1 if x < y and -1 if x > y.\"\"\"\n return (y - x) // abs(y - x)\n\n\ndef move(ini, fin):\n ch[fin[0]][fin[1]] = ch[ini[0]][ini[1]]\n ch[ini[0]][ini[1]] = ' '\n\n\ndef add_stars(positions):\n for pos in positions:\n ch[pos[0]][pos[1]] = '*' + ch[pos[0]][pos[1]][0]\n\n\ndef remove_stars(positions):\n for pos in positions:\n ch[pos[0]][pos[1]] = ch[pos[0]][pos[1]][1] + \" \"\n\n\ndef empty(piece):\n \"\"\"To check if given piece is empty\n Necessary because an empty position can be both ' ' or '* ' \"\"\"\n if piece == ' ' or piece == '* ':\n return True\n return False\n\n\ndef on_attack(ini, fin):\n \"\"\"To check if position at fin is under attack by piece by ini. If fin is under attack does not mean that the piece (under attack/movable)\n at ini can move over to fin since doing that can put a check on it's own king. 
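(For pawns, the en-passant capture square tracked in ep_pos is also considered.) 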
This is checked in the\n legal(ini, fin) function.\"\"\"\n\n global ep_pos\n\n i_piece = ch[ini[0]][ini[1]] # Piece at ini\n f_piece = ch[fin[0]][fin[1]] # Piece at fin\n\n # Try to get the en-passant activated pawn if there is no IndexError given by default position (9, 9).\n try:\n ep_pawn = ch[ep_pos[0]][ep_pos[1]]\n except IndexError:\n ep_pawn = \" \"\n\n # Initial and Final Positions should not be same\n if ini == fin:\n return False\n\n # Condition that a piece cannot attack it's own team member\n if (i_piece.isupper() and f_piece.isupper()) or (i_piece.islower() and f_piece.islower()):\n return False\n\n # Condition for Pawns\n if i_piece == 'P ':\n if (fin[0] == ini[0] + 1 and fin[1] == ini[1] and empty(f_piece)) or (\n fin[0] == ini[0] + 1 and abs(fin[1] - ini[1]) == 1 and not empty(f_piece)) or (\n abs(ini[1] - ep_pos[1]) == 1 and ini[0] == ep_pos[0] and fin[0] == ini[0] + 1 and fin[1] == ep_pos[1] and ep_pawn.islower()) or(\n ini[0] == 1 and fin[0] == 3 and ini[1] == fin[1] and empty(ch[2][ini[1]]) and empty(f_piece)):\n return True\n else:\n return False\n\n elif i_piece == 'p ':\n if (fin[0] == ini[0] - 1 and fin[1] == ini[1] and empty(f_piece)) or (\n fin[0] == ini[0] - 1 and abs(fin[1] - ini[1]) == 1 and not empty(f_piece)) or(\n abs(ini[1] - ep_pos[1]) == 1 and ini[0] == ep_pos[0] and fin[0] == ini[0] - 1 and fin[1] == ep_pos[1] and ep_pawn.isupper()) or(\n ini[0] == 6 and fin[0] == 4 and ini[1] == fin[1] and empty(ch[5][ini[1]]) and empty(f_piece)):\n return True\n else:\n return False\n\n # Condition for Rooks\n elif i_piece == 'r ' or i_piece == 'R ':\n if ini[0] == fin[0]:\n for i in range(ini[1] + c(ini[1], fin[1]), fin[1], c(ini[1], fin[1])):\n if not empty(ch[ini[0]][i]):\n return False\n return True\n\n elif ini[1] == fin[1]:\n for i in range(ini[0] + c(ini[0], fin[0]), fin[0], c(ini[0], fin[0])):\n if not empty(ch[i][ini[1]]):\n return False\n return True\n else:\n return False\n\n # Condition for Bishops\n elif i_piece == 'b ' or i_piece == 'B ':\n if abs(fin[0] - ini[0]) == abs(fin[1] - ini[1]):\n i_increment = c(ini[0], fin[0])\n j_increment = c(ini[1], fin[1])\n i = ini[0] + i_increment\n j = ini[1] + j_increment\n while i != fin[0]:\n if not empty(ch[i][j]):\n return False\n i += i_increment\n j += j_increment\n return True\n else:\n return False\n\n # Conditions for Queens\n # Either one of Rook or Bishop Conditions should be satisfied\n elif i_piece == 'q ' or i_piece == 'Q ':\n # Condition for Rook\n ch[ini[0]][ini[1]] = 'r ' if i_piece == 'q ' else 'R '\n legal1 = on_attack(ini, fin)\n\n # Condition for Bishop\n ch[ini[0]][ini[1]] = 'b ' if ch[ini[0]][ini[1]] == 'r ' else 'B '\n legal2 = on_attack(ini, fin)\n\n # Convert the piece back to normal\n ch[ini[0]][ini[1]] = 'q ' if ch[ini[0]][ini[1]] == 'b ' else 'Q '\n\n return legal1 or legal2\n\n # Condition for Knights\n elif i_piece == 'n ' or i_piece == 'N ':\n if (abs(fin[0] - ini[0]) == 2 and abs(fin[1] - ini[1]) == 1) or (\n abs(fin[0] - ini[0]) == 1 and abs(fin[1] - ini[1]) == 2):\n return True\n else:\n return False\n\n # Condition for Kings\n elif i_piece == 'k ' or i_piece == 'K ':\n if abs(fin[1] - ini[1]) <= 1 and abs(fin[0] - ini[0]) <= 1:\n return True\n else:\n return False\n\n\ndef is_forbid_king(k_piece):\n \"\"\"Given the king, the function returns if that king is under check or not\"\"\"\n for i in range(8):\n for j in range(8):\n if ch[i][j] == k_piece:\n pos_king = i, j\n break\n for i in range(8):\n for j in range(8):\n if ch[i][j].isupper() and k_piece.islower() or ch[i][j].islower() 
and k_piece.isupper():\n if on_attack((i, j), pos_king):\n return True\n return False\n\n\ndef legal(ini, fin):\n \"\"\"If position at fin is under attack by piece at ini, this function checks whether moving the piece at ini to fin\n is legal or not. It might not be legal because moving there would put it's own king under check.\"\"\"\n \"\"\"To check that, we move the piece at ini to fin without displaying and check whether that configuration puts\n the piece's king under check.\"\"\"\n\n if not on_attack(ini, fin):\n return False\n piece_at_fin = ch[fin[0]][fin[1]]\n move(ini, fin)\n if ch[fin[0]][fin[1]].islower() and is_forbid_king('k '):\n move(fin, ini)\n ch[fin[0]][fin[1]] = piece_at_fin\n return False\n if ch[fin[0]][fin[1]].isupper() and is_forbid_king('K '):\n move(fin, ini)\n ch[fin[0]][fin[1]] = piece_at_fin\n return False\n\n move(fin, ini)\n ch[fin[0]][fin[1]] = piece_at_fin\n return True\n\n\ndef possible_moves(ini):\n \"\"\"Gives all the legal movable positions of the piece at ini.\"\"\"\n possible_mov = []\n for i in range(8):\n for j in range(8):\n if legal(ini, (i, j)):\n possible_mov.append((i, j))\n return possible_mov\n\n\ndef input_valid(inp):\n if len(inp) != 2 or (inp[0] not in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']) or (\n inp[1] not in ['1', '2', '3', '4', '5', '6', '7', '8']):\n return False\n return True\n\n\ndef under_check_func():\n \"\"\"To check whether a check was put under check in the previous turn.\"\"\"\n global turn\n if is_forbid_king('k ') and turn == 'b':\n return True\n elif is_forbid_king('K ') and turn == 'w':\n return True\n return False\n\n\ndef legalmov_left_func():\n \"\"\"To check if any legal move is left for the current turn.\"\"\"\n global turn\n if turn == 'b':\n for i in range(8):\n for j in range(8):\n if ch[i][j].islower() and possible_moves((i, j)) != []:\n return True\n elif turn == 'w':\n for i in range(8):\n for j in range(8):\n if ch[i][j].isupper() and possible_moves((i, j)) != []:\n return True\n return False\n\n\nturn = 'w'\nep_pos = (9, 9)\n\"\"\"ep_pos will give the position of en-passant activated pawn. (9, 9) index means no pawn is activated.\nch[9][9] would give IndexError but it is not at distance 1 from any column on chess board which is what we have used\nin the on_attack() function.\"\"\"\n\nwhile True:\n disp(turn)\n\n under_check = under_check_func()\n legalmov_left = legalmov_left_func()\n\n if not legalmov_left and not under_check:\n print(\"\\nStalemate. Draw.\")\n input()\n break\n\n if under_check:\n if turn == 'b':\n if not legalmov_left:\n print(\"\\nCheckmate. White Wins.\")\n input()\n break\n else:\n print(\"\\nBlack is under check.\")\n input()\n else:\n if not legalmov_left:\n print(\"\\nCheckmate. Black wins.\")\n input()\n break\n else:\n print(\"\\nWhite is under check.\")\n input()\n\n # ini_pos_chs_conv is the position in chess convention, like a5, b6, c7 etc. Similarly for fin_pos_chs_conv.\n ini_pos_chs_conv = input(\"\\nEnter initial position: \")\n\n if not input_valid(ini_pos_chs_conv):\n print(\"\\nIllegal entry.\")\n input()\n continue\n\n # ini_pos and fin_pos denote positions in cartesian form. 
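The rank digit gives the row index and the file letter gives the column index, both zero-based. 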
For e.g ini('c7') or fin('c7') = (6,3)\n ini_pos = int(ini_pos_chs_conv[1]) - 1, ord(ini_pos_chs_conv[0]) - 97\n\n # The below if condition checks for:\n # There is no turn violation.\n # An empty place is not selected by using that: ' '.isupper() and ' '.islower() both are False\n if (turn == 'w' and ch[ini_pos[0]][ini_pos[1]].islower()) or (\n turn == 'b' and ch[ini_pos[0]][ini_pos[1]].isupper()) or (\n not ch[ini_pos[0]][ini_pos[1]].isupper() and not ch[ini_pos[0]][ini_pos[1]].islower()) or (\n empty(ch[ini_pos[0]][ini_pos[1]])):\n print(\"\\nIllegal entry.\")\n input()\n continue\n\n legal_positions = possible_moves(ini_pos)\n\n if not legal_positions:\n print(\"\\nNo possible moves for this piece currently.\")\n input()\n continue\n\n add_stars(legal_positions)\n disp(turn)\n\n fin_pos_chs_conv = input(\"\\nEnter final position: \")\n\n if not input_valid(fin_pos_chs_conv):\n print(\"\\nIllegal entry.\")\n input()\n remove_stars(legal_positions)\n continue\n\n fin_pos = int(fin_pos_chs_conv[1]) - 1, ord(fin_pos_chs_conv[0]) - 97\n remove_stars(legal_positions)\n\n if fin_pos in legal_positions:\n\n # Activating a pawn for en-passant or resetting the en-passant position\n if (ch[ini_pos[0]][ini_pos[1]] == 'p ') or (ch[ini_pos[0]][ini_pos[1]] == 'P '):\n\n # Activating a new pawn for en-passant\n if abs(fin_pos[0] - ini_pos[0]) == 2:\n ep_pos = fin_pos\n\n # If the opponent uses en-passant on the en-passant activated pawn\n elif abs(fin_pos[0] - ini_pos[0]) == 1 and abs(fin_pos[1] - ini_pos[1]) == 1 and empty(ch[fin_pos[0]][fin_pos[1]]):\n ch[ep_pos[0]][ep_pos[1]] = \" \"\n ep_pos = (9, 9)\n\n else:\n # If a new pawn is not activated for en-passant or en-passant is not used, reset the en-passant position\n ep_pos = (9, 9)\n\n # Pawn evolution\n if (ch[ini_pos[0]][ini_pos[1]] == 'p ' and fin_pos[0] == 0) or (\n ch[ini_pos[0]][ini_pos[1]] == 'P ' and fin_pos[0] == 7):\n new_piece = input(\"\\nEnter the piece you want to evolve into: \")\n while new_piece not in ['q', 'r', 'b', 'n', 'Q', 'R', 'B', 'N']:\n print(\"\\nInvalid choice.\")\n input()\n disp(turn)\n new_piece = input(\"\\nEnter the piece you want to evolve into: \")\n\n if fin_pos[0] == 0:\n ch[ini_pos[0]][ini_pos[1]] = new_piece.lower() + \" \"\n elif fin_pos[0] == 1:\n ch[ini_pos[0]][ini_pos[1]] = new_piece.upper() + \" \"\n\n move(ini_pos, fin_pos)\n\n else:\n print(\"\\nEnter legal move.\")\n input()\n continue\n\n if turn == 'w':\n turn = 'b'\n elif turn == 'b':\n turn = 'w'\n\n# TODO:\n# Add castling option\n","repo_name":"mrishu/Chess","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":13099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40341022890","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPackage for implementing different commands based on Moler Command.\n\"\"\"\n\n__author__ = 'Grzegorz Latuszek, Marcin Usielski'\n__copyright__ = 'Copyright (C) 2018-2023, Nokia'\n__email__ = 'grzegorz.latuszek@nokia.com, marcin.usielski@nokia.com'\n\nfrom re import search, match\nfrom moler.exceptions import WrongUsage\n\n\nclass RegexHelper(object):\n \"\"\"\n Class to help with working with regular expressions.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initializes internal variables.\n \"\"\"\n self._match = None\n\n def search(self, pattern, string, flags=0):\n \"\"\"\n Searches for passed pattern in passed string.\n\n :param pattern: Pattern to find. 
Regular expression.\n :param string: String to scan through to find the pattern.\n :param flags: Flags for search.\n :return: Match object.\n \"\"\"\n self._match = search(pattern, string, flags)\n return self._match\n\n def search_compiled(self, compiled, string, raise_if_compiled_is_none=False):\n \"\"\"\n Searches for passed pattern in passed string.\n\n :param compiled: Compiled regular expression pattern to find.\n :param string: String to scan through to find the pattern.\n :param raise_if_compiled_is_none: set True to raise a WrongUsage if compiled is None. If False then return None.\n :return: Match object.\n \"\"\"\n if compiled is None:\n if raise_if_compiled_is_none:\n exp = WrongUsage(\"{} parameter compiled passed to search_compiled is None. Expected not None.\"\n \" String is '{}'.\".format(self, string))\n raise exp\n else:\n return None\n\n self._match = compiled.search(string)\n return self._match\n\n def match(self, pattern, string, flags=0):\n \"\"\"\n Matches for passed pattern in passed string.\n\n :param pattern: Pattern to find. Regular expression.\n :param string: String to scan through to find the pattern.\n :param flags: Flags for search.\n :return: Match object.\n \"\"\"\n self._match = match(pattern, string, flags)\n return self._match\n\n def match_compiled(self, compiled, string, raise_if_compiled_is_none=False):\n \"\"\"\n Matches for passed pattern in passed string.\n\n :param compiled: Compiled regular expression pattern to find.\n :param string: String to scan through to find the pattern.\n :param raise_if_compiled_is_none: set True to raise a WrongUsage if compiled is None. If False then return None.\n :return: Match object.\n \"\"\"\n if compiled is None:\n if raise_if_compiled_is_none:\n exp = WrongUsage(\"{} parameter compiled passed to match_compiled is None. Expected not None.\"\n \" String is '{}.\".format(self, string))\n raise exp\n else:\n return None\n self._match = compiled.match(string)\n return self._match\n\n def get_match(self):\n \"\"\"\n Returns match object.\n\n :return: Match object.\n \"\"\"\n return self._match\n\n def group(self, number):\n \"\"\"\n Returns group from match object.\n\n :param number: Number or name of match object.\n :return: Match object.\n \"\"\"\n if self._match is None:\n exp = WrongUsage(\"{}. Nothing was matched before calling group in RegexHelper.\".format(self))\n raise exp\n return self._match.group(number)\n\n def groups(self):\n \"\"\"\n Returns groups from match object.\n\n :return: Groups from match object.\n \"\"\"\n if self._match is None:\n exp = WrongUsage(\"{}. Nothing was matched before calling groups in RegexHelper.\".format(self))\n raise exp\n return self._match.groups()\n\n def groupdict(self):\n \"\"\"\n Returns groupdict from match object.\n\n :return: Groupdict from match object.\n \"\"\"\n if self._match is None:\n exp = WrongUsage(\"{}. 
Nothing was matched before calling groupdict in RegexHelper\".format(self))\n raise exp\n return self._match.groupdict()\n","repo_name":"nokia/moler","sub_path":"moler/cmd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"31880789911","text":"import sys\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import Qt, QPoint, QRect, QSize\nfrom PyQt4.QtGui import QPixmap, QApplication, QLabel, QRubberBand\n\n\nclass MyLabel(QLabel):\n\n def __init__(self, parent=None):\n\n QLabel.__init__(self, parent)\n self.rubberBand = QRubberBand(QRubberBand.Rectangle, self)\n self.origin = QPoint()\n\n def mousePressEvent(self, event):\n\n if event.button() == Qt.LeftButton:\n\n self.origin = QPoint(event.pos())\n self.rubberBand.setGeometry(QRect(self.origin, QSize()))\n self.rubberBand.show()\n\n def mouseMoveEvent(self, event):\n\n if not self.origin.isNull():\n self.rubberBand.setGeometry(\n QRect(self.origin, event.pos()).normalized())\n\n def mouseReleaseEvent(self, event):\n\n if event.button() == Qt.LeftButton:\n self.rubberBand.hide()\n\n\nclass mainUI(QtGui.QWidget):\n\n def __init__(self):\n super(mainUI, self).__init__()\n self.initUI()\n\n def initUI(self):\n\n layout = QtGui.QVBoxLayout(self)\n\n label = MyLabel(self)\n pixmap = QPixmap.grabWindow(app.desktop().winId())\n label.setPixmap(pixmap)\n layout.addWidget(label)\n\n self.setLayout(layout)\n\n geometry = app.desktop().availableGeometry()\n\n self.setFixedSize(geometry.width(), geometry.height())\n\n # self.setWindowFlags( self.windowFlags() | Qt.FramelessWindowHint)\n self.show()\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n\n window = mainUI()\n\n sys.exit(app.exec_())","repo_name":"rodstar97/sbrys","sub_path":"playground/snapshot_01.py","file_name":"snapshot_01.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40581928471","text":"#!/usr/bin/python3\n\n\nimport socket\nimport random\n\n\nmySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nmySocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n\nmySocket.bind(('localhost', 1234))\n\n\nmySocket.listen(5)\n\ntry:\n while True:\n print ('Waiting for connections')\n (recvSocket, address) = mySocket.accept()\n print ('Request received:')\n print (recvSocket.recv(2048).decode(\"utf-8\", \"strict\"))\n print ('Answering back...')\n random_int = str(random.randint(99999,999999))\n recvSocket.send(bytes(\"HTTP/1.1 303 See Other\\r\\n\\r\\n\" +\n \"
<html><body><h1> \" +\n                               \"Redirigiendo a... /\" +\n\t\t\t\t\t\t\t random_int + \"</h1></body></html>
\"\n \"\\r\\n\", \"utf-8\"))\n recvSocket.close()\nexcept KeyboardInterrupt:\n print (\"Closing binded socket\")\nmySocket.close()","repo_name":"cimartin/X-Serv-App-Redirectora","sub_path":"redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"29049731750","text":"\nimport cv2\n# load the photograph\n#pixels = cv2.imread('test1.jpg')\n# load the pre-trained model\n'''classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\ncap = cv2.VideoCapture(0)\nwhile cap.isOpened():\n frame = cap.read()\n # perform face detection\n bboxes = classifier.detectMultiScale(frame)\n # print bounding box for each detected face\n for box in bboxes:\n # extract\n x, y, width, height = box\n x2, y2 = x + width, y + height\n # draw a rectangle over the pixels\n cv2.rectangle(frame, (x, y), (x2, y2), (0,0,255), 1)\n # show the image\n cv2.imshow('face detection', pixels)\n# keep the window open until we press a key\n if cv2.waitKey(25) and 0xff == ord(q):\n break\n# close the window\ncv2.destroyAllWindows()'''\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n#eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\n#mg = cv2.imread('xfiles4.jpg')\ncap = cv2.VideoCapture('test.mp4')\nwhile cap.isOpened():\n ret, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n \n if cv2.waitKey(25) & 0xff == ord('q'):\n break \n cv2.imshow('img',img)\n\ncv2.destroyAllWindows()\n","repo_name":"NIDA575/DeepLearning","sub_path":"face_detection/face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10348706364","text":"import json\nimport logging\nfrom core.base_handler import BaseHandler, arguments\nfrom .model import TaskModel\nfrom core.exception import ParametersError\n\nmaps = {\n 1: 'import task',\n 2: 'parse to type2',\n 3: 'parse to type1',\n 4: 'parse type1 error',\n 5: 'parse type2 error'\n}\n\nclass TaskHandler(BaseHandler):\n @arguments\n async def get(self, site: str = None, model: TaskModel = None):\n # http://127.0.0.1:3333/task?site=dajie\n if not site:\n self.finish({\n 'code': -3,\n 'msg': 'success',\n 'task': 'no site received',\n })\n return\n\n res = await model.get(site)\n res = res.decode() if res else None\n logging.info('get %r task: %r' % (site, res))\n\n if not res:\n self.finish({\n 'code': -2,\n 'msg': 'success',\n 'task': 'task none',\n })\n return\n\n self.finish({\n 'code': 0,\n 'msg': 'success',\n 'task': res,\n })\n\n @arguments\n async def post(self, task: str = None, model: TaskModel = None):\n # http://127.0.0.1:3333/task\n # body={'site': 'dajie', 'task': 'task', 'type': 1}\n if not task:\n raise ParametersError('task none.')\n _task = json.loads(task)\n site = _task.get('site', None)\n if not site:\n raise ParametersError('site none.')\n type = _task.get('type', None)\n if not type:\n raise ParametersError('type none.')\n if type not in [1, 2, 3, 4, 5]: # 1:导入的任务 2: 解析的type2 3:解析的type1 4:失败的type1 5:失败的type2\n raise ParametersError('type: {} error.'.format(type))\n logging.info('push site: %r, type: 
%r, reason: %r, task: %r' % (site, type, maps.get(type),task))\n\n await model.push(site, type, task)\n self.finish({\n 'code': 0,\n 'msg': 'success'\n })\n","repo_name":"EngiGu/hanzo","sub_path":"doom/server/modules/task/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40986025644","text":"#coding:utf-8\n\nimport sys\nfrom PyQt5.QtCore import (Qt, QEvent, QTimer)\nfrom PyQt5.QtWidgets import (QWidget, QLCDNumber, QSlider, QVBoxLayout, QApplication, QSizePolicy)\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal\nfrom PyQt5.QtGui import QColor\nfrom PyQt5 import *\nimport time\nimport traceback\nfrom libs.SocketInterfaces import SocketInterfaces\nfrom libs.QEditableLCD import QEditableLCD\nfrom libs.QMeassureLCD import QMeassureLCD\nimport pyvisa as visa\nimport numpy as np\nimport struct\nimport math\n\ndef generateSINData(start_phase = 0, end_phase = 360, num=4096, positiveOnly=False):\n baseWaveform = (np.sin(np.linspace(math.pi,-math.pi, num=num)) * 32767).astype(np.int16)\n if positiveOnly:\n baseWaveform = np.abs(baseWaveform)\n start_point = int(start_phase / 360. * (num/2))\n end_point = int(end_phase / 360. * (num/2))\n baseWaveform[0:start_point] = 0\n baseWaveform[end_point:int(num/2)] = 0\n baseWaveform[int(num/2):int(num/2)+start_point] = 0\n baseWaveform[int(num/2)+end_point:num] = 0\n return baseWaveform\n\nclass GwinstekASR2100(QWidget):\n modelChanged = pyqtSignal(str)\n\n def __init__(self,parent=None):\n QWidget.__init__(self)\n\n self.rm = visa.ResourceManager(visa_library=\"@ivi\")\n self.dev = None\n self.model = None\n self.mode = None\n self.initalizing = True\n self.output_enabled = True\n self.cur_pulse_enabled = False\n self.remote_enabled = False\n self.set_ocp = None\n self.set_ovp = None\n self.protection = None\n self.pause_query = False\n self.querying = False\n self.failed_read = 0\n self.current_range = None\n self.set_commands = {\"voltage\": None, \"current\": None}\n\n self.setWindowTitle('GWINSTEK ASR-2100')\n\n self.dev_selector = QLineEdit(\"192.168.11.247:2268\", self)\n\n self.btn_connect = QPushButton('Open', self)\n self.btn_connect.clicked.connect(self.on_open_click)\n self.btn_connect.setToolTip('Connect/Disconnect To device')\n\n # Device Selector\n box_selector = QHBoxLayout()\n box_selector.addWidget(self.dev_selector)\n box_selector.addWidget(self.btn_connect)\n\n # Main Zone\n main_layout = QHBoxLayout()\n\n main_control_layout = QVBoxLayout()\n\n # Voltage Group\n voltage_group_layout = QVBoxLayout()\n voltage_group = QGroupBox(\"Voltage\", self)\n voltage_group.setLayout(voltage_group_layout)\n \n # Voltage LCD\n voltage_group_ctrl_layout = QHBoxLayout()\n ac_label = QLabel(\"AC RMS\", self)\n voltage_group_ctrl_layout.addWidget(ac_label)\n self.voltage_lcd = QEditableLCD(5, self, QColor(99, 193, 149), \"background-color: #222; border: 0px; border-radius: 5px;\", textStyle=\"selection-background-color: #333; font-size: 36px; \", size=(100, 50))\n self.voltage_lcd.valueChanged.connect(self.on_voltage_lcd_valueChanged)\n voltage_group_ctrl_layout.addWidget(self.voltage_lcd)\n\n self.voltage_slider = QSlider(Qt.Horizontal,self)\n self.voltage_slider.setMinimum(0)\n self.voltage_slider.setMaximum(3500)\n self.voltage_slider.valueChanged.connect(self.on_voltage_slider_valueChanged)\n voltage_group_ctrl_layout.addWidget(self.voltage_slider)\n\n # Voltage DC LCD\n 
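# DC-offset row: the LCD drives the slider at 0.1 per tick (slider range -5000..5000)\n        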
voltage_group_dc_ctrl_layout = QHBoxLayout()\n dc_label = QLabel(\"DC-OFF\", self)\n voltage_group_dc_ctrl_layout.addWidget(dc_label)\n self.voltage_dc_lcd = QEditableLCD(5, self, QColor(99, 193, 149), \"background-color: #222; border: 0px; border-radius: 5px;\", textStyle=\"selection-background-color: #333; font-size: 36px; \", size=(100, 50))\n self.voltage_dc_lcd.valueChanged[float].connect(lambda x: self.voltage_dc_slider.setValue(x*10))\n voltage_group_dc_ctrl_layout.addWidget(self.voltage_dc_lcd)\n\n self.voltage_dc_slider = QSlider(Qt.Horizontal,self)\n self.voltage_dc_slider.setMinimum(-5000)\n self.voltage_dc_slider.setMaximum(5000)\n self.voltage_dc_slider.valueChanged.connect(self.on_voltage_dc_slider_valueChanged)\n voltage_group_dc_ctrl_layout.addWidget(self.voltage_dc_slider)\n\n # OVP\n voltage_group_protect_layout = QHBoxLayout()\n ovp_label = QLabel(\"V-Limit:\", self)\n voltage_group_protect_layout.addWidget(ovp_label)\n self.volt_neg_slider = QSlider(Qt.Horizontal,self)\n self.volt_neg_slider.setInvertedControls(True)\n self.volt_neg_slider.setMinimum(-5000)\n self.volt_neg_slider.setMaximum(-200)\n\n self.volt_neg_lcd = QEditableLCD(6, self, QColor(237, 64, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n self.volt_pos_lcd = QEditableLCD(6, self, QColor(237, 64, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n\n self.volt_pos_slider = QSlider(Qt.Horizontal,self)\n self.volt_pos_slider.setMinimum(200)\n self.volt_pos_slider.setMaximum(5000)\n \n self.volt_neg_slider.valueChanged.connect(self.on_volt_neg_slider_valueChanged)\n self.volt_neg_lcd.valueChanged[float].connect(lambda x: self.volt_neg_slider.setValue(x*10) if not self.initalizing else 0)\n self.volt_pos_slider.valueChanged.connect(self.on_volt_pos_slider_valueChanged)\n self.volt_pos_lcd.valueChanged[float].connect(lambda x: self.volt_pos_slider.setValue(x*10) if not self.initalizing else 0)\n voltage_group_protect_layout.addWidget(self.volt_neg_slider)\n voltage_group_protect_layout.addWidget(self.volt_neg_lcd)\n voltage_group_protect_layout.addWidget(self.volt_pos_lcd)\n voltage_group_protect_layout.addWidget(self.volt_pos_slider)\n\n # Add Voltage Group To Main Layout\n voltage_group_layout.addLayout(voltage_group_ctrl_layout)\n voltage_group_layout.addLayout(voltage_group_dc_ctrl_layout)\n voltage_group_layout.addLayout(voltage_group_protect_layout)\n main_control_layout.addWidget(voltage_group)\n\n # Current Group\n current_group_layout = QVBoxLayout()\n current_group_ctrl_layout = QHBoxLayout()\n current_group = QGroupBox(\"Current\", self)\n current_group.setLayout(current_group_layout)\n\n # Current LCD\n self.current_lcd = QEditableLCD(5, self, QColor(0, 237, 0), \"background-color: #222; border: 0px; border-radius: 5px;\", textStyle=\"selection-background-color: #333; font-size: 36px; \", size=(100, 50))\n self.current_lcd.valueChanged.connect(self.on_current_lcd_valueChanged)\n current_group_ctrl_layout.addWidget(self.current_lcd)\n\n self.current_slider = QSlider(Qt.Horizontal,self)\n self.current_slider.valueChanged.connect(self.on_current_slider_valueChanged)\n current_group_ctrl_layout.addWidget(self.current_slider)\n\n self.current_ocp = QCheckBox(\"O&CP\", self)\n self.current_ocp.stateChanged.connect(self.on_current_ocp_stateChanged)\n current_group_ctrl_layout.addWidget(self.current_ocp)\n\n # Peak\n current_group_protect_layout = QHBoxLayout()\n 
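# peak-current limit row: negative and positive peak LCDs map to their sliders at 0.01 per tick, with a PROTECT checkbox\n        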
ocp_label = QLabel(\"IPeak:\", self)\n current_group_protect_layout.addWidget(ocp_label)\n self.ipk_neg_slider = QSlider(Qt.Horizontal,self)\n self.ipk_neg_slider.setInvertedControls(True)\n # self.ipk_neg_slider.setInvertedAppearance(True)\n\n self.ipk_neg_lcd = QEditableLCD(6, self, QColor(0, 237, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n self.ipk_protect = QCheckBox(\"PROTECT\", self)\n self.ipk_pos_lcd = QEditableLCD(6, self, QColor(0, 237, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n\n self.ipk_pos_slider = QSlider(Qt.Horizontal,self)\n\n self.ipk_neg_slider.valueChanged.connect(self.on_ipk_neg_slider_valueChanged)\n self.ipk_neg_lcd.valueChanged[float].connect(lambda x: self.ipk_neg_slider.setValue(x*100) if not self.initalizing else 0)\n self.ipk_protect.stateChanged.connect(self.on_ipk_protect_stateChanged)\n self.ipk_pos_slider.valueChanged.connect(self.on_ipk_pos_slider_valueChanged)\n self.ipk_pos_lcd.valueChanged[float].connect(lambda x: self.ipk_pos_slider.setValue(x*100) if not self.initalizing else 0)\n current_group_protect_layout.addWidget(self.ipk_neg_slider)\n current_group_protect_layout.addWidget(self.ipk_neg_lcd)\n current_group_protect_layout.addWidget(self.ipk_protect)\n current_group_protect_layout.addWidget(self.ipk_pos_lcd)\n current_group_protect_layout.addWidget(self.ipk_pos_slider)\n\n # Add Current Group To Main Layout\n current_group_layout.addLayout(current_group_ctrl_layout)\n current_group_layout.addLayout(current_group_protect_layout)\n main_control_layout.addWidget(current_group)\n\n # Frequence Group\n frequence_group_layout = QVBoxLayout()\n frequence_group_ctrl_layout = QHBoxLayout()\n self.frequence_group = QGroupBox(\"Frequence & Phase\", self)\n self.frequence_group.setLayout(frequence_group_layout)\n\n # Frequence LCD\n self.frequence_lcd = QEditableLCD(5, self, QColor(255, 128, 0), \"background-color: #222; border: 0px; border-radius: 5px;\", textStyle=\"selection-background-color: #333; font-size: 36px; \", size=(100, 30))\n self.frequence_lcd.valueChanged[float].connect(lambda x: self.frequence_slider.setValue(x*100 if x < 100 else 10000 + (x-100) * 10))\n frequence_group_ctrl_layout.addWidget(self.frequence_lcd)\n\n self.frequence_slider = QSlider(Qt.Horizontal,self)\n self.frequence_slider.valueChanged.connect(self.on_freq_slider_valueChanged)\n self.frequence_slider.setMinimum(100)\n self.frequence_slider.setMaximum(10000+8999)\n frequence_group_ctrl_layout.addWidget(self.frequence_slider)\n\n # OFP\n ofp_label = QLabel(\"F-Limit:\", self)\n frequence_group_ctrl_layout.addWidget(ofp_label)\n self.freq_neg_slider = QSlider(Qt.Horizontal,self)\n self.freq_neg_slider.setInvertedControls(True)\n self.freq_neg_slider.setMinimum(100)\n self.freq_neg_slider.setMaximum(10000+8999)\n\n self.freq_neg_lcd = QEditableLCD(5, self, QColor(237, 64, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n self.freq_pos_lcd = QEditableLCD(5, self, QColor(237, 64, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n\n self.freq_pos_slider = QSlider(Qt.Horizontal,self)\n self.freq_pos_slider.setMinimum(100)\n self.freq_pos_slider.setMaximum(10000+8999)\n \n self.freq_neg_slider.valueChanged.connect(self.on_freq_neg_slider_valueChanged)\n 
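# LCD-to-slider mapping is piecewise: 0.01 Hz per tick below 100 Hz, 0.1 Hz per tick from 100 Hz up\n        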
self.freq_neg_lcd.valueChanged[float].connect(lambda x: self.freq_neg_slider.setValue(x*100 if x < 100 else 10000 + (x-100) * 10))\n self.freq_pos_slider.valueChanged.connect(self.on_freq_pos_slider_valueChanged)\n self.freq_pos_lcd.valueChanged[float].connect(lambda x: self.freq_pos_slider.setValue(x*100 if x < 100 else 10000 + (x-100) * 10))\n frequence_group_ctrl_layout.addWidget(self.freq_neg_slider)\n frequence_group_ctrl_layout.addWidget(self.freq_neg_lcd)\n frequence_group_ctrl_layout.addWidget(self.freq_pos_lcd)\n frequence_group_ctrl_layout.addWidget(self.freq_pos_slider)\n\n\n # Phase\n phase_group_layout = QHBoxLayout()\n phase_label = QLabel(\"Phase:\", self)\n self.phase_start_lcd = QEditableLCD(5, self, QColor(237, 64, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n self.phase_start_slider = QSlider(Qt.Horizontal,self)\n self.phase_start_slider.setMinimum(0)\n self.phase_start_slider.setMaximum(3599)\n\n self.phase_end_lcd = QEditableLCD(5, self, QColor(237, 64, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n self.phase_end_slider = QSlider(Qt.Horizontal,self)\n self.phase_end_slider.setMinimum(0)\n self.phase_end_slider.setMaximum(3599)\n \n self.phase_start_slider.valueChanged.connect(self.on_phase_start_slider_valueChanged)\n self.phase_start_lcd.valueChanged[float].connect(lambda x: self.phase_start_slider.setValue(x*10))\n self.phase_end_slider.valueChanged.connect(self.on_phase_end_slider_valueChanged)\n self.phase_end_lcd.valueChanged[float].connect(lambda x: self.phase_end_slider.setValue(x*100 if x < 100 else 10000 + (x-100) * 10))\n\n phase_group_layout.addWidget(phase_label)\n phase_group_layout.addWidget(self.phase_start_lcd)\n phase_group_layout.addWidget(self.phase_start_slider)\n phase_group_layout.addWidget(self.phase_end_lcd)\n phase_group_layout.addWidget(self.phase_end_slider)\n\n\n # SCR\n scr_group_layout = QHBoxLayout()\n scr_label = QLabel(\"SCR SIM:\", self)\n self.scr_lcd = QEditableLCD(5, self, QColor(237, 64, 60), \"background-color: #222; border: 0px; border-radius: 2px;\", textStyle=\"selection-background-color: #333; \")\n self.scr_slider = QSlider(Qt.Horizontal,self)\n self.scr_slider.setMinimum(0)\n self.scr_slider.setMaximum(3599)\n\n self.scr_slider.valueChanged.connect(self.on_scr_slider_valueChanged)\n self.scr_lcd.valueChanged[float].connect(lambda x: self.scr_slider.setValue(x*10))\n\n scr_group_layout.addWidget(scr_label)\n scr_group_layout.addWidget(self.scr_lcd)\n scr_group_layout.addWidget(self.scr_slider)\n\n self.br_sim = QComboBox(self)\n self.br_sim.addItem(\"None\", \"None\")\n self.br_sim.addItem(\"Full Bridge Rectifier\", \"FBR\")\n self.br_sim.addItem(\"Half Bridge Rectifier\", \"HBR\")\n self.br_sim.addItem(\"MOSFET RPC\", \"RPC\")\n self.br_sim.currentIndexChanged.connect(self.on_scr_slider_valueChanged)\n scr_group_layout.addWidget(self.br_sim)\n\n\n freq_sync_group_layout = QHBoxLayout()\n freq_sync_label = QLabel(\"Freq Sync:\", self)\n \n freq_sync_group_layout.addWidget(freq_sync_label)\n\n self.freq_sync = QComboBox(self)\n self.freq_sync.addItem(\"None\", \"None\")\n self.freq_sync.addItem(\"Sync With Power Line\", \"LINE\")\n self.freq_sync.addItem(\"Sync With Ext\", \"EXT\")\n self.freq_sync.currentIndexChanged.connect(self.on_freq_sync_valueChanged)\n freq_sync_group_layout.addWidget(self.freq_sync)\n\n # Add Frequence Group To Main Layout\n 
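# frequency controls, phase row, SCR-simulation row and sync row are stacked top to bottom\n        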
frequence_group_layout.addLayout(frequence_group_ctrl_layout)\n frequence_group_layout.addLayout(phase_group_layout)\n frequence_group_layout.addLayout(scr_group_layout)\n frequence_group_layout.addLayout(freq_sync_group_layout)\n main_control_layout.addWidget(self.frequence_group)\n\n main_layout.addLayout(main_control_layout)\n\n # Load Control\n self.btn_output = QPushButton('OUTPUT', self)\n self.btn_output.clicked.connect(self.on_btn_output_clicked)\n\n self.btn_output.setFixedSize(self.btn_output.sizeHint().width(), main_control_layout.sizeHint().height())\n main_layout.addWidget(self.btn_output)\n \n\n # Meassure Zone Start\n self.meas_vol_lcd = QMeassureLCD(\"Voltage\", 8, self, QColor(255, 0, 0), size=(160, 40))\n self.meas_cur_lcd = QMeassureLCD(\"Current\", 8, self, QColor(0, 255, 0), size=(160, 40))\n self.meas_pow_lcd = QMeassureLCD(\"Power(W)\", 8, self, QColor(0, 128, 255), size=(160, 40))\n self.meas_q_lcd = QMeassureLCD(\"Q (var)\", 8, self, QColor(192, 128, 0), size=(120, 30))\n self.meas_pf_lcd = QMeassureLCD(\"PowerFactor (PF)\", 5, self, QColor(0, 192, 128), size=(90, 30))\n self.meas_cf_lcd = QMeassureLCD(\"CF\", 5, self, QColor(0, 192, 128), size=(90, 30))\n self.meas_s_lcd = QMeassureLCD(\"Power(VA)\", 8, self, QColor(0, 128, 255), size=(120, 30))\n\n meas_layout = QVBoxLayout()\n meas_layout1 = QHBoxLayout()\n meas_layout1.addWidget(self.meas_vol_lcd)\n meas_layout1.addWidget(self.meas_cur_lcd)\n meas_layout1.addWidget(self.meas_pow_lcd)\n meas_layout.addLayout(meas_layout1)\n meas_layout2 = QHBoxLayout()\n meas_layout2.addWidget(self.meas_q_lcd)\n meas_layout2.addWidget(self.meas_pf_lcd)\n meas_layout2.addWidget(self.meas_cf_lcd)\n meas_layout2.addWidget(self.meas_s_lcd)\n meas_layout.addLayout(meas_layout2)\n # Meassure Zone End\n\n vbox = QVBoxLayout()\n vbox.addLayout(box_selector)\n vbox.addLayout(main_layout)\n\n vbox.addLayout(meas_layout)\n\n self.setLayout(vbox)\n \n # self.slider.valueChanged.connect(self.on_main_slider_valueChanged)\n self.resize(350,250)\n\n # self.volt_neg_lcd.installEventFilter(self)\n # self.volt_neg_lcd_edit.installEventFilter(self)\n # self.volt_pos_lcd.installEventFilter(self)\n # self.volt_pos_lcd_edit.installEventFilter(self)\n # self.ovp_value.installEventFilter(self)\n\n timer = QTimer(self)\n timer.setSingleShot(False)\n timer.timeout.connect(self.get_meas_value)\n timer.start(300)\n\n @pyqtSlot()\n def get_meas_value(self):\n if self.dev is not None and not self.initalizing:\n try:\n if self.pause_query:\n return\n self.querying = True\n load_settings = False\n for k, v in self.set_commands.items():\n if v is not None:\n if type(v) is list:\n for vl in v:\n self.dev.write(vl)\n else:\n self.dev.write(v)\n self.set_commands[k] = None\n if k == \"mode\":\n load_settings = True\n # ,,,,,,,,,
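# NOTE (editor sketch): self.set_commands works as a write-coalescing queue: every UI handler\n                # stores only the latest SCPI string per key, and the loop above flushes each pending entry\n                # at most once per 300 ms poll tick, so dragging a slider cannot flood the instrument.\n                # Hypothetical illustration of the pattern:\n                # self.set_commands[\"voltage\"] = \":VOLT 12.0\"  # overwritten while the slider is dragged\n                # self.set_commands[\"voltage\"] = \":VOLT 13.5\"  # only the final value reaches dev.write()\n                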
# ,,,,,,,\n                error_list = []\n                while True:\n                    self.dev.write(\":SYSTem:ERRor?\")\n                    err = self.dev.read().strip()\n                    if err != '+0,\"No error\"':\n                        error_list.append(err)\n                    else:\n                        break\n\n                if len(error_list) > 0:\n                    QMessageBox.critical(None,\"Error\", \"An error has occurred: %s\" % (\"\\n
\".join(error_list)))\n self.load_current_settings()\n\n if load_settings:\n self.load_current_settings()\n\n self.dev.write(\":STATus:WARNing:CONDition?\")\n reg_warning_status = int(self.dev.read().strip())\n\n self.dev.write(\":STATus:OPERation?\")\n reg_operation_status = int(self.dev.read().strip())\n\n self.dev.write(\":STATus:QUEStionable?\")\n reg_questionable_status = int(self.dev.read().strip())\n # :STATus:WARNing:CONDition?\n # :STATus:OPERation\n # :STATus:QUEStionable\n # *SRE?\n\n # print(\"REG: WARNING %04x OPT %04x QUESTION %04x\" % (reg_warning_status, reg_operation_status, reg_questionable_status))\n\n if (reg_warning_status & 0x1) != 0:\n self.protection = \"OVP\"\n elif (reg_warning_status & 0x2) != 0:\n self.protection = \"Over Irms\"\n elif (reg_warning_status & 0x8) != 0:\n self.protection = \"Over Ipeak\"\n elif (reg_warning_status & 0x40) != 0:\n self.protection = \"Overheat\"\n elif (reg_warning_status & 0x80) != 0:\n self.protection = \"Ext Sync Error\"\n elif (reg_warning_status & 0x200) != 0:\n self.protection = \"Sense Error\"\n elif (reg_warning_status & 0x1000) != 0:\n self.protection = \"Power Limit\"\n elif (reg_warning_status & 0x2000) != 0:\n self.protection = \"IRMS\\nSoft Limit\"\n # pass\n elif (reg_warning_status & 0x4000) != 0:\n self.protection = \"IPK\\nSoft Limit\"\n else:\n self.protection = None\n self.update_output_button()\n\n if self.protection is not None:\n self.update_output_button()\n self.dev.write(\":SOURce:READ?\")\n self.querying = False\n meas_value = self.dev.read().strip().split(\",\")\n self.meas_volt = float(meas_value[0])\n self.meas_vol_lcd.display(\"%.04f\" % (self.meas_volt))\n\n self.meas_curr = float(meas_value[4])\n self.meas_cur_lcd.display(\"%.04f\" % (self.meas_curr))\n\n self.meas_pwr = float(meas_value[9])\n self.meas_pow_lcd.display(\"%.04f\" % (self.meas_pwr))\n\n self.meas_s = float(meas_value[10])\n self.meas_s_lcd.display(\"%.04f\" % (self.meas_s))\n\n self.meas_q = float(meas_value[11])\n self.meas_q_lcd.display(\"%.04f\" % (self.meas_q))\n\n self.meas_pf = float(meas_value[12])\n self.meas_pf_lcd.display(\"%.03f\" % (self.meas_pf))\n\n self.meas_cf = float(meas_value[13])\n self.meas_cf_lcd.display(\"%.03f\" % (self.meas_cf))\n\n # if self.protection is not None:\n # self.protection = None\n # self.update_output_button()\n # else:\n # self.protection = protect_state\n pass\n except Exception as e:\n print(\"Read data failed\", e)\n traceback.print_exc()\n self.failed_read+=1\n if self.failed_read >= 10:\n self.on_open_click()\n # self.on_open_click()\n\n def load_current_settings(self):\n self.initalizing = True\n\n self.dev.write(\":MODE?\")\n self.set_mode = self.dev.read().strip()\n\n self.dev.write(\":OUTPut?\")\n self.output_enabled = self.dev.read().strip() == \"+1\"\n self.update_output_button()\n\n self.dev.write(\":FUNCtion?\")\n self.output_waveform = self.dev.read().strip()\n\n self.dev.write(\":SOUR:VOLT:RANG?\")\n self.set_voltage_range = self.dev.read().strip()\n \n self.dev.write(\":VOLTage?\")\n self.set_voltage = float(self.dev.read().strip()) * (1 if self.output_waveform == \"SIN\" else 1/(2*math.sqrt(2)))\n self.voltage_slider.setValue(self.set_voltage * 10)\n\n if self.set_mode == \"ACDC-INT\" or self.set_mode == \"ACDC-Sync\":\n self.dev.write(\":VOLT:OFFSet?\")\n self.set_dc_voltage = float(self.dev.read().strip())\n self.voltage_dc_slider.setValue(self.set_dc_voltage * 10)\n self.voltage_dc_lcd.setEnabled(True)\n self.voltage_dc_slider.setEnabled(True)\n else:\n 
self.voltage_dc_lcd.setEnabled(False)\n self.voltage_dc_slider.setEnabled(False)\n\n # if self.output_waveform == \"SIN\" and self.set_voltage_range != \"AUTO\":\n # self.dev.write(\":SOUR:VOLT:RANG AUTO\")\n # elif self.output_waveform == \"ARB16\" and self.set_voltage_range == \"AUTO\":\n \n if self.set_mode in [\"ACDC-INT\", \"AC-INT\"]:\n self.dev.write(\":FREQuency?\")\n self.set_freq = float(self.dev.read().strip())\n self.dev.write(\":FREQuency:LIMit:LOW?\")\n self.set_freq_neg = float(self.dev.read().strip())\n self.dev.write(\":FREQuency:LIMit:HIGH?\")\n self.set_freq_pos = float(self.dev.read().strip())\n self.frequence_slider.setValue(self.set_freq *100 if self.set_freq < 100 else 10000 + (self.set_freq-100) * 10)\n self.freq_neg_slider.setValue(self.set_freq_neg *100 if self.set_freq_neg < 100 else 10000 + (self.set_freq_neg-100) * 10)\n self.freq_pos_slider.setValue(self.set_freq_pos *100 if self.set_freq_pos < 100 else 10000 + (self.set_freq_pos-100) * 10)\n\n self.frequence_slider.setEnabled(True)\n self.freq_neg_slider.setEnabled(True)\n self.freq_pos_slider.setEnabled(True)\n else:\n self.frequence_slider.setEnabled(False)\n self.freq_neg_slider.setEnabled(False)\n self.freq_pos_slider.setEnabled(False)\n\n if self.set_mode in [\"ACDC-INT\", \"ACDC-Sync\", \"AC-Sync\"]:\n self.dev.write(\":PHASe:STARt?\")\n self.set_phase_start = float(self.dev.read().strip())\n self.dev.write(\":PHASe:STOP?\")\n self.set_phase_end = float(self.dev.read().strip())\n\n self.phase_start_slider.setEnabled(True)\n self.phase_end_slider.setEnabled(True)\n self.phase_start_slider.setValue(self.set_phase_start * 10)\n \n self.phase_end_slider.setValue(self.set_phase_end * 10)\n else:\n self.phase_start_slider.setEnabled(False)\n self.phase_end_slider.setEnabled(False)\n\n if self.set_mode in [\"ACDC-Sync\", \"AC-Sync\"]:\n self.dev.write(\":INPut:SYNC:SOURce?\")\n if self.dev.read().strip() == \"LINE\":\n self.freq_sync.setCurrentIndex(1)\n else:\n self.freq_sync.setCurrentIndex(2)\n\n self.current_range = None\n self.initalizing = False\n\n @pyqtSlot()\n def on_open_click(self):\n if self.dev is None:\n self.initalizing = True\n \n self.dev = SocketInterfaces(self.dev_selector.text())\n # for i in range(0,5):\n # try:\n # self.dev = self.rm.open_resource(\"TCPIP0::%s::SOCKET\" % (self.dev_selector.text().replace(\":\",\"::\")), read_termination = '\\n')\n # break\n # except Exception as e:\n # print(\"Open Resource Failed\")\n # time.sleep(1)\n\n self.btn_connect.setText(\"Close\")\n self.dev.write(\"*IDN?\")\n self.model = self.dev.read().strip()\n self.modelChanged.emit(self.model)\n self.model = self.model.split(\",\")\n self.setWindowTitle(\" \".join(self.model[0:2])+\" VER: \"+self.model[3])\n\n self.load_current_settings()\n self.initalizing = False\n\n else:\n self.dev.write(\":SYSTem:COMMunicate:RLSTate LOCal\")\n self.dev.close()\n self.btn_connect.setText(\"Open\")\n self.dev = None\n self.initalizing = True\n self.repaint()\n\n def closeEvent(self, event):\n if self.dev is not None:\n self.on_open_click()\n\n def eventFilter(self, obj, event):\n if self.dev is None:\n return super(self.__class__, self).eventFilter(obj, event)\n return super(self.__class__, self).eventFilter(obj, event)\n\n def update_output_button(self):\n if self.protection is not None:\n self.btn_output.setText(self.protection)\n self.btn_output.setStyleSheet(\"background-color: #F00; color: #fff; font-weight:bold; border-radius: 15px; border: 2px dashed #000\")\n elif self.output_enabled == False:\n 
self.btn_output.setText(\"OUTPUT\")\n self.btn_output.setStyleSheet(\"background-color: #999; color: #efefef; border-radius: 15px; border: 2px dashed #000\")\n else:\n self.btn_output.setText(\"OUTPUT\")\n self.btn_output.setStyleSheet(\"background-color: #172e7b; color: white; font-weight:bold; border-radius: 15px; border: 2px dashed #000\")\n\n @pyqtSlot()\n def on_voltage_dc_slider_valueChanged(self):\n self.set_dc_voltage = self.voltage_dc_slider.value() / 10\n self.voltage_dc_lcd.display(\"%.01f\" % (self.set_dc_voltage))\n if self.initalizing:\n return\n self.set_commands[\"voltage\"] = \":VOLT:OFFSet %.01f\" % (self.set_dc_voltage)\n\n @pyqtSlot()\n def on_voltage_slider_valueChanged(self):\n self.set_voltage = self.voltage_slider.value() / 10\n self.voltage_lcd.display(\"%.01f\" % (self.set_voltage))\n rangeChanged = False\n manual_init = self.initalizing\n\n if not self.initalizing:\n self.initalizing = True\n\n if self.set_voltage < 176.80:\n if self.set_voltage_range != \"100\":\n self.set_voltage_range = \"100\"\n self.dev.write(\":SOUR:VOLT:RANG 100\")\n time.sleep(0.1)\n if self.output_enabled:\n self.dev.write(\":OUTPut 1\")\n self.current_slider.setMinimum(50)\n self.current_slider.setMaximum(1050)\n self.ipk_neg_slider.setMinimum(-4200)\n self.ipk_neg_slider.setMaximum(-420)\n self.ipk_pos_slider.setMinimum(420)\n self.ipk_pos_slider.setMaximum(4200)\n if self.current_range != \"LOW\":\n rangeChanged = True\n self.current_range = \"LOW\"\n else:\n if self.set_voltage_range != \"200\":\n self.set_voltage_range = \"200\"\n self.dev.write(\":SOUR:VOLT:RANG 200\")\n time.sleep(0.1)\n if self.output_enabled:\n self.dev.write(\":OUTPut 1\")\n self.current_slider.setMinimum(25)\n self.current_slider.setMaximum(525)\n self.ipk_neg_slider.setMinimum(-2100)\n self.ipk_neg_slider.setMaximum(-210)\n self.ipk_pos_slider.setMinimum(210)\n self.ipk_pos_slider.setMaximum(2100)\n if self.current_range != \"HIGH\":\n rangeChanged = True\n self.current_range = \"HIGH\"\n\n if rangeChanged:\n self.dev.write(\":CURRent:LIMit:RMS?\")\n self.set_current = float(self.dev.read().strip())\n self.current_slider.setValue(self.set_current * 100)\n\n self.dev.write(\":CURRent:LIMit:RMS:MODE?\")\n self.current_ocp.setChecked(self.dev.read().strip() != \"+1\")\n\n self.dev.write(\":CURRent:LIMit:PEAK:MODE?\")\n self.ipk_protect.setChecked(self.dev.read().strip() != \"+1\")\n\n self.dev.write(\":CURRent:LIMit:PEAK:LOW?\")\n self.set_ipk_neg = float(self.dev.read().strip())\n self.ipk_neg_slider.setValue(self.set_ipk_neg * 100)\n\n self.dev.write(\":CURRent:LIMit:PEAK:HIGH?\")\n self.set_ipk_pos = float(self.dev.read().strip())\n self.ipk_pos_slider.setValue(self.set_ipk_pos * 100)\n\n self.dev.write(\":VOLTage:LIMit:LOW?\")\n self.set_volt_neg = float(self.dev.read().strip())\n self.volt_neg_slider.setValue(self.set_volt_neg * 10)\n\n self.dev.write(\":VOLTage:LIMit:HIGH?\")\n self.set_volt_pos = float(self.dev.read().strip())\n self.volt_pos_slider.setValue(self.set_volt_pos * 10)\n\n self.initalizing = manual_init\n\n if self.initalizing:\n return\n\n if self.output_waveform == \"SIN\":\n self.set_commands[\"voltage\"] = \":VOLT %.01f\" % (self.set_voltage)\n else:\n self.set_commands[\"voltage\"] = \":VOLT %.01f\" % (self.set_voltage * math.sqrt(2)*2)\n\n @pyqtSlot()\n def on_volt_neg_slider_valueChanged(self):\n self.set_volt_neg = self.volt_neg_slider.value() / 10\n self.volt_neg_lcd.display(\"%.01f\" % (self.set_volt_neg))\n if self.initalizing:\n return\n print(\"Set Voltage Limit Low Range \", 
(self.set_volt_neg))\n self.set_commands[\"current\"] = \":VOLTage:LIMit:LOW %.02f\" % (self.set_volt_neg)\n\n @pyqtSlot()\n def on_volt_pos_slider_valueChanged(self):\n self.set_volt_pos = self.volt_pos_slider.value() / 10\n self.volt_pos_lcd.display(\"%.01f\" % (self.set_volt_pos))\n if self.initalizing:\n return\n print(\"Set Voltage Limit High Range \", (self.set_volt_pos))\n self.set_commands[\"current\"] = \":VOLTage:LIMit:HIGH %.02f\" % (self.set_volt_pos)\n\n def on_voltage_lcd_valueChanged(self, value):\n if self.initalizing:\n return\n try:\n self.voltage_slider.setValue(float(value) * 10)\n except Exception as e:\n QMessageBox.warning(None, \"Warning\", \"Input invalid\")\n\n @pyqtSlot()\n def on_current_slider_valueChanged(self):\n self.set_current = self.current_slider.value() / 100\n self.current_lcd.display(\"%.02f\" % (self.set_current))\n if self.initalizing:\n return\n self.set_commands[\"current\"] = \":CURRent:LIMit:RMS %.02f\" % (self.set_current)\n\n @pyqtSlot()\n def on_ipk_neg_slider_valueChanged(self):\n self.set_ipk_neg = self.ipk_neg_slider.value() / 100\n self.ipk_neg_lcd.display(\"%.02f\" % (self.set_ipk_neg))\n if self.initalizing:\n return\n print(\"Set IPeak Low Range \", (self.set_ipk_neg))\n self.set_commands[\"current\"] = \":CURRent:LIMit:PEAK:LOW %.02f\" % (self.set_ipk_neg)\n\n @pyqtSlot()\n def on_ipk_pos_slider_valueChanged(self):\n self.set_ipk_pos = self.ipk_pos_slider.value() / 100\n self.ipk_pos_lcd.display(\"%.02f\" % (self.set_ipk_pos))\n if self.initalizing:\n return\n print(\"Set High Range \", (self.set_ipk_pos))\n self.set_commands[\"current\"] = \":CURRent:LIMit:PEAK:HIGH %.02f\" % (self.set_ipk_pos)\n\n def on_current_lcd_valueChanged(self, value):\n try:\n self.current_slider.setValue(float(value) * 100)\n except Exception as e:\n QMessageBox.warning(None, \"Warning\", \"Input invalid\")\n\n @pyqtSlot()\n def on_current_ocp_stateChanged(self):\n self.set_ocp = self.current_ocp.isChecked()\n if self.initalizing:\n return\n self.set_commands[\"current_ocp\"] = \":CURRent:LIMit:RMS:MODE %s\" % (\"OFF\" if self.set_ocp else \"ON\")\n\n @pyqtSlot()\n def on_ipk_protect_stateChanged(self):\n self.set_ipk_protect = self.ipk_protect.isChecked()\n if self.initalizing:\n return\n self.set_commands[\"current_ocp\"] = \":CURRent:LIMit:PEAK:MODE %s\" % (\"OFF\" if self.set_ipk_protect else \"ON\")\n\n @pyqtSlot()\n def on_freq_slider_valueChanged(self):\n self.set_freq = self.frequence_slider.value() / 100 if self.frequence_slider.value() < 10000 else 100+(self.frequence_slider.value() - 10000) / 10\n print(self.frequence_slider.value(), self.set_freq )\n if self.set_freq >= 100:\n self.frequence_lcd.display(\"%.01f\" % (self.set_freq))\n else:\n self.frequence_lcd.display(\"%.02f\" % (self.set_freq))\n if self.initalizing:\n return\n self.set_commands[\"frequence\"] = \":FREQuency %.02f\" % (self.set_freq)\n\n @pyqtSlot()\n def on_freq_neg_slider_valueChanged(self):\n self.set_freq_neg = self.freq_neg_slider.value() / 100 if self.freq_neg_slider.value() < 10000 else 100+(self.freq_neg_slider.value() - 10000) / 10\n if self.set_freq_neg >= 100:\n self.freq_neg_lcd.display(\"%.01f\" % (self.set_freq_neg))\n else:\n self.freq_neg_lcd.display(\"%.02f\" % (self.set_freq_neg))\n if self.initalizing:\n return\n print(\"Set Frequence Limit Low Range \", (self.set_freq_neg))\n self.set_commands[\"frequence\"] = \":FREQuency:LIMit:LOW %.02f\" % (self.set_freq_neg)\n\n @pyqtSlot()\n def on_freq_pos_slider_valueChanged(self):\n self.set_freq_pos = 
self.freq_pos_slider.value() / 100 if self.freq_pos_slider.value() < 10000 else 100+(self.freq_pos_slider.value() - 10000) / 10\n if self.set_freq_pos >= 100:\n self.freq_pos_lcd.display(\"%.01f\" % (self.set_freq_pos))\n else:\n self.freq_pos_lcd.display(\"%.02f\" % (self.set_freq_pos))\n if self.initalizing:\n return\n print(\"Set Frequence Limit High Range \", (self.set_freq_pos))\n self.set_commands[\"frequence\"] = \":FREQuency:LIMit:HIGH %.02f\" % (self.set_freq_pos)\n\n @pyqtSlot()\n def on_phase_start_slider_valueChanged(self):\n self.set_phase_start = self.phase_start_slider.value() / 10\n self.phase_start_lcd.display(\"%.01f\" % (self.set_phase_start))\n if self.initalizing:\n return\n print(\"Set Phase Start \", (self.set_phase_start))\n self.set_commands[\"frequence\"] = \":PHASe:STARt %.02f\" % (self.set_phase_start)\n\n @pyqtSlot()\n def on_phase_end_slider_valueChanged(self):\n self.set_phase_end = self.phase_end_slider.value() / 10\n self.phase_end_lcd.display(\"%.01f\" % (self.set_phase_end))\n if self.initalizing:\n return\n print(\"Set Phase End \", (self.set_phase_end))\n self.set_commands[\"frequence\"] = \":PHASe:STOP %.02f\" % (self.set_phase_end)\n\n @pyqtSlot()\n def on_scr_slider_valueChanged(self):\n self.set_scr = self.scr_slider.value()/10\n self.scr_lcd.display(\"%.01f\" % (self.set_scr))\n # print(\"TRAC:WAV 16,#216\" + ''.join(['%04x' % b for b in bytearray(struct.pack(\">32h\",*generateSINData(self.set_scr, num=32)))]))\n if self.initalizing:\n return\n print(\"Set SCR Simulate \", (self.set_scr))\n if self.set_scr == 0 and self.br_sim.currentData() == \"None\":\n self.set_commands[\"mode\"] = [\":FUNCtion SIN\", \":OUTPut %d\" % (1 if self.output_enabled else 0)]\n else:\n # self.set_commands[\"scr\"] = \"TRACe:WAVe:CLEar 16\"# % (self.set_phase_start)\n # self.set_commands[\"scr_data\"] = \":DATA:WAVE 16,#232\" + ''.join(['%04X' % b for b in bytearray(struct.pack(\">32h\",*generateSINData(self.set_scr, num=32)))])\n if self.br_sim.currentData() == \"FBR\":\n sinData = generateSINData(self.set_scr, positiveOnly=True)\n elif self.br_sim.currentData() == \"HBR\":\n sinData = generateSINData(self.set_scr, positiveOnly=False)\n sinData = np.where(sinData > 0, sinData, 0)\n elif self.br_sim.currentData() == \"RPC\":\n sinData = generateSINData(start_phase = 0, end_phase = self.set_scr, positiveOnly=False)\n else:\n sinData = generateSINData(self.set_scr, positiveOnly=False)\n\n self.set_commands[\"scr_data\"] = b\":DATA:WAVE 16,#48192\" + struct.pack(\">4096h\",*sinData) + b\"\\n\"\n self.set_commands[\"mode\"] = [\":FUNCtion ARB16\", \":OUTPut %d\" % (1 if self.output_enabled else 0)]\n\n @pyqtSlot()\n def on_freq_sync_valueChanged(self):\n if self.initalizing:\n return\n\n if self.freq_sync.currentData() == \"None\":\n self.set_commands[\"mode\"] = \":MODE ACDC-INT\"\n else:\n self.set_commands[\"mode\"] = \":MODE ACDC-SYNC\"\n self.set_commands[\"sync\"] = \":INPut:SYNC:SOURce %s\" % (self.freq_sync.currentData())\n\n\n @pyqtSlot()\n def on_btn_output_clicked(self):\n if self.initalizing:\n return\n \n if self.protection:\n print(\"Clear Protect\")\n self.dev.write(\":OUTPut:PROTection:CLEar\")\n self.protection = None\n time.sleep(0.1)\n self.dev.write(\":OUTPut?\")\n self.output_enabled = self.dev.read().strip() == \"+1\"\n self.update_output_button()\n elif self.output_enabled == False:\n print(\"Change Output State ON\")\n self.dev.write(\":OUTPut 1\")\n self.output_enabled = True\n else:\n print(\"Change Output State OFF\")\n self.dev.write(\":OUTPut 
0\")\n            self.output_enabled = False\n\n        self.update_output_button()\n        \n    def keyPressEvent(self, e):\n        if e.key() == Qt.Key_F5:\n            self.on_btn_output_clicked()\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    qb = GwinstekASR2100()\n    qb.show()\n    def handle_exception(exc_type, exc_value, exc_traceback):\n        if qb is not None and qb.dev is not None:\n            qb.on_open_click()\n        if exc_type is not KeyboardInterrupt:\n            QMessageBox.critical(None,\"Error\", \"A critical error has occurred.\\n\\n%s: %s\\n\\nTraceback:\\n\\n%s\\n\\n\\n\" % (exc_type.__name__, exc_value, '\\n
'.join(traceback.format_tb(exc_traceback))))\n        sys.exit(1)\n\n    sys.excepthook = handle_exception\n    sys.exit(app.exec_())\n","repo_name":"magicbear/PyInstructment-GUI","sub_path":"gwinstek_ASR2100.py","file_name":"gwinstek_ASR2100.py","file_ext":"py","file_size_in_byte":40954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"21671201632","text":"# Lagrange's four-square theorem\n# https://pgh268400.tistory.com/464\n# For each number, take the minimum over (the number minus a smaller perfect square) and add one.\n# - This is a dynamic-programming problem.\n# - Expressing each natural number n as the minimum count of squares that sum to it gives:\n# - 1 -> 1\n# - 2 -> 2\n# - 3 -> 3\n# - 4 -> 1\n# - 5 -> 2\n# - 6 -> 3\n# - 7 -> 4\n# - 8 -> 2\n# - 9 -> 1\n# - 10 -> 2\n# - 11 -> 3\n# - 12 -> 3\n# - 13 -> 2\n# - 14 -> 3\n# - 15 -> 4\n# - 16 -> 1\n# - From this table we can derive the recurrence:\n# - f(n) = f(n - m * m) + 1\n# - Implementing this recurrence is the crux of the problem and its hardest part.\n# - Apply it in a loop and store the minimum in the dp array.\nN = int(input())\ndp = [0, 1]\nfor i in range(2, N + 1):\n    target = int(1e9)\n    # check squares\n    for j in range(1, 50001):\n        # stop once the square of the current number exceeds i\n        if j ** 2 > i:\n            break\n        target = min(target, dp[i - (j ** 2)])\n    dp.append(target + 1)\nprint(dp[N])\n","repo_name":"djs02027/python_algorithm-study","sub_path":"solve.ac/class 3/17626_Four Squares/17626_Four Squares.py","file_name":"17626_Four Squares.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"37959420565","text":"def get_subjects_list(worker):\n    \"\"\" Will send subjects list for current group \"\"\"\n    worker.answer_to_the_message(\n        \"\"\"This is the subjects list for current group:\\n{}\"\"\".format('\\n'.join(\n            ' - '.join(str(e) for e in el)\n            for el in enumerate((binding.subject.name\n                                 for binding in (\n                                     worker.source.participant_group or\n                                     worker.source.administrator_page.participant_group).\n                                 subjectgroupbinding_set.all()), 1)))\n    )\n","repo_name":"KoStard/TelegramExerciseManager","sub_path":"main/command_handlers/get_subjects_list.py","file_name":"get_subjects_list.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"1854859271","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, xavier_init\n\nfrom mmdet.core import auto_fp16\nfrom ..builder import NECKS\nfrom mmcv.ops import DeformConv2dPack\n\n@NECKS.register_module()\nclass FPNs16C45add(nn.Module):\n    r\"\"\"Feature Pyramid Network.\n\n    This is an implementation of paper `Feature Pyramid Networks for Object\n    Detection `_.\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool | str): If bool, it decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, its actual mode is specified by `extra_convs_on_inputs`.\n            If str, it specifies the source feature map of the extra convs.\n            Only the following options are allowed\n\n            - 'on_input': Last feat map of neck inputs (i.e. 
backbone feature).\n - 'on_lateral': Last feature map after lateral convs.\n - 'on_output': The last output feature map after fpn convs.\n extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs\n on the original feature from the backbone. If True,\n it is equivalent to `add_extra_convs='on_input'`. If False, it is\n equivalent to set `add_extra_convs='on_output'`. Default to True.\n relu_before_extra_convs (bool): Whether to apply relu before the extra\n conv. Default: False.\n no_norm_on_lateral (bool): Whether to apply norm on lateral.\n Default: False.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (str): Config dict for activation layer in ConvModule.\n Default: None.\n upsample_cfg (dict): Config dict for interpolate layer.\n Default: `dict(mode='nearest')`\n\n Example:\n >>> import torch\n >>> in_channels = [2, 3, 5, 7]\n >>> scales = [340, 170, 84, 43]\n >>> inputs = [torch.rand(1, c, s, s)\n ... for c, s in zip(in_channels, scales)]\n >>> self = FPN(in_channels, 11, len(in_channels)).eval()\n >>> outputs = self.forward(inputs)\n >>> for i in range(len(outputs)):\n ... print(f'outputs[{i}].shape = {outputs[i].shape}')\n outputs[0].shape = torch.Size([1, 11, 340, 340])\n outputs[1].shape = torch.Size([1, 11, 170, 170])\n outputs[2].shape = torch.Size([1, 11, 84, 84])\n outputs[3].shape = torch.Size([1, 11, 43, 43])\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n use_dconv=False,\n kernel1=True):\n super(FPNs16C45add, self).__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.use_dconv = use_dconv\n self.kernel1 = kernel1\n self.deform_conv = DeformConv2dPack(\n out_channels,\n out_channels,\n kernel_size=3,\n padding=1, )\n if self.use_dconv:\n if self.kernel1:\n self.lateral_conv_8 = DeformConv2dPack(\n in_channels[0],\n out_channels,\n kernel_size=1,)\n\n self.lateral_conv_32 = DeformConv2dPack(\n in_channels[2],\n out_channels,\n kernel_size=1,)\n else:\n self.lateral_conv_8 = DeformConv2dPack(\n in_channels[0],\n out_channels,\n kernel_size=3,\n padding=1,)\n\n self.lateral_conv_32 = DeformConv2dPack(\n in_channels[2],\n out_channels,\n kernel_size=3,\n padding=1,)\n else:\n self.lateral_conv_8 = nn.Conv2d(\n in_channels[0],\n out_channels,\n kernel_size=1,)\n\n self.lateral_conv_32 = nn.Conv2d(\n in_channels[2],\n out_channels,\n kernel_size=1,)\n\n # default init_weights for conv(msra) and norm in ConvModule\n def init_weights(self):\n \"\"\"Initialize the weights of FPN module.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n @auto_fp16()\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n # print(len(inputs), inputs[0].shape, inputs[1].shape, inputs[2].shape),\n\n new_input_32 = self.lateral_conv_32(inputs[2])\n\n res_x = inputs[1] + new_input_32 #\n x = self.deform_conv(res_x)\n x = x + res_x\n\n return tuple([x])\n","repo_name":"jiabeiwangTJU/DICL","sub_path":"mmdet/models/necks/fpn_single16_C45add.py","file_name":"fpn_single16_C45add.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"28491064419","text":"from .emsemble.HomogeneousEmsemble import HomogeneousEmsemble\nfrom .emsemble.Emsemble import Emsemble\nfrom .feature_selection.EmsembleFS import EmsembleFS\nfrom .feature_selection.RFRFE import 
RFRFE\nfrom .feature_selection.MSVMRFE import MSVMRFE\nfrom .feature_selection.mRMR import mRMR\nfrom .feature_selection.Relief import Relief\nfrom .feature_selection.AGA import AGA\nfrom .classification.ThresholdClassifier import ThresholdClassifier\nfrom .classification.BaselineClassifiers import BaselineClassifiers\n\nfrom sklearn import base \nfrom sklearn.datasets import load_diabetes, load_iris, load_breast_cancer\nfrom sklearn.model_selection import train_test_split, StratifiedKFold, ShuffleSplit\nfrom sklearn.feature_selection import RFE, RFECV, SelectFromModel\nfrom sklearn.svm import SVR, SVC, LinearSVC, LinearSVR\nfrom sklearn.preprocessing import normalize, MinMaxScaler, StandardScaler, KBinsDiscretizer, label_binarize\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.metrics import roc_curve, roc_auc_score, f1_score, precision_score, recall_score, accuracy_score, mutual_info_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nfrom sklearn.ensemble import BaggingClassifier\nimport seaborn as sns\nimport statsmodels.api as sm\nimport os\nimport warnings\n\ndef make_dir_if_not_exist(directory, filename):\n new_dir = None\n if directory is not None:\n new_dir = os.path.join(directory, filename)\n if not os.path.exists(new_dir):\n os.mkdir(new_dir)\n return new_dir\n\ndef add_row(results, y_test, y_pred, base_clf, num_features, num_classes, prediction_proba):\n average='binary'\n if num_classes > 2:\n average='micro'\n auc_score = None\n if prediction_proba is not None:\n if average == 'binary':\n auc_score = roc_auc_score(y_test, y_pred) \n else:\n auc_score = []\n classes = range(num_classes)\n y_bin = label_binarize(y_test, classes=classes)\n for i in range(num_classes):\n y_temp = y_bin[:,i]\n auc_score.append(roc_auc_score(y_temp, prediction_proba[:, i]))\n auc_score = str(auc_score)\n\n results = results.append({'base clf' : base_clf,\n 'num features': num_features,\n 'accuracy' : accuracy_score(y_test, y_pred), \n 'precision': precision_score(y_test, y_pred, average=average), \n 'recall': recall_score(y_test, y_pred, average=average), \n 'auc': auc_score if auc_score is not None else -1, \n 'f1' : f1_score(y_test, y_pred, average=average)}, \n ignore_index=True)\n return results\n\ndef add_classifier(results, all_pred, all_pred_scores, \n X_train_tranformed, y_train, X_test_transformed, y_test, \n clf_temp, clf_name, \n results_params, num_classes, multiclass,\n example_indices, compare_classifiers, misclassified):\n\n clf = None\n if hasattr(clf_temp, 'reset'): # emsembles\n clf = clf_temp.reset()\n else:\n clf = base.clone(clf_temp)\n if num_classes > 2:\n if multiclass=='ovr':\n clf = OneVsRestClassifier(clf)\n else:\n clf = OneVsOneClassifier(clf)\n \n # get predictions\n clf.fit(X_train_tranformed, y_train)\n y_pred = clf.predict(X_test_transformed)\n all_pred[clf_name] = y_pred\n \n # get ids of examples that were misclassified\n if compare_classifiers == 'mcnemar':\n for i in range(len(y_pred)):\n if y_pred[i] != y_test[i]:\n misclassified[clf_name].append(example_indices[i])\n\n # get prediction probabilities \n proba = None\n if (num_classes == 2 or multiclass == 'ovr') and clf.__class__.__name__ != 'SVC':\n proba = clf.predict_proba(X_test_transformed)\n all_pred_scores[clf_name] = proba\n\n # add performance 
results to table\n results = add_row(results, y_test, y_pred, clf_name, len(X_train_tranformed[0]), num_classes, proba)\n if results_params is not None:\n for k in results_params.keys():\n results.loc[len(results)-1, k] = results_params[k]\n return results\n\ndef run_classifiers(results, base_clfs, base_clf_names,\n X_train_tranformed, y_train, X_test_transformed, y_test, \n num_classes, multiclass, results_params,\n example_indices, compare_classifiers, misclassified):\n all_pred_scores = dict()\n all_pred = dict()\n\n # create homogeneous emsemble\n for i in range(len(base_clfs)):\n results = add_classifier(results, all_pred, all_pred_scores, \n X_train_tranformed, y_train, X_test_transformed, y_test, \n base_clfs[i] , base_clf_names[i], \n results_params, num_classes, multiclass,\n example_indices, compare_classifiers, misclassified)\n return results, all_pred, all_pred_scores\n\n\ndef create_scores_csv(num_classes, y, prediction_scores, ids, directory):\n if num_classes == 2:\n create_scores_csv_binary(y, prediction_scores, ids, directory)\n else:\n create_scores_csv_multiclass(y, prediction_scores, ids, directory)\n\ndef create_scores_csv_multiclass(y, prediction_scores, ids, directory):\n num_classes = np.max(y)\n if not os.path.exists(directory):\n os.mkdir(directory)\n for clf in prediction_scores.keys():\n scores = np.array(prediction_scores[clf])\n correct = []\n wrong = []\n for i in range(len(scores)): \n true_class = y[i] * 1.0\n predicted_class = np.argmax(scores[i]) * 1.0\n if true_class == predicted_class:\n # TP \n correct.append(np.concatenate(( [ids[i], true_class, predicted_class] , scores[i])))\n else:\n wrong.append(np.concatenate(( [ids[i], true_class, predicted_class] , scores[i])))\n \n names = ['correct', 'wrong'] \n data_columns = ['Id', 'True Class', 'Predicted Class']\n for c in range(num_classes + 1):\n data_columns.append('score class ' + str(c))\n for i, data in enumerate([correct, wrong]):\n data = sorted(data,key=lambda x: x[1])\n df = pd.DataFrame(data, columns=data_columns)\n df.to_csv(os.path.join(directory, clf + \"_\" + names[i] + \".csv\"), index=False)\n \n# find TPs with high prediction score\n# FNs with low score for prediction of 1 \ndef create_scores_csv_binary(y, prediction_scores, ids, directory):\n num_classes = np.max(y)\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n for clf in prediction_scores.keys():\n scores = np.array(prediction_scores[clf])\n neg_score = scores[:,0]\n pos_score = scores[:,1]\n TP, FP, TN, FN = [], [], [], []\n for i in range(len(scores)): \n if y[i] == 1: # true class\n if pos_score[i] > neg_score[i]: #TP\n TP.append([ ids[i], neg_score[i], pos_score[i]])\n else: #FN\n FN.append([ ids[i], neg_score[i], pos_score[i]])\n else:\n if pos_score[i] > neg_score[i]: #FP \n FP.append([ ids[i], neg_score[i], pos_score[i]])\n else: # TN \n TN.append([ ids[i], neg_score[i], pos_score[i]])\n \n names = ['TP', 'FP', 'TN', 'FN'] \n data_columns = ['Id', 'Neg Score', 'Pos Score']\n for i, data in enumerate([TP, FP, TN, FN]):\n data = sorted(data,key=lambda x: x[1])\n df = pd.DataFrame(data, columns=data_columns)\n df.to_csv(os.path.join(directory, clf + \"_\" + names[i] + \".csv\"), index=False)\n\ndef run_selector_classifier(selector, all_prediction, all_prediction_scores, test_index, key_str, results, \n base_clfs, base_clf_names,\n X_train_tranformed, y_train, X_test_transformed, y_test, \n num_classes, multiclass, results_params,\n compare_classifiers, misclassified):\n \n\n # run classification\n if 
len(X_train_tranformed[0]) > 0:\n results, temp_pred, temp_pred_scores = run_classifiers(results, base_clfs, base_clf_names,\n X_train_tranformed, y_train, X_test_transformed, y_test, \n num_classes, multiclass, results_params,\n test_index, compare_classifiers, misclassified)\n # map prediction score over to all_prediction_scores\n if num_classes == 2 or multiclass == 'ovr':\n for j in range(len(test_index)):\n for name in base_clf_names:\n if name != 'SVC':\n if key_str is None:\n all_prediction_scores[name][test_index[j]] = temp_pred_scores[name][j]\n all_prediction[name][test_index[j]] = temp_pred[name][j]\n else:\n all_prediction_scores[key_str][name][test_index[j]] = temp_pred_scores[name][j]\n all_prediction[key_str][name][test_index[j]] = temp_pred[name][j]\n \n return results\n\ndef initial_all_prediction_scores(param_strs, base_clf_names, X):\n all_prediction_scores = dict()\n if param_strs is not None:\n for i in range(len(param_strs)): \n prediction_scores = dict()\n for name in base_clf_names:\n if name != 'SVC':\n prediction_scores[name] = [[0,0]] * len(X)\n all_prediction_scores[param_strs[i]] = prediction_scores\n else:\n for name in base_clf_names:\n if name != 'SVC':\n all_prediction_scores[name] = [[0,0]] * len(X)\n return all_prediction_scores\n\ndef init_misclassified_dict(base_clf_names):\n misclassified = dict()\n for name in base_clf_names:\n misclassified[name] = []\n return misclassified\n\ndef EmsembleFSClassifierPipeline(X, y, groups, \n base_clfs, base_clf_names,\n selector, combines, thresholds, cv_size, patient_ids, \n directory=None, filename=None, multiclass='ovr', \n compare_classifiers=None):\n # set number classes\n num_classes = np.max(y) + 1\n\n # make subfolder\n new_dir = make_dir_if_not_exist(directory, filename)\n\n # initial results dataframe\n results = pd.DataFrame(columns=[\"base clf\", \"combine\", \"threshold\", \"num features\", \"accuracy\", \"precision\", \"recall\", \"auc\", \"f1\"])\n \n # prediction scores\n param_strs = None\n if combines is not None:\n param_strs = [combines[i] + str(thresholds[i]) for i in range(len(combines))]\n all_prediction_scores = initial_all_prediction_scores(param_strs, base_clf_names, X)\n all_predictions = initial_all_prediction_scores(param_strs, base_clf_names, X)\n \n results_params = dict()\n\n # dict of misclassified example indices for each classifier\n misclassified = dict()\n\n selection_folds = dict()\n for param in param_strs:\n misclassified[param] = init_misclassified_dict(base_clf_names)\n selection_folds[param] = []\n \n CVSplitter = StratifiedKFold(n_splits=cv_size, random_state=None, shuffle=True)\n \n \n for train_index, test_index in CVSplitter.split(X, groups): # split based on group_number\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n groups_train = groups[train_index]\n\n # run feature selection\n if combines is not None:\n selector.fit(X_train, y_train, combine=False, groups=groups_train)\n else:\n selector.fit(X_train, y_train)\n\n if combines is not None:\n for i in range(len(combines)):\n results_params['combine'] = combines[i]\n results_params['threshold'] = thresholds[i]\n key_str = combines[i] + str(thresholds[i])\n\n selector.combine_rankings(combines[i], thresholds[i])\n selection_folds[key_str].append(selector.selection_indices)\n X_train_tranformed = selector.transform(X_train)\n X_test_transformed = selector.transform(X_test)\n \n results = run_selector_classifier(selector, all_predictions, all_prediction_scores, 
test_index, key_str, \n results, base_clfs, base_clf_names,\n X_train_tranformed, y_train, X_test_transformed, y_test,\n num_classes, multiclass, results_params,\n compare_classifiers, misclassified[key_str])\n\n else:\n selection_folds.append(selector.selection_indices)\n X_train_tranformed = selector.transform(X_train)\n X_test_transformed = selector.transform(X_test)\n results = run_selector_classifier(selector, all_prediction_scores, test_index, None, \n results, base_clfs, base_clf_names,\n X_train_tranformed, y_train, X_test_transformed, y_test, \n num_classes, multiclass, results_params,\n compare_classifiers, misclassified)\n \n if combines is not None:\n for i in range(len(combines)):\n \n key_str = combines[i] + str(thresholds[i])\n prediction_scores = all_prediction_scores[key_str]\n if new_dir is not None:\n create_scores_csv(num_classes, y, prediction_scores, patient_ids, os.path.join(new_dir, key_str))\n else:\n if new_dir is not None:\n create_scores_csv(num_classes, y, all_prediction_scores, patient_ids, os.path.join(new_dir, filename))\n \n if compare_classifiers is not None and compare_classifiers=='mcnemar':\n for param in misclassified.keys():\n compare_all(misclassified[param], len(X), os.path.join(new_dir, param))\n \n if new_dir is not None:\n results.groupby(['base clf', 'combine', 'threshold']).mean().to_csv(os.path.join(new_dir, filename + '_mean' + \".csv\"))\n results.to_csv(os.path.join(new_dir, filename + \".csv\"))\n \n return results, all_predictions, all_prediction_scores, selection_folds\n\ndef SelectorThresholdClassifierPipeline(X, y, groups,\n cv_size, feature_selector, \n base_clfs, base_clf_names, \n directory, filename):\n new_dir = make_dir_if_not_exist(directory, filename)\n\n num_classes = np.max(y) + 1\n \n results = pd.DataFrame(columns=[\"base clf\", \"num features\", \"accuracy\", \"precision\", \"recall\", \"auc\", \"f1\"])\n CVSplitter = StratifiedKFold(n_splits=cv_size, random_state=None, shuffle=True)\n \n shuffle_splitter = ShuffleSplit(n_splits=5, test_size=.2)\n \n for train_index, test_index in CVSplitter.split(X, groups):\n X_dev, X_test = X[train_index], X[test_index]\n y_dev, y_test = y[train_index], y[test_index]\n group_dev, group_test = groups[train_index], groups[test_index]\n \n for train_index, test_index in shuffle_splitter.split(X_dev, group_dev):\n X_train, X_holdout = X[train_index], X[test_index]\n y_train, y_holdout = y[train_index], y[test_index]\n\n # Feature selection \n feature_selector.fit(X_train, y_train)\n X_train_tranformed = feature_selector.transform(X_train)\n X_holdout_transformed = feature_selector.transform(X_holdout)\n X_test_transformed = feature_selector.transform(X_test)\n \n # threshold classifier\n for i in range(len(base_clfs)):\n clf_temp = base_clfs[i]\n if hasattr(clf_temp, 'reset'): # emsembles\n clf = clf_temp.reset()\n else:\n clf = base.clone(clf_temp)\n \n threshold_clf = ThresholdClassifier(clf, multilabel=False)\n threshold_clf.fit(X_train_tranformed, y_train)\n threshold_clf.optimize_threshold(X_holdout_transformed, y_holdout)\n y_pred = threshold_clf.predict(X_test_transformed)\n results = add_row(results, y_test, y_pred, base_clf_names[i], len(X_train_tranformed[0]), num_classes, None)\n for class_i in range(len(threshold_clf.thresholds)):\n results.loc[len(results)-1, 'threshold class ' + str(class_i)] = threshold_clf.thresholds[class_i]\n \n if new_dir is not None:\n results.groupby(['base clf']).mean().to_csv(os.path.join(new_dir, filename + \"_mean\" + \".csv\"))\n 
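# NOTE (editor sketch): results holds one row per CV fold and classifier, so the groupby\n        # mean above collapses folds into per-classifier averages for the _mean.csv file; e.g.\n        # pd.DataFrame({\"base clf\": [\"a\", \"a\"], \"f1\": [0.8, 0.9]}).groupby(\"base clf\").mean()  # f1 -> 0.85\n        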
results.to_csv(os.path.join(new_dir, filename + \".csv\"))\n return results\n\ndef FilterSelectorClassifierPipeline(X, y, groups,\n base_clfs, base_clf_names,\n cv_size, feature_sizes, selector, patient_ids, \n directory=None, filename=None, multiclass='ovr',\n compare_classifiers=None):\n num_classes = np.max(y) + 1\n\n new_dir = make_dir_if_not_exist(directory, filename)\n \n results = pd.DataFrame(columns=[\"base clf\", \"num features\", \"accuracy\", \"precision\", \"recall\", \"auc\", \"f1\"])\n CVSplitter = StratifiedKFold(n_splits=cv_size, random_state=None, shuffle=True)\n \n # prediction scores\n param_strs = [str(f_size) for f_size in feature_sizes]\n all_prediction_scores = initial_all_prediction_scores(param_strs, base_clf_names, X)\n all_predictions = initial_all_prediction_scores(param_strs, base_clf_names, X)\n\n # dict of misclassified example indices for each classifier \n misclassified = dict()\n selection_folds = dict()\n for param in param_strs:\n misclassified[param] = init_misclassified_dict(base_clf_names)\n selection_folds[param] = []\n\n for train_index, test_index in CVSplitter.split(X, groups):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n selector.fit(X_train, y_train)\n for f_size in feature_sizes:\n key_str = str(f_size)\n X_train_tranformed = selector.transform(X_train, f_size)\n X_test_transformed = selector.transform(X_test, f_size)\n selection_folds[key_str].append(selector.selected_indices[:f_size])\n # run classification\n results = run_selector_classifier(selector, all_predictions, all_prediction_scores, test_index, key_str, results, \n base_clfs, base_clf_names,\n X_train_tranformed, y_train, X_test_transformed, y_test, \n num_classes, multiclass, None,\n compare_classifiers, misclassified[key_str])\n \n for f_size in feature_sizes:\n key_str = str(f_size)\n prediction_scores = all_prediction_scores[key_str]\n if new_dir is not None:\n create_scores_csv(num_classes, y, prediction_scores, patient_ids, os.path.join(new_dir, 'f_size_' + key_str))\n \n if compare_classifiers is not None and compare_classifiers=='mcnemar':\n for param in misclassified.keys():\n compare_all(misclassified[param], len(X), os.path.join(new_dir, 'f_size_' + param))\n\n if new_dir is not None:\n results.groupby(['base clf', 'num features']).mean().to_csv(os.path.join(new_dir, filename + \"_mean\" + \".csv\"))\n results.to_csv(os.path.join(new_dir, filename + \".csv\"))\n return results, all_predictions, all_prediction_scores, selection_folds\n\ndef RF_FS_CV(X, y, groups,\n n_estimators, num_features, patient_ids, \n directory=None, filename=None):\n # make subfolder\n num_classes = np.max(y) + 1\n new_dir = make_dir_if_not_exist(directory, filename)\n \n results = pd.DataFrame(columns=[\"base clf\", \"num features\", \"accuracy\", \"precision\", \"recall\", \"auc\", \"f1\"])\n CVSplitter = StratifiedKFold(n_splits=5, random_state=None, shuffle=True)\n \n prediction_scores = dict()\n prediction_scores['Random Forest'] = [[0,0]] * len(X)\n \n for train_index, test_index in CVSplitter.split(X, groups):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n sel = RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1)\n sel.fit(X_train, y_train)\n order = sel.feature_importances_.argsort().argsort()\n for f_size in num_features:\n selected_indices = []\n min_rank_select = len(order) - f_size\n for i in range(len(order)):\n if order[i] >= min_rank_select:\n 
selected_indices.append(i)\n X_train_trans = X_train[:, selected_indices]\n\n clf = RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1)\n clf.fit(X_train_trans, y_train)\n y_pred = clf.predict(X_test[:, selected_indices])\n #add_row(results, y_test, y_pred, base_clf, num_features, num_classes, prediction_proba):\n results = add_row(results, y_test, y_pred, 'RF', f_size, num_classes, prediction_scores['Random Forest'])\n \n y_scores = clf.predict_proba(X_test[:, selected_indices])\n for j in range(len(test_index)):\n prediction_scores['Random Forest'][test_index[j]] = y_scores[j]\n \n if new_dir is not None:\n create_scores_csv(num_classes, y, prediction_scores, patient_ids, os.path.join(new_dir, 'RF'))\n results.to_csv(os.path.join(new_dir, filename + \".csv\"))\n results.groupby(['base clf', \"num features\"]).mean().to_csv(os.path.join(new_dir, filename + '_mean' + \".csv\"))\n return results\n\ndef AGAFS(X, y, groups,\n aga, \n base_clfs, base_clf_names, \n cv_splits,\n patient_ids, directory=None, filename=None, \n multiclass='ovr', compare_classifiers=None):\n \n num_classes = np.max(y) + 1\n new_dir = make_dir_if_not_exist(directory, filename)\n\n prediction_scores = initial_all_prediction_scores(None, base_clf_names, X)\n misclassified = init_misclassified_dict(base_clf_names)\n \n results = pd.DataFrame(columns=[\"base clf\", \"num features\", \"accuracy\", \"precision\", \"recall\", \"auc\", \"f1\"])\n CVSplitter = StratifiedKFold(n_splits=cv_splits, random_state=None, shuffle=True)\n for train_index, test_index in CVSplitter.split(X, groups):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n aga.fit(X_train, y_train)\n selection = aga.population[np.argmax(aga.fitnesses)]\n X_train_tranformed = X_train[:, selection]\n X_test_transformed = X_test[:, selection]\n \n results, temp_pred_scores = run_classifiers(results, base_clfs, base_clf_names,\n X_train_tranformed, y_train, X_test_transformed, y_test, \n num_classes, multiclass, None,\n test_index, compare_classifiers, misclassified)\n for j in range(len(test_index)):\n for name in base_clf_names:\n if name in prediction_scores.keys():\n prediction_scores[name][test_index[j]] = temp_pred_scores[name][j]\n \n if new_dir is not None:\n create_scores_csv(num_classes, y, prediction_scores, patient_ids, os.path.join(new_dir, filename))\n results.groupby(['base clf']).mean().to_csv(os.path.join(new_dir, filename + \"_mean\" + \".csv\"))\n results.to_csv(os.path.join(new_dir, filename + \".csv\"))\n return results\n\n# Baseline classifier \ndef CV_baseline(X, y, groups,\n cv_splits, directory=None, filename=None):\n all_results = []\n clfs = [LogisticRegression(penalty='l2'), \n SVC(), \n AdaBoostClassifier(n_estimators=1000),\n RandomForestClassifier(n_estimators=1000)]\n\n CVSplitter = StratifiedKFold(n_splits=cv_splits, random_state=None, shuffle=True)\n for train_index, test_index in CVSplitter.split(X, groups):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n baseline_clfs = BaselineClassifiers(clfs)\n baseline_clfs.fit(X_train, y_train)\n y_preds = baseline_clfs.predict(X_test)\n all_results.append(baseline_clfs.get_scores(y_test, y_preds))\n all_results = pd.concat(all_results, axis=0)\n if directory is not None:\n all_results.to_csv(directory + filename)\n all_results.groupby('classifer').mean().to_csv(os.path.join(directory, filename + \".csv\"))\n return all_results\n\nfrom statsmodels.stats.contingency_tables 
import mcnemar\n\ndef intersection_size(lst1, lst2): \n count = 0\n for value in lst1:\n if value in lst2:\n count+=1\n return count \n\ndef reject_null(misclf, total_examples, clf_name1, clf_name2):\n size = intersection_size(misclf[clf_name1], misclf[clf_name2])\n a = [[0,0], [0,0]]\n # misclassified by both\n a[0][0] = size\n # misclassified by A\n a[0][1] = len(misclf[clf_name1]) - size\n # misclassified by B\n a[1][0] = len(misclf[clf_name2]) - size\n # not misclassified by A or B\n a[1][1] = total_examples- a[0][0] - a[0][1] - a[1][0]\n result = mcnemar(a, exact=True)\n #print('statistic=%.3f, p-value=%.3f' % (result.statistic, result.pvalue))\n #alpha = 0.05\n print(clf_name1 + \" v \" + clf_name2 + \" \" + str(result.pvalue))\n return result.pvalue\n #if result.pvalue > alpha:\n # return False\n #else:\n # return True \n \ndef compare_all(misclf, total_count, new_dir):\n clf_names = list(misclf.keys())\n sig_results = np.zeros([len(clf_names), len(clf_names)])\n for i, name in enumerate(clf_names):\n for j in range(i+1, len(clf_names)):\n result = reject_null(misclf, total_count, name, clf_names[j])\n sig_results[i][j] = result\n print(sig_results)\n # write results\n df = pd.DataFrame(sig_results, columns=clf_names)\n df['clf'] = clf_names\n if new_dir is not None:\n df.to_csv(os.path.join(new_dir, 'compare_clf.csv'), index=False)\n return sig_results","repo_name":"helenzhao093/MLMethods","sub_path":"run_trials.py","file_name":"run_trials.py","file_ext":"py","file_size_in_byte":27168,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"16878425807","text":"# Aliasing - second name for piece of data.\n# Easier (and more useful) than making a second copy.\n# If data is immutable, aliases don't matter\n# Because data can't change.\n#But if data, can change, can lead to bugs\n\nfirst = \"Isaac\"\nsecond = first\nsecond = \"Newton\"\n\n# Not mutable, creates new string\nfirst = first + \"Newton\"\n\n# List is mutable, both first and second point to same list in memory.\n#Changes in first will affect second.\n\nfirst = [\"Isaac\"]\nsecond = first\n# Only appending list....\nfirst.append(\"Newton\")\n\n#But returns mutated list!\nprint(second)","repo_name":"MochaDreamboat/Alfy","sub_path":"FCC Python/24_Aliasing.py","file_name":"24_Aliasing.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30855203262","text":"import torch\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom mmdet3d.core.bbox.structures import (get_proj_mat_by_coord_type,\n points_cam2img)\nfrom ..builder import FUSION_LAYERS\nfrom . 
import apply_3d_transformation\nfrom .deform_layer import DeformTransLayer\nfrom .position_embedding import PositionEmbeddingSine\n\ndef get_reference_feats(img_meta,\n                        img_features,\n                        points,\n                        proj_mat,\n                        coord_type,\n                        img_scale_factor,\n                        img_crop_offset,\n                        img_flip,\n                        img_pad_shape,\n                        img_shape,\n                        aligned=True,\n                        padding_mode='zeros',\n                        align_corners=True):\n    \"\"\"Obtain image features using points.\n\n    Args:\n        img_meta (dict): Meta info.\n        img_features (torch.Tensor): 1 x C x H x W image features.\n        points (torch.Tensor): Nx3 point cloud in LiDAR coordinates.\n        proj_mat (torch.Tensor): 4x4 transformation matrix.\n        coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.\n        img_scale_factor (torch.Tensor): Scale factor with shape of \\\n            (w_scale, h_scale).\n        img_crop_offset (torch.Tensor): Crop offset used to crop \\\n            image during data augmentation with shape of (w_offset, h_offset).\n        img_flip (bool): Whether the image is flipped.\n        img_pad_shape (tuple[int]): int tuple indicates the h & w after\n            padding, this is necessary to obtain features in feature map.\n        img_shape (tuple[int]): int tuple indicates the h & w before padding\n            after scaling, this is necessary for flipping coordinates.\n        aligned (bool, optional): Whether to use bilinear interpolation when\n            sampling image features for each point. Defaults to True.\n        padding_mode (str, optional): Padding mode when padding values for\n            features of out-of-image points. Defaults to 'zeros'.\n        align_corners (bool, optional): Whether to align corners when\n            sampling image features for each point. Defaults to True.\n\n    Returns:\n        torch.Tensor: NxC image features sampled by point coordinates.\n    \"\"\"\n\n    # apply transformation based on info in img_meta\n    points = apply_3d_transformation(\n        points, coord_type, img_meta, reverse=True)\n\n    # project points to camera coordinate\n    pts_2d_with_depth = points_cam2img(points, proj_mat, with_depth=True)\n    pts_depth = pts_2d_with_depth[:, -1]\n    pts_2d = pts_2d_with_depth[:, :2]\n\n    valid_depth_idx = (pts_depth > 0).reshape(-1,)\n\n    # img transformation: scale -> crop -> flip\n    # the image is resized by img_scale_factor\n    img_coors = pts_2d[:, 0:2] * img_scale_factor  # Nx2\n    img_coors -= img_crop_offset\n\n    # grid sample, the valid grid range should be in [-1,1]\n    coor_x, coor_y = torch.split(img_coors, 1, dim=1)  # each is Nx1\n\n    if img_flip:\n        # by default we take it as horizontal flip\n        # use img_shape before padding for flip\n        orig_h, orig_w = img_shape\n        coor_x = orig_w - coor_x\n\n    h, w = img_pad_shape\n    coor_y = coor_y / h * 2 - 1\n    coor_x = coor_x / w * 2 - 1\n\n    valid_y_idx = ((coor_y >= -1) & (coor_y <= 1)).reshape(-1,)\n    valid_x_idx = ((coor_x >= -1) & (coor_x <= 1)).reshape(-1,)\n\n    grid = torch.cat([coor_x, coor_y],\n                     dim=1).unsqueeze(0).unsqueeze(0)  # Nx2 -> 1x1xNx2\n\n    # align_corner=True provides higher performance\n    mode = 'bilinear' if aligned else 'nearest'\n    point_features = F.grid_sample(\n        img_features,\n        grid,\n        mode=mode,\n        padding_mode=padding_mode,\n        align_corners=align_corners)  # 1xCx1xN feats\n    norm_grid = (grid + 1) / 2\n\n    valid_idx = valid_depth_idx & valid_y_idx & valid_x_idx\n\n    return point_features.squeeze().t(), norm_grid.clone().detach(), valid_idx\n\n\n@FUSION_LAYERS.register_module()\nclass MultiVoxelDeformFusionV2(BaseModule):\n    \"\"\"Fuse image features from multi-scale features.\n\n    Args:\n        img_channels (list[int] | int): Channels of image features.\n            It could be a list if the input is multi-scale image features.\n        pts_channels (int): Channels of point features\n        
mid_channels (int): Channels of middle layers\n out_channels (int): Channels of output fused features\n img_levels (int, optional): Number of image levels. Defaults to 3.\n coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.\n Defaults to 'LIDAR'.\n conv_cfg (dict, optional): Dict config of conv layers of middle\n layers. Defaults to None.\n norm_cfg (dict, optional): Dict config of norm layers of middle\n layers. Defaults to None.\n act_cfg (dict, optional): Dict config of activatation layers.\n Defaults to None.\n activate_out (bool, optional): Whether to apply relu activation\n to output features. Defaults to True.\n fuse_out (bool, optional): Whether apply conv layer to the fused\n features. Defaults to False.\n dropout_ratio (int, float, optional): Dropout ratio of image\n features to prevent overfitting. Defaults to 0.\n aligned (bool, optional): Whether apply aligned feature fusion.\n Defaults to True.\n align_corners (bool, optional): Whether to align corner when\n sampling features according to points. Defaults to True.\n padding_mode (str, optional): Mode used to pad the features of\n points that do not have corresponding image features.\n Defaults to 'zeros'.\n lateral_conv (bool, optional): Whether to apply lateral convs\n to image features. Defaults to True.\n \"\"\"\n\n def __init__(self,\n img_channels,\n pts_channels,\n mid_channels,\n out_channels,\n img_levels=3,\n n_heads=4,\n n_points=8,\n norm=True,\n disentangle=False,\n multi_input='',\n with_pos_embed=False,\n coord_type='LIDAR',\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=None,\n init_cfg=None,\n activate_out=True,\n fuse_out=False,\n dropout_ratio=0,\n aligned=True,\n align_corners=True,\n padding_mode='zeros',\n lateral_conv=True):\n super(MultiVoxelDeformFusionV2, self).__init__(init_cfg=init_cfg)\n if isinstance(img_levels, int):\n img_levels = [img_levels]\n if isinstance(img_channels, int):\n img_channels = [img_channels] * len(img_levels)\n assert isinstance(img_levels, list)\n assert isinstance(img_channels, list)\n assert len(img_channels) == len(img_levels)\n\n self.img_levels = img_levels\n self.coord_type = coord_type\n self.act_cfg = act_cfg\n self.activate_out = activate_out\n self.fuse_out = fuse_out\n self.dropout_ratio = dropout_ratio\n self.img_channels = img_channels\n self.mid_channels = mid_channels\n self.aligned = aligned\n self.align_corners = align_corners\n self.padding_mode = padding_mode\n self.n_heads = n_heads\n self.n_points = n_points\n self.with_pos_embed = with_pos_embed\n self.norm = norm\n self.multi_input = multi_input\n self.disentangle = disentangle\n if with_pos_embed:\n self.pos_embed = PositionEmbeddingSine(self.mid_channels // 2)\n if self.disentangle:\n self.img_mu_proj = self._generate_proj(self.mid_channels, self.mid_channels)\n self.img_logvar_proj = self._generate_proj(self.mid_channels, self.mid_channels)\n self.img_p_proj = self._generate_proj(self.mid_channels, self.mid_channels)\n self.pts_mu_proj = self._generate_proj(self.mid_channels, self.mid_channels)\n self.pts_logvar_proj = self._generate_proj(self.mid_channels, self.mid_channels)\n self.pts_p_proj = self._generate_proj(self.mid_channels, self.mid_channels)\n self.img_recons_proj = self._generate_mapping(self.mid_channels, self.mid_channels)\n self.pts_recons_proj = self._generate_mapping(self.mid_channels, self.mid_channels)\n\n self.lateral_convs = None\n if lateral_conv:\n self.lateral_convs = nn.ModuleList()\n for i in range(len(img_channels)):\n l_conv = ConvModule(\n img_channels[i],\n mid_channels,\n 
3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=self.act_cfg,\n inplace=False)\n self.lateral_convs.append(l_conv)\n self.img_transform = nn.Sequential(\n nn.Linear(mid_channels * len(img_channels), out_channels),\n nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),\n )\n else:\n self.img_transform = nn.Sequential(\n nn.Linear(sum(img_channels), out_channels),\n nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),\n )\n self.pts_transform = nn.Sequential(\n nn.Linear(pts_channels, out_channels),\n nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),\n )\n self.cross_att = DeformTransLayer(d_model=mid_channels, d_ffn=2 * mid_channels, \\\n n_levels=1, n_heads=self.n_heads, n_points=self.n_points,\\\n version='v2' if self.multi_input == 'concat' else 'v1')\n\n if self.fuse_out:\n self.fuse_conv = nn.Sequential(\n nn.Linear(mid_channels * 2, out_channels),\n # For pts the BN is initialized differently by default\n # TODO: check whether this is necessary\n nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),\n nn.ReLU(inplace=False))\n\n if init_cfg is None:\n self.init_cfg = [\n dict(type='Xavier', layer='Conv2d', distribution='uniform'),\n dict(type='Xavier', layer='Linear', distribution='uniform')\n ]\n \n def _generate_proj(self, in_channels, out_channels):\n return nn.Sequential(\n nn.Linear(in_channels, out_channels),\n nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),\n nn.ReLU(inplace=False),\n nn.Linear(out_channels, out_channels),\n )\n\n def _generate_mapping(self, in_channels, out_channels):\n return nn.Sequential(\n nn.Linear(in_channels, out_channels),\n nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),\n nn.ReLU(inplace=False),\n nn.Linear(out_channels, out_channels),\n )\n\n def forward(self, img_feats, voxel_coors, voxel_feats, \\\n img_metas, voxel_size, point_cloud_range):\n """Forward function.\n\n Args:\n img_feats (list[torch.Tensor]): Image features. 
NOTE this should be a list of list\n voxel_coors (torch.Tensor): Voxel coordinates of the whole batch,\n with the sample index in column 0.\n voxel_feats (torch.Tensor): A tensor consisting of the voxel\n features of the whole batch.\n img_metas (list[dict]): Meta information of images.\n voxel_size (tuple): Size of a single voxel along (x, y, z).\n point_cloud_range (tuple): Range of the point cloud, used together\n with voxel_size to decode voxel coordinates.\n\n Returns:\n torch.Tensor: Fused features of each point.\n """\n img_pts = self.obtain_mlvl_feats(img_feats, voxel_coors, voxel_feats,\\\n img_metas, voxel_size, point_cloud_range)\n if self.disentangle:\n img_pts, losses_vae = img_pts\n img_pre_fuse = self.img_transform(img_pts)\n if self.training and self.dropout_ratio > 0:\n img_pre_fuse = F.dropout(img_pre_fuse, self.dropout_ratio)\n pts_pre_fuse = self.pts_transform(voxel_feats)\n\n # fuse_out = img_pre_fuse + pts_pre_fuse\n fuse_out = torch.cat([img_pre_fuse, pts_pre_fuse], dim=-1)\n if self.activate_out:\n fuse_out = F.relu(fuse_out)\n if self.fuse_out:\n fuse_out = self.fuse_conv(fuse_out)\n if self.disentangle:\n return fuse_out, losses_vae\n return fuse_out\n\n def obtain_mlvl_feats(self, img_feats, voxel_coors, voxel_feats,\n img_metas, voxel_size, point_cloud_range):\n """Obtain multi-level features for each point.\n\n Args:\n img_feats (list(torch.Tensor)): Multi-scale image features produced\n by image backbone in shape (N, C, H, W).\n voxel_coors (torch.Tensor): Voxel coordinates of the whole batch.\n voxel_feats (torch.Tensor): Voxel features of the whole batch.\n img_metas (list[dict]): Meta information for each sample.\n voxel_size (tuple): Size of a single voxel.\n point_cloud_range (tuple): Range of the point cloud.\n\n Returns:\n torch.Tensor: Corresponding image features of each point.\n """\n if self.lateral_convs is not None:\n img_ins = [\n lateral_conv(img_feats[i])\n for i, lateral_conv in zip(self.img_levels, self.lateral_convs)\n ]\n else:\n img_ins = img_feats\n if self.with_pos_embed:\n for ii in range(len(img_ins)):\n pos_embed = self.pos_embed(img_ins[ii])\n img_ins[ii] += pos_embed\n img_feats_per_point = []\n # Sample multi-level features\n num_camera = img_ins[0].shape[0] // len(img_metas)\n start_iter = 0\n if self.disentangle:\n recon_loss_sum = 0; kl_loss_sum = 0; num_sum = 0\n for i in range(len(img_metas)):\n mlvl_img_feats = []\n voxel_coors_per_img = voxel_coors[voxel_coors[:, 0] == i]\n x = (voxel_coors_per_img[:, 3] + 0.5) * voxel_size[0] + point_cloud_range[0]\n y = (voxel_coors_per_img[:, 2] + 0.5) * voxel_size[1] + point_cloud_range[1]\n z = (voxel_coors_per_img[:, 1] + 0.5) * voxel_size[2] + point_cloud_range[2]\n x = x.unsqueeze(-1); y = y.unsqueeze(-1); z = z.unsqueeze(-1)\n decoded_voxel_coors = torch.cat([x, y, z], dim=-1)\n num_voxels = decoded_voxel_coors.shape[0]\n voxel_feat = voxel_feats[start_iter: start_iter + num_voxels]\n for level in range(len(self.img_levels)):\n if self.disentangle:\n img_pts_feat, recon_loss, kl_loss, num_samples = self.sample_single(img_ins[level][i * num_camera: (i + 1) * num_camera],\n decoded_voxel_coors, voxel_feat, img_metas[i], level_num=level)\n mlvl_img_feats.append(img_pts_feat)\n recon_loss_sum += recon_loss; kl_loss_sum += kl_loss; num_sum += num_samples\n else:\n mlvl_img_feats.append(\n self.sample_single(img_ins[level][i * num_camera: (i + 1) * num_camera],\n decoded_voxel_coors, voxel_feat, img_metas[i], level_num=level))\n start_iter += num_voxels\n mlvl_img_feats = torch.cat(mlvl_img_feats, dim=-1)\n img_feats_per_point.append(mlvl_img_feats)\n\n img_pts = torch.cat(img_feats_per_point, dim=0)\n if self.disentangle:\n losses_vae = dict(\n recon_loss=recon_loss_sum / num_sum,\n kl_loss=kl_loss_sum / num_sum\n )\n return img_pts, losses_vae\n return img_pts\n
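 \n # Editorial note (hedged): disentangle_feature below uses the standard\n # VAE reparameterization trick, z = mu + eps * exp(0.5 * logvar) with\n # eps ~ N(0, I), which keeps sampling differentiable w.r.t. mu and logvar.\n # Minimal illustration with made-up shapes:\n # mu, logvar = torch.zeros(4, 8), torch.zeros(4, 8)\n # z = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)\n # kl = -0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim=1)\n # The kl expression matches the per-sample KL terms computed below.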
\n def disentangle_feature(self, img_feat, pts_feat):\n img_mu = self.img_mu_proj(img_feat)\n img_logvar = self.img_logvar_proj(img_feat)\n img_p = self.img_p_proj(img_feat)\n img_std = torch.exp(0.5 * img_logvar)\n img_eps = torch.randn_like(img_std)\n img_z = img_eps * img_std + img_mu\n # img_all = torch.cat([img_p, img_z], dim=-1) # entangle to reconstruct\n img_all = img_p * img_z\n img_recons = self.img_recons_proj(img_all)\n\n pts_mu = self.pts_mu_proj(pts_feat)\n pts_logvar = self.pts_logvar_proj(pts_feat)\n pts_p = self.pts_p_proj(pts_feat)\n pts_std = torch.exp(0.5 * pts_logvar)\n pts_eps = torch.randn_like(pts_std)\n pts_z = pts_eps * pts_std + pts_mu\n # pts_all = torch.cat([pts_p, pts_z], dim=-1)\n pts_all = pts_p * pts_z\n pts_recons = self.pts_recons_proj(pts_all)\n\n img_recons_loss = torch.sum(0.5 * (img_recons - img_feat) ** 2, dim=1)\n pts_recons_loss = torch.sum(0.5 * (pts_recons - pts_feat) ** 2, dim=1)\n\n img_kl_loss = -0.5 * torch.sum(1 + img_logvar - img_mu ** 2 - img_logvar.exp(), dim = 1)\n pts_kl_loss = -0.5 * torch.sum(1 + pts_logvar - pts_mu ** 2 - pts_logvar.exp(), dim = 1)\n\n recons_loss = img_recons_loss + pts_recons_loss\n kl_loss = img_kl_loss + pts_kl_loss\n # return loss shape (N, )\n return recons_loss, kl_loss, img_p, pts_p\n\n def sample_single(self, img_feats, pts, pts_feats, img_meta, level_num):\n """Sample features from single level image feature map.\n\n Args:\n img_feats (torch.Tensor): Image feature map in shape\n (num_camera, C, H, W).\n pts (torch.Tensor): Points of a single sample.\n pts_feats (torch.Tensor): Voxel features of a single sample.\n img_meta (dict): Meta information of the single sample.\n level_num (int): Index of the feature level being sampled.\n\n Returns:\n torch.Tensor: Single level image features of each point.\n """\n # TODO: image transformation also extracted\n img_scale_factor = (\n pts.new_tensor(img_meta['scale_factor'][:2])\n if 'scale_factor' in img_meta.keys() else 1)\n img_flip = img_meta['flip'] if 'flip' in img_meta.keys() else False\n img_crop_offset = (\n pts.new_tensor(img_meta['img_crop_offset'])\n if 'img_crop_offset' in img_meta.keys() else 0)\n proj_mat_list = get_proj_mat_by_coord_type(img_meta, self.coord_type)\n num_camera = img_feats.shape[0]\n ref_points_list = []; valid_idx_list = []; ref_feats_list = []\n for camera_id in range(num_camera):\n ref_feats, ref_points, valid_idx = get_reference_feats(\n img_meta=img_meta,\n img_features=img_feats[camera_id].unsqueeze(0),\n points=pts,\n proj_mat=pts.new_tensor(proj_mat_list[camera_id]),\n coord_type=self.coord_type,\n img_scale_factor=img_scale_factor,\n img_crop_offset=img_crop_offset,\n img_flip=img_flip,\n img_pad_shape=img_meta['input_shape'][:2],\n img_shape=img_meta['img_shape'][:2],\n aligned=self.aligned,\n padding_mode=self.padding_mode,\n align_corners=self.align_corners,\n )\n ref_feats_list.append(ref_feats)\n ref_points_list.append(ref_points)\n valid_idx_list.append(valid_idx)\n\n # pts_feats = self.pts_key_proj(pts_feats.detach())\n # align pts_feats from each camera\n assign_mask = pts_feats.new_zeros((pts.shape[0]), dtype=torch.bool)\n final_img_pts = pts_feats.new_zeros((pts.shape[0], self.mid_channels))\n if self.disentangle:\n recon_loss_all = pts_feats.new_zeros((pts.shape[0], ))\n kl_loss_all = pts_feats.new_zeros((pts.shape[0], ))\n for camera_id in range(num_camera):\n valid_idx = valid_idx_list[camera_id]\n img_feat = img_feats[camera_id].unsqueeze(0)\n bs, channel_num, h, w = img_feat.shape\n flatten_img_feat = img_feat.permute(0, 2, 3, 1).\\\n reshape(bs, h * w, channel_num)\n ref_points = ref_points_list[camera_id].reshape(bs, -1, 1, 2)\n assign_idx = (~assign_mask) & valid_idx\n assign_mask |= valid_idx\n # filter invalid voxel features 
for redundant training\n valid_ref_points = ref_points[:, assign_idx]\n valid_ref_feats = ref_feats_list[camera_id][assign_idx]\n valid_pts_feats = pts_feats[assign_idx]\n N, Len_in, _ = flatten_img_feat.shape\n level_spatial_shapes = pts_feats.new_tensor([(h, w)], dtype=torch.long)\n level_start_index = pts_feats.new_tensor([0], dtype=torch.long)\n if self.disentangle:\n recon_loss, kl_loss, valid_ref_feats, valid_pts_feats = self.disentangle_feature(valid_ref_feats, valid_pts_feats)\n valid_ref_feats = valid_ref_feats.unsqueeze(0)\n valid_pts_feats = valid_pts_feats.unsqueeze(0)\n if self.multi_input == 'concat':\n query_feat = torch.cat([valid_ref_feats, valid_pts_feats], dim=-1)\n elif self.multi_input == 'multiply':\n query_feat = valid_ref_feats * valid_pts_feats\n elif self.multi_input == 'multiply_pts_detach':\n query_feat = valid_ref_feats * valid_pts_feats.detach()\n elif self.multi_input == 'pts':\n query_feat = valid_pts_feats\n elif self.multi_input == 'pts_detach':\n query_feat = valid_pts_feats.detach()\n elif self.multi_input == 'img':\n query_feat = valid_ref_feats\n elif self.multi_input == 'img_detach':\n query_feat = valid_ref_feats.detach()\n else:\n raise ValueError(f'unsupported multi_input mode: {self.multi_input}')\n img_pts = self.cross_att(valid_ref_feats, query_feat, valid_ref_points, \\\n flatten_img_feat, level_spatial_shapes, level_start_index).squeeze(0)\n final_img_pts[assign_idx] = img_pts\n if self.disentangle:\n recon_loss_all[assign_idx] += recon_loss\n kl_loss_all[assign_idx] += kl_loss\n if self.disentangle:\n recon_loss_all = torch.sum(recon_loss_all) / self.mid_channels ** 0.5\n kl_loss_all = torch.sum(kl_loss_all) / self.mid_channels ** 0.5\n num_valid_samples = torch.sum(assign_mask.int())\n return final_img_pts, recon_loss_all, kl_loss_all, num_valid_samples\n return final_img_pts\n","repo_name":"zehuichen123/AutoAlignV2","sub_path":"mmdet3d/models/fusion_layers/multi_voxel_deform_fusion_v2.py","file_name":"multi_voxel_deform_fusion_v2.py","file_ext":"py","file_size_in_byte":22200,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"78"} +{"seq_id":"4098020680","text":"# binary search, sort\n\nimport bisect\nfrom typing import List\n\n# time O(nlogn)\n# space O(n)\n\nclass Solution:\n def numSubseq(self, nums: List[int], target: int) -> int:\n n = len(nums)\n mod = 10 ** 9 + 7\n nums.sort()\n ret = 0\n \n for i in range(n):\n j = bisect.bisect_right(nums, target-nums[i]) - 1\n if j >= i:\n ret += pow(2, j-i, mod)\n\n return ret % mod\n \n# two pointers, sort\n\n# time O(nlogn)\n# space O(1)\n\nclass Solution:\n def numSubseq(self, nums: List[int], target: int) -> int:\n n = len(nums)\n nums.sort()\n mod = 10 ** 9 + 7\n ret = 0\n l = 0\n r = n-1\n\n while l <= r:\n if nums[l] + nums[r] <= target:\n ret = (ret + pow(2, r-l, mod)) % mod\n l += 1\n else:\n r -= 1\n\n return ret\n
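\n# Worked example (editorial addition): nums = [3, 5, 6, 7], target = 9.\n# After sorting, l = 0 pairs with r = 2 since 3 + 6 <= 9, adding 2 ** 2 = 4\n# subsequences ([3], [3,5], [3,6], [3,5,6]); no other (l, r) pair qualifies,\n# so both solutions above return 4 for this input.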
\n","repo_name":"boknowswiki/mytraning","sub_path":"lc/python/1498_number_of_subsequences_that_satisfy_the_give_sum_condition.py","file_name":"1498_number_of_subsequences_that_satisfy_the_give_sum_condition.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74584852731","text":"# Import required modules\nimport phpserialize\nimport traceback\nimport asyncio\nimport hashlib\nimport os.path\nimport subprocess\nimport urllib.request\nfrom datetime import datetime, timedelta\nimport inspect\nimport tempfile\nimport shutil\nfrom cogs.misc.exceptions import HoNAuthenticationError, HoNServerError\nfrom cogs.connectors.masterserver_connector import MasterServerHandler\nfrom cogs.connectors.chatserver_connector import ChatServerHandler\nfrom cogs.TCP.game_packet_lsnr import handle_clients\nfrom cogs.TCP.auto_ping_lsnr import AutoPingListener\nfrom cogs.connectors.api_server import start_api_server\nfrom cogs.game.game_server import GameServer\nfrom cogs.game.cow_master import CowMaster\nfrom cogs.handlers.commands import Commands\nfrom cogs.handlers.events import stop_event, ReplayStatus, GameStatus, GamePhase, GameServerCommands, EventBus as ManagerEventBus\nfrom cogs.misc.logger import get_logger, get_misc, get_home, get_mqtt, get_filebeat_status, get_filebeat_auth_url\nfrom pathlib import Path\nfrom cogs.game.healthcheck_manager import HealthCheckManager\nfrom enum import Enum\nfrom os.path import exists\n# NOTE: this re-import of get_filebeat_auth_url shadows the import from\n# cogs.misc.logger above; the utilities.filebeat version is the one used.\nfrom utilities.filebeat import main as filebeat, filebeat_status, get_filebeat_auth_url\nimport random\n\nLOGGER = get_logger()\nMISC = get_misc()\nHOME_PATH = get_home()\nHON_WAS_VERSION_URL = "http://gitea.kongor.online/administrator/KONGOR/raw/branch/main/patch/was-crIac6LASwoafrl8FrOa/x86_64/version.cfg"\nHON_WAS_LAUNCHER_DOWNLOAD_URL = "http://gitea.kongor.online/administrator/KONGOR/raw/branch/main/patch/was-crIac6LASwoafrl8FrOa/x86_64/hon_update_x64.zip"\nHON_LAS_VERSION_URL = "http://gitea.kongor.online/administrator/KONGOR/raw/branch/main/patch/las-crIac6LASwoafrl8FrOa/x86-biarch/version.cfg"\nHON_LAS_LAUNCHER_DOWNLOAD_URL = "http://gitea.kongor.online/administrator/KONGOR/raw/branch/main/patch/las-crIac6LASwoafrl8FrOa/x86-biarch/launcher.zip"\n\nclass GameServerManager:\n def __init__(self, global_config, setup):\n """\n Initializes a new GameServerManager object.\n\n Args:\n global_config (dict): A dictionary containing the global configuration for the game server.\n """\n self.global_config = global_config\n
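 # Editorial note (hedged): the manager is event-driven. Components publish\n # work by emitting named events on this bus, and the subscribe() calls\n # below bind each event name to a coroutine handler. Illustrative usage\n # only (the exact EventBus API lives in cogs.handlers.events):\n # await self.event_bus.emit('cmd_message_server', game_server, 'hello')\n # would dispatch to self.cmd_message_server(game_server, 'hello').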
 """\n Event Subscriptions. These are used to call other parts of the code in an event-driven approach within async functions.\n """\n self.event_bus = ManagerEventBus()\n self.event_bus.subscribe('handle_replay_request', self.handle_replay_request)\n self.event_bus.subscribe('authenticate_to_chat_svr', self.authenticate_and_handle_chat_server)\n self.event_bus.subscribe('start_game_servers', self.start_game_servers)\n self.event_bus.subscribe('start_game_servers_task', self.start_game_servers_task)\n self.event_bus.subscribe('add_game_servers', self.create_dynamic_game_server)\n self.event_bus.subscribe('remove_game_servers', self.remove_dynamic_game_server)\n self.event_bus.subscribe('remove_game_server', self.remove_game_server)\n self.event_bus.subscribe('balance_game_server_count', self.balance_game_server_count)\n self.event_bus.subscribe('enable_game_server', self.enable_game_server)\n self.event_bus.subscribe('disable_game_server', self.disable_game_server)\n self.event_bus.subscribe('cmd_message_server', self.cmd_message_server)\n self.event_bus.subscribe('cmd_shutdown_server', self.cmd_shutdown_server)\n self.event_bus.subscribe('cmd_wake_server', self.cmd_wake_server)\n self.event_bus.subscribe('cmd_sleep_server', self.cmd_sleep_server)\n self.event_bus.subscribe('cmd_custom_command', self.cmd_custom_command)\n self.event_bus.subscribe('fork_server_from_cowmaster', self.fork_server_from_cowmaster)\n self.event_bus.subscribe('config_change_hook_actions', self.config_change_hook_actions)\n # self.event_bus.subscribe('start_gameserver_from_cowmaster', self.start_gameserver_from_cowmaster)\n self.event_bus.subscribe('patch_server', self.initialise_patching_procedure)\n self.event_bus.subscribe('update', self.update)\n self.event_bus.subscribe('check_for_restart_required', self.check_for_restart_required)\n self.event_bus.subscribe('resubmit_match_stats_to_masterserver', self.resubmit_match_stats_to_masterserver)\n self.event_bus.subscribe('update_server_start_semaphore', self.update_server_start_semaphore)\n self.tasks = {\n 'cli_handler':None,\n 'health_checks':None,\n 'autoping_listener':None,\n 'gameserver_listener':None,\n 'authentication_handler':None,\n 'gameserver_startup':None,\n 'task_cleanup': None\n }\n self.schedule_task(self.cleanup_tasks_every_30_minutes(), 'task_cleanup')\n self.schedule_task(self.heartbeat(), 'heartbeat')\n # initialise the config validator in case we need it\n self.setup = setup\n\n # set the current state of patching\n self.patching = False\n\n # preserve the current system path. We need it for a silly fix.\n self.preserved_path = os.environ["PATH"]\n\n # Initialize dictionaries to store game servers and client connections\n self.server_start_semaphore = asyncio.Semaphore(self.global_config['hon_data']['svr_max_start_at_once']) # cap on how many servers may start concurrently (svr_max_start_at_once)\n self.game_servers = {}\n self.client_connections = {}\n\n self.cowmaster = CowMaster(self.global_config['hon_data']['svr_starting_gamePort'] - 2, self.global_config)\n\n # Initialize a Commands object for sending commands to game servers\n self.commands = Commands(self.game_servers, self.client_connections, self.global_config, self.event_bus, self.cowmaster)\n # Initialise the autoping listener object\n self.auto_ping_listener = AutoPingListener(self.global_config, self.global_config['hon_data']['autoping_responder_port'])\n # Create game server instances\n LOGGER.info(f"Manager running, starting {self.global_config['hon_data']['svr_total']} servers. 
Staggered start ({self.global_config['hon_data']['svr_max_start_at_once']} at a time)")\n self.create_all_game_servers()\n\n coro = self.commands.handle_input()\n self.schedule_task(coro, 'cli_handler')\n\n # Start running health checks\n\n # Initialize MasterServerHandler and send requests\n self.chat_server_handler = None\n self.chat_server_connected = None\n self.master_server_connected = None\n self.master_server_handler = MasterServerHandler(master_server=self.global_config['hon_data']['svr_masterServer'], patch_server=self.global_config['hon_data']['svr_patchServer'], version=self.global_config['hon_data']['svr_version'], architecture=f'{self.global_config["hon_data"]["architecture"]}', event_bus=self.event_bus)\n self.health_check_manager = HealthCheckManager(self.game_servers, self.event_bus, self.check_upstream_patch, self.resubmit_match_stats_to_masterserver, self.global_config)\n\n coro = self.health_check_manager.run_health_checks()\n self.schedule_task(coro, 'health_checks')\n\n MISC.save_last_working_branch()\n\n def cleanup_tasks(self, tasks_dict, current_time):\n for task_name, task in list(tasks_dict.items()): # Use list() to avoid "dictionary changed size during iteration" error\n if task is None:\n continue\n\n if not isinstance(task, asyncio.Task):\n LOGGER.error(f"Item '{task_name}' in tasks is not a Task object.")\n continue # skip the malformed entry rather than aborting the whole sweep\n\n if task.done() and task.exception() is None and task.end_time + timedelta(minutes=30) < current_time:\n del tasks_dict[task_name]\n\n async def cleanup_tasks_every_30_minutes(self):\n while not stop_event.is_set():\n current_time = datetime.now()\n # Iterate over all game servers and the manager\n for game_server in self.game_servers.values():\n self.cleanup_tasks(game_server.tasks, current_time)\n self.cleanup_tasks(self.tasks, current_time)\n for _ in range(30 * 60):\n if stop_event.is_set():\n break\n await asyncio.sleep(1)\n\n def schedule_task(self, coro, name, override = False):\n existing_task = self.tasks.get(name) # Get existing task if any\n\n if existing_task is not None:\n if not isinstance(existing_task, asyncio.Task):\n LOGGER.error(f"Item '{name}' in tasks is not a Task object.")\n # Choose one of the following lines, depending on your requirements:\n # raise ValueError(f"Item '{name}' in tasks is not a Task object.") # Option 1: raise an error\n existing_task = None # Option 2: ignore the non-Task item and overwrite it later\n\n if existing_task:\n if existing_task.done():\n if not existing_task.cancelled():\n # If the task has finished and was not cancelled, retrieve any possible exception to avoid 'unretrieved exception' warnings\n exception = existing_task.exception()\n if exception:\n LOGGER.error(f"The previous task '{name}' raised an exception: {exception}. 
We are scheduling a new one.\")\n else:\n LOGGER.debug(f\"The previous task '{name}' was cancelled.\")\n else:\n if not override:\n # Task is still running\n LOGGER.debug(f\"Task '{name}' is still running, new task not scheduled.\")\n return existing_task # Return existing task\n\n # Create and register the new task\n task = asyncio.create_task(coro)\n task.add_done_callback(lambda t: setattr(t, 'end_time', datetime.now()))\n self.tasks[name] = task\n return task\n \n async def cmd_shutdown_server(self, game_server=None, force=False, delay=0, delete=False, disable=True, kill=False):\n try:\n if game_server is None: return False\n client_connection = self.client_connections.get(game_server.port, None)\n await asyncio.sleep(delay)\n if client_connection:\n if force:\n client_connection.writer.write(GameServerCommands.COMMAND_LEN_BYTES.value)\n client_connection.writer.write(GameServerCommands.SHUTDOWN_BYTES.value)\n await client_connection.writer.drain()\n LOGGER.info(f\"Command - Shutdown packet sent to GameServer #{game_server.id}. FORCED.\")\n if get_mqtt():\n get_mqtt().publish_json(\"manager/command\", {\"event_type\":\"server_shutdown_force\"})\n return True\n else:\n game_server.schedule_task(game_server.schedule_shutdown_server(delete=delete, disable=disable),'scheduled_shutdown')\n # await asyncio.sleep(0) # allow the scheduled task to be executed\n LOGGER.info(f\"Command - Shutdown packet sent to GameServer #{game_server.id}. Scheduled.\")\n if get_mqtt():\n get_mqtt().publish_json(\"manager/command\", {\"event_type\":\"server_shutdown_scheduled\"})\n return True\n else:\n # this server hasn't connected to the manager yet\n await game_server.stop_server_exe(disable=disable, delete=delete, kill=kill)\n game_server.reset_game_state()\n return True\n except Exception as e:\n LOGGER.exception(f\"An error occurred while handling the {inspect.currentframe().f_code.co_name} function: {traceback.format_exc()}\")\n\n async def cmd_wake_server(self, game_server):\n try:\n client_connection = self.client_connections.get(game_server.port, None)\n if not client_connection: return\n\n # TODO: use client_connection.send_packet() ??\n client_connection.writer.write(GameServerCommands.COMMAND_LEN_BYTES.value)\n client_connection.writer.write(GameServerCommands.WAKE_BYTES.value)\n await client_connection.writer.drain()\n\n LOGGER.info(f\"Command - Wake command sent to GameServer #{game_server.id}.\")\n except Exception as e:\n LOGGER.exception(f\"An error occurred while handling the {inspect.currentframe().f_code.co_name} function: {traceback.format_exc()}\")\n\n async def cmd_sleep_server(self, game_server):\n try:\n client_connection = self.client_connections.get(game_server.port, None)\n if not client_connection: return\n\n # TODO: use client_connection.send_packet() ??\n client_connection.writer.write(GameServerCommands.COMMAND_LEN_BYTES.value)\n client_connection.writer.write(GameServerCommands.SLEEP_BYTES.value)\n await client_connection.writer.drain()\n\n LOGGER.info(f\"Command - Sleep command sent to GameServer #{game_server.id}.\")\n except Exception as e:\n LOGGER.exception(f\"An error occurred while handling the {inspect.currentframe().f_code.co_name} function: {traceback.format_exc()}\")\n\n async def cmd_message_server(self, game_server, message):\n try:\n client_connection = self.client_connections.get(game_server.port, None)\n if client_connection is None:\n return\n\n if isinstance(message, list): message = (' ').join(message)\n message_bytes = GameServerCommands.MESSAGE_BYTES.value 
async def cmd_message_server(self, game_server, message):\n try:\n client_connection = self.client_connections.get(game_server.port, None)\n if client_connection is None:\n return\n\n if isinstance(message, list): message = (' ').join(message)\n message_bytes = GameServerCommands.MESSAGE_BYTES.value + message.encode('ascii') + b'\\x00'\n length = len(message_bytes)\n length_bytes = length.to_bytes(2, byteorder='little')\n\n # TODO: use client_connection.send_packet() ??\n client_connection.writer.write(length_bytes)\n client_connection.writer.write(message_bytes)\n await client_connection.writer.drain()\n LOGGER.info(f"Command - Message command sent to GameServer #{game_server.id}.")\n except Exception:\n LOGGER.exception(f"An error occurred while handling the {inspect.currentframe().f_code.co_name} function: {traceback.format_exc()}")\n\n async def cmd_custom_command(self, game_server, command, delay = 0):\n try:\n client_connection = self.client_connections.get(game_server.port, None)\n if client_connection is None:\n return\n await asyncio.sleep(delay)\n\n if isinstance(command, list): command = (' ').join(command)\n command_bytes = GameServerCommands.COMMAND_BYTES.value + command.encode('ascii') + b'\\x00'\n length = len(command_bytes)\n length_bytes = length.to_bytes(2, byteorder='little')\n\n # TODO: use client_connection.send_packet() ??\n client_connection.writer.write(length_bytes)\n client_connection.writer.write(command_bytes)\n await client_connection.writer.drain()\n if get_mqtt():\n get_mqtt().publish_json("manager/command", {"event_type":"custom_command","command":command})\n LOGGER.info(f"Command - command sent to GameServer #{game_server.id}.")\n except Exception:\n LOGGER.exception(f"An error occurred while handling the {inspect.currentframe().f_code.co_name} function: {traceback.format_exc()}")\n\n async def cmd_cowmaster_fork(self, instance_number, port):\n try:\n client_connection = self.client_connections.get(self.cowmaster.get_port(), None)\n\n if client_connection is None:\n return\n\n command_bytes = b'\\x28' + instance_number.to_bytes(1, "little") + port.to_bytes(2, "little") + b'\\x00'\n\n #command_bytes = b'\\x28\\x01\\x11\\x27\\x00'\n length = len(command_bytes)\n length_bytes = length.to_bytes(2, byteorder='little')\n\n client_connection.writer.write(length_bytes)\n client_connection.writer.write(command_bytes)\n await client_connection.writer.drain()\n\n LOGGER.info("Command - command sent to CowMaster.")\n except Exception:\n LOGGER.exception(f"An error occurred while handling the {inspect.currentframe().f_code.co_name} function: {traceback.format_exc()}")\n\n async def fork_server_from_cowmaster(self, game_server):\n try:\n await self.cowmaster.fork_new_server(game_server)\n except Exception:\n LOGGER.error(traceback.format_exc())\n\n async def start_gameserver_from_cowmaster(self, num = "all"):\n try:\n starting_port = self.global_config.get("hon_data").get("svr_starting_gamePort")\n if num == "all":\n number_of_instances = self.global_config.get("hon_data").get("svr_total")\n else:\n number_of_instances = num\n\n for i in range(number_of_instances):\n LOGGER.debug(f"Forking instance {i+1} on port {starting_port}")\n await self.cmd_cowmaster_fork(i+1, starting_port)\n starting_port += 1\n except Exception as e:\n LOGGER.exception(e)\n \n async def heartbeat(self):\n while not stop_event.is_set():\n # Introduce jitter: sleep for a random duration between 0 and 10 seconds\n jitter = random.uniform(0, 10)\n await asyncio.sleep(jitter)\n \n for _ in range(60):\n if stop_event.is_set():\n return\n await asyncio.sleep(1)\n if get_mqtt():\n get_mqtt().publish_json("manager/status", {"event_type":"heartbeat", **self.manager_status()})\n \n
def manager_status(self):\n # Tally game phases in a single pass over the servers rather than nine\n # separate list-comprehension scans; the returned values are unchanged.\n phase_counts = {}\n total_players_online = 0\n for game_server in self.game_servers.values():\n phase = game_server.game_state._state['game_phase']\n phase_counts[phase] = phase_counts.get(phase, 0) + 1\n total_players_online += game_server.game_state._state['num_clients']\n total_free_servers = phase_counts.get(GamePhase.IDLE.value, 0)\n \n return {\n "total_free_servers": total_free_servers,\n "total_occupied_servers": len(self.game_servers) - total_free_servers,\n "total_servers_in_lobby": phase_counts.get(GamePhase.IN_LOBBY.value, 0),\n "total_servers_in_picking_phase": phase_counts.get(GamePhase.PICKING_PHASE.value, 0),\n "total_servers_in_banning_phase": phase_counts.get(GamePhase.BANNING_PHASE.value, 0),\n "total_servers_in_game_ended_phase": phase_counts.get(GamePhase.GAME_ENDED.value, 0),\n "total_servers_in_game_ending_phase": phase_counts.get(GamePhase.GAME_ENDING.value, 0),\n "total_servers_in_match_started_phase": phase_counts.get(GamePhase.MATCH_STARTED.value, 0),\n "total_servers_in_preparation_phase": phase_counts.get(GamePhase.PREPERATION_PHASE.value, 0),\n "total_players_online": total_players_online,\n "total_configured_servers": len(self.game_servers)\n }\n\n async def check_upstream_patch(self):\n if self.patching:\n LOGGER.info("Server patching is ongoing. Please wait.")\n return\n\n local_svr_version = MISC.get_svr_version(self.global_config['hon_data']['hon_executable_path'])\n\n try:\n patch_information = await self.master_server_handler.compare_upstream_patch()\n if not patch_information:\n LOGGER.error("Checking the upstream patch version failed, as the upstream services were unavailable.")\n return\n if patch_information[1] != 200:\n LOGGER.error(f"Checking the upstream patch version failed with: [{patch_information[1]}] {patch_information[0]}")\n return\n parsed_patch_information = phpserialize.loads(patch_information[0].encode('utf-8'))\n parsed_patch_information = {key.decode() if isinstance(key, bytes) else key: (value.decode() if isinstance(value, bytes) else value) for key, value in parsed_patch_information.items()}\n LOGGER.debug(f"Upstream patch information: {parsed_patch_information}")\n self.latest_available_game_version = parsed_patch_information['latest']\n
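\n # Editorial illustration (hedged): phpserialize returns bytes keys and\n # values, hence the decode comprehension above. For example:\n # phpserialize.loads(b'a:1:{s:6:"latest";s:5:"1.0.0";}')\n # evaluates to {b'latest': b'1.0.0'}, which the comprehension\n # converts to {'latest': '1.0.0'}.\n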
 if local_svr_version != self.latest_available_game_version:\n LOGGER.info(f"A newer patch is available. Initiating server shutdown for patching.\\n\\tUpgrading from {local_svr_version} --> {parsed_patch_information['latest']}")\n return True\n\n return False\n\n except Exception:\n LOGGER.error(traceback.format_exc())\n\n async def start_autoping_listener(self):\n LOGGER.debug("Starting AutoPingListener...")\n await self.auto_ping_listener.start_listener()\n if get_mqtt():\n get_mqtt().publish_json("manager/admin", {"event_type":"autoping_started"})\n\n async def start_api_server(self):\n if get_mqtt():\n get_mqtt().publish_json("manager/admin", {"event_type":"api_started"})\n await start_api_server(self.global_config, self.game_servers, self.tasks, self.health_check_manager.tasks, self.event_bus, self.find_replay_file, port=self.global_config['hon_data']['svr_api_port'])\n\n async def start_game_server_listener(self, host, game_server_to_mgr_port):\n """\n Starts a listener for incoming client connections on the specified host and port\n\n Args:\n host (str): the host to listen on\n game_server_to_mgr_port (int): the port to listen on\n\n Returns:\n None\n """\n\n # Start the listener for incoming client connections\n self.game_server_lsnr = await asyncio.start_server(\n lambda *args, **kwargs: handle_clients(*args, **kwargs, game_server_manager=self),\n host, game_server_to_mgr_port\n )\n LOGGER.highlight(f"[*] HoNfigurator Manager - Listening on {host}:{game_server_to_mgr_port} (LOCAL)")\n \n if get_mqtt():\n get_mqtt().publish_json("manager/admin", {"event_type":"gameserver_listener_started"})\n\n await stop_event.wait()\n\n # Close all client connections\n for connection in list(self.client_connections.values()):\n await connection.close()\n\n # Close the server\n self.game_server_lsnr.close()\n await self.game_server_lsnr.wait_closed()\n\n await self.master_server_handler.close_session()\n\n LOGGER.info("Stopping HoNfigurator manager listener.")\n\n def update(self):\n MISC.update_github_repository()\n MISC.save_last_working_branch()\n\n async def send_auth_request_to_masterserver(self):\n """\n Send a request to the master server to authenticate the game server.\n\n This function sends a request to the master server to authenticate the game server using the\n replay_auth method. If the authentication is successful, it returns the parsed response.\n\n Returns:\n dict: A dictionary containing the parsed response from the master server.\n\n Raises:\n HoNAuthenticationError: If the authentication fails.\n """\n mserver_auth_response = await self.master_server_handler.send_replay_auth(f"{self.global_config['hon_data']['svr_login']}:", hashlib.md5(self.global_config['hon_data']['svr_password'].encode()).hexdigest())\n \n if mserver_auth_response[1] != 200:\n prefix = (f"[{mserver_auth_response[1]}] Authentication to MasterServer failed. 
\")\n if mserver_auth_response[1] in [401, 403]:\n LOGGER.error(f\"{prefix}Please ensure your username and password are correct in {HOME_PATH / 'config' / 'config.json'}\")\n if mserver_auth_response[1] == 401:\n msg = 'Credentials incorrect'\n else:\n msg = 'Credentials correct, but no permissions to host.'\n self.set_masterserver_status(False, msg)\n \n elif mserver_auth_response[1] > 500 and mserver_auth_response[1] < 600:\n LOGGER.error(f\"{prefix}The issue is most likely server side.\")\n self.set_masterserver_status(False,'Server side issue, master server is probably down.')\n raise HoNAuthenticationError(f\"[{mserver_auth_response[1]}] Authentication error.\")\n \n LOGGER.highlight(\"Authenticated to MasterServer.\")\n self.set_masterserver_status(True)\n\n parsed_mserver_auth_response = phpserialize.loads(mserver_auth_response[0].encode('utf-8'))\n parsed_mserver_auth_response = {key.decode(): (value.decode() if isinstance(value, bytes) else value) for key, value in parsed_mserver_auth_response.items()}\n self.master_server_handler.set_server_id(parsed_mserver_auth_response['server_id'])\n self.master_server_handler.set_cookie(parsed_mserver_auth_response['session'])\n\n return parsed_mserver_auth_response\n\n\n async def manage_upstream_connections(self, udp_ping_responder_port, retry=30):\n \"\"\"\n Authenticate the game server with the master server and connect to the chat server.\n\n This function sends a request to the master server to authenticate the game server, and then\n connects to the chat server and authenticates the game server with the chat server. It also starts\n handling packets from the chat server.\n\n Args:\n udp_ping_responder_port (int): The port to use for the UDP ping responder.\n\n Returns:\n None\n \"\"\"\n while not stop_event.is_set():\n try:\n # Send requests to the master server\n parsed_mserver_auth_response = await self.send_auth_request_to_masterserver()\n\n # Connect to the chat server and authenticate\n await self.authenticate_and_handle_chat_server(parsed_mserver_auth_response, udp_ping_responder_port)\n\n except (HoNAuthenticationError, ConnectionResetError, Exception ) as e:\n LOGGER.error(f\"{e.__class__.__name__} occurred. 
Retrying in {retry} seconds...\")\n self.set_chatserver_status(False, e)\n\n for _ in range(retry):\n if stop_event.is_set():\n break\n await asyncio.sleep(1)\n\n LOGGER.info(\"Stopping authentication handlers\")\n async def authenticate_and_handle_chat_server(self, parsed_mserver_auth_response, udp_ping_responder_port):\n # Create a new ChatServerHandler instance and connect to the chat server\n self.chat_server_handler = ChatServerHandler(\n parsed_mserver_auth_response[\"chat_address\"],\n parsed_mserver_auth_response[\"chat_port\"],\n parsed_mserver_auth_response[\"session\"],\n parsed_mserver_auth_response[\"server_id\"],\n username=self.global_config['hon_data']['svr_login'],\n version=self.global_config['hon_data']['svr_version'],\n region=self.global_config['hon_data']['svr_location'],\n server_name=self.global_config['hon_data']['svr_name'],\n ip_addr=self.global_config['hon_data']['svr_ip'],\n udp_ping_responder_port=udp_ping_responder_port,\n event_bus=self.event_bus\n )\n\n # connect and authenticate to chatserver\n chat_auth_response = await self.chat_server_handler.connect()\n\n if not chat_auth_response:\n raise HoNAuthenticationError(f\"Chatserver authentication failure\")\n\n LOGGER.highlight(\"Authenticated to ChatServer.\")\n \n self.set_chatserver_status(True)\n\n # Start handling packets from the chat server\n await self.chat_server_handler.handle_packets()\n\n async def resubmit_match_stats_to_masterserver(self, match_id, file_path):\n mserver_stats_response = await self.master_server_handler.send_stats_file(f\"{self.global_config['hon_data']['svr_login']}:\", hashlib.md5(self.global_config['hon_data']['svr_password'].encode()).hexdigest(), match_id, file_path)\n if not mserver_stats_response or mserver_stats_response[1] != 200 or mserver_stats_response[0] == '':\n LOGGER.error(f\"[{mserver_stats_response[1] if mserver_stats_response else 'unknown'}] Stats resubmission failed - {file_path}. 
Response: {mserver_stats_response[0] if mserver_stats_response else 'unknown'}")\n if mserver_stats_response and mserver_stats_response[1] == 400 and 'title' in mserver_stats_response[0]:\n if mserver_stats_response[0] == "One or more validation errors occurred.":\n try:\n shutil.move(file_path, f"{file_path}.failed")\n except Exception:\n LOGGER.error(traceback.format_exc())\n return False\n LOGGER.info(f"{match_id} Stats resubmission successful")\n parsed_mserver_stats_response = phpserialize.loads(mserver_stats_response[0].encode('utf-8'))\n parsed_mserver_stats_response = {key.decode() if isinstance(key, bytes) else key: (value.decode() if isinstance(value, bytes) else value) for key, value in parsed_mserver_stats_response.items()}\n\n return True\n\n def set_server_status(self, server_type, connected, info=''):\n current_status = getattr(self, f"{server_type}_connected", None)\n mqtt = get_mqtt()\n\n if mqtt:\n if server_type == 'chatsv':\n mqtt.set_chatsv_state(connected)\n else:\n mqtt.set_mastersv_state(connected)\n \n if current_status != connected:\n setattr(self, f"{server_type}_connected", connected)\n state_info = 'connected' if connected else 'disconnected'\n if mqtt:\n mqtt.publish_json("manager/admin", {"event_type": f"{server_type}_state_change", "info": state_info})\n\n connection_event_type = f"{server_type}_connection_succeeded" if connected else f"{server_type}_connection_failed"\n if mqtt:\n mqtt.publish_json("manager/admin", {"event_type": connection_event_type, "info": str(info)})\n\n def set_chatserver_status(self, connected, info=''):\n self.set_server_status('chatsv', connected, info)\n\n def set_masterserver_status(self, connected, info=''):\n self.set_server_status('mastersv', connected, info)\n\n\n def create_all_game_servers(self):\n for server_id in range(1,self.global_config['hon_data']['svr_total']+1):\n port = self.global_config['hon_data']['svr_starting_gamePort'] + server_id - 1\n self.create_game_server(port)\n\n def create_game_server(self, game_server_port):\n """\n Creates a new game server instance and adds it to the game server dictionary.\n The server ID is derived from the port's offset from svr_starting_gamePort.\n\n Args:\n game_server_port (int): the port of the new game server\n\n Returns:\n GameServer: the newly created game server instance\n """\n id = game_server_port - self.global_config['hon_data']['svr_starting_gamePort'] + 1\n game_server = GameServer(id, game_server_port, self.global_config, self.remove_game_server, self.event_bus)\n self.game_servers[game_server_port] = game_server\n return game_server\n\n def find_next_available_ports(self):\n """\n Finds the next available port for creating a new game server\n\n Returns:\n int or None: the next available port, or None if no ports are available\n """\n starting_game_port = self.global_config['hon_data']['svr_starting_gamePort']\n total_allowed_servers = MISC.get_total_allowed_servers(self.global_config['hon_data']['svr_total_per_core'])\n\n for i in range(total_allowed_servers):\n game_port = starting_game_port + i\n\n if game_port not in self.game_servers:\n return game_port\n\n return None\n\n async def balance_game_server_count(self, to_add=0, to_remove=0):\n """\n Ensures that the maximum number of game servers are running by creating new game servers\n and removing existing game servers as needed.\n\n Returns:\n None\n """\n max_servers = self.global_config['hon_data']['svr_total']\n if to_add == "all":\n max_servers = MISC.get_total_allowed_servers(self.global_config['hon_data']['svr_total_per_core'])\n elif to_add > 0:\n 
max_servers += to_add\n\n if to_remove == \"all\":\n max_servers = 0\n elif to_remove > 0:\n max_servers -= to_remove\n\n if max_servers < 0: max_servers = 0\n self.global_config['hon_data']['svr_total'] = max_servers\n\n self.setup.validate_hon_data(self.global_config['hon_data'])\n\n idle_servers = [game_server for game_server in self.game_servers.values() if game_server.get_dict_value('status') != 3]\n occupied_servers = [game_server for game_server in self.game_servers.values() if game_server.get_dict_value('status') == 3]\n total_num_servers = len(occupied_servers) + len(idle_servers)\n num_servers_to_remove = max(total_num_servers - max_servers, 0)\n num_servers_to_create = max(max_servers - total_num_servers, 0)\n\n if num_servers_to_create > 0:\n start_servers = []\n for i in range(num_servers_to_create):\n game_port = self.find_next_available_ports()\n\n if game_port is not None:\n game_server = self.create_game_server(game_port)\n start_servers.append(game_server)\n LOGGER.info(f\"Game server created at game_port: {game_port}\")\n else:\n LOGGER.warn(\"No available ports for creating a new game server.\")\n coro = self.start_game_servers(start_servers)\n self.schedule_task(coro, 'gameserver_startup', override = True)\n\n async def remove_servers(servers, server_type):\n servers_removed = 0\n servers_to_remove = []\n for game_server in servers:\n await self.cmd_shutdown_server(game_server, delete=True)\n if not game_server.delete_me:\n servers_to_remove.append(game_server.port)\n servers_removed += 1\n if servers_removed >= num_servers_to_remove:\n break\n # for port in servers_to_remove:\n # await asyncio.sleep(0.1)\n # if port in self.game_servers:\n # game_server.cancel_tasks()\n # del self.game_servers[port]\n\n LOGGER.info(f\"Removed {servers_removed} {server_type} game servers. 
{total_num_servers - servers_removed} game servers are now running.\")\n return servers_removed\n\n if num_servers_to_remove > 0:\n removed_idle = await remove_servers(idle_servers, 'idle')\n num_servers_to_remove -= removed_idle\n if num_servers_to_remove > 0:\n removed_occupied = await remove_servers(occupied_servers, 'occupied')\n elif num_servers_to_remove < 0:\n LOGGER.warn(\"Number of running game servers is greater than the maximum number of game servers.\")\n\n\n async def create_dynamic_game_server(self):\n \"\"\"\n Creates new game server instances with the next available ports until the maximum number of servers is reached\n\n Returns:\n None\n \"\"\"\n max_servers = self.global_config['hon_data']['svr_total']\n running_servers = [game_server for game_server in self.game_servers.values() if game_server.started]\n num_servers_to_create = max_servers - len(running_servers)\n if num_servers_to_create <= 0:\n return\n\n start_servers = []\n for i in range(num_servers_to_create):\n game_port = self.find_next_available_ports()\n\n if game_port is not None:\n game_server = self.create_game_server(game_port)\n start_servers.append(game_server)\n LOGGER.info(f\"Game server created at game_port: {game_port}\")\n else:\n LOGGER.warn(\"No available ports for creating a new game server.\")\n\n coro = self.start_game_servers(start_servers)\n self.schedule_task(coro, 'gameserver_startup', override = True)\n\n async def check_for_restart_required(self, game_server='all'):\n if game_server == 'all':\n for game_server in self.game_servers.values():\n if game_server.params_are_different():\n await self.cmd_shutdown_server(game_server,disable=False)\n if self.cowmaster.client_connection:\n self.cowmaster.stop_cow_master(disable=False)\n await asyncio.sleep(0.1)\n game_server.enable_server()\n else:\n if game_server.params_are_different():\n await self.cmd_shutdown_server(game_server,disable=False)\n if self.cowmaster.client_connection:\n self.cowmaster.stop_cow_master(disable=False)\n await asyncio.sleep(0.1)\n game_server.enable_server()\n \n async def config_change_hook_actions(self):\n if not self.global_config['hon_data'].get('man_use_cowmaster') and self.cowmaster.client_connection:\n self.cowmaster.stop_cow_master()\n elif self.global_config['hon_data'].get('man_use_cowmaster') and not self.cowmaster.client_connection:\n await self.cowmaster.start_cow_master()\n\n async def remove_dynamic_game_server(self):\n max_servers = self.global_config['hon_data']['svr_total']\n running_servers = [game_server for game_server in self.game_servers.values() if game_server.get_dict_value('match_started') != 0]\n num_servers_to_remove = len(running_servers) - max_servers\n if num_servers_to_remove <= 0:\n return\n\n servers_removed = 0\n for game_server in running_servers:\n if await self.cmd_shutdown_server(game_server):\n del self.game_servers[game_server.port]\n servers_removed += 1\n if servers_removed >= num_servers_to_remove:\n break\n\n LOGGER.info(f\"Removed {servers_removed} game servers. 
{max_servers} game servers are now running.")\n\n # count = 0\n # for i in range(num):\n # removed = False\n # for port, game_server in self.game_servers.items():\n # if not game_server.started:\n # if self.remove_game_server(game_server):\n # LOGGER.info(f"Removed game server {port}")\n # count += 1\n # removed = True\n # break\n # if not removed:\n # LOGGER.info(f"No more running game servers found after removing {count} game servers.")\n # break\n # LOGGER.info(f"Removed a total of {count} game servers.")\n\n def remove_game_server(self, game_server):\n """\n Removes a game server instance from the game server dictionary\n\n Args:\n game_server (GameServer): the game server instance to remove\n\n Returns:\n bool: True if the game server was removed, False otherwise\n """\n for key, value in self.game_servers.items():\n if value == game_server and not game_server.started:\n game_server.cancel_tasks()\n del self.game_servers[key]\n return True\n return False\n\n def get_game_server_by_id(self, id):\n """\n Returns the game server instance with the specified ID\n\n Args:\n id (int): the ID of the game server to get\n\n Returns:\n GameServer or None: the game server instance, or None if no game server with the specified ID exists\n """\n # self.game_servers is keyed by port, so look the server up by its ID\n # attribute instead of indexing the dictionary directly.\n for game_server in self.game_servers.values():\n if game_server.id == id:\n return game_server\n return None\n\n def get_game_server_by_port(self, game_server_port):\n """\n Returns the game server instance with the specified port\n\n Args:\n game_server_port (int): the port of the game server to get\n\n Returns:\n GameServer or None: the game server instance, or None if no game server with the specified port exists\n """\n if game_server_port == self.cowmaster.get_port():\n return self.cowmaster\n\n return self.game_servers.get(game_server_port, None)\n\n async def add_client_connection(self,client_connection, port):\n """\n Adds a client connection to the client connection dictionary with the specified port as the key\n\n Args:\n client_connection (ClientConnection): the client connection instance to add\n port (int): the port associated with the client connection\n\n Returns:\n bool: True if the client connection was added successfully, False otherwise\n """\n if port not in self.client_connections:\n self.client_connections[port] = client_connection\n if port == self.cowmaster.get_port():\n await self.cowmaster.set_client_connection(client_connection)\n self.cowmaster.status_received.set()\n\n else:\n game_server = self.game_servers.get(port, None)\n # this is in case game server doesn't exist (config change maybe)\n if game_server:\n game_server.status_received.set()\n await game_server.set_client_connection(client_connection)\n await self.check_for_restart_required(game_server)\n # TODO\n # Create game server object here? May be happening already in game_packet_lsnr.py (handle_client_connection)\n # The instance of this happening, is for example, someone is running 10 servers. They modify the config on the fly to be 5 servers. Servers 5-10 are scheduled for shutdown, but game server objects have been destroyed.\n # since the game server isn't actually off yet, it will keep creating a connection.\n return True\n \n else:\n #TODO: raise error or happy with logger?\n if port == self.cowmaster.get_port():\n LOGGER.debug(f"Attempting to locate duplicate CowMaster server. 
Looking for TCP source port ({client_connection.addr[1]}) and TCP dest port ({self.global_config['hon_data']['svr_managerPort']})")\n cowmaster_proc = MISC.get_client_pid_by_tcp_source_port(self.global_config['hon_data']['svr_managerPort'], client_connection.addr[1])\n if cowmaster_proc:\n LOGGER.info(f"Found duplicate CowMaster server. Killing process {cowmaster_proc.pid}")\n cowmaster_proc.terminate()\n return\n else:\n LOGGER.warn("There is a duplicate CowMaster and we're unable to identify its PID (Process ID). This won't cause issues, but there may be some wasteful RAM usage.")\n return\n LOGGER.error(f"A GameServer connection is already established for port {port}, this is either a dead connection, or something is very wrong (two servers with the same port).")\n return False\n\n async def find_replay_file(self,replay_file_name):\n replay_file_paths = [Path(self.global_config['hon_data']['hon_replays_directory']) / replay_file_name]\n if self.global_config['application_data']['longterm_storage']['active']:\n replay_file_paths.append(Path(self.global_config['application_data']['longterm_storage']['location']) / replay_file_name)\n\n for replay_path in replay_file_paths:\n file_exists = Path.exists(replay_path)\n if file_exists:\n replay_file_path = replay_path\n return True,replay_file_path\n # no candidate path matched; keep the two-value contract for callers\n return False,None\n
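\n # Editorial usage note (hedged): callers unpack two values, e.g.\n # file_exists, replay_file_path = await self.find_replay_file("M123.honreplay")\n # The explicit (False, None) fallback above keeps that unpacking safe when\n # the replay is missing from every searched directory.\n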
 async def handle_replay_request(self, match_id, extension, account_id):\n replay_file_name = f"M{match_id}.{extension}"\n LOGGER.debug(f"Received replay upload request.\\n\\tFile Name: {replay_file_name}\\n\\tAccount ID (requestor): {account_id}")\n\n replay_file_paths = [Path(self.global_config['hon_data']['hon_replays_directory']) / replay_file_name]\n if self.global_config['application_data']['longterm_storage']['active']:\n replay_file_paths.append(Path(self.global_config['application_data']['longterm_storage']['location']) / replay_file_name)\n file_exists,replay_file_path = await self.find_replay_file(replay_file_name)\n\n if not file_exists:\n # Send the "does not exist" packet\n # await self.event_bus.emit('replay_status_update', match_id, account_id, ReplayStatus.DOES_NOT_EXIST)\n res = await self.chat_server_handler.create_replay_status_update_packet(match_id, account_id, ReplayStatus.DOES_NOT_EXIST)\n non_existing_paths = [str(path) for path in replay_file_paths]\n LOGGER.warn(f"Replay file {replay_file_name} does not exist. Checked: {non_existing_paths}")\n return\n\n # Send the "exists" packet\n # await self.event_bus.emit('replay_status_update', match_id, account_id, ReplayStatus.QUEUED)\n res = await self.chat_server_handler.create_replay_status_update_packet(match_id, account_id, ReplayStatus.QUEUED)\n LOGGER.debug(f"Replay file exists ({replay_file_name}). Obtaining upload location information.")\n\n # Upload the file and send status updates as required\n file_size = os.path.getsize(replay_file_path)\n\n upload_details = await self.master_server_handler.get_replay_upload_info(match_id, extension, self.global_config['hon_data']['svr_login'], file_size)\n\n if upload_details is None or upload_details[1] != 200:\n # await self.event_bus.emit('replay_status_update', match_id, account_id, ReplayStatus.GENERAL_FAILURE)\n res = await self.chat_server_handler.create_replay_status_update_packet(match_id, account_id, ReplayStatus.GENERAL_FAILURE)\n LOGGER.error(f"{replay_file_name} - Failed to obtain upload location information. HTTP Response ({upload_details[1]}):\\n\\t{upload_details[0]}")\n return\n\n upload_details_parsed = {key.decode(): (value.decode() if isinstance(value, bytes) else value) for key, value in upload_details[0].items()}\n LOGGER.debug(f"Uploading {replay_file_name} to {upload_details_parsed['TargetURL']}")\n\n # await self.event_bus.emit('replay_status_update', match_id, account_id, ReplayStatus.UPLOADING)\n res = await self.chat_server_handler.create_replay_status_update_packet(match_id, account_id, ReplayStatus.UPLOADING)\n try:\n upload_result = await self.master_server_handler.upload_replay_file(replay_file_path, replay_file_name, upload_details_parsed['TargetURL'])\n except Exception:\n LOGGER.error(f"Error uploading replay file {replay_file_path}")\n LOGGER.error(f"Undefined Exception: {traceback.format_exc()}")\n if get_mqtt():\n get_mqtt().publish_json("manager/admin", {"event_type":"replay_upload_failure","message":"failed. Premature failure."})\n # upload_result is undefined after a failed upload attempt, so bail out\n # instead of falling through to the status check below.\n return\n\n if upload_result[1] not in [204,200]:\n # await self.event_bus.emit('replay_status_update', match_id, account_id, ReplayStatus.GENERAL_FAILURE)\n res = await self.chat_server_handler.create_replay_status_update_packet(match_id, account_id, ReplayStatus.GENERAL_FAILURE)\n LOGGER.error(f"Replay upload failed! HTTP Upload Response ({upload_result[1]})\\n\\t{upload_result[0]}")\n if get_mqtt():\n get_mqtt().publish_json("manager/admin", {"event_type":"replay_upload_failure","message":f"failed. HTTP response code: {upload_result[1]}"})\n return\n # await self.event_bus.emit('replay_status_update', match_id, account_id, ReplayStatus.UPLOAD_COMPLETE)\n res = await self.chat_server_handler.create_replay_status_update_packet(match_id, account_id, ReplayStatus.UPLOAD_COMPLETE)\n LOGGER.debug("Replay upload completed successfully.")\n if get_mqtt():\n get_mqtt().publish_json("manager/admin", {"event_type":"replay_upload_success","message":"successful"})\n\n\n async def remove_client_connection(self,client_connection):\n """\n Removes a client connection from the client connection dictionary\n\n Args:\n client_connection (ClientConnection): the client connection instance to remove\n\n Returns:\n bool: True if a matching connection was removed, False otherwise\n """\n for key, value in self.client_connections.items():\n if value == client_connection:\n LOGGER.debug(f"GameServer #{client_connection.id} removing connection.")\n del self.client_connections[key]\n game_server = self.game_servers.get(key, None)\n # This is in case game server doesn't exist intentionally (maybe config changed)\n if game_server:\n game_server.reset_game_state()\n game_server.unset_client_connection()\n # indicate that the sub commands should be regenerated since the list of connected servers has changed.\n # await self.commands.initialise_commands()\n self.commands.subcommands_changed.set()\n return True\n return False\n\n def update_server_start_semaphore(self):\n max_start_at_once = self.global_config['hon_data']['svr_max_start_at_once']\n self.server_start_semaphore = asyncio.Semaphore(max_start_at_once)\n\n async def start_game_servers_task(self, game_servers):\n coro = self.start_game_servers(game_servers)\n self.schedule_task(coro, 'gameserver_startup')\n\n async def start_game_servers(self, game_servers, timeout=120, launch=False, service_recovery=False, config_reload=True):\n try:\n timeout = self.global_config['hon_data']['svr_startup_timeout']\n """\n Start all game 
servers.\n\n This function starts all the game servers that were created by the GameServerManager. It\n does this by calling the start_server method of each game server object.\n\n Game servers are started using a \"semaphore\", to stagger their start to groups and not all at once.\n The timeout value may be reached, for slow servers, it may need to be adjusted in the config file if required.\n\n This function does not return anything, but can log errors or other information.\n \"\"\"\n if MISC.get_os_platform() == \"win32\":\n # this is an atrocious fix until I find a better solution.\n # on some systems, the compiled honfigurator.exe file, which is just launcher.py from cogs.misc causes issues for the opened hon_x64.exe. The exe is unable to locate one of the game dll resources.\n # I wasted a lot of time trying to troubleshoot it, launching main.py directly works fine. This is my solution until a better one comes around. It's set within the scope of the script, and doesn't modify the systems environment.\n path_list = os.environ[\"PATH\"].split(os.pathsep)\n if str(self.global_config['hon_data']['hon_install_directory'] / 'game') not in path_list:\n os.environ[\"PATH\"] = f\"{self.global_config['hon_data']['hon_install_directory'] / 'game'}{os.pathsep}{self.preserved_path}\"\n\n if launch and await self.check_upstream_patch():\n if not await self.initialise_patching_procedure(source=\"startup\"):\n return False\n\n async def start_game_server_with_semaphore(game_server, timeout):\n game_server.game_state.update({'status':GameStatus.QUEUED.value})\n async with self.server_start_semaphore:\n # Use the schedule_task method to start the server\n if game_server not in self.game_servers.values():\n return\n\n # Ensure the task is actually a Task or Future\n task = asyncio.ensure_future(game_server.schedule_task(game_server.start_server(timeout=timeout), 'start_server'))\n try:\n # Ensure asyncio.wait_for(task, timeout) and stop_event.wait() are Tasks\n wait_for_task = asyncio.create_task(asyncio.wait_for(task, timeout))\n stop_event_wait_task = asyncio.create_task(stop_event.wait())\n\n # Prepare the tasks\n tasks = [wait_for_task, stop_event_wait_task]\n\n # Wait for any task to complete\n done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)\n # If the stop_event was set, cancel the other task and return\n if stop_event.is_set():\n for task in pending:\n task.cancel()\n LOGGER.info(f\"Shutting down uninitialised GameServer #{game_server.id} due to stop event.\")\n await self.cmd_shutdown_server(game_server)\n else:\n # The game server start task completed successfully\n # LOGGER.info(f\"GameServer #{game_server.id} started successfully.\")\n pass\n except asyncio.TimeoutError:\n LOGGER.error(f\"GameServer #{game_server.id} failed to start within the timeout period.\")\n await self.cmd_shutdown_server(game_server)\n except HoNServerError:\n # LOGGER.error(f\"GameServer #{game_server.id} encountered a server error.\")\n await self.cmd_shutdown_server(game_server)\n\n start_tasks = []\n if game_servers == \"all\":\n game_servers = list(self.game_servers.values())\n\n for game_server in game_servers:\n already_running = await game_server.get_running_server()\n if already_running:\n LOGGER.info(f\"GameServer #{game_server.id} with public ports {game_server.get_public_game_port()}/{game_server.get_public_voice_port()} already running.\")\n\n if self.global_config['hon_data'].get('man_use_cowmaster') and not self.cowmaster.client_connection:\n await 
self.cowmaster.start_cow_master()\n\n            # setup or verify filebeat configuration for match log submission\n            await filebeat_status()\n            if launch:\n                await filebeat(self.global_config)\n\n            if not self.global_config['hon_data']['svr_start_on_launch']:\n                LOGGER.info(\"Waiting for manual server start up. svr_start_on_launch setting is disabled.\")\n                return\n\n            if not service_recovery and not get_filebeat_status()['running']:\n                msg = f\"Filebeat is not running, you may not start any game servers until you finalise the setup of filebeat.\\nStatus\\n\\tInstalled: {get_filebeat_status()['installed']}\\n\\tRunning: {get_filebeat_status()['running']}\\n\\tCertificate Status: {get_filebeat_status()['certificate_status']}\\n\\tPending Auth: {True if get_filebeat_auth_url() else False}\"\n                if get_filebeat_auth_url():\n                    print(f\"Please authorise match log submissions to continue: {get_filebeat_auth_url()}\")\n                raise RuntimeError(msg)\n\n            if self.global_config['hon_data'].get('man_use_cowmaster'):\n                if not self.cowmaster.client_connection:\n                    if launch or config_reload:\n                        i = 0\n                        incr = 5\n                        while not self.cowmaster.client_connection:\n                            if not config_reload: # prevent excessive prints\n                                LOGGER.warn(f\"Waiting for CowMaster to connect to manager before starting servers. Waiting {i}/{timeout} seconds\")\n                            await asyncio.sleep(incr)\n                            i += incr\n                            if i > timeout:\n                                return False\n                    else:\n                        LOGGER.warn(\"Cannot start servers. Cowmaster is in use, but not yet connected to the manager. Please wait and try again\")\n                        return\n\n            for game_server in game_servers:\n                start_tasks.append(start_game_server_with_semaphore(game_server, timeout))\n\n            await asyncio.gather(*start_tasks)\n\n            # indicate that the sub commands should be regenerated since the list of connected servers has changed.\n            asyncio.create_task(self.commands.initialise_commands())\n            self.commands.subcommands_changed.set()\n\n        except Exception as e:\n            LOGGER.error(f\"GameServers failed to start\\n{traceback.format_exc()}\")\n            if not launch:\n                raise\n\n    async def patch_extract_crc_from_file(self, url):\n        try:\n            with urllib.request.urlopen(url) as response:\n                content = response.read().decode('utf-8')\n                # sample: 4.10.8.0;4.10.8.honpatch;B30B80D1;hon_update_x64.zip;4DFDFDD5\n                components = content.strip().split(';')\n                version = components[0]\n                patch = components[1]\n                hon_exe_crc = components[2]\n                filename = components[3]\n                hon_update_exe_crc = components[-1]\n                return hon_update_exe_crc\n\n        except Exception as e:\n            LOGGER.error(f\"URL: {url} - Error occurred while extracting CRC from file: {e}\")\n            return None\n\n    async def initialise_patching_procedure(self, timeout=3600, source='startup'):\n        if self.patching:\n            LOGGER.warn(\"Patching is already in progress.\")\n            return\n        # mark patching as in progress so concurrent calls hit the guard above; cleared in the finally block below\n        self.patching = True\n\n        for game_server in self.game_servers.values():\n            LOGGER.debug(f\"GameServer #{game_server.id} - Initialising server shutdown for patching\")\n            if game_server.started and game_server.enabled:\n                await self.cmd_message_server(game_server, \"!! ANNOUNCEMENT !! This server will shutdown after the current match for patching.\")\n                await self.cmd_shutdown_server(game_server)\n\n        if MISC.get_proc(self.global_config['hon_data']['hon_executable_name']):\n            LOGGER.debug(\"Some HoN servers are still running. 
Waiting until they've shut down.\")\n return\n \n if MISC.get_os_platform() == \"win32\":\n launcher_binary = 'hon_update_x64.exe'\n launcher_zip = 'hon_update_x64.zip'\n hon_version_url = HON_WAS_VERSION_URL\n launcher_download_url = HON_WAS_LAUNCHER_DOWNLOAD_URL\n else:\n launcher_binary = 'launcher'\n launcher_zip = 'launcher.zip'\n hon_version_url = HON_LAS_VERSION_URL\n launcher_download_url = HON_LAS_LAUNCHER_DOWNLOAD_URL\n\n launcher_crc = await self.patch_extract_crc_from_file(hon_version_url)\n if not launcher_crc:\n LOGGER.error(\"Patching failed.\")\n return False\n if (not exists(self.global_config['hon_data']['hon_install_directory'] / launcher_binary)) or (launcher_crc and launcher_crc.lower() != MISC.calculate_crc32(self.global_config['hon_data']['hon_install_directory'] / launcher_binary).lower()):\n LOGGER.debug(f\"Beginning to download new launcher from {launcher_download_url}\")\n try:\n temp_folder = tempfile.TemporaryDirectory()\n temp_path = temp_folder.name\n temp_zip_path = Path(temp_path) / launcher_zip\n\n download_launcher = urllib.request.urlretrieve(launcher_download_url, temp_zip_path)\n if not download_launcher:\n LOGGER.warn(f\"Newer {launcher_zip} is available, however the download failed.\\n\\t1. Please download the file manually: {launcher_download_url}\\n\\t2. Unzip the file into {self.global_config['hon_data']['hon_install_directory']}\")\n return\n\n temp_extracted_path = temp_folder.name\n extracted_file_name = MISC.unzip_file(source_zip=temp_zip_path, dest_unzip=temp_extracted_path)\n\n temp_extracted_launcher_path = Path(temp_path) / extracted_file_name[0]\n\n launcher_binary_path = self.global_config['hon_data']['hon_install_directory'] / launcher_binary\n\n LOGGER.debug(f\"Downloaded launcher files: {os.listdir(temp_extracted_path)}\")\n\n # Check if the file is in use before moving it\n try:\n shutil.move(temp_extracted_launcher_path, launcher_binary_path)\n LOGGER.debug(f\"Moved extracted launcher to HoN working directory: {launcher_binary_path}\")\n except PermissionError:\n LOGGER.warn(f\"Hon Update - the file {self.global_config['hon_data']['hon_install_directory'] / launcher_zip} is currently in use. Closing the file..\")\n process = MISC.get_proc(proc_name=launcher_zip)\n if process: process.terminate()\n try:\n shutil.move(temp_extracted_launcher_path, launcher_binary_path)\n except Exception:\n LOGGER.error(f\"HoN Update - Failed to copy downloaded {launcher_binary} into {self.global_config['hon_data']['hon_install_directory']}\\n\\t1. Please download the file manually: {launcher_download_url}\\n\\t2. 
Unzip the file into {self.global_config['hon_data']['hon_install_directory']}\")\n return\n\n except Exception as e:\n LOGGER.error(f\"Error occurred during file download or extraction: {e}\")\n\n patcher_executable = self.global_config['hon_data']['hon_install_directory'] / launcher_binary\n try:\n if MISC.get_os_platform() == \"win32\":\n subprocess.run([patcher_executable, \"-norun\"], timeout=timeout)\n else:\n os.chmod(patcher_executable, 0o700)\n subprocess.run([patcher_executable], timeout=timeout)\n\n if MISC.get_os_platform() == \"linux\":\n executable = \"hon-x86_64-server_KONGOR\"\n if not os.path.exists(self.global_config['hon_data']['hon_install_directory'] / executable):\n executable = \"hon-x86_64-server\"\n self.global_config['hon_data']['hon_executable_path'] = self.global_config['hon_data']['hon_install_directory'] / executable\n self.global_config['hon_data']['hon_executable_name'] = executable\n\n svr_version = MISC.get_svr_version(self.global_config['hon_data']['hon_executable_path'])\n if MISC.get_svr_version(self.global_config['hon_data']['hon_executable_path']) != self.latest_available_game_version:\n LOGGER.error(f\"Server patching failed. Current version: {svr_version}\")\n return False\n\n LOGGER.info(\"Patching successful!\")\n self.global_config['hon_data']['svr_version'] = svr_version\n if source == \"startup\":\n return True\n elif source == \"healthcheck\":\n await self.start_game_servers(\"all\")\n\n except subprocess.TimeoutExpired:\n LOGGER.warn(f\"Patching failed as it exceeded {timeout} seconds to patch resources.\")\n return False\n except Exception:\n LOGGER.error(f\"An unexpected error occured while patching: {traceback.format_exc()}\")\n return False\n finally:\n # patching is done. Whether it failed or otherwise.\n self.patching = False\n\n async def disable_game_server(self, game_server):\n game_server.disable_server()\n\n async def enable_game_server(self, game_server):\n game_server.enable_server()\n\n def start_hon_proxy():\n pass\n\n def check_hon_proxy_running():\n pass\n\n def create_hon_proxy_config():\n pass","repo_name":"HoNfigurator/HoNfigurator-Central","sub_path":"cogs/game/game_server_manager.py","file_name":"game_server_manager.py","file_ext":"py","file_size_in_byte":65485,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"6055939866","text":"import os\n\nlist_ = [\"BOS to LAX on 2022-09-10\", \"BOS to LAX on 2022-09-10\", \"BOS to LAX on 2022-09-10\"]\npath = os.getcwd()\n#exists = os.path.exists(path+r\"\\test.txt\")\n'''\nif exists:\n file = open(\"test.txt\", \"r\")\n var = file.read()\n file.close()\n\n file2 = open(\"test2.txt\", \"w\")\n file2.write(var)\n file2.close()\nelse:\n file = open(\"test.txt\", \"w\")\n file.close()\n\n# reads the file first and saves it to the backup file,\n# code below changes the new file without modifying backup\n\nfile = open(\"test.txt\", \"w\")\nfor i in list_:\n file.write(i)\nfile.close()\n'''\nlist1 = [\"BOS to ABE on 2022-12-18\", \"BOS to AVP on 2022-12-18\", \"BOS to CHS on 2022-12-18\", \"BOS to LAX on 2022-12-18\"]\nlist2 = [\"PHL to BOS on 2022-12-23\", \"CHS to BOS on 2022-12-23\", \"EWR to BOS on 2022-12-23\", \"IAD to BOS on 2022-12-23\"]\n\nlist3 = []\nlist4 = []\n\nfor i in list1:\n list3.append(i[7:10])\n\nfor i in list2:\n list4.append(i[:3])\n\nfor i in list3:\n if i in list4:\n print(i)\n\n'''\nFrom BOS to CHS\n departure dates:\n 2022-08-20\n 2022-08-22\n 2022-08-23\n \n return dates:\n 2022-08-22\n 2022-08-23\n 
2022-08-25\n    \nfor i in range(206+1):\n    print(\"{:.2f}\".format(i/206*100) + \"%\")\n    #rint(i/206*100)\n'''\n\n\ndef percent(counter, mode):\n    if mode == 1:\n        return \"{:.2f}\".format(counter/206*100) + \"%\"\n\nmode = 1\n\nfor i in range(207):\n    # pass the integer mode flag so percent() takes its formatting branch instead of returning None\n    print(percent(i, mode))\n\n\n","repo_name":"hprantis/United-Award","sub_path":"File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69813610172","text":"from datetime import datetime\n\n\ndef is_tuple(value):\n    return type(value) is tuple\n\n\ndef is_list(value):\n    return type(value) is list\n\n\ndef is_tuple_or_list(value):\n    return is_tuple(value) or is_list(value)\n\n\ndef convert_tuple_to_dict(tuple_value, tuple_item_names):\n    count = len(tuple_item_names)\n\n    if tuple_value is None:\n        index = 0\n        tuple_value = tuple()\n        while index < count:\n            tuple_value = tuple_value + (None,)\n            index += 1\n\n    result = dict()\n    index = 0\n    for item in tuple_value:\n        key = tuple_item_names[index]\n        result[key] = item\n        index += 1\n\n    return result\n\n\ndef is_empty_string(input_value):\n    return (\n        input_value is None\n        or not isinstance(input_value, str)\n        or input_value.strip() == \"\"\n    )\n\n\ndef assemble_quotes(scraping_request, scraping_response):\n    \"\"\"A quote is composed of a head and a body. The data of the head comes from a scraping\n    request. The data of the body comes from a scraping response.\"\"\"\n\n    def is_meaningful_price(quote_body):\n        price = quote_body[\"price\"]\n        return price is not None\n\n    quotes = []\n\n    quote_head = {\n        \"company_id\": scraping_request[\"company_id\"],\n        \"rental_route_id\": scraping_request[\"rental_route_id\"],\n        \"pick_up_date_id\": scraping_request[\"pick_up_date_id\"],\n        \"pick_up_time_id\": scraping_request[\"pick_up_time_id\"],\n        \"rental_duration_id\": scraping_request[\"rental_duration_id\"],\n        \"created_on\": datetime.now(),\n    }\n\n    scraping_response = list(filter(is_meaningful_price, scraping_response))\n\n    if len(scraping_response) == 0:\n        quotes.append({**quote_head})\n    else:\n        for quote_body in scraping_response:\n            quote = {**quote_head, **quote_body}\n            quotes.append(quote)\n\n    return quotes\n\n\ndef format_scraping_request(scraping_request):\n    return (\n        \"[company_id: {}, rental_route_id: {}, pick_up_date_id: {}, \"\n        + \"pick_up_time_id: {}, rental_duration_id: {}]\"\n    ).format(\n        scraping_request[\"company_id\"],\n        scraping_request[\"rental_route_id\"],\n        scraping_request[\"pick_up_date_id\"],\n        scraping_request[\"pick_up_time_id\"],\n        scraping_request[\"rental_duration_id\"],\n    )","repo_name":"zgo23/RentalsScraping","sub_path":"lib/utils/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24117966391","text":"from bibliopixel.animation.matrix import Matrix\nfrom bibliopixel.util.util import genVector\n\n\nclass Bloom(Matrix):\n\n    def __init__(self, layout, dir=True, **kwds):\n        super().__init__(layout, **kwds)\n        self._vector = genVector(self.width, self.height)\n        self._dir = dir\n\n    def pre_run(self):\n        self._step = 0\n\n    def step(self, amt=8):\n        if self._dir:\n            s = 255 - self._step\n        else:\n            s = self._step\n\n        # this respects master brightness but is slower\n        for y in range(self.height):\n            for x in range(self.width):\n                index = self._vector[y][x] * 255 / self.height + s\n                self.layout.set(x, y, self.palette(index))\n\n        
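# (added comment) advance the animation phase by 'amt'; the wrap below restarts the 0-255 sweep\n        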
self._step += amt\n if(self._step >= 255):\n self._step = 0\n","repo_name":"ManiacalLabs/BiblioPixelAnimations","sub_path":"BiblioPixelAnimations/matrix/bloom.py","file_name":"bloom.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"78"} +{"seq_id":"17761509834","text":"import sys\nimport tensorflow as tf\nimport scipy.misc\nimport cv2\nimport utils\nsys.path.insert(0, '../model/')\nimport driving_data\n\nLOGDIR = '../save/model'\n\ndef main():\n\tfinal_mse = []\n\tmodel_configs = utils.from_recipe()\n\tfor config in model_configs:\n\t\tMODEL_TITLE = config[\"MODEL_TITLE\"]\n\n\t\tsess = tf.InteractiveSession()\n\t\tsaver = tf.train.Saver()\n\n\t\tcheckpoint_path = os.path.join(LOGDIR, MODEL_TITLE + \".ckpt\")\n\t\tsaver.restore(sess, checkpoint_path)\n\n\t\txs, ys_ = driving_data.test_xs, driving_data.test_ys\n\n\t\twith tf.name_scope('loss'):\n\t\t\tloss = tf.reduce_mean(tf.square(tf.sub(y_, y)))\n\t\t\ttf.scalar_summary('mse', loss)\n\n\t\tmse = sess.run(loss, feed_dict={x: xs, y_: ys, keep_prob: 0.8})\n\n\t\tjson_data = {\"model\": MODEL_TITLE, \"mse\": mse}\n\t\tfinal_mse.append(json_data)\n\tutils.to_json_file(final_mse, \"report\", \"final_mse.json\")\n\tbest_model, best_mse = utils.find_best_model()\n\tprint (\"Best Model is {0} With MSE {1}\".format(best_model, best_mse))\n\nif __name__ == '__main__':\n\tmain()","repo_name":"Minsu-Daniel-Kim/udacity_challenge2","sub_path":"pipeline/evaluation_pipeline.py","file_name":"evaluation_pipeline.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10289870745","text":"import logging\nimport time\nimport json\nfrom multiprocessing.pool import ThreadPool\nfrom threading import Thread, Event, Lock\n\nclass Orchestrator:\n def __init__(self, n_workers=4, logging_interval=10):\n self.n_workers = n_workers\n self.pool = ThreadPool(processes=self.n_workers)\n # Rename worker threads\n for worker_i, worker in enumerate(self.pool._pool):\n worker.name = f\"WorkThread-{worker_i:02d}\"\n self.queued = {}\n self.active = {}\n self.thread = Thread(target=self.__start)\n self.thread.name = \"OrchThread\"\n self.lock = Lock()\n self.__stop_event = Event()\n self.last_logged = 0\n self.logging_interval = logging_interval\n self.thread.start()\n\n def __start(self):\n logging.debug(f\"Orchestrator started with {self.n_workers} workers\")\n while not self.__stop_event.is_set():\n finished_jobs = []\n # Check for job completion\n for job_name, worker in self.active.items():\n if worker.ready():\n finished_jobs.append(job_name)\n if worker.successful():\n logging.debug(f\"{job_name} finished\")\n else:\n try:\n worker.get()\n except Exception as e:\n logging.error(f\"{job_name} failed, dumping error\\n{e}\")\n while len(finished_jobs) > 0:\n self.active.pop(finished_jobs.pop())\n # Submit jobs that do not have the same job name as any active job\n self.lock.acquire()\n for job_name, job_queue in self.queued.items():\n if job_name not in self.active.keys():\n worker_func, job_args = job_queue.pop()\n self.active[job_name] = self.pool.apply_async(worker_func, job_args)\n logging.debug(f\"{job_name} submitted\")\n if len(job_queue) == 0:\n finished_jobs.append(job_name)\n while len(finished_jobs) > 0:\n self.queued.pop(finished_jobs.pop())\n # Logging\n now = time.time()\n if (now - self.last_logged) >= self.logging_interval:\n if self.active:\n logging.debug(f\"Active jobs: 
{', '.join(self.active)}\")\n else:\n logging.debug(f\"No active orchestrator jobs\")\n if self.queued:\n queue_lengths = [f\"{n}: {len(q)}\" for n, q in self.queued.items()]\n logging.debug(f\"Queued jobs: {', '.join(queue_lengths)}\")\n else:\n logging.debug(f\"No queued orchestrator jobs\")\n self.last_logged = now\n self.lock.release()\n\n def stop(self):\n self.pool.close()\n self.pool.terminate()\n self.clear()\n self.__stop_event.set()\n self.thread.join()\n\n def clear(self, job_name=\"\"):\n self.lock.acquire()\n if not job_name:\n self.queued = {}\n elif job_name in self.queued.keys():\n self.queued[job_name] = []\n self.lock.release()\n\n def put(self, job_name, worker_func, job_args):\n self.lock.acquire()\n if job_name in self.queued.keys():\n self.queued[job_name].insert(0, (worker_func, job_args))\n else:\n self.queued[job_name] = [(worker_func, job_args)]\n self.lock.release()\n","repo_name":"jkguiang/rucio-sense-dmm","sub_path":"dmm/orchestrator.py","file_name":"orchestrator.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73418326011","text":"\nfrom supp.utils import send_request\nfrom supp.config import todo\n\nimport traceback\n\ntry:\n import readline\nexcept:\n pass \n\ndef repl(prompt):\n\n while True:\n\n try:\n user_input = input(prompt)\n\n except EOFError:\n print('')\n break \n\n if not user_input.strip():\n continue\n\n input_strings = user_input.split()\n\n command = input_strings[0].lower()\n\n try:\n\n if len(input_strings) == 1:\n\n if command in ('exit', 'quit'):\n break\n\n if command == 'list':\n\n response = send_request({'operation': 'list'})\n print(response)\n continue \n\n if command == 'reset':\n\n todo['handlers'].reset()\n continue \n\n if len(input_strings) == 2:\n\n assert all(char.isdigit() for char in input_strings[1])\n key = int(input_strings[1])\n\n if command == 'set':\n\n todo['handlers'].set(key)\n continue \n\n if command == 'del':\n\n todo['handlers'].remove(key)\n continue\n\n if command == 'get':\n\n todo['handlers'].get(key)\n continue\n \n raise AssertionError\n\n except AssertionError:\n print('Usage: { {set|get|del} } | list|reset | exit|quit }')\n\n except Exception as err:\n print(err)\n traceback.print_exc()\n","repo_name":"timedu/edis2023-ex08","sub_path":"app/supp/repl.py","file_name":"repl.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8898374097","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom .forms import EmailSignupForm\nfrom .models import Signup\n\nimport json\nimport requests\n\nMAILCHIMP_API_KEY = settings.MAILCHIMP_API_KEY\nMAILCHIMP_DATA_CENTER = settings.MAILCHIMP_DATA_CENTER\nMAILCHIMP_EMAIL_LIST_ID = settings.MAILCHIMP_EMAIL_LIST_ID\n\napi_url = 'https://{dc}.api.mailchimp.com/3.0'.format(dc=MAILCHIMP_DATA_CENTER)\nmembers_endpoint = '{api_url}/lists/{list_id}/members'.format(\n api_url=api_url,\n list_id=MAILCHIMP_EMAIL_LIST_ID\n)\n\n\ndef subscribe(email):\n data = {\n \"email_address\": email,\n \"status\": \"subscribed\"\n }\n r = requests.post(\n members_endpoint,\n auth=(\"\", MAILCHIMP_API_KEY),\n data=json.dumps(data)\n )\n return r.status_code, r.json()\n\n\n\n\ndef email_list_signup(request):\n if request.method == 'POST':\n sub = 
Signup(email=request.POST['email'])\n sub.save()\n send_mail('Newsletter confirmation', 'confirm to get into the subscription', settings.FROM_EMAIL,[sub.email],\n connection=None, html_message='Thank you for signing up for my email newsletter! sent by celery \\\n Please complete the process by \\\n
clicking here to \\\n                  confirm your registration.'.format(sub.email))\n        return render(request, 'index.html', {'email': sub.email, 'action': 'added', 'form': EmailSignupForm()})\n    else:\n        return render(request, 'index.html', {'form': EmailSignupForm()})\n\n\ndef confirm(request):\n    sub = Signup.objects.get(email=request.GET['email'])\n    if sub.conf_num == request.GET['conf_num']:\n        sub.confirmed = True\n        sub.save()\n        return render(request, 'index.html', {'email': sub.email, 'action': 'confirmed'})\n    else:\n        return render(request, 'index.html', {'email': sub.email, 'action': 'denied'})\n\ndef delete(request):\n    sub = Signup.objects.get(email=request.GET['email'])\n    if sub.conf_num == request.GET['conf_num']:\n        sub.delete()\n        return render(request, 'index.html', {'email': sub.email, 'action': 'unsubscribed'})\n    else:\n        return render(request, 'index.html', {'email': sub.email, 'action': 'denied'})\n","repo_name":"lunyamwi/soft-dj-mathtraining-python","sub_path":"marketing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73980214012","text":"import tkinter as tk\r\n\r\nclass scrollableFrame(tk.Frame):\r\n    \"\"\"A Tkinter scrollable frame\r\n\r\n    * Use the 'interior' attribute to place widgets inside the scrollable frame\r\n    * Construct and pack/place/grid normally\r\n    * This frame only allows vertical scrolling\r\n    \"\"\"\r\n    def __init__(self, master, *args, **kw):\r\n        tk.Frame.__init__(self, master, *args, **kw)\r\n\r\n        # create a canvas object and a vertical scrollbar for scrolling it\r\n        self.vscrollbar = tk.Scrollbar(self, orient = \"vertical\")\r\n        self.vscrollbar.pack(fill = \"both\", side = \"right\")\r\n        self.canvas = tk.Canvas(self, bd = 0, highlightthickness = 0,\r\n                        yscrollcommand = self.vscrollbar.set)#, bg = \"black\")\r\n        self.canvas.pack(side = \"left\", fill = \"both\", expand = \"yes\")\r\n        self.vscrollbar.config(command = self.canvas.yview)\r\n\r\n        # reset the view\r\n        self.resetView()\r\n\r\n        # create a frame inside the canvas which will be scrolled with it\r\n        self.interior = tk.Frame(self.canvas)#, bg = \"pink\")\r\n        self.interior_id = self.canvas.create_window(0, 0, window = self.interior,\r\n                                                     anchor = \"nw\")\r\n\r\n        self.interior.bind(\"<Configure>\", self.configureInterior)\r\n        self.canvas.bind(\"<Configure>\", self.configureCanvas)\r\n\r\n    # track changes to the canvas and frame width and sync them,\r\n    # also updating the scrollbar\r\n    def configureInterior(self, event):\r\n        # update the scrollbars to match the size of the inner frame\r\n        size = (self.interior.winfo_reqwidth(), self.interior.winfo_reqheight())\r\n        self.canvas.config(scrollregion = \"0 0 %s %s\" % size)\r\n        if self.interior.winfo_reqwidth() != self.canvas.winfo_width():\r\n            # update the canvas's width to fit the inner frame\r\n            self.canvas.config(width = self.interior.winfo_reqwidth())\r\n\r\n    def configureCanvas(self, event):\r\n        if self.interior.winfo_reqwidth() != self.canvas.winfo_width():\r\n            # update the inner frame's width to fill the canvas\r\n            self.canvas.itemconfigure(self.interior_id, width = self.canvas.winfo_width())\r\n\r\n    def getInterior(self):\r\n        return self.interior\r\n    \r\n    def resetView(self):\r\n        self.canvas.xview_moveto(0)\r\n        
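# (added comment) likewise return the vertical scroll position to the top\r\n        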
self.canvas.yview_moveto(0)\r\n","repo_name":"AyalCiobotaru/pyVolleyStat","sub_path":"scrollableFrame.py","file_name":"scrollableFrame.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74938697851","text":"# Python3 program to validate Visa\r\n# Card number using regular expression\r\nimport re\r\n \r\n# Function to validate Visa Card\r\n# number using regular expression.\r\ndef isValidVisaCardNo(string):\r\n \r\n    # Regex to check valid Visa\r\n    # Card number\r\n    regex = \"^4[0-9]{12}(?:[0-9]{3})?$\"\r\n \r\n    # Compile the ReGex\r\n    p = re.compile(regex)\r\n \r\n    # If the string is empty\r\n    # return false\r\n    if (string == ''):\r\n        return False\r\n \r\n    # Pattern class contains matcher()\r\n    # method to find matching between\r\n    # given string and regular expression.\r\n    m = re.match(p, string)\r\n \r\n    # Return True if the string\r\n    # matched the ReGex else False\r\n    if m is None:\r\n        return False\r\n    else:\r\n        return True\r\n \r\n# Driver code\r\nif __name__ == \"__main__\":\r\n \r\n    # Test Case 1\r\n    str1 = \"4155279860457\"\r\n    print(str1, isValidVisaCardNo(str1))\r\n \r\n    # Test Case 2\r\n    str2 = \"4155279860457201\"\r\n    print(str2, isValidVisaCardNo(str2))\r\n \r\n    # Test Case 3\r\n    str3 = \"4155279\"\r\n    print(str3, isValidVisaCardNo(str3))\r\n \r\n    # Test Case 4\r\n    str4 = \"6155279860457\"\r\n    print(str4, isValidVisaCardNo(str4))\r\n \r\n    # Test Case 5\r\n    str5 = \"415a27##60457\"\r\n    print(str5, isValidVisaCardNo(str5))\r\n \r\n# This code is contributed by AnkitRai01\r\n# https://www.geeksforgeeks.org/how-to-validate-visa-card-number-using-regular-expression/\r\n\r\n","repo_name":"sedwards/infrastructure_examples","sub_path":"coding/python_validator/python_validator.py","file_name":"python_validator.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43841280420","text":"import os\nimport numpy as np\nimport pandas as pd\nimport re\nimport csv\nimport sys\nimport matplotlib.pyplot as plt\n\n''' evttiming-hist.py\n- Get histograms illustrating time per event across all completed HPC jobs\n- Extract data from central framework logging\n'''\n\n# get parameters\np = re.search('(?<=-)[a-z]+', str(sys.argv[1]))\nparameter_str = str(p.group(0))\nnum_param = 'no. of ' + parameter_str\n\nfilecount = 0\nevtparam = []\n\n# create new results file (text)\nf = open('evttimes.txt', 'w')\n\n# traverse results directory\nfor subdir, dirs, files in os.walk('RESULTSDIR'):\n\n    for file in files:\n        filepath = subdir + os.sep + file\n\n        if filepath.endswith('AthenaMP.log'):\n\n            filecount = filecount + 1\n\n            # extract lines of interest from log file\n            linelist = [ line.rstrip('\\n') for line in open(filepath) if 'INFO [evt' in line ]\n\n            # extract CPU time, record it for the plot, and write it to the text file\n            for line in linelist:\n                m = re.search('(?<=cpu=)\\d+', line)\n                cputime = int(m.group(0))/1000.0\n                evtparam.append(cputime)\n                f.write((str(cputime)+'\\n'))\n\nf.close()\n\nprint('\\nNumber of files scanned: %d\\n' % filecount)\n\n# write to CSV file\nwith open('evttimes.csv', 'w') as f:\n    for item in evtparam:\n        csv.writer(f).writerow([item])\n\n# plot CPU time on histogram\nplt.figure()\nplt.hist(evtparam, bins=np.arange(0, 3000, 100), histtype='step', color='b', label='Time per event')\nplt.xlabel('Time interval')\nplt.ylabel('No. 
of events')\nplt.legend()\nplt.savefig('evttime-all.png')\n","repo_name":"awashbro/examples","sub_path":"hpc-optimisation/analysis/evttiming-hist.py","file_name":"evttiming-hist.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6058029996","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom control import lqr\nfrom mass_spring_damper import MassSpringDamper\nfrom inverted_pendulum import InvertedPendulum\nimport cv2\n\nclass LQRController:\n \n def __init__(self, system, Q, R, visualize = False):\n \n self.system = system\n self.dt = self.system.dt\n self.K = self.calculate_K(system.A, system.B, Q, R)\n self.visualize = visualize\n \n def calculate_K(self, A, B, Q, R):\n \n K, S, E = lqr(A, B, Q, R)\n \n return K\n \n def simulate(self, totalTime, x, xRef):\n \n numForwardSimulationSteps = int(totalTime / self.dt)\n elapsedTime = 0\n xHist = np.zeros((numForwardSimulationSteps, 2))\n timeHist = np.zeros(numForwardSimulationSteps)\n uHist = np.zeros(numForwardSimulationSteps)\n \n for i in range(0, numForwardSimulationSteps):\n \n u = (-self.K @ (x - xRef)).flatten()\n if u < self.system.uMin:\n u = self.system.uMin\n if u > self.system.uMax:\n u = self.system.uMax\n x = self.system.forward_simulate(x, u)\n \n elapsedTime = elapsedTime + self.dt\n \n xHist[i, :] = x.flatten()\n timeHist[i] = elapsedTime\n uHist[i] = u\n \n if self.visualize == True:\n self.system.visualize(x)\n \n return xHist, timeHist, uHist\n \n def plot_hist(self, x, totalTime, u):\n \n self.system.plot_hist(x, totalTime, u)\n \nif __name__ == \"__main__\":\n \n system = MassSpringDamper(5.0, 1.0, 2.0, .01)\n Q = np.diag([20.0, 1.0])\n R = .001 * np.ones((1, 1)) \n controller = LQRController(system, Q, R, 0) \n xInit = np.array([[0., 0.]]).T\n xRef = np.array([[-1, 0.0]]).T\n \n xHist, timeHist, uHist = controller.simulate(20, xInit, xRef) \n controller.plot_hist(xHist, timeHist, uHist)\n \n # system = InvertedPendulum(.2, .1, 1.0, .01)\n # Q = np.diag([0.05, 1.0])\n # R = .1 * np.ones((1, 1)) \n # controller = LQRController(system, Q, R, 0) \n # xInit = np.array([[0.,.5]]).T\n # xRef = np.array([[0.0, 0.0]]).T\n \n # xHist, timeHist, uHist = controller.simulate(5, xInit, xRef) \n # controller.plot_hist(xHist, timeHist, uHist)\n \n","repo_name":"hpsanders98/linear_quadratic_regulator","sub_path":"lqr_controller.py","file_name":"lqr_controller.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20080870807","text":"from PIL import Image\nimport random\ncolors = {\n 'A': (225,0,0), #ROOD\n 'C': (0,225,0), #GROEN\n 'G': (0,0,225), #BLAUW\n 'T': (0,0,0) #ZWART\n}\n\ncolorsList = [(225,0,0), #ROOD\n (0,225,0), #GROEN\n (0,0,225), #BLAUW\n (0,0,0) ]\n\ndef decode(char): #Vertaal character uit het alfabet naar een drie letterige code als in DNA\n if char== '\\'':\n return ['A','A','A']\n if char== ',':\n return ['A','A','C']\n if char== '2':\n return ['A','A','G']\n if char== '3':\n return ['A','A','T']\n if char== '4':\n return ['A','C','A']\n if char== '5':\n return ['A','C','C']\n if char== '6':\n return ['A','C','G']\n if char== '7':\n return ['A','C','T']\n if char== '8':\n return ['A','G','A']\n if char== '9':\n return ['A','G','C']\n if char== 'a':\n return ['A','G','G']\n if char== 'b':\n return ['A','G','T']\n if char== 'c':\n return ['A','T','A']\n if char== 'd':\n return ['A','T','C']\n if char== 
'e':\n return ['A','T','G']\n if char== 'f':\n return ['A','T','T']\n if char== 'g':\n return ['C','A','A']\n if char== 'h':\n return ['C','A','C']\n if char== 'i':\n return ['C','A','G']\n if char== 'j':\n return ['C','A','T']\n if char== 'k':\n return ['C','C','A']\n if char== 'l':\n return ['C','C','C']\n if char== 'm':\n return ['C','C','G']\n if char== 'n':\n return ['C','C','T']\n if char== 'o':\n return ['C','G','A']\n if char== 'p':\n return ['C','G','C']\n if char== 'q':\n return ['C','G','G']\n if char== 'r':\n return ['C','G','T']\n if char== 's':\n return ['C','T','A']\n if char== 't':\n return ['C','T','C']\n if char== 'u':\n return ['C','T','G']\n if char== 'v':\n return ['C','T','T']\n if char== 'w':\n return ['G','A','A']\n if char== 'x':\n return ['G','A','C']\n if char== 'y':\n return ['G','A','G']\n if char== 'z':\n return ['G','A','T']\n if char== 'A':\n return ['G','C','A']\n if char== 'B':\n return ['G','C','C']\n if char== 'C':\n return ['G','C','G']\n if char== 'D':\n return ['G','C','T']\n if char== 'E':\n return ['G','G','A']\n if char== 'F':\n return ['G','G','C']\n if char== 'G':\n return ['G','G','G']\n if char== 'H':\n return ['G','G','T']\n if char== 'I':\n return ['G','T','A']\n if char== 'J':\n return ['G','T','C']\n if char== 'K':\n return ['G','T','G']\n if char== 'L':\n return ['G','T','T']\n if char== 'M':\n return ['T','A','A']\n if char== 'N':\n return ['T','A','C']\n if char== 'O':\n return ['T','A','G']\n if char== 'P':\n return ['T','A','T']\n if char== 'Q':\n return ['T','C','A']\n if char== 'R':\n return ['T','C','C']\n if char== 'S':\n return ['T','C','G']\n if char== 'T':\n return ['T','C','T']\n if char== 'U':\n return ['T','G','A']\n if char== 'V':\n return ['T','G','C']\n if char== 'W':\n return ['T','G','G']\n if char== 'X':\n return ['T','G','T']\n if char== 'Y':\n return ['T','T','A']\n if char== 'Z':\n return ['T','T','C']\n if char== '.':\n return ['T','T','G']\n if char== ' ':\n return ['T','T','T']\n\ndef addLetter(array, char): #Voegt letter code (uit decode) toe aan de lijst\n for letter in decode(char):\n array.append(letter)\n\ndef translateText(array, input): #Voegt iedere letter code uit input toe aan de lijst, met wat chatches voor special cases\n for letter in input:\n if letter != '\\n':\n addLetter(array , letter)\n\ndef createImage(x,y, array): #Zorgt voor iedere code letter in de gecoderde text de juiste kleur toe volgens colors en maakt een plaatje\n padding = int(((x*y) - len(array))/2)\n img = Image.new('RGB', (x, y))\n for i in range(x):\n for j in range(y):\n if i*y + j > padding and i*y + j - padding < len(array): \n color = array[i*y + j - padding]\n img.putpixel((j,i), colors[color])\n else:\n img.putpixel((j,i), random.choice(colorsList))#voegt een willekeurige kleuern voor en na de text\n img.save('output.png')\n\n return img\n\ntext = \"\"\"If you are reeding this you have decoed this message and you probaply see how my image could be an DNA strand. \nAlthough the image migth seem like chaos it this has a meaning. Chaos is infromation anything that isn't chaos can be simplefied \nto it's simpelest form, chaos. But if you decoded this you probaply already know this. So information is chaos. Our DNA is Chaos. \nEvery physics thoery, filosofical thought or great poets words can be discribed by chaos. On the internet there is a website \ncalled the library of babel. On this site you can search any sentences thinkable and it is already in their database. 
\nBut if you look through this library you will find just random text. Al these senteces you could think of already exist in a chaos.\nNow to get to my real point, we have decoded the meaning of the randomness of our DNA. But there is another purely random thing\nin our universe. That thing is qauntum particles. I don't think I or you will ever on our own find the encrypted meaning of \nthe random interactions our reality is based on, but one day we might. There is onething about this randomness we can \nalready say about this randomness. This randomness is who we are what we do and the world we live in.\"\"\"\nresult = []\n\ntranslateText(result, text)\nprint(result)\n\nwallpaper = createImage(75,75,result)\nwallpaper.show()\n","repo_name":"WillemPaternotte/Atlas-van-de-menselijke-geografie","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71721873211","text":"import pymongo\nimport pprint\nimport json\nimport re\n\npos = [35.85835, -0.31198]\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb = myclient[\"mydb\"]\nmycol = mydb[\"adresse\"]\ntype_error = [ 'Lieu-dit', 'Sécurité et Protection', 'Santé', 'Justice', 'Administration','Autre','Service','Education', 'Hotels', 'Restauration','Banque','Sport']\ntype_comp = [\"Organisme\",\"Nom de lieu-dit\",\"Numéro\",\"Unité de batiment\",\"Voie\",\"Zone\",\"Boite postale\",\"Localité\"]\n\npos = [10 ,22]\n\n######## reshaping document ########\ndef reclasse(doc):\n k = []\n v = []\n v1 = []\n k1 = []\n ##lll = json.loads(json.dumps(doc))\n for key, value in doc.items() : \n if key == 'Composants':\n comp = value\n for key, value in doc.items() :\n k.append(key)\n v.append(value)\n abc = v[k.index('Composants')]\n for dicti in abc : \n for key, value in dicti.items() :\n if key == 'Type' :\n v1.append(value)\n if key == 'ValeurInformation' :\n k1.append(value)\n v_for_changes = \"\" \n k_for_changes = \"\" \n indx1 = 0\n indx2 = 0\n for i in range(0,len(v1)-1): \n for i in range(0,len(v1)-1): \n if i+1 > len(v1)-1 : \n break\n elif type_comp.index(v1[i])>type_comp.index(v1[i+1]) : \n k_for_changes = v1[i]\n v1[i] = v1[i+1]\n v1[i+1] = k_for_changes\n v_for_changes = comp[i]\n comp[i] = comp[i+1]\n comp[i+1] = v_for_changes\n indx1 +=1\n if indx1 == indx2:\n break\n else : \n indx2 = indx1\n continue\n \n doc.update({ \"Composants\":comp})\n return(doc)\n\n####### filtering address ########\ndef FilterAdress(input_adress): \n list = input_adress.split()\n if list[0].lower() in type_error:\n list.pop(0)\n filtred_adress = ' '.join([str(elem) for elem in list]) \n return filtred_adress\n else:\n return input_adress\n\n####### making the address function ##########\ndef make_adrss(select_id) : \n adrss = []\n sss = mycol.find( {\"Id\" :select_id} )\n\n for ss in sss : \n mkmkm = ss['Composants']\n\n for k in mkmkm : \n # using json.dumps() for cast it to string\n m = k['ValeurInformation']\n lll = json.loads(json.dumps(m))\n for key, value in lll.items() :\n if type(value) == int : \n adrss.append(value)\n else :\n adrss.append(value.capitalize())\n adrss.append(',')\n \n adrss = adrss[:-1]\n return(','.join(map(str, (FilterAdress( ' '.join(map(str, adrss)))).split(' ,'))))\n\n####### take position and give the address function ########## >>> reverse geocoding \ndef get_pos_give_adrs(pos) :\n adresse = 'none address'\n bb = mycol.aggregate([\n {\n '$geoNear': {\n 'near': { 'type': 
'Point', 'coordinates': [ pos[0], pos[1] ]},\n 'distanceField': 'ObjetAdressable.PositionAdresse.Geometrie.coordinates',\n 'spherical': True, \n \"maxDistance\": 50,\n }\n }\n ])\n for i in bb :\n adresse = i ['Adresse'] \n if adresse == 'none address' : \n continue\n else : \n break\n if adresse == 'none address' : \n position = {\n \"isExist\" : False\n }\n else :\n position = {\n \"isExist\" : True,\n \"address\" : adresse,\n \"lat\" : pos[0],\n \"lon\" : pos[1]\n }\n return(position)\nprint (get_pos_give_adrs(pos)) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n####### insertion function ########## \ndef insert_doc(doc) :\n doc = reclasse(doc)\n doc.update({ \"Id\" : mycol.find().count() + 1})\n mycol.insert_one(doc)\n mycol.update_one({\"Id\" : mycol.find().count()},{\"$set\" : {\"Adresse\" : make_adrss(mycol.find().count())}}) \n\n####### finding list of address ####### >> autocomplete \ndef find_adresse(adresse) : \n if adresse[0] in [' ', ',', ';','.'] :\n adresse = adresse[1:]\n\n es = [\",\",\" ,\", \", \",\" ;\",\"; \",\" \",\" \"]\n for i in range(0,len(es)) :\n adresse = ' '.join(map(str,adresse.split(es[i])))\n words_adrs = adresse.split(' ')\n val_adrs = []\n adrs = []\n sss = mycol.find({},{\"_id\":0, \"Adresse\" : 1})\n for ss in sss : \n val_adrs.append(ss[\"Adresse\"])\n adrs = val_adrs\n val_adrs = []\n for k in range(0,len(words_adrs)) : \n for kk in adrs : \n if re.search(words_adrs[k].lower(), kk.lower()) != None : \n val_adrs.append(kk)\n adrs = val_adrs\n val_adrs = []\n if len(adrs)> 10 :\n adrs = adrs[0:10]\n return (adrs) # >>> autocoplete\n\n####### take address and give the position function ########\ndef get_adrs_give_pos(adresse) : \n kkk = mycol.aggregate([\n { \"$match\": { \"Adresse\": adresse } },\n { \"$project\": { \"_id\": 0, \"coordinates\" : \"$ObjetAdressable.PositionAdresse.Geometrie.coordinates\"}},\n { \"$sort\": {\"Id\": 1}}\n ])\n for kk in kkk :\n tt = kk\n\n try:\n tt\n except NameError:\n rep ={\n \"isExist\" : False,\n \"address\": adresse,\n }\n else:\n for key , value in kk.items() :\n coord = value\n rep ={\n \"isExist\" : True,\n \"address\": adresse,\n \"lat\" : coord[0],\n \"lng\" : coord[1]\n }\n return(rep)","repo_name":"b31zakx96/route-palnner-dz","sub_path":"flask_mongodb/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8739276805","text":"\"\"\"A suite of tools for processing FITS files.\n\n $Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/utilities/fitstools.py,v 1.21 2013/10/24 10:16:19 kerrm Exp $\n\n author: Matthew Kerr\n\n\"\"\"\n\nINT_TYPES = ['EVENT_CLASS','CONVERSION_TYPE']\n\n\nfrom astropy.io import fits as pyfits; pf= pyfits\nimport numpy as N; import numpy as np\nfrom types import ListType,FunctionType,MethodType\nfrom math import cos,sin,pi\nfrom skymaps import SkyDir,Gti,BinnedPhotonData,PythonUtilities\n\ndef rect_mask(lons,lats,cut_lon,cut_lat,lon_hwidth,lat_hwidth):\n \"\"\" lons -- longitude coordinate (deg.)\n lats -- latitude coordinate (deg.)\n \"\"\"\n mask = np.abs( lons - cut_lon)/np.cos(np.radians(lats)) < lon_hwidth\n return mask & (np.abs(lats - cut_lat) < lat_hwidth)\n\ndef trap_mask(ras,decs,cut_dir,radius):\n \"\"\" Make a conservative, trapezoidal cut as a precursor to a radius cut.\"\"\"\n try:\n rad_test = radius[0]\n except:\n rad_test = radius\n if rad_test > 30:\n return np.asarray([True] * len(ras)) # cut no good for large radii\n\n c1 = 
np.cos( np.radians(cut_dir.dec() - radius) )\n    c2 = np.cos( np.radians(cut_dir.dec() + radius) )\n    ra_radius = radius/np.minimum(c1,c2) #conservative\n\n    mask = np.abs(ras - cut_dir.ra()) <= ra_radius\n    mask &= np.abs(decs - cut_dir.dec()) <= radius\n    #If cut radius overlaps the pole, keep a polar cap above cut_dir.dec()\n    #NB dec() and radius are in degrees, so the poles sit at +/-90, not +/-pi/2\n    if np.any(cut_dir.dec()+radius > 90.) or np.any(cut_dir.dec() - radius < -90.):\n        mask |= np.abs(decs) > np.abs(cut_dir.dec())\n    mask[radius > 20] = True # cut doesn't work for large radii?\n    return mask\n\ndef rad_mask(ras,decs,cut_dir,radius,mask_only=False):\n    \"\"\"Make a slower, exact cut on radius.\"\"\"\n    ra0,dec0 = np.radians(cut_dir.ra()),np.radians(cut_dir.dec())\n    ras,decs = np.radians(ras),np.radians(decs)\n    cos_diffs = np.sin(decs)*np.sin(dec0)+np.cos(decs)*np.cos(dec0)*np.cos(ras-ra0)\n    mask = cos_diffs > np.cos(np.radians(radius))\n    if mask_only:\n        return mask\n    else:\n        return mask,np.arccos(cos_diffs)[mask]\n\ndef get_gti_mask(ft1file,times):\n    gti = Gti(ft1file)\n    gti_starts,gti_stops = \\\n        np.asarray([(x.minValue(),x.maxValue()) for x in gti]).transpose()\n    a = np.argsort(gti_stops)\n    gti_starts = gti_starts[a]; gti_stops = gti_stops[a]\n    indices = np.searchsorted(gti_stops,times)\n    accept = (times > gti_starts[indices]) & (times <= gti_stops[indices])\n    return accept\n\ndef rad_extract(eventfiles,center,radius_function,return_cols=['PULSE_PHASE'],cuts=None,apply_GTI=True,theta_cut=66.4,zenith_cut=105,return_indices=False):\n    \"\"\" Extract events with a radial cut. \n    Return specified columns and perform additional boolean cuts.\n\n    Return is in form of a dictionary whose keys are column names \n    (and 'DIFFERENCES') and values are numpy arrays with the column \n    values. These will have been concatenated if there are multiple\n    FT1 files.\n\n    ========= =======================================================\n    Argument  Description\n    ========= =======================================================\n    eventfiles -- a list of FT1 filenames\n    center -- a SkyDir giving the center of the radial cut\n    radius_function -- can be either a float specifying a cookie-cutter \n        radial cut, or a function taking as arguments the energy \n        and event_class and specifying the radius in degrees, e.g.\n\n        def radius(energy,event_class):\n            return numpy.where(event_class,2,1)*(energy/1000)**-0.75\n\n    ========= =======================================================\n    Keyword   Description\n    ========= =======================================================\n    return_cols ['RA','DEC','ENERGY','EVENT_CLASS','PULSE_PHASE'] - \n        a list of FT1 column names to return\n    cuts      None - an optional list of boolean cuts to apply, \n              e.g., ['ENERGY > 100']\n              NB -- cuts not yet implemented!!\n    theta_cut [66.4] reject events with incidence angle THETA above this value (deg.)\n    zenith_cut [105] reject events with ZENITH_ANGLE above this value (deg.)\n    apply_GTI [True] accept or reject an event based on GTI if True; \n        else ignore GTI\n    return_indices [False] if True, return an array giving the index in the\n        original file of each event; obviously only useful in the \n        case of a single event file\n    ========= =======================================================\n    \"\"\"\n    if not hasattr(radius_function,'__call__'):\n        simple_scalar = True\n        rval = radius_function\n        radius_function = lambda e,event_class: rval\n    else:\n        simple_scalar = False\n\n    eventfiles = __FITS_parse__(eventfiles)\n\n    from collections import defaultdict,deque\n    coldict = defaultdict(deque)\n    cols = {}\n    cut_cols = ['ZENITH_ANGLE','THETA','TIME']\n    keys = 
list(set(['RA','DEC','ENERGY','CONVERSION_TYPE']+cut_cols+return_cols))\n accepted = 0\n total = 0\n\n for eventfile in eventfiles:\n #e = pf.open(eventfile,memmap=1)\n #nrows = e[1].data.shape[0]\n #e.close()\n nrows = pyfits.getheader(eventfile,'EVENTS')['NAXIS2']\n\n for key in keys:\n cols[key] = np.empty(nrows,dtype=float)\n PythonUtilities.get_float_col(cols[key],eventfile,'EVENTS',key)\n\n rad = radius_function(cols['ENERGY'],cols['CONVERSION_TYPE'])\n tmask = trap_mask(cols['RA'],cols['DEC'],center,rad)\n tmask &= (cols['ZENITH_ANGLE'] < zenith_cut) & (cols['THETA'] < theta_cut)\n if apply_GTI:\n tmask &= get_gti_mask(eventfile,cols['TIME'])\n print ('GTI will remove %d of %d photons.'%((~tmask).sum(),len(tmask)))\n if simple_scalar:\n rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad)\n else:\n rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad[tmask])\n\n for key in keys:\n coldict[key].append(cols[key][tmask][rmask])\n if return_indices:\n if 'EVENT_INDICES' not in return_cols:\n return_cols.append('EVENT_INDICES')\n coldict['EVENT_INDICES'].append(np.arange(len(tmask))[tmask][rmask])\n coldict['DIFFERENCES'].append(diffs)\n accepted += tmask.sum()\n total += len(tmask)\n\n for key in coldict.keys():\n if (key in cut_cols) and not (key in return_cols):\n cols.pop(key)\n continue\n cols[key] = np.concatenate([x for x in coldict[key]])\n if key in INT_TYPES: cols[key] = cols[key].astype(int)\n\n print ('Cuts removed %d of %d photons.'%(total-accepted,total))\n return cols\n\n#TODO: GTI\n#e.g. counts_plot(file_list,coordsys='galactic',cuts=['L > 50','L < 100','B > -60'])\ndef counts_plot(ft1files,center,fov=10,scale='log',pixels=256,coordsys='equatorial',\n cuts = None, print_locs = False):\n\n ft1 = merge_flight_data(ft1files,cuts=cuts)\n events = ft1[1]\n if coordsys == 'equatorial':\n lon,lat = N.asarray(events.data.field('RA')),N.asarray(events.data.field('DEC'))\n clon,clat = center.ra(),center.dec()\n else:\n lon,lat = N.asarray(events.data.field('L')),N.asarray(events.data.field('B'))\n clon,clat = center.l(),center.b()\n mask = rect_mask(lon,lat,clon,clat,fov/2.,fov/2.)\n\n lon = lon[mask]\n lat = lat[mask]\n\n img,x,y = N.histogram2d(lon,lat,bins=pixels)\n if scale == 'log':\n img = N.where(img > 0,N.log10(img),-1)\n from pylab import pcolor,xlabel,ylabel,imshow\n\n #pcolor(x,y,img.transpose()) #TODO -- make RA go the right way!\n imshow(img.transpose())\n xlabel( ('RA' if coordsys=='equatorial' else 'L') + ' (deg)')\n ylabel( ('DEC' if coordsys=='equatorial' else 'B') + ' (deg)')\n if print_locs:\n if coordsys == 'equatorial': print ('RA DEC ENERGY TIME EVENT_CLASS')\n else: print ('L B ENERGY TIME EVENT_CLASS')\n en = events.data.field('ENERGY')\n time = events.data.field('TIME')\n ec = events.data.field('EVENT_CLASS')\n for i in xrange(len(lon)):\n\n print ('%.2f %.2f %.2g %.10g %d'%(lon[i],lat[i],en[i],time[i],ec[i]))\n return img\n\ndef merge_flight_data(files, outputfile = None, cuts = None, fields = None):\n \"\"\"Merge FT1 or FT2 files and make cuts on the columns.\n\n If a cut is made on MET, the GTI are pruned and updated so that the exposure\n calculation will accurately reflect the MET cuts.\n\n outputfile -- [None] string argument giving output file name; if None, return the\n pyfits Table instance\n cuts -- [None] a list of logical cuts to apply to FITS columns, e.g. 
'ENERGY > 100'\n fields -- not implemented\n\n NOTA BENE: the headers will not be treated correctly!!!\n \"\"\"\n\n handles = __get_handles__(files)\n table_name = handles[0][1].name #set to 'EVENTS' for FT1 or 'SC_DATA' for FT2\n\n event_table = __merge_events__(handles, table_name = table_name)\n\n if cuts is not None:\n __arbitrary_cuts__(event_table,cuts)\n interval = [event_table.data.field('TIME').min(),event_table.data.field('TIME').max()]\n else: interval = None\n\n #Overwrite data in dummy table and write it to file\n #handles[0][table_name].data = event_table.data\n #handles[0][table_name].columns = event_table.columns\n handles[0][table_name] = event_table\n\n if table_name == 'EVENTS':\n handles[0]['GTI'].data = __merge_gti__(handles,interval=interval).data\n\n if outputfile is not None: handles[0].writeto(outputfile,clobber=True)\n for x in handles: x.close()\n\n return handles[0]\n\ndef get_fields(files, fields, cuts = None, memmap = False):\n \"\"\"A lightweight version to get only certain fields of flight data.\"\"\"\n\n data = dict()\n files = __FITS_parse__(files)\n\n #Process the cuts\n f = pf.open(files[0],memmap=memmap)\n if cuts is not None:\n cut_fields = set()\n for i,cut in enumerate(cuts): #put cut columns in namespace\n tokens = [tok.strip() for tok in cut.split()]\n for name in f['EVENTS'].columns.names:\n if name in tokens:\n cut_fields.add(name)\n cuts[i] = cuts[i].replace(name,'data[\\'%s\\']'%name)\n cut_fields = list(cut_fields)\n else: cut_fields = []\n f.close()\n del(f)\n\n counts = N.empty(len(files),dtype=int)\n\n for nfi,fi in enumerate(files):\n f = pf.open(fi,memmap=memmap)\n counts[nfi] = len(f['EVENTS'].data) #f['EVENTS'].data.getshape()[0]\n f.close()\n del(f)\n\n for field in fields + cut_fields:\n data[field] = N.empty(counts.sum(),dtype=float)\n\n #Get fields\n counter = 0\n for fi,counts in zip(files,counts):\n f = pf.open(fi,memmap=memmap)\n for field in fields + cut_fields:\n #data[field] += [N.array(f['EVENTS'].data.field(field),dtype=N.float32)]\n data[field][counter:counter+counts] = f['EVENTS'].data.field(field)\n counter += counts\n f.close()\n del(f)\n\n import gc\n gc.collect()\n gc.collect()\n\n \"\"\"\n #Concatenate results\n for field in fields + cut_fields:\n data[field] = N.concatenate(data[field]).astype(float) #note hard case\n gc.collect()\n \"\"\"\n\n #Apply cuts\n if cuts is not None:\n mask = N.asarray([True]*len(data[data.keys()[0]]))\n for cut in cuts:\n mask = N.logical_and(mask,eval(cut))\n for field in fields:\n data[field] = data[field][mask]\n\n #Remove \"helper\" data for cuts\n for cut in cut_fields:\n if cut not in fields: data.pop(cut)\n\n return data\n\n\ndef FT1_to_GTI(files):\n \"\"\"Convert FT1 files to a single GTI object.\"\"\"\n handles = __get_handles__(files)\n from skymaps import Gti\n g = Gti(files[0])\n starts,stops = __merge_gti__(handles[1:],no_table = True)\n if len(starts) == 0: return g\n for i in xrange(len(starts)):\n g.insertInterval(starts[i],stops[i])\n return g\n\ndef sum_ltcubes(files,outputfile = 'summed_ltcube.fits'):\n \"\"\"Pass either a name of an ASCII file with other FITS file names or a list of filenames.\"\"\"\n\n files = __FITS_parse__(files)\n\n try:\n f = pf.open(files[0])\n header = f['EXPOSURE'].header\n exposures = [f['EXPOSURE'].data.field('COSBINS')] \n except KeyError:\n print('file %s has no EXPOSURE table: aborting'%files[0])\n return\n finally:\n f.close() \n\n for file in files[1:]:\n try:\n f = pf.open(file)\n h = f['EXPOSURE'].header\n for key in header.keys():\n 
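# (added comment) timestamp keywords naturally differ between files, so exclude them from the consistency check\n                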
if key not in ['DATE','DATE-OBS','DATE-END','TSTART','TSTOP']:\n                    assert(h[key]==header[key])\n            exposures+=[f['EXPOSURE'].data.field('COSBINS')]\n        except AssertionError:\n            print('Inconsistent header values, file %s, keyword %s'%(file,key))\n            return\n        except KeyError:\n            print('File %s has no table \"EXPOSURE\": aborting'%file)\n            return\n        finally:\n            f.close()\n\n    summed_exposure = N.array(exposures).sum(axis=0)\n    null = header.get('TNULL1', None) #THB: kluge fix.\n    coldef = pf.ColDefs([pf.Column(name='COSBINS',format=header['TFORM1'],null=null,array=summed_exposure)])\n    exp_table = pf.new_table(coldef,header=header,nrows=exposures[0].shape[0])\n    gti = merge_gti(files)\n    exp_table.writeto(outputfile,clobber=True)\n    gti.writeExtension(outputfile)\n\ndef merge_gti(files,table_name = 'GTI',interval = None):\n    \"\"\"Return the union of Gtis specified in files, with an optional time range cut.\"\"\"\n    gtis = [Gti(f,table_name) for f in files]\n    new_gti = Gti(gtis[0])\n    for gti in gtis:\n        new_gti.combine(gti)\n\n    if interval is not None:\n        new_gti = new_gti.applyTimeRangeCut(*interval)\n\n    return new_gti\n\ndef merge_bpd(bpd_files,outfile = None):\n    \"\"\"Merge a set of BinnedPhotonData files.\n    \n    outfile: File to write the merged BinnedPhotonData to. If None, don't save it.\"\"\"\n    bpds = [BinnedPhotonData(bf) for bf in bpd_files]\n    bpds = [bpd for bpd in bpds if bpd.gti().computeOntime()>0] #Ignore entries with empty GTIs\n    new_bpd = bpds[0]\n    for b in bpds[1:]:\n        new_bpd.add(b)\n    if outfile:\n        new_bpd.write(outfile)\n\n    return new_bpd\n\ndef merge_lt(lt_files,outfile = 'merged_lt.fits',weighted = True):\n    \"\"\"Merge a list of LivetimeCube files, handling the WEIGHTED_EXPOSURE extension.\n\n    kwargs:\n        outfile: File to save the merged LivetimeCube to\n        weighted: If true, merge WEIGHTED_EXPOSURE tables, in addition to\n                  EXPOSURE.\n    \"\"\"\n\n    files = __FITS_parse__(lt_files)\n\n    try:\n        f = pf.open(files[0])\n        p_header = f['PRIMARY'].header\n        ext = 'EXPOSURE'\n        header = f['EXPOSURE'].header\n        exposures = [f['EXPOSURE'].data.COSBINS]\n        ext = 'WEIGHTED_EXPOSURE'\n        if weighted:\n            w_header = f['WEIGHTED_EXPOSURE'].header\n            w_exposures = [f['WEIGHTED_EXPOSURE'].data.COSBINS]\n    except KeyError:\n        # 'ext' (set just above) names the extension whose lookup failed\n        print('file %s has no %s table: aborting'%(files[0],ext))\n        return\n    finally:\n        f.close() \n\n    for file in files[1:]:\n        try:\n            f = pf.open(file)\n            ext = 'EXPOSURE'\n            h = f['EXPOSURE'].header\n            for key in header.keys():\n                assert(h[key]==header[key])\n            exposures+=[f['EXPOSURE'].data.COSBINS]\n\n            if weighted:\n                ext = 'WEIGHTED_EXPOSURE'\n                hw = f['WEIGHTED_EXPOSURE'].header\n                for key in w_header.keys():\n                    assert(hw[key]==w_header[key])\n                w_exposures += [f['WEIGHTED_EXPOSURE'].data.COSBINS]\n\n        except AssertionError:\n            print('Inconsistent header values, file %s, extension %s, keyword %s'%(file,ext,key))\n            return\n        except KeyError:\n            print('File %s has no table \"%s\": aborting'%(file,ext))\n            return\n        finally:\n            f.close()\n    primary = pf.PrimaryHDU(data=None,header = p_header)\n    hdulist = pf.HDUList([primary])\n    summed_exposure = N.array(exposures).sum(axis=0)\n    null = header.get('TNULL1',None)\n    coldef = pf.ColDefs([pf.Column(name='COSBINS',format=header['TFORM1'],null=null,array=summed_exposure)])\n    exp_table = pf.new_table(coldef,header=header,nrows=exposures[0].shape[0])\n    hdulist.append(exp_table)\n    if weighted:\n        summed_w_exposure = N.array(w_exposures).sum(axis=0)\n        null = w_header.get('TNULL1',None)\n        coldef_w = 
pf.ColDefs([pf.Column(name='COSBINS',format=w_header['TFORM1'],null=null,array=summed_w_exposure)])\n w_exp_table = pf.new_table(coldef_w,header=w_header,nrows=w_exposures[0].shape[0])\n hdulist.append(w_exp_table)\n hdulist.writeto(outfile,clobber=True)\n gti = merge_gti(files)\n gti.writeExtension(outfile)\n\n#EVERYTHING BELOW IS AN INTERNAL CALL.\n\ndef __FITS_parse__(files):\n \"\"\"Parse input and return a list of (FITS) filenames.\n\n files -- a glob-style wildcard, an ASCII file containing a list of filenames, a list of filenames, or a single FITS file.\"\"\"\n #This doesn't detect ndarrays.\n #if type(files) is ListType: return files\n\n #Try testing for non-string iterability instead.\n if getattr(files,'__iter__',False):\n return files\n\n try: #See if it's a FITS file\n f = pf.open(files)\n f[0]\n f.close()\n return [files]\n except:\n pass\n if files[0] == '@':\n return [line.strip() for line in file(files[1:]) if len(line)>0 and line[0]!='#']\n from glob import glob\n return glob(files)\n\ndef __get_handles__(files):\n files = __FITS_parse__(files)\n handles = [pf.open(x,memmap=1) for x in files] #some versions may need to disable memmap (=0)\n return handles\n\ndef __merge_events__(handles,table_name = 'EVENTS'):\n \"\"\"Return a FITS table of merged FT1 events. Now works for FT2 too!\"\"\"\n\n num_events = [x[table_name].data.shape[0] for x in handles]\n columns,header = __common_columns__(handles,table_name)\n event_table = pf.new_table(columns,header=header,nrows=sum(num_events))\n previous_loc = 0\n for i,handle in enumerate(handles):\n for j in xrange(len(columns)):\n name = columns[j].name\n event_table.data.field(name)[previous_loc:previous_loc+num_events[i]] = handle[table_name].data.field(name)[:]\n previous_loc += num_events[i]\n return event_table\n\ndef __common_columns__(handles,table_name = 'EVENTS'):\n \"\"\"Find the columns common to all files.\"\"\"\n #Quick kluge - return the shortest one...\n all_cols = [handle[table_name].columns for handle in handles]\n arg = N.argmin([len(col) for col in all_cols])\n return all_cols[arg],handles[arg][table_name].header\n\n \"\"\"\n selector = {}\n for col in all_cols:\n for n in col: selector[n.name] = n\n for col in all_cols:\n names = [n.name for n in col]\n for key in selector.keys():\n if not (key in names): selector.pop(key)\n print selector\n \"\"\"\n\n\n\n\ndef __merge_gti__(handles,no_table=False,interval=None):\n \"\"\"Return a FITS table of merged GTI.\"\"\"\n\n if len(handles) == 0: return ([],[])\n\n #Merge the gti and sort the results\n starts = N.concatenate([x['GTI'].data.field('START') for x in handles])\n stops = N.concatenate([x['GTI'].data.field('STOP') for x in handles])\n sorting = N.argsort(starts)\n starts = starts[sorting]\n stops = stops[sorting]\n\n if interval is not None:\n mask = N.logical_and(starts > interval[0], stops < interval[1])\n starts = N.append(interval[0],starts[mask])\n stops = N.append(stops[mask],interval[1])\n\n if no_table: return (starts,stops)\n\n #Put GTI in new table\n gti_table = pf.new_table(handles[0]['GTI'].columns,nrows = len(starts))\n gti_table.data.field('START')[:] = starts\n gti_table.data.field('STOP')[:] = stops\n\n return gti_table\n\ndef __arbitrary_cuts__(events,cuts):\n \"\"\"Perform the cuts provided as arguments.\n\n events -- a major table in FITS format such as might be returned by __merge_events__\n cuts -- a list of cuts; the final result is the intersection of all cuts. 
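Each cut is a boolean expression over column names of the event table, 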
e.g.,\n        ['ENERGY>100','MET > 5000','MET < 10000']\n    \"\"\"\n\n    if cuts is None: return\n\n    from numarray import array,logical_and #some installations may need the numpy version of these calls\n\n    for cut in cuts: #put cut columns in namespace\n        for name in events.columns.names:\n            if name in cut:\n                exec('%s = events.data.field(\\'%s\\')'%(name,name))\n    mask = array([True]*events.data.shape[0])\n    for cut in cuts: #build mask cumulatively\n        exec('mask = logical_and(mask,%s)'%cut)\n\n    new_table = pf.new_table(events.columns,nrows=len(mask[mask]))\n    for i in xrange(len(events.columns)):\n        new_table.data.field(i)[:] = events.data.field(i)[mask]\n    events.data = new_table.data\n\n","repo_name":"fermi-lat/pointlike","sub_path":"python/uw/utilities/fitstools.py","file_name":"fitstools.py","file_ext":"py","file_size_in_byte":20781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"18633586319","text":"from typing import List\n\nclass Solution:\n    def numberOfArithmeticSlices_slow(self, A: List[int]) -> int:\n        dp = [[False] * len(A) for _ in range(len(A))]\n        cnt = 0\n        for i in range(len(A)-2):\n            if A[i+2]-A[i+1]==A[i+1]-A[i]:\n                dp[i][i+2] = True\n                cnt = cnt + 1\n        print(dp)\n        for k in range(3, len(A)):\n            for i in range(len(A)-k):\n                j = i + k\n                dp[i][j] = dp[i][j-1] and dp[i+1][j]\n                print('dp[{0}][{1}]={2}'.format(i,j,dp[i][j]))\n                if dp[i][j]:\n                    cnt = cnt+1\n        return cnt\n\n    def numberOfArithmeticSlices(self, A: List[int]) -> int:\n        res = 0\n        dp = 0\n        for i in range(2,len(A)):\n            if A[i]-A[i-1]==A[i-1]-A[i-2]:\n                dp += 1\n                res += dp\n            else:\n                dp = 0\n        return res\n\n\nif __name__ == '__main__':\n    sol = Solution()\n    print(sol.numberOfArithmeticSlices([1,2,3,4]))","repo_name":"ivan0703/leetcode_dp","sub_path":"413. 
Arithmetic Slices/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27636203999","text":"# -*-coding:UTF-8-*-\nfrom win32com.client import Dispatch\nimport win32com.client\nfrom tkinter import *\nimport os, datetime, string\n\n\"\"\"\nauth:zhongqionglun\ntime:2019/03/29\ndescription: count each tester's executed test cases per day for a project\n\"\"\"\n\n\ndef cases_days():\n    testers = member.get().split(\"|\")\n    print(testers)\n    xlApp = win32com.client.Dispatch('Excel.Application')\n    filepath = os.getcwd() + \"\\\\\" + filename.get() + \".xls\"\n    xlBook = xlApp.Workbooks.Open(filepath)\n    # print(xlBook)\n    xlSheet = xlBook.Worksheets(\"Worksheet\")\n    dic = dict.fromkeys(testers)\n    # print(dic)\n    for tester in testers:\n        row = 6\n        numday = dict.fromkeys(list(range(1, 31)), 0)\n        # print(numday)\n        while True:\n            if not xlSheet.Cells(row, 1).Value:\n\n                dic[tester] = numday\n                # print(dic)\n                break\n            elif (xlSheet.Cells(row, 7).Value == tester):\n                # print(xlSheet.Cells(row, 7))\n                # days.append(xlSheet.Cells(row, 6).Value)\n                # print(days)\n                for day in range(1, 31):\n                    # print(\"the day is : %s\" % day)\n                    if datetime.datetime.now().month >= 10:\n                        substring = \"2019-\" + str(datetime.datetime.now().month) + \"-\" + str(day)\n                    else:\n                        if day >= 10:\n                            substring = \"2019-0\" + str(datetime.datetime.now().month) + \"-\" + str(day)\n                        else:\n                            substring = \"2019-0\" + str(datetime.datetime.now().month) + \"-0\" + str(day)\n                    # print(substring)\n                    if not str(xlSheet.Cells(row, 6).Value).find(substring):\n                        numday[day] = numday.get(day) + 1\n                        # print(numday.get(day))\n                        # print(numday)\n                        break\n\n            row += 1\n            continue\n    print(dic)\n\n\ndef writeexcel():\n    outfilename = \"执行效率月统计结果.xlsx\"\n    xlApp = win32com.client.Dispatch('Excel.Application')\n    if os.path.exists(\"执行效率月统计结果.xlsx\"):\n        print(\"The file exists....\")\n        # check whether the sheet exists; one sheet per tester #\n\n    else:\n        print(\"The file doesn't exist....\")\n\n\nwindow = Tk()\nwindow.title('用例执行分析')\nwindow.iconbitmap('py-blue-trans-out.ico')\nform = Frame(window)\nform.pack()\nlab1 = Label(form, text='文件名:')\nfilename = StringVar()\nent1 = Entry(form, textvariable=filename)\nfilename.set(\"\")\nlab1.grid(row=0, column=0)\nent1.grid(row=0, column=1)\nlab2 = Label(window, text=\"测试成员:\")\nlab2.pack()\nmember = StringVar()\nent2 = Entry(window, width=40, textvariable=member)\nmember.set(\"\")\nent2.pack()\nbtn = Button(window, text='统计', command=cases_days)\nbtn.pack()\nwindow.mainloop()\n","repo_name":"zhongqionglun/GitLibrary","sub_path":"Testlink/cases_per_days.py","file_name":"cases_per_days.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"74616411771","text":"import pytest\nfrom harness.logger import logger\n\nlarge_file_01 = 'large_file_01'\nlarge_file_02 = 'large_file_02'\n\n@pytest.mark.skip(reason=\"using >> to concatenate files seems to hang clients\")\ndef test_concat(gkfs_daemon, gkfs_shell, file_factory):\n    \"\"\"Concatenate two large files\"\"\"\n\n    lf01 = file_factory.create(large_file_01, size=4.0, unit='MB')\n    lf02 = file_factory.create(large_file_02, size=4.0, unit='MB')\n\n    cmd = gkfs_shell.cp(lf01.pathname, gkfs_daemon.mountdir)\n    assert cmd.exit_code == 0\n\n    cmd = gkfs_shell.cp(lf02.pathname, gkfs_daemon.mountdir)\n    assert cmd.exit_code == 0\n\n    cmd = gkfs_shell.stat('--terse', gkfs_daemon.mountdir / large_file_01)\n    assert cmd.exit_code == 0\n    out = 
cmd.parsed_stdout\n    assert out.size == lf01.size\n\n    cmd = gkfs_shell.stat('--terse', gkfs_daemon.mountdir / large_file_02)\n    assert cmd.exit_code == 0\n    out = cmd.parsed_stdout\n    assert out.size == lf02.size\n\n    cmd = gkfs_shell.md5sum(gkfs_daemon.mountdir / large_file_01)\n    assert cmd.exit_code == 0\n    assert cmd.parsed_stdout.digest == lf01.md5sum()\n\n    cmd = gkfs_shell.md5sum(gkfs_daemon.mountdir / large_file_02)\n    assert cmd.exit_code == 0\n    assert cmd.parsed_stdout.digest == lf02.md5sum()\n\n    ##XXX hangs!\n    cmd = gkfs_shell.bash('cat', gkfs_daemon.mountdir / large_file_01, '>>', gkfs_daemon.mountdir / large_file_02)\n    assert cmd.exit_code == 0\n","repo_name":"NGIOproject/old_GekkoFS_old","sub_path":"tests/integration/shell/test_concat.py","file_name":"test_concat.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"78"} +{"seq_id":"2280126816","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date    : 2020-02-11 06:51:38\n# @Author  : mutudeh (josephmathone@gmail.com)\n# @Link    : ${link}\n# @Version : $Id$\n\nimport os\n\nclass Solution():\n    def search(self, nums, target):\n        if not nums:\n            return -1\n        left = 0\n        right = len(nums) - 1\n\n\n        while left <= right:\n            mid = int((left + right)/2)\n\n            if nums[mid] == target:\n                return mid\n            if nums[mid] >= nums[left]:\n                # the left half is sorted; use it to decide which side to search\n                if target >= nums[left] and target <= nums[mid]:\n                    right = mid - 1\n                else:\n                    left = mid + 1\n            else:\n                # the right half is sorted; use it to decide which side to search\n                if target >= nums[mid] and target <= nums[right]:\n                    left = mid + 1\n                else:\n                    right = mid - 1\n        return -1\n\ns = Solution()\ndata = [5,1]\nprint(s.search(data,1))","repo_name":"joseph-mutu/Codes-of-Algorithms-and-Data-Structure","sub_path":"Leetcode/在旋转数组中查找-使用左边进行判断.py","file_name":"在旋转数组中查找-使用左边进行判断.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23441221568","text":"from jinja2 import FileSystemLoader , Environment\nfrom .resource import Resource\nimport pathlib\n \nclass Instance(Resource):\n    def _load_template(self):\n        searchpath = pathlib.Path(__file__).parent.parent/\"templates\"\n        loader = FileSystemLoader(searchpath=searchpath)\n        template = Environment(loader=loader).get_template(\"instance.json.j2\")\n        return template\n    \n    def _get_client(self):\n        return self.compute.instances()\n    \n    def _execute_request(self):\n        return self._get_client().insert(\n            project=self.data.get(\"project_name\",\"\"),\n            zone=self.data.get(\"zone_name\",\"\"),\n            body = self.body\n        ).execute()\n\n    def _get_operation(self):\n        return (\n            self\n            .compute\n            .zoneOperations()\n            .get(\n                project=self.data.get(\"project_name\",\"\"),\n                zone=self.data.get(\"zone_name\",\"\"),\n                operation=self.request.get(\"name\",\"\")\n            )\n        )\n    def get_ip(self):\n        response = self._get_client().get(\n            project=self.data.get(\"project_name\",\"\"),\n            zone=self.data.get(\"zone_name\",\"\"),\n            instance=self.data.get(\"instance_name\",\"\")\n        ).execute()\n\n        return {\n            \"private\": (\n                response\n                .get(\"networkInterfaces\",[])[0]\n                .get(\"networkIP\",\"\")\n            ),\n            \"public\": (\n                response\n                .get(\"networkInterfaces\",[])[0]\n                .get(\"accessConfigs\",[])[0]\n                .get(\"natIP\",\"\")\n            )\n        
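# NOTE: assumes one network interface whose first access config carries the NAT IP\n        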
}","repo_name":"fermented-cat/potential-octo-guide","sub_path":"scripts/provision/resources/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26266316059","text":"import android\ndroide=android.Android()\n\narchivo = droide.dialogGetInput(\"Ecriba nombre de archivo\")\ndt = 100\nfin = 30000\ntiempo = 0\ndroide.startSensingTimed(2,dt)\n\nlecturas = []\ndroide.ttsSpeak(\"Inicio de recorrido\")\nimport time\nwhile tiempo <= fin:\n lecturas.append(droide.sensorsReadAccelerometer().result)\n time.sleep(dt/1000.0)\n tiempo += dt\n \ndroide.stopSensing();\ndroide.ttsSpeak(\"Fin de recorrido\")\n\nimport csv\n#c = csv.writer(open(\"MYFILE.csv\", \"wb\"))\n#c.writerow(lecturas)\nwith open(archivo.result + '.csv', 'w') as fp:\n a=csv.writer(fp,delimiter=',')\n a.writerows(lecturas)","repo_name":"Blindbandit01/ProyectoAndroidz","sub_path":"ProyectoAndroid/lania.ipython/Dia2/notebooks/D2L5.py","file_name":"D2L5.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71756409851","text":"from django.shortcuts import render,redirect,reverse,HttpResponse\nimport datetime\nfrom django.views.generic import ListView,DetailView\nfrom comments.models import Comment\nfrom .models import Mypost,PostCategory\n# Create your views here.\n\ndef error_handle(request):\n return render(request,\"error.html\")\n\n\ndef feedContent(filepath):\n posted = filepath.path\n with open(posted,\"r\") as fp:\n return fp.read()\n\n#使用ListView显示博客列表页\nclass HomeView(ListView):\n model = Mypost\n template_name = \"myblog/index.html\"\n context_object_name = \"post_list\"\n ordering = ['-post_update_date']\n paginate_by = 5\n\n #添加附加信息\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n category_list = PostCategory.objects.exclude(name=\"undefined\")\n context[\"category_list\"] = category_list\n return context\n\ndef postview(request,post_id):\n try:\n posted = Mypost.objects.get(pk=post_id)\n except:\n #博客文章不存在 重定向至错误页面\n return redirect(reverse(\"blog:error\"))\n else:\n post_info = {}\n post_info[\"posted_id\"] = post_id\n post_info[\"post_title\"] = posted.post_title\n post_info[\"post_author\"] = posted.post_author\n post_info[\"post_date\"] = posted.post_date\n post_info[\"post_update_date\"] = posted.post_update_date\n post_info[\"post_content\"] = feedContent(posted.post_article)\n\n # 评论\n comment_obj = Comment.objects.filter(comm_posted=posted).order_by(\"comm_created_time\")\n\n #分类\n category_list = PostCategory.objects.exclude(name=\"undefined\")\n\n context = {\n \"post_info\":post_info,\n \"comment_info\":comment_obj,\n \"category_list\":category_list,\n }\n return render(request,\"myblog/blog-post.html\",context)\n\n\nclass CategoryView(ListView):\n model = Mypost\n template_name = \"myblog/index.html\"\n context_object_name = \"post_list\"\n ordering = ['-post_update_date']\n paginate_by = 5\n\n\n #与home相同,但要筛选出需要的\n def get_queryset(self):\n ca_name = self.request.GET.get('name', None)\n pkid = PostCategory.objects.get(name=ca_name)\n return Mypost.objects.filter(post_category=pkid).order_by('-post_update_date')\n\n #添加附加信息\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n category_list = PostCategory.objects.exclude(name=\"undefined\")\n context[\"category_list\"] = category_list\n return context\n# 
class PostView(DetailView):\n#     model = Mypost\n#     template_name = \"myblog/blog-post.html\"\n#     context_object_name = \"post_info\"","repo_name":"Ma5ker/website","sub_path":"myblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70845207612","text":"#!/usr/bin/python3\n\"\"\" Count keyword occurrences in hot post titles via the Reddit API \"\"\"\nimport operator\nimport requests\n\n\ndef count_words(subreddit, word_list, hot_list=[], after='', values={}):\n    \"\"\" Recursively request pages of results and tally keyword matches \"\"\"\n    url = 'https://www.reddit.com/r/{}/hot.json?after={}'.format(\n        subreddit, after)\n    data = {'user-agent': 'scraping_rec-0-0-3'}\n    if len(hot_list) == 0:\n        for x in range(0, len(word_list)):\n            values[word_list[x]] = 0\n    try:\n        res = requests.get(url, headers=data,\n                           allow_redirects=False).json()\n        query = res.get('data').get('children')\n        after = res.get('data').get('after')\n        for child in query:\n            data = child.get('data').get('title')\n            for x in range(0, len(word_list)):\n                if word_list[x].lower() in data.lower():\n                    values[word_list[x]] += 1\n            hot_list.append(data)\n        if after is None:\n            values_sort = sorted(values.items(), key=operator.itemgetter(1))\n            for n in range(len(values_sort), 0, -1):\n                if values_sort[n - 1][1] > 0:\n                    print(\"{}: {}\".format(values_sort[n - 1][0],\n                                          values_sort[n - 1][1]))\n            return(hot_list)\n        return(count_words(subreddit, word_list, hot_list, after, values))\n    except:\n        return(None)\n","repo_name":"cristian0497/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/100-count.py","file_name":"100-count.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11200204344","text":"# plotting 3D surface plot f(x,y)=(x^2 + y^2)/2 using python\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.cm as cm\nfrom pylab import rcParams\nrcParams['figure.figsize']=5,5\n\nfig=plt.figure()\naxes=fig.gca(projection='3d') #gca=get current axis\nx= np.linspace(-1,1,100)\ny= np.linspace(-1,1,100)\n\nxv,yv =np.meshgrid(x,y)\nz=(xv**2+yv**2)/2\n# surface_plot with color grading and color bar\np=axes.plot_surface(xv,yv,z,rstride=4,cstride=4,cmap=cm.RdBu,linewidth=0,antialiased =False)\n\nfig.colorbar(p,shrink=0.5)\naxes.set_xlabel('$x$',fontsize=15)\naxes.set_ylabel('$y$',fontsize=15)\naxes.set_zlabel('$z$',fontsize=15)\n\nplt.tight_layout()\nplt.show()\n","repo_name":"baleshwar-mahto/cse-using-python","sub_path":"surface.py","file_name":"surface.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21133884824","text":"import logging\n\nlog = logging.getLogger(__name__)\n\n\ndef get_transaction_metadata(cursor):\n    \"\"\"Return PostgreSQL-specific transaction ID.\"\"\"\n    cursor.execute('SELECT txid_current();')\n    txid = cursor.fetchone()[0]\n    return txid\n\n\ndef is_transaction_complete(cursor, txid):\n    \"\"\"Test if transaction ID is finished (committed or aborted).\"\"\"\n    # See http://www.postgresql.org/docs/9.1/static/functions-info.html for\n    # descriptions of the postgres system functions.\n    cursor.execute('SELECT txid_current_snapshot();')\n    # \"txid_snapshot's textual representation is xmin:xmax:xip_list.\n    # For example 10:20:10,14,15 means xmin=10, xmax=20, xip_list=10, 14, 15.\"\n    xmin, 
xmax, xip_list = cursor.fetchone()[0].split(':')\n    xmin = int(xmin)\n    xmax = int(xmax)\n    xip_list = list(map(int, xip_list.split(','))) if xip_list else []\n\n    # If the transaction is finished according to the postgres system functions,\n    # but we can't find the TransactionCommitBarrier, we know the transaction is\n    # aborted.\n\n    # xmin is the \"earliest transaction ID (txid) that is still active. All\n    # earlier transactions will either be committed and visible, or rolled\n    # back and dead.\"\n    if txid < xmin:\n        log.info('Transaction(%lu) finished xmin', txid)\n        return True\n\n    # \"A txid that is xmin <= txid < xmax and not in this list was already\n    # completed at the time of the snapshot, and thus either visible or dead\n    # according to its commit status.\"\n    if xmin <= txid and txid < xmax and txid not in xip_list:\n        log.info('Transaction(%lu) finished xip_list', txid)\n        return True\n\n    return False\n\n\ndef get_debug_info(cursor):\n    cursor.execute('SELECT txid_current_snapshot();')\n    return cursor.fetchone()[0]\n","repo_name":"silasbw/django_transaction_barrier","sub_path":"django_transaction_barrier/postgresql.py","file_name":"postgresql.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"31255779229","text":"import sys\nsys.path.append('/home/zikun/work/tracking/pysot-toolkit')\nfrom pysot.datasets import DatasetFactory\nfrom pysot.utils.region import vot_overlap\nimport numpy as np\nfrom tadt_tracker import Tadt_Tracker\nimport os\nimport torch\n\nfrom defaults import _C as cfg\nfrom backbone_v2 import build_vgg16\n\n\notb_root = 'the root path of otb benchmark'\nresult_path = 'the root path that you want to put the result'\nvgg16_model_mat_path = 'the path of the vgg model'\n\ndataset = DatasetFactory.create_dataset(name = 'OTB100', dataset_root = otb_root, load_img = False)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nframe_counter = 0\npred_bboxes = []\nmodel = build_vgg16(cfg)\n\n\nif not os.path.exists(result_path):\n    os.mkdir(result_path)\nfor video in dataset:\n    #if video.name != 'CarScale':\n    #    continue\n    print('video',video.name)\n    for idx, (img_path, gt_bbox) in enumerate(video):\n        print('frame:',idx)\n        if idx == frame_counter:\n            target_location = np.array(gt_bbox)\n            tracker = Tadt_Tracker(cfg, model = model, device = device, display = False)\n            tracker.initialize_tadt(img_path, target_location)\n        else:\n            tracker.tracking(img_path, idx)\n    tracker.end_visualize()\n    tracker.saving_result(result_path, video.name, zero_index = False)\n","repo_name":"ZikunZhou/TADT-python","sub_path":"tadt_otb_v2.py","file_name":"tadt_otb_v2.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"78"} +{"seq_id":"29947274398","text":"import discord\nfrom discord.ext import commands\n\nclass MCwiki(commands.Cog):\n    \n    def __init__(self, client):\n        self.client = client\n    \n    @commands.command()\n    async def mcwiki(self, ctx):\n        await ctx.channel.send(\"https://minecraft.gamepedia.com/Minecraft_Wiki\")\n    \ndef setup(client):\n    client.add_cog(MCwiki(client))","repo_name":"0rang-3/GRAPE-Discord-Bot","sub_path":"cogs/mcwiki.py","file_name":"mcwiki.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72714441212","text":"\nimport os\n\ndef removeFile(filePath):\n    files = 
os.listdir(filePath)\n for file in files:\n fileName = f\"{filePath}/{file}\"\n if os.path.isdir(fileName):\n removeFile(fileName)\n else:\n if \"长期更新\" in fileName :\n os.remove(fileName)","repo_name":"luofanrain/lanzou-more-upload","sub_path":"controller/removeFile.py","file_name":"removeFile.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32529668950","text":"from typing import Optional, Callable\nimport Box2D as b2\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom gym.utils import seeding\n\nfrom .car_model import CarModel, CAR_WIDTH, CAR_HEIGHT\n\nROAD_HIGHT = 25.0\nDT = 1 / 60\n\n\nclass FrictionZoneListener(b2.b2ContactListener):\n def __init__(self, env):\n b2.b2ContactListener.__init__(self)\n self.env = env\n\n def BeginContact(self, contact):\n self._contact(contact, True)\n\n def EndContact(self, contact):\n self._contact(contact, False)\n\n def _contact(self, contact, begin):\n if (\n contact.fixtureA == self.env.friction_zone\n or contact.fixtureB == self.env.friction_zone\n ):\n if begin:\n self.env.friction_zone.touch = True\n else:\n self.env.friction_zone.touch = False\n\n\nclass CliffDaredevil(gym.Env):\n metadata = {\"render.modes\": [\"human\", \"rgb_array\"], \"video.frames_per_second\": 30}\n\n def __init__(\n self, render_mode: Optional[str] = None, friction_profile: float = 0.1\n ):\n self.contactListener_keepref = FrictionZoneListener(self)\n self.min_position = -5.0\n self.max_position = 75.0\n self.goal_zone = (50.0, 50.0 + CAR_WIDTH)\n self.cliff_edge = 0.3\n self.friction_start = 40.0\n self.friction = (\n friction_profile\n if callable(friction_profile)\n else lambda _: friction_profile\n )\n self.world = b2.b2World((0, -10), contactListener=self.contactListener_keepref)\n self._build_road()\n self.viewer = None\n self.action_space = spaces.Box(\n np.array([-1, 0]), np.array([+1, +1]), dtype=np.float32\n ) # gas, brake\n self.observation_space = spaces.Box(\n low=np.array([self.min_position, -32.0]),\n high=np.array([self.max_position, 32.0]),\n dtype=np.float32,\n ) # position, velocity\n self.car: Optional[CarModel] = None\n self.render_mode = render_mode\n self.seed()\n self.reset()\n\n def _build_road(self):\n self.ground = self.world.CreateStaticBody(\n fixtures=[\n b2.b2FixtureDef(\n shape=b2.b2EdgeShape(\n vertices=[\n (self.min_position, ROAD_HIGHT),\n (self.goal_zone[1] + self.cliff_edge, ROAD_HIGHT),\n ]\n ),\n friction=0.99,\n ),\n b2.b2FixtureDef(\n shape=b2.b2EdgeShape(\n vertices=[\n (self.friction_start, ROAD_HIGHT),\n (self.goal_zone[1], ROAD_HIGHT),\n ]\n ),\n friction=self.friction(0),\n ),\n ]\n )\n self.ground.fixtures[1].touch = False\n self.friction_zone = self.ground.fixtures[1]\n self.ground.userData = self.ground\n\n # In new Gym API, this function is deprecated\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n truncated = False\n if action is not None:\n action = np.clip(action, -1.0, 1.0)\n self.car.gas(action[0])\n self.car.brake(action[1])\n self.car.step()\n self.world.Step(DT, 6 * 30, 2 * 30)\n x, y = self.car.hull.position\n if self.friction_zone.touch:\n progress = np.clip(\n (x - self.friction_start) / (self.goal_zone[1] - self.friction_start),\n 0.0,\n 1.0,\n )\n friction = self.friction(progress)\n self.friction_zone.friction = friction\n backward = x < self.min_position\n reward = 0\n if backward:\n reward -= 1.0\n angle = 
self.car.hull.angle\n terminated = y < ROAD_HIGHT or backward or np.abs(angle) > 0.9\n if self.goal_zone[0] < x < self.goal_zone[1]:\n reward += 1.0\n distance = self.goal_zone[0] - x\n reward -= np.abs(distance)\n cost = -(self.goal_zone[1] + self.cliff_edge - x)\n v = self.car.hull.linearVelocity[0]\n if self.render_mode == \"human\":\n self.render()\n return (\n np.array([x, v], np.float32),\n reward,\n terminated,\n truncated,\n {\"cost\": cost},\n )\n\n def reset(\n self,\n *,\n seed: Optional[int] = None,\n options: Optional[dict] = None,\n ):\n self._destroy()\n self.friction_zone.touch = False\n #assert type(self.friction) is Callable\n self.friction_zone.friction = self.friction(0.0)\n position = self.np_random.uniform(low=-0.1, high=0.1)\n self.car = CarModel(self.world, position, ROAD_HIGHT)\n if self.render_mode == \"human\":\n self.render()\n return self.step(None)[0], {}\n\n def _destroy(self):\n if self.car is None:\n return\n self.car.destroy()\n\n def render(self):\n mode = self.render_mode\n screen_width, screen_height = 640, 320\n if self.viewer is None:\n import cliff_daredevil.rendering as rendering\n\n self.viewer = rendering.Viewer(screen_width, screen_height)\n self.viewer.set_bounds(self.min_position, self.max_position, 0.0, 40.0)\n sky = rendering.make_polygon(\n [\n (self.min_position, 0.0),\n (self.min_position, 40.0),\n (self.max_position, 40.0),\n (self.max_position, 0.0),\n ]\n )\n sky.set_color(135 / 255, 206 / 255, 235 / 255)\n xs_ground = np.linspace(self.goal_zone[1], self.goal_zone[1] + 1.0, 25)\n ys_ground = np.linspace(ROAD_HIGHT, 0.0, 25)\n xs_ground += self.np_random.uniform(-0.75, 0.75, 25)\n ground = rendering.make_polygon(\n [(self.min_position, 0.0), (self.min_position, ROAD_HIGHT)]\n + [*zip(xs_ground, ys_ground)]\n )\n ground.set_color(237 / 255, 201 / 255, 175 / 255)\n oil = rendering.make_polyline(\n [\n (self.friction_start, ROAD_HIGHT - 0.1),\n (self.goal_zone[1], ROAD_HIGHT - 0.1),\n ]\n )\n oil.set_linewidth(2)\n xs_sea = np.linspace(self.goal_zone[1], self.max_position, 25)\n ys_sea = np.maximum(np.sin(xs_sea * 7.1) * 2.0, 0.2)\n sea = rendering.make_polygon(\n [*zip(xs_sea, ys_sea)] + [(xs_sea[-1], 0.0), (xs_sea[0], 0.0)]\n )\n sea.set_color(0, 105 / 255, 148 / 255)\n sun = rendering.make_circle(2.5)\n sun.set_color(252 / 255, 212 / 255, 64 / 255)\n sun.add_attr(rendering.Transform((65, 35)))\n car_width, car_height = CAR_WIDTH, CAR_HEIGHT\n l, r, t, b = -car_width / 2, car_width / 2, car_height, 0\n car = rendering.make_polygon([(l, b), (l, t), (r, t), (r, b)])\n self.car_transform = rendering.Transform()\n car.add_attr(self.car_transform)\n radius = car_height / 2.5\n frontwheel = rendering.make_circle(radius)\n frontwheel.add_attr(rendering.Transform(translation=(car_width / 4, 0)))\n frontwheel.add_attr(self.car_transform)\n frontwheel.set_color(0.5, 0.5, 0.5)\n frontwheel_rim = rendering.make_circle(0.3, res=30, filled=True)\n frontwheel_rim.set_color(1.0, 0.0, 0.0)\n frontwheel_rim.add_attr(rendering.Transform(translation=(radius - 0.3, 0)))\n self.frontwheel_rim_transform = rendering.Transform()\n frontwheel_rim.add_attr(self.frontwheel_rim_transform)\n backwheel = rendering.make_circle(radius)\n backwheel.add_attr(rendering.Transform(translation=(-car_width / 4, 0)))\n backwheel.add_attr(self.car_transform)\n backwheel.set_color(0.5, 0.5, 0.5)\n backwheel_rim = rendering.make_circle(0.3, res=30, filled=True)\n backwheel_rim.set_color(1.0, 0.0, 0.0)\n backwheel_rim.add_attr(rendering.Transform(translation=(radius - 0.3, 0)))\n 
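# separate transforms for the rim markers so each one can follow its wheel's spin\n            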
self.backwheel_rim_transform = rendering.Transform()\n backwheel_rim.add_attr(self.backwheel_rim_transform)\n\n def make_flag(position):\n flagx = position\n flagy1 = ROAD_HIGHT\n flagy2 = flagy1 + 2.0\n flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))\n flag = rendering.FilledPolygon(\n [\n (flagx, flagy2),\n (flagx, flagy2 - 1.0),\n (flagx + 2.5, flagy2 - 0.5),\n ]\n )\n flag.set_color(0.8, 0.8, 0)\n return flag, flagpole\n\n right_flag, right_flagpole = make_flag(self.goal_zone[0])\n left_flag, left_flagpole = make_flag(self.goal_zone[1])\n self.viewer.add_geom(sky)\n self.viewer.add_geom(sea)\n self.viewer.add_geom(ground)\n self.viewer.add_geom(oil)\n self.viewer.add_geom(sun)\n self.viewer.add_geom(car)\n self.viewer.add_geom(frontwheel)\n self.viewer.add_geom(frontwheel_rim)\n self.viewer.add_geom(backwheel)\n self.viewer.add_geom(backwheel_rim)\n self.viewer.add_geom(right_flagpole)\n self.viewer.add_geom(right_flag)\n self.viewer.add_geom(left_flagpole)\n self.viewer.add_geom(left_flag)\n pos = self.car.hull.position\n self.car_transform.set_translation(*pos)\n self.car_transform.set_rotation(self.car.hull.angle)\n self.frontwheel_rim_transform.set_translation(*pos)\n self.backwheel_rim_transform.set_translation(*pos)\n self.frontwheel_rim_transform.set_translation(*self.car.wheels[0].position)\n self.frontwheel_rim_transform.set_rotation(self.car.wheels[0].angle)\n self.backwheel_rim_transform.set_rotation(self.car.wheels[1].angle)\n self.backwheel_rim_transform.set_translation(*self.car.wheels[1].position)\n return self.viewer.render(return_rgb_array=mode == \"rgb_array\")\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n\n\nif __name__ == \"__main__\":\n from pyglet.window import key\n from gym.wrappers import TimeLimit\n\n a = np.array([0.0, 0.0])\n\n def key_press(k, mod):\n global restart\n if k == 0xFF0D:\n restart = True\n if k == key.RIGHT:\n a[0] = +1.0\n if k == key.LEFT:\n a[0] = -1.0\n if k == key.SPACE:\n a[1] = +0.8\n\n def key_release(k, mod):\n if k == key.RIGHT:\n a[0] = 0\n if k == key.LEFT:\n a[0] = 0\n if k == key.SPACE:\n a[1] = 0\n\n env: gym.Env = CliffDaredevil()\n env = TimeLimit(env, 600)\n env.render()\n env.viewer.window.on_key_press = key_press\n env.viewer.window.on_key_release = key_release\n isopen = True\n while isopen:\n env.reset()\n total_reward = 0.0\n total_cost = 0.0\n steps = 0\n restart = False\n while True:\n s, r, done, truncated, info = env.step(a)\n total_reward += r\n total_cost += info[\"cost\"]\n if steps % 200 == 0 or done:\n print(\"\\naction \" + str([\"{:+0.4f}\".format(x) for x in a]))\n print(\"step {} total_reward {:+0.4f}\".format(steps, total_reward))\n print(\"step {} total_cost {:+0.4f}\".format(steps, total_cost))\n print(\"step {} cost {:+0.4f}\".format(steps, info[\"cost\"]))\n print(\"step {} reward {:+0.4f}\".format(steps, r))\n steps += 1\n isopen, _ = env.render()\n if done or restart or not isopen:\n break\n env.close()\n","repo_name":"yardenas/cliff-daredevil","sub_path":"cliff_daredevil/cliff_daredevil.py","file_name":"cliff_daredevil.py","file_ext":"py","file_size_in_byte":12200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28160549561","text":"\"\"\"This module implements the models for the Blog core system.\"\"\"\n\nfrom app.models.acl import AccessControlMixin\nfrom app.models.patch import CommentMixin, StatusMixin, TagMixin\nfrom app.models import db\n\n\nclass Post(AccessControlMixin, StatusMixin, 
CommentMixin, TagMixin, db.Model):\n    \"\"\"Implements the Post model.\n    \"\"\"\n\n    title = db.Column(db.String(32))\n    sub = db.Column(db.String(128))\n    user_id = db.Column(db.Integer, db.ForeignKey(u'user.id'))\n    user = db.relationship('User', backref='posts', lazy='select')\n    content = db.Column(db.Text())\n\n    def __init__(self, title, sub, user, content):\n        \"\"\"Initialize the Post object.\n\n        Args:\n            :param title: The title of the post\n            :param sub: The subtitle of the post\n            :param user: A user\n            :param content: Content string\n        \"\"\"\n        super(Post, self).__init__()\n        self.title = title\n        self.sub = sub\n        self.user = user\n        self.content = content\n\n    def __repr__(self):\n        return self.title\n","repo_name":"OhBonsai/flask-boilerplate","sub_path":"app/models/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"27607036198","text":"from collections import OrderedDict\nfrom warnings import warn\n\nfrom astropy.table import join\nimport astropy.table as table\nfrom astropy.table.operations import _join, _merge_table_meta\nfrom astropy.units import Unit\nimport numpy as np\nfrom astropy.coordinates import SkyCoord\nfrom .exceptions import MultipleResultsWarning, NotFoundWarning, NotFoundError\n\n\nclass MaskedColumn(table.MaskedColumn):\n\n    def find(self, sub, start=0, end=None):\n        if isinstance(self, (MaskedColumn)):\n            str_array = np.array(self, dtype=str)\n            index = np.core.defchararray.find(str_array, sub, start=start, end=end) != -1\n            return np.where(index)[0], str_array[index]\n\n        else:\n            return NotImplemented\n\n    def to_array(self, units=None):\n        \"\"\"\n        Returns the columns as a MaskedArray.\n        If units are specified, convert to good units\n        before returning the array.\n        \"\"\"\n        if units is None:\n            return self.data.copy()\n        else:\n            # Convert to quantity array\n            data = self.quantity\n            # Need to store masked values\n            # (quantity arrays don't deal with masks)\n            mask = self.mask\n\n            # Make sure it has the good units\n            data = data.to(units)\n\n            # Convert to masked array\n            return np.ma.array(data.value, mask=mask)\n\n\nclass Column(table.Column):\n\n    def find(self, sub, start=0, end=None):\n        if isinstance(self, (Column)):\n            str_array = np.array(self, dtype=str)\n            index = np.core.defchararray.find(str_array, sub, start=start, end=end) != -1\n            return np.where(index)[0], str_array[index]\n\n        else:\n            return NotImplemented\n\n    def to_array(self, units=None):\n        \"\"\"\n        Returns the columns as an array.\n        If units are specified, convert to good units\n        before returning the array.\n        \"\"\"\n        if units is None:\n            return self.data.copy()\n        else:\n            # Convert to quantity array\n            data = self.quantity\n\n            # Make sure it has the good units\n            data = data.to(units)\n\n            # Convert to array\n            return np.array(data.value)\n\n\nclass Table(table.Table):\n\n    # Redefine class attributes (if not, the originals would be taken)\n    Column = Column\n    MaskedColumn = MaskedColumn\n\n    # Set attributes\n    main_col = None  # Default column used to order\n    log = []  # Save output when using insert_value method\n\n    def __init__(self, *args, masked=True, **kwargs):\n\n        super().__init__(*args, masked=masked, **kwargs)\n\n        if self.masked:\n            self.nan_to_mask()\n\n    # New methods\n    def rename_columns(self, old, new):\n\n        for ko, kn in zip(old, new):\n            self.rename_column(ko, kn)\n\n    def nan_to_mask(self):\n        \"\"\"\n        Replace nan by masked array\n        \"\"\"\n        if self.masked:\n            for k in self.keys():\n                if not 
isinstance(self[k], SkyCoord) and self[k].dtype == float:\n                    self[k].mask = np.isnan(self[k]) | self[k].mask\n        else:\n            raise TypeError(\"Input must be a Masked Table.\" +\n                            \"\\n \\t Set its mask to True before calling\" +\n                            \" (example: t = Table(t, masked=True, copy=False)).\")\n\n    def by_pl_name(self, *plName, name_key=None):\n        \"\"\"\n        Return the complete line of a given planet name (plName)\n        \"\"\"\n        position = self.get_index(*plName, name_key=name_key)\n        position = np.array(position)\n\n        is_valid = (position >= 0)\n\n        if is_valid.any():\n            out = self[position[is_valid]]\n        else:\n            raise NotFoundError(*plName)\n\n        return out\n\n    def by_plName(self, *args, **kwargs):\n        \"\"\"\n        Old name of by_pl_name. Just an alias\n        \"\"\"\n\n        return self.by_pl_name(*args, **kwargs)\n\n    def get_index(self, *plName, name_key=None):\n        '''\n        Return the lines index where plName are located for the column given by name_key\n        name_key default is given by main_col attribute of the object\n        '''\n        name_key = name_key or self.main_col\n\n        position = []\n        for pl in plName:\n            try:\n                position.append(int(self[name_key].find(pl)[0]))\n\n            except TypeError:\n                values = self[name_key].find(pl)[1]\n                if len(values) > 0:\n                    warn(MultipleResultsWarning(pl, values))\n                else:\n                    warn(NotFoundWarning(pl))\n                position.append(-1)\n\n        return position\n\n    def set_main_col(self, colname=None, extension='_temp'):\n        '''\n        Set self.main_col and assign it to the first column.\n        If colname is None, simply assign self.main_col to the\n        first column.\n        '''\n        if self.main_col is None:\n            self.main_col = colname\n        elif colname is None:\n            colname = self.main_col\n\n        colname_temp = colname+extension\n        self.rename_column(colname, colname_temp)\n        self.add_column(self[colname_temp], name=colname, index=0)\n        self.remove_column(colname_temp)\n\n    def correct_units(self, badunits=['degrees', 'days', 'hours','jovMass', 'mags'],\n                      gunits=['degree', 'day', 'hour','jupiterMass', 'mag'], verbose=True,\n                      debug=False):\n        '''\n        Correct column units for astropy units\n        '''\n        text_frame = \"Column {} corrected for '{}' unit (previous was '{}')\"\n\n        for col in self.colnames:\n            if debug:\n                print(col, self[col].unit)\n\n            # Skip skycoord: no unit attribute\n            if isinstance(self[col], SkyCoord):\n                continue\n\n            # Search for bad units\n            # TODO: check if unit is in list instead of loop and use dict\n            # to replace\n            for bunit, gunit in zip(badunits, gunits):\n                if self[col].unit == bunit:\n                    self[col].unit = gunit\n\n                    # Message and log it\n                    self.log.append(\n                        text_frame.format(col, self[col].unit, bunit))\n                    if verbose:\n                        print(self.log[-1])\n\n    def cols_2_qarr(self, *keys):\n        '''\n        Returns columns given in input as astropy q_arrays\n        '''\n\n        warn(DeprecationWarning(\n            \"This method will be removed in future versions. 
Do not use it.\"))\n\n out = []\n for k in keys:\n try:\n out.append(np.ma.array(self[k].data) * self[k].unit)\n except TypeError:\n out.append(np.ma.array(self[k].data))\n\n return tuple(out)\n\n def set_units(self, units, cols=None):\n '''\n Assign units to columns.\n units:\n list of units (str or astropy units) to be assign\n cols:\n list of columns names (str).\n If None is given, it takes all the keys, so Table.keys() as default\n '''\n\n if not cols:\n cols = self.keys()\n\n for col, u in zip(cols, units):\n self[col].unit = u\n\n def new_value(self, plName, col, value):\n\n names = np.array(self[self.main_col], dtype=str)\n position = np.where(names == plName)[0]\n\n self[col][position] = value\n\n def complete(self, right, key=None, join_type='left',\n add_col=True, metadata_conflicts='warn',\n verbose=True, debug=False, **kwargs):\n \"\"\"\n Add every missing data in self if present in right.\n\n join_type : 'inner': do not add new rows\n 'outer': add new rows if not present in self\n add_col: add new colums from right\n \"\"\"\n\n key = key or self.main_col\n\n # Try converting inputs to Table as needed\n if not isinstance(right, Table):\n right = Table(right)\n\n try:\n out = self._complete(right, key=key, join_type=join_type,\n add_col=add_col, verbose=verbose, debug=debug)\n except:\n warn(\"Custom table completion failed, trying default astropy join.\")\n # NOTE: This seemd to break when I tested it quickly,\n # but I needed masking anyway so I did not get to the bottom of the problem.\n # - Thomas\n col_name_map = OrderedDict()\n out = _join(\n self,\n right,\n join_type=join_type,\n col_name_map=col_name_map,\n keys=key,\n **kwargs\n )\n\n # Merge the column and table meta data. Table subclasses might override\n # these methods for custom merge behavior.\n _merge_table_meta(out, [self, right], metadata_conflicts=metadata_conflicts)\n\n return out\n\n def _complete(self, right, key=None, join_type='left', add_col=True,\n verbose=True, debug=False):\n\n if not key:\n raise ValueError('key is empty')\n\n # Save shared columns without \"key\"\n cols = intersection(self.keys(), right.keys())\n cols.remove(key)\n\n # Join tables\n join_t = join(self, right, join_type=join_type, keys=key)\n\n # Complete masked values of \"self\" if available in \"right\"\n for col in cols:\n\n # Add eventually a condition to check units!\n\n # Names of joined columns (default from join())\n col1, col2 = col + '_1', col + '_2'\n\n # Index of masked in \"self\" and not masked in \"right\"\n index = join_t[col1].mask & ~join_t[col2].mask\n\n # Reassign value\n join_t[col1].unshare_mask()\n join_t[col1][index] = join_t[col2][index]\n\n # Remove 2nd column and rename to original\n join_t[col1].name = col\n del join_t[col2]\n\n # Remove added columns from \"right\" if not wanted\n supp_cols = difference(right.keys(), self.keys())\n if debug: print(supp_cols)\n\n if not add_col and supp_cols:\n if verbose:\n print('remove non shared columns from second table')\n join_t.remove_columns(supp_cols)\n\n return join_t\n\n def complete_cols(self, col_in, col_out, name_key=None):\n '''\n Use a column from table to complete another column.\n Input:\n col_in: list of names of columns to use (list of str)\n col_out: list of names of columns to complete (list of str)\n '''\n # Take default col if none is given\n name_key = name_key or self.main_col\n\n # Def table with cols to use and rename it to cols to complete\n temp_table = Table(self[[name_key] + col_in], masked=True)\n temp_table.nan_to_mask()\n 
temp_table.rename_columns(col_in, col_out)\n\n        # Complete with the temp_table\n        return self.complete(temp_table, key=name_key)\n\n    def add_calc_col(self, fct, *args, f_args=(), f_kwargs={}, col_keys=[], **kwargs):\n        '''\n        Add a new column which is the result of fct(table[col_keys], *f_args, **f_kwargs)\n\n        args and kwargs are passed to MaskedColumn instantiation\n\n        '''\n\n        # Build tuple of column inputs to fct and add to f_args\n        cols = ()\n        for key in col_keys:\n            cols += (self[key],)\n        f_args = cols + f_args\n\n        # Define column and add it\n        col = MaskedColumn(*args, data=fct(*f_args, **f_kwargs), **kwargs)\n        self.add_column(col)\n\n    def check_col_units(self, colname):\n\n        col_units = self[colname].unit\n        try:  # Check if col_units valid\n            1. * Unit(col_units)\n        except TypeError:\n            print('Column has no units (unit = None)')\n        except:  # Enter valid unit and refresh the whole table\n            print(\"Column units '{}' are not\".format(col_units) +\n                  ' recognized by astropy.\\n')\n            print(\"Error message from astropy:\")\n            print_unit_error(str(col_units))\n            print(\"-------------------------\")\n            gunit = input('***** Please enter the corresponding unit'\n                          + ' recognized by astropy unit: ')\n            self.correct_units(badunits=[str(col_units)], gunits=[gunit])\n\n\ndef difference(left, right):\n    if isinstance(left, list) and isinstance(right, list):\n        return list(set(left) - set(right))\n    else:\n        return NotImplemented\n\n\ndef intersection(tbl, other):\n    if isinstance(tbl, list):\n        return list(set(tbl).intersection(other))\n    else:\n        return NotImplemented\n\n\ndef print_unit_error(str_unit):\n\n    try:\n        Unit(str_unit)\n    except ValueError as e:\n        print(e)\n","repo_name":"AntoineDarveau/exofile","sub_path":"exofile/table_custom.py","file_name":"table_custom.py","file_ext":"py","file_size_in_byte":12812,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"2077249573","text":"from typing import List, Optional, Union\n\nimport numpy as np\n\nfrom . import hm\nfrom .base import Point2PointCalibrationBase\n\n\nclass Arun(Point2PointCalibrationBase):\n    # Least-Squares Fitting of Two 3-D Point Sets\n    # K. S. Arun, T. S. Huang, and S.D. 
Blostein\n # IEEE Transactions on Pattern Analysis and Machine Intelligence, 1987\n\n @staticmethod\n def name():\n return 'Arun'\n\n def __init__(self):\n super().__init__()\n self._points_a = None\n self._points_b = None\n\n def set_points(self, points_a: np.ndarray, points_b: np.ndarray,\n weights: Optional[Union[List, np.ndarray]] = None) -> None:\n if weights is not None:\n raise RuntimeError(\"Weights are not supported by Arun\")\n if points_a.shape[0] != 3 or points_b.shape[0] != 3 or points_a.shape[1] != points_b.shape[1]:\n raise RuntimeError(\"Both point clouds must have shapes (3, n)\")\n self._points_a = points_a\n self._points_b = points_b\n\n def _calibrate(self, **_):\n if self._points_a is None or self._points_b is None:\n raise RuntimeError(\"Point data is missing\")\n return hm.analytic.solve_arun(self._points_a, self._points_b)\n","repo_name":"uulm-mrm/excalibur","sub_path":"excalibur/calibration/point2point/arun.py","file_name":"arun.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"37026052500","text":"import os\nimport sys\nimport numpy as np\nimport glob\n\ndirectory = \".\"\nif len(sys.argv) > 1:\n directory = sys.argv[1]\nnpzglob = os.path.join(directory, \"*.npz\")\nnpzfiles = glob.glob(npzglob)\n\nnfiles = len(npzfiles)\nflaggedfiles = []\nfor idx, npz in enumerate(npzfiles):\n npzdata = np.load(npz)\n if \"sigma_in\" in npzdata.files:\n sigma = np.array(npzdata[\"sigma_in\"])\n npss = np.array(npzdata[\"npss\"])\n if np.count_nonzero(sigma) > 0 and np.count_nonzero(npss) == 0:\n flaggedfiles.append(npz)\n","repo_name":"achilleas-k/connectivity","sub_path":"checksigma.py","file_name":"checksigma.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70873657853","text":"def corresponding_grade(grade):\n if 2.00 <= grade <= 2.99:\n return 'Fail'\n elif 3.00 <= grade <= 3.49:\n return 'Poor'\n elif 3.50 <= grade <= 4.49:\n return 'Good'\n elif 4.50 <= grade <= 5.49:\n return 'Very Good'\n elif 5.50 <= grade <= 6.00:\n return 'Excellent'\n\n\nresult = float(input())\nprint(corresponding_grade(result))\n","repo_name":"KaloyankerR/python-fundamentals-repository","sub_path":"Assignments/Functions/Lab/01. Grades.py","file_name":"01. Grades.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17535315807","text":"import random\n\napp.stepsPerSecond = 5\napp.nextRadius = 0\n\nRect(0, 0, 400, 400)\n\ndef onStep():\n if (app.nextRadius < 200):\n # Increases the radius by 5.\n app.nextRadius += 5\n\n # Define these variables to generate new random values for the next\n # circle. 
Borders are at most 50 and dash values are at most 100.\n    ### (HINT: redGreen is used in an rgb so it can't be bigger than 255!)\n    ### Fix Your Code Here ###\n    redGreen = random.randint(0,255)\n    newBorderWidth = random.randint(0,50)\n    dashWidth = random.randint(0,100)\n    dashSpace = random.randint(0,100)\n\n    # Draws the next circle with the values generated above.\n    Circle(200, 200, app.nextRadius, fill=None,\n           border=rgb(redGreen, 255 - redGreen, 255),\n           borderWidth=newBorderWidth, dashes=(dashWidth, dashSpace))\n","repo_name":"Psycho461/-APCSP-CSAcademyANSWERS","sub_path":"4.5.3 Strange machine.py","file_name":"4.5.3 Strange machine.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"71132450813","text":"import os\r\nimport json\r\n\r\nimport pytest\r\n\r\nfrom habit import Habit\r\nimport analytics\r\n\r\n\r\nDATA_DIR = os.path.join(os.path.realpath(os.path.pardir), \"habittracker\", \"data\")\r\n\r\n\r\n@pytest.fixture()\r\ndef sample_habits_objects():\r\n    sample_data_file_name = os.path.join(DATA_DIR, \"sample_data.json\")\r\n    if os.path.exists(sample_data_file_name):\r\n        # Read json file with example data\r\n        with open(sample_data_file_name, encoding=\"utf-8\") as json_file:\r\n            # Load json file containing example data\r\n            sample_habits = json.load(json_file)\r\n        # Create habit objects from the json data\r\n        sample_habits_objects = list()\r\n        for item in sample_habits[\"sample_data\"]:\r\n            habit = Habit(item[\"name\"], item[\"description\"], item[\"periodicity\"])\r\n            for checkoff_date in item[\"checkoffs\"]:\r\n                habit.check_off(checkoff_date)\r\n            sample_habits_objects.append((habit.id, habit))\r\n        return sample_habits_objects\r\n    else:\r\n        assert False, f\"File {sample_data_file_name} does not exist.\"\r\n\r\n\r\nclass TestListHabits:\r\n    @pytest.mark.unit\r\n    @pytest.mark.positive\r\n    def test_list_habits_valid_option_all(self, sample_habits_objects):\r\n        expected_list_length = len(sample_habits_objects)\r\n        expected_habit_names = [habit[1].name for habit in sample_habits_objects]\r\n        actual = analytics.list_habits(sample_habits_objects)\r\n        actual_habit_names = [habit[1].name for habit in actual]\r\n        assert sorted(actual_habit_names) == sorted(expected_habit_names)\r\n        assert len(actual) == expected_list_length\r\n\r\n    @pytest.mark.unit\r\n    @pytest.mark.negative\r\n    def test_list_habits_invalid_option_habits_missing(self):\r\n        with pytest.raises(TypeError) as exc_info:\r\n            analytics.list_habits()\r\n        expected_error_message = \"list_habits() missing 1 required positional argument: 'habits'\"\r\n        assert str(exc_info.value) == expected_error_message\r\n\r\n\r\nclass TestListHabitsByPeriodicity:\r\n    @pytest.mark.unit\r\n    @pytest.mark.positive\r\n    @pytest.mark.parametrize(\"expected_periodicity\", [\"DAILY\", \"WEEKLY\"])\r\n    def test_list_habits_by_periodicity_valid_option_all(self, sample_habits_objects, expected_periodicity):\r\n        expected_habit_names = [habit[1].name for habit in sample_habits_objects\r\n                                if habit[1].periodicity.name == expected_periodicity]\r\n        expected_list_length = len(expected_habit_names)\r\n        actual = analytics.list_habits_by_periodicity(sample_habits_objects, expected_periodicity)\r\n        actual_habit_names = [habit[1].name for habit in actual]\r\n        assert sorted(actual_habit_names) == sorted(expected_habit_names)\r\n        assert len(actual) == expected_list_length\r\n\r\n    @pytest.mark.unit\r\n    @pytest.mark.negative\r\n    
@pytest.mark.parametrize(\"expected_periodicity\", [\"DAILY\", \"WEEKLY\"])\r\n def test_list_habits_by_periodicity_invalid_option_habits_missing(self, expected_periodicity):\r\n with pytest.raises(TypeError) as exc_info:\r\n analytics.list_habits_by_periodicity(periodicity=expected_periodicity)\r\n expected_error_message = \"list_habits_by_periodicity() missing 1 required positional argument: 'habits'\"\r\n assert str(exc_info.value) == expected_error_message\r\n\r\n @pytest.mark.unit\r\n @pytest.mark.negative\r\n def test_list_habits_by_periodicity_invalid_option_periodicity_missing(self, sample_habits_objects):\r\n with pytest.raises(TypeError) as exc_info:\r\n analytics.list_habits_by_periodicity(habits=sample_habits_objects)\r\n expected_error_message = \"list_habits_by_periodicity() missing 1 required positional argument: 'periodicity'\"\r\n assert str(exc_info.value) == expected_error_message\r\n\r\n\r\nclass TestGetLongestStreak:\r\n @pytest.mark.unit\r\n @pytest.mark.positive\r\n def test_get_longest_streak_valid_option_all(self, sample_habits_objects):\r\n expected_longest_streak = 11\r\n actual = analytics.get_longest_streak(sample_habits_objects)\r\n assert actual == expected_longest_streak\r\n\r\n @pytest.mark.unit\r\n @pytest.mark.positive\r\n def test_get_longest_streak_valid_option_all_empty_habits(self):\r\n expected_longest_streak = 0\r\n actual = analytics.get_longest_streak(habits=[])\r\n assert actual == expected_longest_streak\r\n\r\n\r\nclass TestGetLongestStreakForHabit:\r\n @pytest.mark.unit\r\n @pytest.mark.positive\r\n @pytest.mark.parametrize(\"expected_longest_streaks\", [[\"Meditating\", 5],\r\n [\"Workout\", 4],\r\n [\"Drink 2 liter of water\", 11],\r\n [\"Didn't watch TV\", 1],\r\n [\"No spend\", 1]])\r\n def test_get_longest_streak_for_habit_valid_option_all(self, sample_habits_objects, expected_longest_streaks):\r\n habit_id = [habit[1].id for habit in sample_habits_objects if habit[1].name == expected_longest_streaks[0]]\r\n actual = analytics.get_longest_streak_for_habit(sample_habits_objects, habit_id[0])\r\n assert actual == expected_longest_streaks[1]\r\n\r\n @pytest.mark.unit\r\n @pytest.mark.positive\r\n def test_get_longest_streak_for_habit_invalid_option_habit_id_not_in_objects(self, sample_habits_objects):\r\n expected_longest_streak = 0\r\n actual = analytics.get_longest_streak_for_habit(habits=sample_habits_objects, habit_id=\"42\")\r\n assert actual == expected_longest_streak\r\n\r\n @pytest.mark.unit\r\n @pytest.mark.negative\r\n def test_get_longest_streak_for_habit_invalid_option_habits_missing(self):\r\n with pytest.raises(TypeError) as exc_info:\r\n analytics.get_longest_streak_for_habit(habit_id=\"42\")\r\n expected_error_message = \"get_longest_streak_for_habit() missing 1 required positional argument: 'habits'\"\r\n assert str(exc_info.value) == expected_error_message\r\n\r\n @pytest.mark.unit\r\n @pytest.mark.negative\r\n def test_get_longest_streak_for_habit_invalid_option_habit_id_missing(self, sample_habits_objects):\r\n with pytest.raises(TypeError) as exc_info:\r\n analytics.get_longest_streak_for_habit(habits=sample_habits_objects)\r\n expected_error_message = \"get_longest_streak_for_habit() missing 1 required positional argument: 'habit_id'\"\r\n assert str(exc_info.value) == 
expected_error_message\r\n","repo_name":"anjakuchenbecker/oofpp_habits_project","sub_path":"tests/test_analytics.py","file_name":"test_analytics.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"22495850660","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom exp1a_psignifit_analysis import a_data_extraction, b3_plot_staircase, c_plots, \\\n d_average_participant, e_average_exp_data, make_average_plots, make_long_df, \\\n plot_w_errors_no_1probe\nfrom rad_flow_psignifit_analysis import plot_runs_ave_w_errors\nfrom psignifit_tools import get_psignifit_threshold_df, get_psig_thr_w_hue\nfrom python_tools import which_path, running_on_laptop, switch_path\n\n'''\nThis script if for checking for any differences between thr upper visual field and lower visual field.\nIt will use the already analysied RUNDATA_sorted.xlsx to do this.\nLoop through the participant run folders and append each RUNDATA-sorted.xlsx, with addition 'run' column.\nSave to P_all_runs_master_output.csv.\n\nThen run psignifit on this\n'''\n\n# # loop through run folders with first 4 scripts (a, get_psignifit_threshold_df, b3, c)\n# # then run script d to get master lists and averages\n# exp_path = '/Users/nickmartin/Documents/PycharmProjects/Cardiff/exp1a_data'\n# participant_list = ['aa', 'bb', 'cc', 'dd', 'ee']\n# exp_path = '/Users/nickmartin/Documents/PycharmProjects/Cardiff/EXP1_sep4_5'\n# exp_path = r\"C:\\Users\\sapnm4\\OneDrive - Cardiff University\\PycharmProjects\\Cardiff\\EXP1_sep4_5\"\n# exp_path = r\"C:\\Users\\sapnm4\\OneDrive - Cardiff University\\PycharmProjects\\Cardiff\\Exp3_Ricco_NM_v4\"\nexp_path = r\"C:\\Users\\sapnm4\\OneDrive - Cardiff University\\PycharmProjects\\Cardiff\\exp1a_data\"\nconvert_path1 = os.path.normpath(exp_path)\nif running_on_laptop():\n convert_path1 = switch_path(convert_path1, 'mac_oneDrive')\nexp_path = convert_path1\n\nparticipant_list = ['aa', 'bb', 'cc', 'dd', 'ee', 'Nick'] # ['Kim', 'Kris', 'Simon', 'Nick']\n# participant_list = ['Nick'] # ['Kim', 'Kris', 'Simon', 'Nick']\np_idx_plus = 1\n\nn_runs = 12\nanalyse_from_run = 1\ntrim_list = []\n\n'''Part 1, get threshold for each participant and make master list'''\n\nexp_thr = []\nexp_CI_width = []\n\n# for p_idx, participant_name in enumerate(participant_list):\n#\n# root_path = os.path.join(exp_path, participant_name)\n#\n# # search to automatically get run_folder_names\n# dir_list = os.listdir(root_path)\n# run_folder_names = []\n# for i in range(n_runs): # numbers 0 to 11\n# check_dir = f'{participant_name}_{i + analyse_from_run}' # numbers 1 to 12\n# if check_dir in dir_list:\n# run_folder_names.append(check_dir)\n#\n# if len(run_folder_names) > 0:\n# print(\"running analysis for:\")\n# for i in run_folder_names:\n# print(i)\n# else:\n# print(\"no run folders found\")\n#\n#\n# # add RUNDATA-sorted to all_data\n# all_data = []\n#\n# for run_idx, run_dir in enumerate(run_folder_names):\n#\n# print(f'\\ncompiling analysis for {participant_name}, {run_dir}, {participant_name}_{run_idx+1}\\n')\n# save_path = f'{root_path}{os.sep}{run_dir}'\n#\n# # don't delete this (participant_name = participant_name),\n# # needed to ensure names go name1, name2, name3 not name1, name12, name123\n# p_name = participant_name\n#\n# # '''a'''\n# p_name = f'{participant_name}_{run_idx+1}_output.csv'\n# # p_name = f'{participant_name}{run_idx+1}'\n# # isi_list = [-1, 0, 2, 4, 6, 
9, 12, 24]\n#\n# if os.path.isfile(os.path.join(save_path, 'RUNDATA-sorted.xlsx')):\n# run_data_path = os.path.join(save_path, 'RUNDATA-sorted.xlsx')\n# elif os.path.isfile(os.path.join(save_path, p_name)):\n# run_data_path = os.path.join(save_path, p_name)\n# elif os.path.isfile(os.path.join(save_path, f'{run_dir}_output.csv')):\n# run_data_path = os.path.join(save_path, f'{run_dir}_output.csv')\n# elif os.path.isfile(os.path.join(save_path, f'{participant_name}_output.csv')):\n# run_data_path = os.path.join(save_path, f'{participant_name}_output.csv')\n# else:\n# raise FileNotFoundError(f'{participant_name}, run_dir {run_dir}')\n#\n# # run_data_path = f'{save_path}{os.sep}RUNDATA-sorted.xlsx'\n#\n# # run_data_path = os.path.join(save_path, )\n#\n# if run_data_path[-4:] == 'xlsx':\n# run_data_df = pd.read_excel(run_data_path, engine='openpyxl',\n# # usecols=['ISI',\n# # 'stair',\n# # 'separation',\n# # # 'group',\n# # 'probeLum', 'trial_response', 'corner']\n# )\n# else:\n# run_data_df = pd.read_csv(run_data_path)\n# print(f\"run_data_df:\\n{run_data_df}\")\n#\n# # add isi column for multi-indexing\n# if 'run' not in list(run_data_df.columns):\n# run_data_df.insert(0, 'run', int(run_idx+1))\n# # if verbose:\n# print(f'run_data_df:\\n{run_data_df.head()}')\n#\n# # get column names to use on all_data_df\n# column_names = list(run_data_df)\n#\n# # add to all_data\n# all_data.append(run_data_df)\n#\n# # create all_data_df - reshape to 2d\n# all_data_shape = np.shape(all_data)\n# print(f'all_data_shape:\\n{all_data_shape}')\n#\n# if len(np.shape(all_data)) == 2:\n# sheets, rows, columns = np.shape(all_data)\n# all_data = np.reshape(all_data, newshape=(sheets * rows, columns))\n# # if verbose:\n# print(f'all_data reshaped from {all_data_shape} to {np.shape(all_data)}')\n# all_data_df = pd.DataFrame(all_data, columns=column_names)\n# else:\n# all_data_df = pd.concat(all_data, ignore_index=True)\n#\n# visual_field_list = ['UVF' if i < 200 else 'LVF' for i in all_data_df['corner'].to_list()]\n# all_data_df['vis_field'] = visual_field_list\n# # if verbose:\n# print(f\"all_data_df:\\n{all_data_df}\")\n#\n# sep_list = sorted(list(all_data_df['separation'].unique()))\n# print(f\"sep_list: {sep_list}\")\n#\n#\n# # # if save_all_data:\n# save_name = 'P_all_runs_master_output.csv'\n# save_csv_path = os.path.join(root_path, save_name)\n# # # if verbose:\n# print(f\"\\nsaving all_data_df to save_csv_path: {save_csv_path}\")\n# all_data_df.to_csv(save_csv_path, index=False)\n#\n#\n#\n# all_data_df = pd.read_csv(os.path.join(root_path, 'P_all_runs_master_output.csv'))\n#\n# vis_field_names = ['UVF', 'LVF']\n#\n#\n# both_vfs_thr = []\n# both_vfs_CI_width = []\n#\n# for idx, vis_field_name in enumerate(vis_field_names):\n#\n#\n# print(f'Running psignifit for {vis_field_name}')\n#\n# vis_field_df = all_data_df[all_data_df['vis_field'] == vis_field_name]\n# print(vis_field_df)\n#\n# isi_list = sorted(list(vis_field_df['ISI'].unique()))\n# print(f\"isi_list: {isi_list}\")\n#\n# sep_list = sorted(list(vis_field_df['separation'].unique()))\n# print(f\"sep_list: {sep_list}\")\n#\n#\n#\n#\n# '''get psignifit thresholds df - use stairs as sep levels rather than using groups'''\n#\n# thr_df = get_psignifit_threshold_df(root_path=exp_path,\n# p_run_name=participant_name,\n# csv_name=vis_field_df,\n# n_bins=9, q_bins=True,\n# thr_col='probeLum',\n# sep_col='separation', sep_list=sep_list,\n# isi_col='ISI', isi_list=isi_list,\n# conf_int=True, thr_type='Bayes',\n# plot_both_curves=False,\n# # cols_to_add_dict=None, 
save_name=f'psignifit_{vis_field_name}_ISI{ISI}_sep{separation}',\n# cols_to_add_dict=None, save_name=f'psignifit_{vis_field_name}',\n# show_plots=False, save_plots=False,\n# verbose=True)\n#\n# # thr_df['vis_field'] = vis_field_name\n# thr_df.insert(1, 'vis_field', vis_field_name)\n#\n# cond_list = thr_df['separation'].to_list()\n# if vis_field_name == 'LVF':\n# cond_list = [-.01 if i == 0 else -i for i in cond_list]\n# # thr_df['cond'] = cond_list\n# thr_df.insert(2, 'cond', cond_list)\n#\n# print(f'psignifit_{vis_field_name}:\\n{thr_df}')\n# column_names = list(thr_df)\n#\n# # add this VFs thr and CI width to list to concat with other VF\n# both_vfs_thr.append(thr_df)\n#\n# CI_width_filename = f'psignifit_{vis_field_name}_CI_width.csv'\n#\n# VF_CI_width_df = pd.read_csv(os.path.join(root_path, CI_width_filename))\n# VF_CI_width_df.insert(1, 'vis_field', vis_field_name)\n# VF_CI_width_df.insert(2, 'cond', cond_list)\n# both_vfs_CI_width.append(VF_CI_width_df)\n#\n# # progress_df = pd.concat(both_vfs_thr)\n# # save_name = 'psignifit_progress.csv'\n# # save_csv_path = os.path.join(root_path, save_name)\n# # print(f\"\\nsaving progress_df to save_csv_path:\\n{save_csv_path}\")\n# # progress_df.to_csv(save_csv_path, index=False)\n#\n#\n# # create both_vfs_df - reshape to 2d\n# both_vfs_shape = np.shape(both_vfs_thr)\n# sheets, rows, columns = np.shape(both_vfs_thr)\n# both_vfs_thr = np.reshape(both_vfs_thr, newshape=(sheets * rows, columns))\n# print(f'both_vfs_thr reshaped from {both_vfs_shape} to {np.shape(both_vfs_thr)}')\n# both_vfs_df = pd.DataFrame(both_vfs_thr, columns=column_names)\n# print(f\"both_vfs_df:\\n{both_vfs_df}\")\n#\n# save_name = 'psignifit_both_vfs.csv'\n# save_csv_path = os.path.join(root_path, save_name)\n# print(f\"\\nsaving all_data_df to save_csv_path:\\n{save_csv_path}\")\n# both_vfs_df.to_csv(save_csv_path, index=False)\n#\n# # create both_vfs_CI_width_df - reshape to 2d\n# both_vfs_CI_width_shape = np.shape(both_vfs_CI_width)\n# sheets, rows, columns = np.shape(both_vfs_CI_width)\n# both_vfs_CI_width = np.reshape(both_vfs_CI_width, newshape=(sheets * rows, columns))\n# print(f'both_vfs_thr reshaped from {both_vfs_CI_width_shape} to {np.shape(both_vfs_CI_width)}')\n# both_vfs_CI_width_df = pd.DataFrame(both_vfs_CI_width, columns=column_names)\n# print(f\"both_vfs_CI_width_df:\\n{both_vfs_CI_width_df}\")\n#\n# save_name = 'both_vfs_CI_width.csv'\n# save_csv_path = os.path.join(root_path, save_name)\n# print(f\"\\nsaving both_vfs_CI_width to save_csv_path:\\n{save_csv_path}\")\n# both_vfs_CI_width_df.to_csv(save_csv_path, index=False)\n#\n#\n# '''Load psignifit_both_vfs and check columns'''\n# # make plot to show UVF and LVF on one axis\n# psig_both_vf_df = pd.read_csv(os.path.join(root_path, 'psignifit_both_vfs.csv'))\n# print(f\"\\npsig_both_vf_df:\\n{psig_both_vf_df}\")\n#\n# '''Load both_vfs_CI_width_df and check columns'''\n# # make plot to show UVF and LVF on one axis\n# both_vfs_CI_width_df = pd.read_csv(os.path.join(root_path, 'both_vfs_CI_width.csv'))\n# print(f\"\\nboth_vfs_CI_width_df:\\n{both_vfs_CI_width_df}\")\n#\n# # change 1probe from 99 to 20\n# both_vf_columns = list(psig_both_vf_df.columns)\n# sep_list = psig_both_vf_df['separation'].to_list()\n# sep_list = [20 if i == 99 else i for i in sep_list]\n# psig_both_vf_df['separation'] = sep_list\n# both_vfs_CI_width_df['separation'] = sep_list\n#\n# if 'cond' not in both_vf_columns:\n# print(\"\\nMaking cond column\")\n# # add condition list which is equal to sep for uVF or negative sep for LVF (with 
-.01 instead of -0)\n# sep_list = psig_both_vf_df['separation'].to_list()\n# vf_list = psig_both_vf_df['vis_field'].to_list()\n# cond_list = []\n# for vf, sep in zip(vf_list, sep_list):\n# if vf == 'LVF':\n# if sep == 0:\n# this_cond = -.01\n# else:\n# this_cond = -sep\n# else:\n# this_cond = sep\n# print(f\"vf: {vf}, sep: {sep}, this_cond: {this_cond}\")\n# cond_list.append(this_cond)\n# print(f\"cond_list: {cond_list}\")\n# psig_both_vf_df.insert(2, 'cond', cond_list)\n# both_vfs_CI_width_df.insert(2, 'cond', cond_list)\n#\n#\n# # change 1probe from 99 to 20\n# cond_list = psig_both_vf_df['cond'].to_list()\n# cond_list = [20 if i == 99 else i for i in cond_list]\n# cond_list = [-20 if i == -99 else i for i in cond_list]\n# psig_both_vf_df['cond'] = cond_list\n# both_vfs_CI_width_df['cond'] = cond_list\n#\n#\n# save_name = 'psignifit_both_vfs.csv'\n# save_csv_path = os.path.join(root_path, save_name)\n# print(f\"\\nsaving all_data_df to save_csv_path:\\n{save_csv_path}\")\n# psig_both_vf_df.to_csv(save_csv_path, index=False)\n#\n# save_name = 'both_vfs_CI_width.csv'\n# save_csv_path = os.path.join(root_path, save_name)\n# print(f\"\\nsaving both_vfs_CI_width to save_csv_path:\\n{save_csv_path}\")\n# both_vfs_CI_width_df.to_csv(save_csv_path, index=False)\n#\n# # add participant name\n# if 'p_name' not in both_vf_columns:\n# psig_both_vf_df.insert(0, 'p_name', participant_name)\n# both_vfs_CI_width_df.insert(0, 'p_name', participant_name)\n#\n# print(f\"psig_both_vf_df:\\n{psig_both_vf_df}\")\n# exp_thr.append(psig_both_vf_df)\n# exp_CI_width.append(both_vfs_CI_width_df)\n#\n#\n#\n# # save master dfs\n# exp_thr_df = pd.concat(exp_thr)\n# save_csv_path = os.path.join(exp_path, 'MASTER_exp_VF_thr.csv')\n# exp_thr_df.to_csv(save_csv_path, index=False)\n# print(f\"exp_thr_df:\\n{exp_thr_df}\")\n#\n# # save master dfs\n# exp_CI_width_df = pd.concat(exp_CI_width)\n# save_csv_path = os.path.join(exp_path, 'MASTER_exp_VF_CI.csv')\n# exp_CI_width_df.to_csv(save_csv_path, index=False)\n# print(f\"exp_CI_width_df:\\n{exp_thr_df}\")\n#\n#\n#\n#\n#\n#\n#\n# # # make long form df\n# exp_VF_thr_df = pd.read_csv(os.path.join(exp_path, 'MASTER_exp_VF_thr.csv'))\n# print(f\"\\nexp_VF_thr_df:\\n{exp_VF_thr_df}\")\n#\n# exp_VF_thr_df.rename({'ISI_-1': 'ISI_99'}, axis=1, inplace=True)\n# exp_VF_thr_long_df = pd.wide_to_long(exp_VF_thr_df, stubnames='ISI_',\n# i=['vis_field', 'separation', 'p_name', 'cond'],\n# j='ISI',\n# sep='')\n# # exp_VF_thr_long_df.rename({'ISI val': 'ISI', 'ISI_': 'probeLum'}, axis=1, inplace=True)\n# exp_VF_thr_long_df.rename({'ISI_': 'probeLum'}, axis=1, inplace=True)\n# exp_VF_thr_long_df.reset_index(inplace=True)\n# print(f\"\\nexp_VF_thr_long_df:\\n{exp_VF_thr_long_df}\")\n#\n# # make long form CIs\n# exp_VF_CI_df = pd.read_csv(os.path.join(exp_path, 'MASTER_exp_VF_CI.csv'))\n# print(f\"\\nexp_VF_CI_df:\\n{exp_VF_CI_df}\")\n#\n#\n# exp_VF_CI_df.rename({'ISI_-1': 'ISI_99'}, axis=1, inplace=True)\n# exp_VF_CI_long_df = pd.wide_to_long(exp_VF_CI_df, stubnames='ISI_',\n# i=['vis_field', 'separation', 'p_name', 'cond'],\n# j='ISI',\n# sep='')\n# # exp_VF_CI_long_df.rename({'ISI val': 'ISI', 'ISI_': 'probeLum'}, axis=1, inplace=True)\n# exp_VF_CI_long_df.rename({'ISI_': 'CI_width'}, axis=1, inplace=True)\n# exp_VF_CI_long_df.reset_index(inplace=True)\n# print(f\"\\nexp_VF_CI_long_df:\\n{exp_VF_CI_long_df}\")\n#\n#\n# # add cond number column\n# cond_vals = sorted(exp_VF_thr_long_df['cond'].unique())\n# neg_sep_num_dict = dict(zip(cond_vals, list(range(len(cond_vals)))))\n# 
print(f\"\\nneg_sep_num_dict: {neg_sep_num_dict}\")\n#\n# exp_VF_thr_long_df.insert(4, 'cond_num', exp_VF_thr_long_df[\"cond\"].map(neg_sep_num_dict))\n# exp_VF_CI_long_df.insert(4, 'cond_num', exp_VF_CI_long_df[\"cond\"].map(neg_sep_num_dict))\n# print(f\"\\nexp_VF_thr_long_df:\\n{exp_VF_thr_long_df}\")\n# save_csv_path = os.path.join(exp_path, 'MASTER_exp_VF_thr_long.csv')\n# exp_VF_thr_long_df.to_csv(save_csv_path, index=False)\n#\n# save_csv_path = os.path.join(exp_path, 'MASTER_exp_VF_CI_long.csv')\n# exp_VF_CI_long_df.to_csv(save_csv_path, index=False)\n#\n# print('\\nPart 1, get threshold for each participant and make master list: finished\\n')\n\n\n\n\n\n'''Part 2: make plots\nfor each participant make plots for all data\nthresholds (with negative sep)\nDifference\n\nMake conditions plots\nJust concurrent (ISI_-1)\nJust sep (0, 2, 3, 6)\n\nAll participants\ndifference, \nJust concurrent (ISI_-1)\nJust sep (0, 2, 3, 6)\n'''\nexp_VF_thr_long_df = pd.read_csv(os.path.join(exp_path, 'MASTER_exp_VF_thr_long.csv'))\nprint(f\"\\nexp_VF_thr_long_df:\\n{exp_VF_thr_long_df}\")\n\n# get means per condition\ngroupby_sep_thr_df = exp_VF_thr_long_df.drop('p_name', axis=1)\nexp_mean_thr_long_df = groupby_sep_thr_df.groupby(['cond_num', 'ISI'], sort=True).mean()\nexp_mean_thr_long_df.reset_index(inplace=True)\nprint(f\"\\nexp_mean_thr_long_df:\\n{exp_mean_thr_long_df}\")\n\n\nexp_VF_CI_long_df = pd.read_csv(os.path.join(exp_path, 'MASTER_exp_VF_CI_long.csv'))\nprint(f\"\\nexp_VF_CI_long_df:\\n{exp_VF_CI_long_df}\")\ngroupby_sep_CI_df = exp_VF_CI_long_df.drop('p_name', axis=1)\nexp_mean_CI_long_df = groupby_sep_CI_df.groupby(['cond_num', 'ISI'], sort=True).mean()\nexp_mean_CI_long_df.reset_index(inplace=True)\n# exp_mean_CI_long_df = exp_mean_CI_long_df.CI_width.div(2, fill_value=0),\nexp_mean_CI_long_df['halved_CI'] = exp_mean_CI_long_df.CI_width.div(2, fill_value=0)\nprint(f\"\\nexp_mean_CI_long_df:\\n{exp_mean_CI_long_df}\")\n\n\nexp_mean_thr_long_df['ISI'] = [str(i) for i in exp_mean_thr_long_df['ISI'].to_list()]\nexp_mean_CI_long_df['ISI'] = [str(i) for i in exp_mean_CI_long_df['ISI'].to_list()]\nprint(f\"\\nexp_mean_thr_long_df:\\n{exp_mean_thr_long_df}\")\n\n\n\n'''Fig 1 - all data'''\nprint('\\nFig 1 - all data')\n\nfig_1_thr_df = exp_mean_thr_long_df.copy()\nfig_1_err_df = exp_mean_CI_long_df.copy()\n\n# use wide means df\nprint(f\"exp_mean_thr_long_df:\\n{exp_mean_thr_long_df}\")\nfig_1_thr_df = exp_mean_thr_long_df.pivot(index=['cond_num', 'separation', 'cond'], columns='ISI', values='probeLum')\nfig_1_thr_df.reset_index(inplace=True, drop=False)\nprint(f\"fig_1_thr_df:\\n{fig_1_thr_df}\")\n\n# fig_1_thr_df.index.name = None\nisi_col_dict = {'99': 'conc', '0': 'ISI_0', '2': 'ISI_2',\n '4': 'ISI_4', '6': 'ISI_6', '9': 'ISI_9',\n '12': 'ISI_12', '24': 'ISI_24'}\nfig_1_thr_df.rename(columns=isi_col_dict, inplace=True)\nprint(f\"fig_1_thr_df:\\n{fig_1_thr_df}\")\nfig_1_thr_df = fig_1_thr_df[['cond_num', 'separation', 'cond',\n 'conc', 'ISI_0', 'ISI_2', 'ISI_4',\n 'ISI_6', 'ISI_9', 'ISI_12', 'ISI_24']]\nfig_1_thr_df.index.name = None\nprint(f\"fig_1_thr_df:\\n{fig_1_thr_df}\")\n\nprint(f\"\\nexp_mean_CI_long_df:\\n{exp_mean_CI_long_df}\")\nwide_mean_CI_df = exp_mean_CI_long_df.pivot(index=['cond_num', 'separation', 'cond'], columns='ISI', values='halved_CI')\nwide_mean_CI_df.reset_index(inplace=True, drop=False)\nwide_mean_CI_df.index.name = None\nwide_mean_CI_df.rename(columns=isi_col_dict, inplace=True)\n\nwide_mean_CI_df = wide_mean_CI_df[['cond_num', 'separation', 'cond',\n 'conc', 
'ISI_0', 'ISI_2', 'ISI_4',\n 'ISI_6', 'ISI_9', 'ISI_12', 'ISI_24']]\nwide_mean_CI_df.index.name = None\nprint(f\"wide_mean_CI_df:\\n{wide_mean_CI_df}\")\n\n\n# add cond number column\nif 'cond_num' not in list(fig_1_thr_df.columns):\n cond_vals = fig_1_thr_df['cond'].unique()\n neg_sep_num_dict = dict(zip(cond_vals, list(range(len(cond_vals)))))\n print(f\"\\nneg_sep_num_dict: {neg_sep_num_dict}\")\n\n fig_1_thr_df.insert(4, 'cond_num', exp_mean_thr_long_df[\"cond\"].map(neg_sep_num_dict))\n wide_mean_CI_df.insert(4, 'cond_num', exp_mean_CI_long_df[\"cond\"].map(neg_sep_num_dict))\n\nwide_mean_thr_w_cond_idx_df = fig_1_thr_df.set_index('cond_num')\nwide_mean_thr_w_cond_idx_df.sort_index(inplace=True)\nif 'p_name' in list(wide_mean_thr_w_cond_idx_df.columns):\n wide_mean_thr_w_cond_idx_df.drop('p_name', axis=1, inplace=True)\nif 'vis_field' in list(wide_mean_thr_w_cond_idx_df.columns):\n wide_mean_thr_w_cond_idx_df.drop('vis_field', axis=1, inplace=True)\nif 'cond' in list(wide_mean_thr_w_cond_idx_df.columns):\n wide_mean_thr_w_cond_idx_df.drop('cond', axis=1, inplace=True)\nif 'separation' in list(wide_mean_thr_w_cond_idx_df.columns):\n wide_mean_thr_w_cond_idx_df.drop('separation', axis=1, inplace=True)\nprint(f\"exp_thr_w_cond_idx_df:\\n{wide_mean_thr_w_cond_idx_df}\")\n\nwide_mean_CI_w_cond_idx_df = wide_mean_CI_df.set_index('cond_num')\nwide_mean_CI_w_cond_idx_df.sort_index(inplace=True)\n\nx_tick_vals = wide_mean_thr_w_cond_idx_df.index.tolist()\nx_tick_labels = sorted(list(exp_mean_thr_long_df['cond'].unique()))\nx_tick_labels = ['1pr' if i in [20.0, -20.0] else str(i) for i in x_tick_labels]\nx_tick_labels = ['-0' if i == '-0.01' else str(i) for i in x_tick_labels]\nx_tick_labels = [i[:-2] if i not in ['1pr', '-0'] else i for i in x_tick_labels]\n\nprint(f\"x_tick_vals: {x_tick_vals}\")\nprint(f\"x_tick_labels: {x_tick_labels}\")\n\nif 'p_name' in list(wide_mean_CI_w_cond_idx_df.columns):\n wide_mean_CI_w_cond_idx_df.drop('p_name', axis=1, inplace=True)\nif 'vis_field' in list(wide_mean_CI_w_cond_idx_df.columns):\n wide_mean_CI_w_cond_idx_df.drop('vis_field', axis=1, inplace=True)\nif 'cond' in list(wide_mean_CI_w_cond_idx_df.columns):\n wide_mean_CI_w_cond_idx_df.drop('cond', axis=1, inplace=True)\nif 'separation' in list(wide_mean_CI_w_cond_idx_df.columns):\n wide_mean_CI_w_cond_idx_df.drop('separation', axis=1, inplace=True)\nprint(f\"exp_CI_w_cond_idx_df:\\n{wide_mean_CI_w_cond_idx_df}\")\n\n\nisi_name_list = [i for i in list(wide_mean_thr_w_cond_idx_df.columns) if 'ISI_' in i]\n\nfig_1a_title = 'all data: compare UVF & LVF\\n(Errors are mean of participant CIs, per ISI)'\n\nplot_runs_ave_w_errors(fig_df=wide_mean_thr_w_cond_idx_df, error_df=wide_mean_CI_w_cond_idx_df,\n jitter=.1, error_caps=True, alt_colours=False,\n legend_names=isi_name_list,\n x_tick_vals=x_tick_vals,\n x_tick_labels=x_tick_labels,\n x_axis_label='Sep in diag pixels. 
Neg=LVF, Pos=UVF',\n even_spaced_x=True, fixed_y_range=False,\n fig_title=fig_1a_title, save_name='all_data_VFs.png',\n save_path=exp_path, verbose=True)\nax = plt.gca() # to get the axis\nax.axvline(x=(x_tick_vals[-1]/2), linestyle=\"-.\", color='lightgrey') # add dotted line at zero\n\nplt.show()\nplt.close()\n\n\n\n'''Fig 2, difference between UVF and LVF'''\n'''Plot shoing difference in VF for each ISI'''\nprint(f\"\\nplot diff between VFs for each ISI\")\n# for each separation value, subtract LFV from UVF for difference score.\n\nget_diff_df = exp_VF_thr_long_df.copy()\nprint(f\"get_diff_df ({get_diff_df.shape}):\\n{get_diff_df}\")\n\nLVF_df = get_diff_df.loc[get_diff_df['cond_num'] < 7]\ncond_num_list = LVF_df['cond_num'].tolist()\nISI_val_list = LVF_df.pop('ISI').tolist()\np_name_list = LVF_df.pop('p_name').tolist()\nLVF_df = LVF_df.drop(['cond', 'vis_field'], axis=1)\nLVF_df.set_index('separation', inplace=True)\n\n\nUVF_df = get_diff_df.loc[get_diff_df['cond_num'] >= 7]\nUVF_df = UVF_df.drop(['cond', 'ISI', 'vis_field', 'p_name'], axis=1)\nUVF_df.set_index('separation', inplace=True)\nprint(f\"LVF_df ({LVF_df.shape}):\\n{LVF_df}\")\nprint(f\"UVF_df ({UVF_df.shape}):\\n{UVF_df}\")\n\n# plot difference.\ndiff_df = UVF_df.subtract(LVF_df, fill_value=0)\nprint(f\"diff_df ({diff_df.shape}):\\n{diff_df}\")\n\ndiff_df['cond_num'] = cond_num_list\nif 'ISI' not in list(diff_df.columns):\n diff_df.insert(1, 'ISI', ISI_val_list)\ndiff_df = diff_df.rename(columns={'probeLum': 'thr_diff'})\n\npos_sep_vals = sorted(diff_df.index.unique())\ndiff_df.reset_index(inplace=True)\nprint(f\"diff_df ({diff_df.shape}):\\n{diff_df}\")\n\n# convert ISI column to string, to make it work as Hue\ndiff_df['ISI'] = ['conc' if i == 99 else str(i) for i in diff_df['ISI'].to_list()]\n\nfig, ax = plt.subplots(figsize=(10, 6))\nsns.pointplot(data=diff_df, x='cond_num', y='thr_diff',\n hue='ISI',\n estimator=np.mean, errorbar='se', dodge=True, markers='.',\n errwidth=1, capsize=.2,\n )\n\nx_tick_vals = sorted(diff_df['cond_num'].unique())\nax.set_xticks(x_tick_vals)\ncond_sep_dict = {0: '0', 1: '1', 2: '2', 3: '3', 4: '6', 5: '18', 6: '1pr'}\nx_tick_labels = [cond_sep_dict[k] for k in x_tick_vals]\nax.set_xticklabels(x_tick_labels)\nprint(f\"pos_sep_vals: {pos_sep_vals}\")\nprint(f\"x_tick_labels: {x_tick_labels}\")\n\nfig_title = f'exp1a all data: diff UVF - LVF\\n(Errors are SEs of means collapsed across participants)'\nplt.title(fig_title)\nx_axis = 'Sep in diag pixels'\nax.set_xlabel(x_axis)\ny_axis = 'Threshold different (UVF - LVF)'\nax.set_ylabel(y_axis)\nax.axhline(y=0, linestyle=\"-.\", color='lightgrey') # add dotted line at zero\n\nsave_as = os.path.join(exp_path, 'diff_vfs.png')\nplt.savefig(save_as)\nplt.show()\n\n\n'''Make diff plot per participant'''\nif 'p_name' not in list(diff_df.columns):\n diff_df.insert(1, 'p_name', p_name_list)\nprint(f\"diff_df ({diff_df.shape}):\\n{diff_df}\")\n\np_name_list = set(p_name_list)\nprint(f\"p_name_list: {p_name_list}\")\n\nfor p_name in p_name_list:\n p_name_diff_df = diff_df[diff_df['p_name'] == p_name]\n\n fig, ax = plt.subplots(figsize=(10, 6))\n sns.pointplot(data=p_name_diff_df, x='cond_num', y='thr_diff',\n hue='ISI',\n estimator=np.mean, errorbar='se',\n dodge=True, markers='.',\n errwidth=1, capsize=.2,\n )\n\n x_tick_vals = sorted(diff_df['cond_num'].unique())\n ax.set_xticks(x_tick_vals)\n cond_sep_dict = {0: '0', 1: '1', 2: '2', 3: '3', 4: '6', 5: '18', 6: '1pr'}\n x_tick_labels = [cond_sep_dict[k] for k in x_tick_vals]\n 
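# map the cond_num index back to separation labels so per-participant axes match the group plot\n    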
ax.set_xticklabels(x_tick_labels)\n print(f\"pos_sep_vals: {pos_sep_vals}\")\n print(f\"x_tick_labels: {x_tick_labels}\")\n\n fig_title = f'exp1a {p_name}: diff UVF - LVF' \\\n # f'\\n(Errors are SEs of means collapsed across participants)'\n plt.title(fig_title)\n x_axis = 'Sep in diag pixels'\n ax.set_xlabel(x_axis)\n y_axis = 'Threshold different (UVF - LVF)'\n ax.set_ylabel(y_axis)\n ax.axhline(y=0, linestyle=\"-.\", color='lightgrey') # add dotted line at zero\n\n save_as = os.path.join(exp_path, f'{p_name}_diff_vfs.png')\n plt.savefig(save_as)\n plt.show()\n\n\n'''One plot, all participants, collapsed across ISIs'''\nfig, ax = plt.subplots(figsize=(10, 6))\nsns.pointplot(data=diff_df, x='cond_num', y='thr_diff',\n hue='p_name',\n estimator=np.mean, errorbar='se',\n dodge=True, markers='.',\n errwidth=1, capsize=.2,\n )\n\nx_tick_vals = sorted(diff_df['cond_num'].unique())\nax.set_xticks(x_tick_vals)\ncond_sep_dict = {0: '0', 1: '1', 2: '2', 3: '3', 4: '6', 5: '18', 6: '1pr'}\nx_tick_labels = [cond_sep_dict[k] for k in x_tick_vals]\nax.set_xticklabels(x_tick_labels)\nprint(f\"pos_sep_vals: {pos_sep_vals}\")\nprint(f\"x_tick_labels: {x_tick_labels}\")\n\nfig_title = f'exp1a participants: diff UVF - LVF' \\\n f'\\n(Errors are SEs of participant means collapsed across ISIs)'\nplt.title(fig_title)\nx_axis = 'Sep in diag pixels'\nax.set_xlabel(x_axis)\ny_axis = 'Threshold different (UVF - LVF)'\nax.set_ylabel(y_axis)\nax.axhline(y=0, linestyle=\"-.\", color='lightgrey') # add dotted line at zero\n\nsave_as = os.path.join(exp_path, f'participant_diff_vfs.png')\nplt.savefig(save_as)\nplt.show()\n\n\n'''just concurrent differences - mean and per participant'''\nconc_diff_df = diff_df[diff_df['ISI'] == 'conc']\nprint(f\"conc_diff_df ({conc_diff_df.shape}):\\n{conc_diff_df}\")\n\nx_tick_vals = conc_diff_df['cond_num'].unique()\n# sort variables by sorted(neg_sep_num_list) order\nneg_sep_num_array = np.array(x_tick_vals)\nprint(f\"\\nneg_sep_num_array: {neg_sep_num_array}\")\nsort_index = np.argsort(x_tick_vals)\nprint(f\"sort_index: {sort_index}\")\n\nx_tick_vals = [x_tick_vals[i] for i in sort_index]\nprint(f\"x_tick_vals: {x_tick_vals}\")\n\nx_tick_labels = conc_diff_df['cond_num'].unique()\nx_tick_labels = [x_tick_labels[i] for i in sort_index]\nx_tick_labels = ['1pr' if i == 20 else i for i in x_tick_labels]\nprint(f\"x_tick_labels: {x_tick_labels}\")\n\n# plot participant and mean differences.\nfig, ax = plt.subplots(figsize=(10, 6))\n\nsns.lineplot(data=conc_diff_df, x='cond_num', y='thr_diff', hue='p_name',\n alpha=.7)\n\nsns.pointplot(data=conc_diff_df, x='cond_num', y='thr_diff',\n estimator=np.mean, errorbar='se',\n markers='.',\n errwidth=1, capsize=.2, color='black')\n\n\nfig_title = f'Concurrent: diff UVF - LVF\\n' \\\n f'(Errors are SEs of means collapsed across participants)'\nplt.title(fig_title)\nx_axis = 'Probe separation (diag pixels)'\nax.set_xlabel(x_axis)\ny_axis = 'Threshold different (UVF - LVF)'\nax.set_ylabel(y_axis)\nax.set_xticks(x_tick_vals)\nax.set_xticklabels(x_tick_labels)\nax.axhline(y=0, linestyle=\"-.\", color='lightgrey') # add dotted line at zero\n\nsave_as = os.path.join(exp_path, 'conc_diff_vfs.png')\nplt.savefig(save_as)\nplt.show()\n\n\n\n\n'''fig 3: make ISI plots'''\nprint(\"\\nMaking ISI plots\")\n\nISI_plots_df = exp_VF_thr_long_df.copy()\nISI_er_plot_df = exp_VF_CI_long_df.copy()\nprint(f\"ISI_plots_df ({ISI_plots_df.shape}):\\n{ISI_plots_df}\")\nprint(f\"ISI_er_plot_df ({ISI_er_plot_df.shape}):\\n{ISI_er_plot_df}\")\n\nsep_vals = 
ISI_plots_df['separation'].unique()\nsep_num_dict = dict(zip(sep_vals, list(range(len(sep_vals)))))\nprint(f\"\\nsep_num_dict: {sep_num_dict}\")\nISI_plots_df.insert(4, 'sep_num', ISI_plots_df[\"separation\"].map(sep_num_dict))\nprint(f\"ISI_plots_df ({ISI_plots_df.shape}):\\n{ISI_plots_df}\")\n\nISI_er_plot_df.insert(4, 'sep_num', ISI_er_plot_df[\"separation\"].map(sep_num_dict))\nISI_er_plot_df['halved_CI'] = ISI_er_plot_df.CI_width.div(2, fill_value=0)\nprint(f\"ISI_er_plot_df ({ISI_er_plot_df.shape}):\\n{exp_VF_CI_long_df}\")\n\n\nISIs_to_plot = [99, 4, 12]\n\nfor this_ISI in ISIs_to_plot:\n\n ISI_name = this_ISI\n if this_ISI == 99:\n ISI_name = 'Concurrent'\n print(f\"ISI: {ISI_name} ({this_ISI})\")\n\n ISI_df = ISI_plots_df.loc[ISI_plots_df['ISI'] == this_ISI]\n print(f\"ISI_df ({ISI_df.shape}):\\n{ISI_df}\")\n\n ISI_error_df = ISI_er_plot_df[ISI_er_plot_df['ISI'] == this_ISI]\n print(f\" ISI_error_df ({ISI_error_df.shape}):\\n{ISI_error_df}\")\n\n x_tick_vals = sorted(ISI_df['sep_num'].unique())\n print(f\"\\nx_tick_vals: {x_tick_vals}\")\n\n x_tick_labels = ['1pr' if i == 20 else str(i) for i in sorted(ISI_df['separation'].unique())]\n print(f\"\\nx_tick_labels: {x_tick_labels}\")\n\n fig, ax = plt.subplots(figsize=(10, 6))\n\n # thick lines showing means with errors.\n sns.pointplot(data=ISI_df, x='sep_num', y='probeLum',\n hue='vis_field',\n errorbar='se', capsize=.05,\n scale=1.25,\n )\n\n # background faint lines showing actual thr\n sns.lineplot(data=ISI_df, x='sep_num', y='probeLum',\n hue='vis_field',\n style='p_name',\n dashes=False,\n alpha=.3,\n legend=False\n )\n\n\n ax.set_xticks(x_tick_vals)\n ax.set_xticklabels(x_tick_labels)\n plt.title(f'ISI {ISI_name}: compare UVF & LVF\\n(Error bars: SE of Participant thresholds)')\n ax.set_xlabel('Sep in diag pixels')\n ax.set_ylabel('Threshold')\n plt.savefig(os.path.join(exp_path, f'exp1a_ISI_{ISI_name}_VFs'))\n plt.show()\n\n\n\n'''figs 4, 5, 6, 6: make sep plots for sep 0, 2, 3, 6'''\nprint(\"\\nMaking separation plots\")\nISI_vals = exp_VF_thr_long_df['ISI'].unique()\nISI_num_dict = dict(zip(ISI_vals, list(range(len(ISI_vals)))))\nprint(f\"\\nISI_num_dict: {ISI_num_dict}\")\nexp_VF_thr_long_df.insert(4, 'ISI_num', exp_VF_thr_long_df[\"ISI\"].map(ISI_num_dict))\nsep_to_plot = [0, 2, 3, 6]\n\nfor this_sep in sep_to_plot:\n sep_df = exp_VF_thr_long_df[exp_VF_thr_long_df['separation'] == this_sep]\n print(f\"sep_df:\\n{sep_df}\")\n\n x_tick_vals = sep_df['ISI_num'].unique()\n x_tick_labels = ['conc' if i == 99 else str(i) for i in sep_df['ISI'].unique()]\n print(f\"\\nx_tick_vals: {x_tick_vals}\")\n print(f\"x_tick_labels: {x_tick_labels}\")\n\n\n # thick lines showing means with errors.\n sns.pointplot(data=sep_df, x='ISI_num', y='probeLum',\n hue='vis_field',\n style='p_name',\n scale=1.25,\n errorbar='se', capsize=.05,\n )\n\n # background faint lines showing actual thr\n sns.lineplot(data=sep_df, x='ISI_num', y='probeLum',\n hue='vis_field',\n style='p_name',\n dashes=False,\n alpha=.3,\n legend=False\n )\n\n ax = plt.gca() # to get the axis\n ax.set_xticks(x_tick_vals)\n ax.set_xticklabels(x_tick_labels)\n plt.title(f'Sep {this_sep}: compare UVF & LVF\\n(Error bars: SE of Participant thresholds)')\n ax.set_xlabel('ISI')\n ax.set_ylabel('Threshold')\n plt.savefig(os.path.join(exp_path, f'exp1a_sep{this_sep}_VFs'))\n plt.show()\n\n\n\n'''make plots for each participant showing there actual thresholds for concurrent'''\nprint(\"\\nMaking participant plots for concurrent probes\")\n\n# add sep num column to thr and err 
dfs.\nsep_vals = exp_VF_thr_long_df['separation'].unique()\nsep_num_dict = dict(zip(sep_vals, list(range(len(sep_vals)))))\nprint(f\"\\nsep_num_dict: {sep_num_dict}\")\n\nif 'sep_num' not in list(exp_VF_thr_long_df.columns):\n exp_VF_thr_long_df.insert(4, 'sep_num', exp_VF_thr_long_df[\"separation\"].map(sep_num_dict))\nprint(f\"exp_VF_thr_long_df ({exp_VF_thr_long_df.shape}):\\n{exp_VF_thr_long_df}\")\n\nif 'sep_num' not in list(exp_VF_CI_long_df.columns):\n exp_VF_CI_long_df.insert(4, 'sep_num', exp_VF_CI_long_df[\"separation\"].map(sep_num_dict))\n\n# add calved CI column to err_df\nif 'halved_CI' not in list(exp_VF_CI_long_df.columns):\n exp_VF_CI_long_df['halved_CI'] = exp_VF_CI_long_df.CI_width.div(2, fill_value=0)\nprint(f\"exp_VF_CI_long_df ({exp_VF_CI_long_df.shape}):\\n{exp_VF_CI_long_df}\")\n\n# Just concurrent data\nconc_thr_df = exp_VF_thr_long_df[exp_VF_thr_long_df['ISI'] == 99]\nprint(f\"conc_thr_df ({conc_thr_df.shape}):\\n{conc_thr_df}\")\nconc_CI_df = exp_VF_CI_long_df[exp_VF_CI_long_df['ISI'] == 99]\nprint(f\"conc_CI_df ({conc_CI_df.shape}):\\n{conc_CI_df}\")\n\n# get participant and vis_field conditions to loop through\np_name_list = conc_thr_df['p_name'].unique()\nprint(f\"p_name_list: {p_name_list}\")\nvf_list = conc_thr_df['vis_field'].unique()\nprint(f\"vf_list: {vf_list}\")\n\n# make plots\ncap_size = 7\nfor p_name in p_name_list:\n p_name_thr_df = conc_thr_df[conc_thr_df['p_name'] == p_name]\n print(f\"p_name_thr_df:\\n{p_name_thr_df}\")\n\n p_name_err_df = conc_CI_df[conc_CI_df['p_name'] == p_name]\n print(f\"p_name_err_df:\\n{p_name_err_df}\")\n\n x_tick_vals = sorted(p_name_thr_df['sep_num'].unique())\n x_tick_labels = ['1pr' if i == 20 else str(i) for i in sorted(p_name_thr_df['separation'].unique())]\n\n fig, ax = plt.subplots(figsize=(10, 6))\n for vis_field in vf_list:\n\n # dfs just for this vis field\n VF_thr_df = p_name_thr_df[p_name_thr_df['vis_field'] == vis_field]\n VF_err_df = p_name_err_df[p_name_err_df['vis_field'] == vis_field]\n\n ax.errorbar(data=VF_thr_df, x='sep_num', y='probeLum',\n yerr=VF_err_df['halved_CI'],\n marker='o', lw=3, elinewidth=2,\n capsize=cap_size)\n\n ax = plt.gca() # to get the axis\n ax.set_xticks(x_tick_vals)\n ax.set_xticklabels(x_tick_labels)\n plt.title(f'{p_name}: compare UVF & LVF\\n(Error bars: Participant 95% CIs)')\n ax.set_xlabel('ISI')\n ax.set_ylabel('Threshold')\n plt.savefig(os.path.join(exp_path, f'exp1a_{p_name}_VFs.png'))\n plt.show()\n\n\nprint('\\nexp1a_analysis_pipe_UVF_LVF finished\\n')\n","repo_name":"Nickdotmartin/Cardiff","sub_path":"Nick_scripts/UVF_LVF_exp1a_analysis_pipe.py","file_name":"UVF_LVF_exp1a_analysis_pipe.py","file_ext":"py","file_size_in_byte":35835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10510682092","text":"import json\nimport os\nimport platform\nimport time\nfrom io import StringIO\n\nimport numpy as np\nimport pandas as pd\nimport psycopg2\nimport requests\nfrom psycopg2.extras import Json\nfrom tqdm.auto import tqdm\n\nfrom db import DB\nfrom misc.reddit_logger import root_logger\nfrom utils import batch\n\nMINUTE = 60\n\n\nclass Gather(DB):\n def __init__(self):\n super(Gather, self).__init__()\n\n self.MINUTE = 60\n self.logger = root_logger\n self.system_type = platform.system()\n self.pushshift_api_client = self.get_pushshift_client()\n\n self.prepared_statement_filepath = os.path.join(\n self.curdir_fullpath, \"prepared_statements.csv\"\n )\n\n self.pushshift_comment_ids_endpoint = (\n 
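# Pushshift REST endpoint that lists the comment ids attached to one submission\n            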
\"https://api.pushshift.io/reddit/submission/comment_ids\"\n )\n\n self.pushshift_submissions_endpoint = (\n \"https://api.pushshift.io/reddit/search/submission/?\"\n )\n\n def get_submissions_to_backfill(\n self, dates_absent: list, stickied: bool = True\n ) -> list:\n before_after_dates = [\n (\n int((x - pd.Timedelta(days=1)).timestamp()),\n int((x + pd.Timedelta(days=1)).timestamp()),\n )\n for x in dates_absent\n ]\n\n # for all dates absent, find out if there are more stickied submissions, and merge them back into stickied_df at the end\n all_params = [\n {\n \"subreddit\": \"wallstreetbets\",\n \"stickied\": \"true\" if stickied is True else \"false\",\n \"before\": x[1],\n \"after\": x[0],\n }\n for x in before_after_dates\n ]\n\n all_backfilled_submissions = []\n for param in tqdm(all_params):\n response = requests.get(self.pushshift_submissions_endpoint, params=param)\n\n if response.status_code == 200:\n data = response.json()\n if \"data\" in data.keys():\n all_backfilled_submissions.append(data[\"data\"])\n\n time.sleep(1)\n\n return all_backfilled_submissions\n\n @staticmethod\n def get_all_daily_discussion_thread_sub_ids_pmaw(\n last_n_days: int = 30, after_n_days: int = None\n ) -> list:\n endpoint = \"https://api.pushshift.io/reddit/search/submission/?\"\n if after_n_days:\n params = {\n \"q\": \"Daily Discussion Thread\",\n \"subreddit\": \"wallstreetbets\",\n \"before\": f\"{last_n_days}\",\n \"after\": f\"{after_n_days}\",\n }\n else:\n params = {\n \"q\": \"Daily Discussion Thread\",\n \"subreddit\": \"wallstreetbets\",\n \"before\": f\"{last_n_days}\",\n }\n\n res = requests.get(endpoint, params=params)\n\n if res.status_code == 200:\n results = res.json()\n\n all_submissions = results[\"data\"]\n\n all_daily_discussion_thread_sub_ids = []\n for sub in all_submissions:\n if \"link_flair_text\" in sub.keys():\n if sub[\"link_flair_text\"] == \"Daily Discussion\":\n all_daily_discussion_thread_sub_ids.append(sub)\n\n return all_daily_discussion_thread_sub_ids\n\n def insert_comments_from_praw(self, comment: dict) -> None:\n insert_query = f\"\"\"INSERT INTO wsb_comments({','.join(comment.keys())}) VALUES %s ON CONFLICT (id) DO NOTHING;\"\"\"\n with self.get_psycopg2_conn() as conn:\n with conn.cursor() as cur:\n insert_query_2 = cur.mogrify(insert_query, tuple(comment.values()))\n print(insert_query_2)\n cur.execute(insert_query_2)\n conn.commit()\n\n def insert_submissions_from_pmaw(self, all_subreddit_submissions: list) -> None:\n all_mogrified_comments, un_mogrified_comments = [], []\n\n with self.get_psycopg2_conn() as conn:\n with conn.cursor() as cur:\n for submission in tqdm(all_subreddit_submissions):\n pop_keys = [\n key\n for key in submission.keys()\n if key not in self.submission_cols\n ]\n for key in pop_keys:\n _ = submission.pop(key)\n\n for key, val in submission.items():\n if key in [\n \"all_awardings\",\n \"awarders\",\n \"author_flair_richtext\",\n \"gildings\",\n \"link_flair_richtext\",\n \"treatment_tags\",\n \"collections\",\n \"crosspost_parent_list\",\n \"poll_data\",\n ]:\n if isinstance(submission[key], dict) or isinstance(\n submission[key], list\n ):\n submission[key] = Json(json.dumps(val))\n try:\n cols_str = \",\".join(submission.keys())\n values_str = (\"%s,\" * len(submission)).rstrip(\",\")\n insert_query = f\"\"\"INSERT INTO wsb_submissions ({cols_str}) VALUES ({values_str}) ON CONFLICT (id) DO NOTHING;\"\"\"\n\n mogrified_comment = cur.mogrify(\n insert_query, list(submission.values())\n )\n 
all_mogrified_comments.append(mogrified_comment)\n\n except (TypeError, psycopg2.ProgrammingError) as _:\n un_mogrified_comments.append(submission)\n\n all_joined_comments = b\" \".join(all_mogrified_comments)\n\n with self.get_psycopg2_conn() as conn:\n with conn.cursor() as cur:\n cur.execute(all_joined_comments)\n conn.commit()\n\n def get_daily_discussion_thread_submissions_pmaw(self, limit: int = 10000) -> list:\n posts = self.pushshift_api_client.search_submissions(\n q=\"Daily Discussion Thread\", subreddit=\"wallstreetbets\", limit=limit\n )\n submissions = [s for s in posts]\n return submissions\n\n def get_all_comment_ids_from_submission_ids_pmaw(\n self, submission_comment_ids: dict\n ) -> list:\n all_sub_id_with_comments = []\n for sub_id, comments in tqdm(submission_comment_ids.items()):\n comments_arr = self.pushshift_api_client.search_comments(ids=comments)\n for c in comments_arr:\n c[\"submission_id\"] = sub_id\n all_sub_id_with_comments.append(c)\n return all_sub_id_with_comments\n\n def get_all_comments_from_pmaw_single_thread(\n self, submissions: list = None, before: int = 30, after: int = 100\n ) -> list:\n if isinstance(submissions, list) and len(submissions) == 0:\n submissions = self.get_all_daily_discussion_thread_sub_ids_pmaw(\n last_n_days=before, after_n_days=after\n )\n elif submissions is None:\n submissions = self.get_all_daily_discussion_thread_sub_ids_pmaw(\n last_n_days=before, after_n_days=after\n )\n\n all_results = []\n for sub in tqdm(submissions):\n url = f\"{self.pushshift_comment_ids_endpoint}/{sub['id']}\"\n res = requests.get(url)\n\n if res.status_code == 200:\n all_results.append(\n {\"submission_id\": sub[\"id\"], \"comment_ids\": res.json()[\"data\"]}\n )\n\n return all_results\n\n @staticmethod\n def filter_all_submissions_for_correct_link_flair_text(\n submissions: list,\n link_flair_text: str = \"Daily Discussion\",\n ) -> tuple:\n exceptions = []\n all_dates_gathered = []\n for submission in submissions:\n if \"link_flair_text\" in list(submission.keys()):\n if submission[\"link_flair_text\"] == link_flair_text:\n try:\n date_info = pd.to_datetime(\n \"-\".join(\n submission[\"full_link\"]\n .split(\"/\")[-2]\n .split(\"_\")[-4:][1:]\n ),\n format=\"%B-%d-%Y\",\n )\n it = {\"date\": date_info, \"submission_links\": []}\n all_dates_gathered.append(it)\n except (TypeError, ValueError) as _:\n exceptions.append(submission)\n else:\n pass\n else:\n pass\n\n return all_dates_gathered, exceptions\n\n @staticmethod\n def match_correct_submission_links_with_their_dates(\n all_dates_gathered: list, submissions: list\n ) -> tuple:\n final_dates_gathered = []\n exceptions = []\n for submission in tqdm(submissions):\n if \"link_flair_text\" in submission.keys():\n if submission[\"link_flair_text\"] == \"Daily Discussion\":\n try:\n date_info = pd.to_datetime(\n \"-\".join(\n submission[\"full_link\"]\n .split(\"/\")[-2]\n .split(\"_\")[-4:][1:]\n ),\n format=\"%B-%d-%Y\",\n )\n for ele in all_dates_gathered:\n if ele[\"date\"] == date_info:\n ele[\"submission_links\"].append(submission[\"full_link\"])\n final_dates_gathered.append(ele)\n\n except (TypeError, ValueError, AssertionError) as _:\n exceptions.append(submission)\n else:\n pass\n else:\n pass\n return final_dates_gathered, exceptions\n\n def compare_submission_dates_in_pmaw_to_dates_in_db(\n self, final_dates_gathered: list\n ) -> tuple[list, pd.DataFrame]:\n all_submissions_meta_df = (\n pd.DataFrame(final_dates_gathered)\n .drop_duplicates(subset=[\"date\"], inplace=False)\n 
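# keep one row per date after the dedup; explode() below fans the per-date submission links back out\n            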
.reset_index(drop=True)\n .set_index(\"date\")\n .explode(\"submission_links\")\n .reset_index()\n )\n\n all_submissions_meta_df.columns = [\"date\", \"links_in_reddit\"]\n\n # Check if these are present in the database, and how many comments do each of these submissions have?\n with self.get_psycopg2_conn() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"SELECT DISTINCT id, full_link FROM wsb_submissions WHERE link_flair_text = 'Daily Discussion';\"\n )\n sub_in_db_df = pd.DataFrame(\n cur.fetchall(), columns=[\"id\", \"links_in_db\"]\n )\n\n merged_df = pd.merge(\n left=all_submissions_meta_df,\n right=sub_in_db_df,\n left_on=\"links_in_reddit\",\n right_on=\"links_in_db\",\n how=\"outer\",\n )\n\n in_db_possible_backup_needed_df = merged_df[~merged_df[\"date\"].isna()].copy()\n in_db_possible_backup_needed_df = in_db_possible_backup_needed_df.sort_values(\n by=\"date\"\n )\n in_db_possible_backup_needed_df = in_db_possible_backup_needed_df.reset_index(\n drop=True\n )\n\n # Grab the submissions that are not there in db and push them\n submission_links_not_uploaded = set(\n in_db_possible_backup_needed_df[\"links_in_reddit\"].values.tolist()\n ) - set(in_db_possible_backup_needed_df[\"links_in_db\"].values.tolist())\n\n submission_ids = [ele.split(\"/\")[6] for ele in submission_links_not_uploaded]\n in_db_not_daily_subs_df = merged_df[merged_df[\"date\"].isna()].copy()\n\n return submission_ids, in_db_not_daily_subs_df\n\n def get_all_comment_ids_for_given_submission_ids_in_db(\n self, submission_ids: list\n ) -> list:\n with self.get_psycopg2_conn() as conn:\n with conn.cursor() as cur:\n cur.execute(\n f\"\"\"SELECT submission_id, id as comment_id from comments;\"\"\"\n )\n submission_comments_df = pd.DataFrame(\n cur.fetchall(), columns=[\"submission_id\", \"comment_id\"]\n )\n\n comment_ids_present = submission_comments_df.loc[\n submission_comments_df.submission_id.isin(submission_ids), \"comment_id\"\n ].values.tolist()\n return comment_ids_present\n\n def find_missing_comment_ids_from_pmaw(\n self,\n submission_ids_to_be_refreshed: list = None,\n comment_ids_present: list = None,\n ) -> tuple[list, list]:\n \"\"\"\n Get all the backfill submissions for some submissions that might be incomplete.\n :return:\n \"\"\"\n\n all_comments, exceptions = [], []\n\n for submission_id in tqdm(submission_ids_to_be_refreshed):\n comments = self.pushshift_api_client.search_submission_comment_ids(\n ids=[submission_id], safe_exit=True, memsafe=True\n )\n\n submission_id_comments = {\n \"submission_id\": submission_id,\n \"comments\": [\n comment\n for comment in comments\n if comment not in comment_ids_present\n ],\n }\n\n if len(submission_id_comments[\"comments\"]) > 0:\n\n comments_arr = self.pushshift_api_client.search_comments(\n ids=submission_id_comments[\"comments\"]\n )\n for c in comments_arr:\n c[\"submission_id\"] = submission_id_comments[\"submission_id\"]\n all_comments.append(c)\n\n return all_comments, exceptions\n\n def insert_all_comments_to_db_using_df_pmaw(self, df: pd.DataFrame) -> None:\n \"\"\"\n For each comment in all_comments... insert that into the database.\n :param df: a list of dicts, [{... 
all dict properties of the wsb_comments table}]\n        :return: None\n        \"\"\"\n\n        print(\"Formatting into correct format...\")\n        to_insert_comments = []\n\n        comment = df.iloc[\n            0,\n        ].to_dict()\n\n        comment_copy = comment.copy()\n        to_pop = [\n            key for key in list(comment_copy.keys()) if key not in self.comments_cols\n        ]\n        for key in to_pop:\n            _ = comment.pop(key)\n\n        # comment = {\n        #     \"subreddit_name_prefixed\" if k == \"subreddit\" else k: v\n        #     for k, v in comment.items()\n        # }\n\n        for i in tqdm(range(len(df))):\n            comment = df.iloc[\n                i,\n            ].to_dict()\n            comment_copy = comment.copy()\n            comment[\"subreddit_name_prefixed\"] = f\"r/{comment['subreddit']}\"\n            to_pop = [\n                key\n                for key in list(comment_copy.keys())\n                if key not in self.comments_cols\n            ]\n\n            for key in to_pop:\n                _ = comment.pop(key)\n\n            for key, val in comment.items():\n                if key in [\n                    \"awarders\",\n                    \"user_reports\",\n                    \"all_awardings\",\n                    \"report_reasons\",\n                    \"gildings\",\n                    \"author_flair_richtext\",\n                    \"treatment_tags\",\n                    \"mod_reports\",\n                ]:\n                    comment[key] = Json(json.dumps(val))\n\n                if isinstance(comment[key], np.bool_):\n                    comment[key] = bool(val)\n\n                if isinstance(comment[key], np.int64):\n                    comment[key] = int(val)\n\n            if \"edited\" in comment.keys() and isinstance(comment[\"edited\"], bool):\n                comment[\"edited\"] = int(0)\n\n            if \"author_patreon_flair\" in comment.keys() and isinstance(\n                comment[\"author_patreon_flair\"], bool\n            ):\n                comment[\"author_patreon_flair\"] = \"\"\n\n            if \"author_cakeday\" in comment.keys() and isinstance(\n                comment[\"author_cakeday\"], float\n            ):\n                comment[\"author_cakeday\"] = bool(0)\n\n            if \"author_premium\" in comment.keys() and isinstance(\n                comment[\"author_premium\"], float\n            ):\n                comment[\"author_premium\"] = bool(0)\n\n            if \"can_mod_post\" in comment.keys() and isinstance(\n                comment[\"can_mod_post\"], float\n            ):\n                comment[\"can_mod_post\"] = bool(0)\n\n            to_insert_comments.append(tuple(comment.values()))\n\n            if (i % 1000000 == 0) & (i > 0):\n\n                df_ = pd.DataFrame.from_records(\n                    to_insert_comments, columns=list(comment.keys())\n                )\n\n                for key in [\n                    \"author_flair_richtext\",\n                    \"gildings\",\n                    \"all_awardings\",\n                    \"awarders\",\n                    \"treatment_tags\",\n                ]:\n                    if key in df_.columns.tolist():\n                        df_ = df_.drop(columns=[key])\n\n                if \"locked\" in df_.columns:\n                    df_.loc[:, \"locked\"] = df_[\"locked\"].astype(bool)\n\n                if \"total_awards_received\" in df_.columns:\n                    df_.loc[:, \"total_awards_received\"] = (\n                        df_[\"total_awards_received\"].fillna(0.0).astype(int)\n                    )\n\n                df_.loc[:, \"edited\"] = df_[\"edited\"].astype(int)\n\n                text_stream: StringIO = StringIO()\n                df_.to_csv(path_or_buf=text_stream, header=True, index=False)\n                text_stream.seek(0)\n\n                print(\"-- COPY-ing into the db...\")\n                focused_cols = \",\".join(df_.columns.tolist())\n                with self.get_psycopg2_conn() as conn:\n                    with conn.cursor() as cur:\n                        try:\n                            cur.copy_expert(\n                                sql=f\"\"\"COPY wsb_comments ({focused_cols}) FROM STDIN WITH (FORMAT CSV, DELIMITER ',', HEADER);\"\"\",\n                                file=text_stream,\n                            )\n                            conn.commit()\n                        except psycopg2.ProgrammingError as e:\n                            print(e)\n                print(\"-- Done.\")\n\n        print(\"-- Vacuuming wsb_comments...\")\n        self.execute_command_in_db(command_str=\"VACUUM wsb_comments;\")\n        print(\"-- Done.\")\n\n    def insert_all_comments_to_db_using_list_pmaw(\n        self, all_comments_list: list, skip_iterations: int = None\n    ) -> None:\n        \"\"\"\n        For each comment in all_comments... insert that into the database.\n        :param all_comments_list: A list of dicts, [{... all dict properties of the wsb_comments table}]\n        :param skip_iterations: If some iterations have already been completed, then just skip past those and move on.\n        :return: None\n        \"\"\"\n\n        print(\"formatting into correct format...\")\n        comments_cols = []\n        to_insert_comments = []\n\n        # make a reasonable batch len\n        batch_len = 50000\n\n        # there are too many comments, batch them\n        batched_comments = batch(all_comments_list, n=batch_len)\n        if skip_iterations is not None:\n            skip_iterations = int(skip_iterations)\n            print(f\"-- skipping iterations: {skip_iterations}\")\n            for _ in tqdm(range(skip_iterations), total=skip_iterations):\n                _ = next(batched_comments)\n\n        # we need to pre-calculate the total number of batches here\n        total_batches = (\n            int(len(all_comments_list) // batch_len) - skip_iterations\n            if skip_iterations is not None\n            else int(len(all_comments_list) // batch_len)\n        )\n\n        j = 0\n        for bc in tqdm(batched_comments, total=total_batches):\n            j += 1\n\n            for comment in tqdm(bc, total=batch_len, leave=False):\n                # we need to remove some keys from comment, but can't pop keys in a loop within a dict, so a workaround\n                # is used, by using comment_copy.\n                comment_copy = comment.copy()\n                to_pop = [\n                    key\n                    for key in list(comment_copy.keys())\n                    if key not in self.comments_cols\n                ]\n                for key in to_pop:\n                    _ = comment.pop(key)\n\n                comment = {\n                    \"subreddit_name_prefixed\" if k == \"subreddit\" else k: v\n                    for k, v in comment.items()\n                }\n                comment[\"subreddit_name_prefixed\"] = \"r/wallstreetbets\"\n\n                for key, val in comment.items():\n                    if key in [\n                        \"awarders\",\n                        \"user_reports\",\n                        \"all_awardings\",\n                        \"report_reasons\",\n                        \"gildings\",\n                        \"author_flair_richtext\",\n                        \"treatment_tags\",\n                        \"mod_reports\",\n                    ]:\n                        # comment[key] = json(json.dumps(val, default=vars))\n                        comment[key] = json.dumps(val, default=vars)\n\n                    if isinstance(comment[key], np.bool_):\n                        comment[key] = bool(val)\n\n                    if isinstance(comment[key], np.int64):\n                        comment[key] = int(val)\n\n                if \"edited\" in comment.keys() and isinstance(comment[\"edited\"], bool):\n                    comment[\"edited\"] = int(0)\n\n                if \"author_patreon_flair\" in comment.keys() and isinstance(\n                    comment[\"author_patreon_flair\"], bool\n                ):\n                    comment[\"author_patreon_flair\"] = \"\"\n\n                if \"author_cakeday\" in comment.keys() and isinstance(\n                    comment[\"author_cakeday\"], float\n                ):\n                    comment[\"author_cakeday\"] = bool(0)\n\n                if \"author_premium\" in comment.keys() and isinstance(\n                    comment[\"author_premium\"], float\n                ):\n                    comment[\"author_premium\"] = bool(0)\n\n                if \"can_mod_post\" in comment.keys() and isinstance(\n                    comment[\"can_mod_post\"], float\n                ):\n                    comment[\"can_mod_post\"] = bool(0)\n\n                if len(comments_cols) == 0:\n                    comments_cols = list(comment.keys())\n\n                to_insert_comments.append(comment)\n\n            # turn into dataframe to make things easier downstream.\n            df_ = pd.DataFrame.from_dict(to_insert_comments)\n\n            text_stream = StringIO()\n            for key in [\"author_flair_richtext\", \"gildings\", \"all_awardings\"]:\n                if key in df_.columns.tolist():\n                    df_ = df_.drop(columns=[key])\n\n            if \"locked\" in df_.columns:\n                df_.loc[:, \"locked\"] = df_[\"locked\"].astype(bool)\n\n            if \"total_awards_received\" in df_.columns:\n                df_.loc[:, \"total_awards_received\"] = (\n                    df_[\"total_awards_received\"].fillna(0.0).astype(int)\n                )\n\n            df_.loc[:, \"edited\"] = df_[\"edited\"].astype(int)\n            df_.to_csv(text_stream, header=True, index=False)\n            text_stream.seek(0)\n\n            print(\"-- copy-ing into the db...\")\n            focused_cols = \",\".join(df_.columns.tolist())\n            with self.get_psycopg2_conn() as conn:\n                with conn.cursor() as cur:\n                    try:\n                        cur.copy_expert(\n                            sql=f\"\"\"copy comments({focused_cols}) from stdin with (format csv, delimiter ',', header);\"\"\",\n                            file=text_stream,\n                        )\n                        conn.commit()\n                    except psycopg2.ProgrammingError as e:\n                        print(e)\n        print(\"-- done.\")\n\n    def backfill_comments(self) -> None:\n        self.logger.info(\n            \"-- 1/9 Fetching 'Daily Discussion Thread' submissions from pmaw ...\"\n        )\n        all_submissions_from_praw = self.get_daily_discussion_thread_submissions_pmaw()\n\n        self.logger.info(\"-- 2/9 Updating wsb_submissions table...\")\n        self.update_wsb_submissions_table(submissions=all_submissions_from_praw)\n\n        self.logger.info(\"-- 3/9 Fetching distinct wsb_submissions ids...\")\n        with self.get_psycopg2_conn() as conn:\n            with conn.cursor() as cur:\n                cur.execute(\"SELECT DISTINCT(id) FROM wsb_submissions;\")\n                submission_ids_present = [i[0] for i in cur.fetchall()]\n\n        self.logger.info(\n            \"-- 4/9 Filtering all submissions for correct link flair text ...\"\n        )\n        (\n            all_dates_gathered,\n            submission_exceptions_1,\n        ) = self.filter_all_submissions_for_correct_link_flair_text(\n            submissions=all_submissions_from_praw\n        )\n\n        self.logger.info(\n            \"-- 5/9 Matching correct submission links with their dates ...\"\n        )\n        (\n            final_dates_gathered,\n            submission_exceptions_2,\n        ) = self.match_correct_submission_links_with_their_dates(\n            all_dates_gathered, submissions=all_submissions_from_praw\n        )\n\n        self.logger.info(\"-- 6/9 Comparing submission dates in pmaw to dates in db...\")\n        (\n            submission_ids_to_be_refreshed,\n            in_db_not_daily_sub_df,\n        ) = self.compare_submission_dates_in_pmaw_to_dates_in_db(\n            final_dates_gathered=final_dates_gathered\n        )\n\n        if len(submission_ids_to_be_refreshed) > 0:\n\n            self.logger.info(\n                \"-- 7/9 Getting all comment_ids for given submission ids...\"\n            )\n            comment_ids_present = (\n                self.get_all_comment_ids_for_given_submission_ids_in_db(\n                    submission_ids=submission_ids_present\n                )\n            )\n\n            self.logger.info(\"-- 8/9 Finding missing comment ids from pmaw..\")\n            all_comments, comments_exceptions = self.find_missing_comment_ids_from_pmaw(\n                submission_ids_to_be_refreshed=submission_ids_to_be_refreshed,\n                comment_ids_present=comment_ids_present,\n            )\n\n            self.logger.info(\"-- 9/9 Inserting all comments to db...\")\n            self.insert_all_comments_to_db_using_df_pmaw(df=pd.DataFrame(all_comments))\n        else:\n            self.logger.info(\"-- No submission ids to be refreshed...\")\n","repo_name":"tranquilo12/WSB","sub_path":"wsb.py","file_name":"wsb.py","file_ext":"py","file_size_in_byte":26994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"2392108762","text":"class Node:\r\n    def __init__(self, data):\r\n        self.data = data\r\n        self.next = None\r\n\r\nclass LinkedList:\r\n    def __init__(self):\r\n        self.head = None\r\n\r\n    def display(self):\r\n        temp = self.head\r\n        while temp is not None:\r\n            print(temp.data)\r\n            temp = temp.next\r\n\r\nlList = LinkedList()\r\nlList.head = Node(\"Chandigarh\")\r\nsecond = Node(\"Hyderabad\")\r\nthird = Node(\"Bangalore\")\r\n\r\nlList.head.next = second\r\nsecond.next = third\r\n\r\nlList.display()\r\n\r\n","repo_name":"saitejamarolix/linkedlists","sub_path":"traversingLinkedList.py","file_name":"traversingLinkedList.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"27563086980","text":"from selenium.common import TimeoutException\nfrom 
selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nDELAY = 0.5\n\ndef update_stack_capacity(driver, val):\n capacity_input = driver.find_element(value=\"capacityInput\")\n capacity_input.send_keys(val)\n\n\ndef click_stack_capacity(driver):\n capacity_button = driver.find_element(value=\"capacityButton\")\n capacity_button.click()\n time.sleep(DELAY)\n\n\ndef check_stack_capacity(driver, val):\n capacity_input = driver.find_element(value=\"capacityInput\")\n if capacity_input.text != val:\n raise Exception(\"Stack capacity not matched\")\n\n\ndef stack_ops_availability(driver, state):\n stack_ops = driver.find_element(value=\"stackOps\")\n is_stack_ops_visible = stack_ops.is_displayed()\n if is_stack_ops_visible != state:\n raise Exception(\"Stack operation not found\")\n\n\ndef stack_push(driver, val):\n push_input = driver.find_element(value=\"pushInput\")\n push_input.send_keys(val)\n push_button = driver.find_element(value=\"pushButton\")\n push_button.click()\n time.sleep(DELAY)\n\n\ndef click_stack_display(driver):\n display_button = driver.find_element(value=\"displayButton\")\n display_button.click()\n time.sleep(DELAY)\n\n\ndef click_stack_pop(driver):\n display_button = driver.find_element(value=\"popButton\")\n display_button.click()\n time.sleep(DELAY)\n\n\ndef verify_stack_values(driver, values):\n stack_values = driver.find_element(value=\"stackValuesLabel\")\n if \", \".join([str(val) for val in values]) != stack_values.text:\n raise Exception(\"stack content incorrect\")\n\n\ndef click_stack_reset(driver):\n reset_button = driver.find_element(value=\"resetButton\")\n reset_button.click()\n time.sleep(DELAY)\n\n\ndef check_alert_and_accept(driver):\n try:\n WebDriverWait(driver, 4).until(EC.alert_is_present(),\n 'Timed out waiting for PA creation confirmation popup to appear.')\n\n alert = driver.switch_to.alert\n alert.accept()\n except TimeoutException as error:\n print(\"no alert\")\n raise error\n","repo_name":"dareofmeashwani/StackApp","sub_path":"tests/selenium_util.py","file_name":"selenium_util.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6135128093","text":"from l10ntool import AbstractL10nTool\nfrom sdf import SdfEntity\nimport sys\nimport shutil\n\nclass Xtxex(AbstractL10nTool):\n _resource_type = \"xtx\"\n\n def __init__(self):\n AbstractL10nTool.__init__(self)\n\n def merge_file(self, inputfilename, outputfilename, parsed_file_ref, lang, is_forced_lang, sdfdata):\n # Special handling for en-US files\n if lang == \"en-US\":\n mod_outputfilename = outputfilename\n # mod here if needed\n self.copy_file(inputfilename, mod_outputfilename)\n return\n # merge usual lang\n sdfline = self.prepare_sdf_line(inputfilename,lang)\n if sdfline.get_id() in sdfdata:\n line = sdfdata[sdfline.get_id()].text.replace(\"\\\\n\", '\\n')\n self.make_dirs(outputfilename)\n try:\n f = open(outputfilename, \"w+\")\n f.write(line)\n except IOError:\n print(\"ERROR: Can not write file \" + outputfilename)\n sys.exit(-1)\n else:\n f.close()\n return\n # no sdf data found then copy en-US source file\n if is_forced_lang:\n self.copy_file(inputfilename, outputfilename)\n\n ##### Extract a single File\n def extract_file(self, inputfile):\n lines = []\n try:\n f = open(inputfile, \"r\")\n lines = f.readlines()\n except IOError:\n print(\"ERROR: Can not open file \" + inputfile)\n sys.exit(-1)\n else:\n 
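# else-branch of the try/except: the read raised no IOError, so the handle is safe to close\n            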
f.close()\n        # remove legal header\n        lines = [line for line in lines if len(line) > 0 and not line[0] == '#']\n        # escape all returns\n        lines = [line.replace('\\n', \"\\\\n\") for line in lines]\n        line = ''.join(lines)\n        test = str(line)\n        if len(test.strip()):\n            sdf_entity = self.prepare_sdf_line(inputfile)\n            sdf_entity.text = line\n            return str(sdf_entity)\n        else:\n            return \"\"\n\n    def prepare_sdf_line(self, inputfile=\"\", lang=\"\"):\n        if lang == \"\":\n            lang = self._source_language\n        return SdfEntity(project=self._options.project_name, source_file=self.get_filename_string(inputfile),\n                         resource_type=self._resource_type, gid=\"none\", lid=\"none\", langid=lang, text=\"\")\n\nrun = Xtxex()\n","repo_name":"apache/openoffice","sub_path":"main/l10ntools/scripts/tool/xtxex.py","file_name":"xtxex.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":834,"dataset":"github-code","pt":"78"}
{"seq_id":"72625309052","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import tree\n\ndef read_csv(file):\n    df = pd.read_csv('%s.csv' % (file), sep=';', index_col=0, encoding='ISO-8859-1')\n    df = df.reset_index()\n\n    return df\n\ndef convert_dataset(df):\n\n    label_enco = LabelEncoder()\n\n    df['País'] = label_enco.fit_transform(df['País'])\n    df['Tipo-viagem'] = label_enco.fit_transform(df['Tipo-viagem'])\n    df['Piscina'] = label_enco.fit_transform(df['Piscina'])\n    df['Academia'] = label_enco.fit_transform(df['Academia'])\n    df['Tenis'] = label_enco.fit_transform(df['Tenis'])\n    df['Spa'] = label_enco.fit_transform(df['Spa'])\n    df['Casino'] = label_enco.fit_transform(df['Casino'])\n    df['Internet'] = label_enco.fit_transform(df['Internet'])\n    df['Hotel'] = label_enco.fit_transform(df['Hotel'])\n\n    return df\n\ndef convert_input(value):\n\n    label_enco = LabelEncoder()\n\n    #label_enco.fit_transform()\n\ndef inverse_result(result):\n\n    label_enco = LabelEncoder()\n\n    label_enco.fit(['Circus Circus Hotel & Casino Las Vegas','Excalibur Hotel & Casino','Monte Carlo Resort&Casino','Treasure Island- TI Hotel & Casino',\n                    'Tropicana Las Vegas - A Double Tree by Hilton Hotel','Caesars Palace','The Cosmopolitan Las Vegas','The Palazzo Resort Hotel Casino',\n                    'Wynn Las Vegas','Trump International Hotel Las Vegas','The Cromwell','Encore at wynn Las Vegas','Hilton Grand Vacations on the Boulevard',\n                    \"Marriott's Grand Chateau\",'Tuscany Las Vegas Suites & Casino','Hilton Grand Vacations at the Flamingo','Wyndham Grand Desert',\n                    'The Venetian Las Vegas Hotel','Bellagio Las Vegas','Paris Las Vegas','The Westin las Vegas Hotel Casino & Spa'])\n\n    r = label_enco.inverse_transform(result)\n\n    return r\n\ndef predict_tree(X,y,input_):\n\n    clf = tree.DecisionTreeClassifier(criterion='gini', splitter='best')\n\n    pred = clf.fit(X,y).predict(input_)\n\n    return inverse_result(pred)\n\ndef split_dataset(df):\n\n    new_df = pd.DataFrame()\n\n    new_df['País'] = df['País']\n    new_df['Reviews'] = df['Reviews']\n    new_df['Hotel-reviews'] = df['Hotel-reviews']\n    new_df['Votes'] = df['Votes']\n    new_df['Score'] = df['Score']\n    new_df['Tipo-viagem'] = df['Tipo-viagem']\n    new_df['Piscina'] = df['Piscina']\n    new_df['Academia'] = df['Academia']\n    new_df['Tenis'] = df['Tenis']\n    new_df['Spa'] = df['Spa']\n    new_df['Casino'] = df['Casino']\n    new_df['Internet'] = df['Internet']\n    new_df['Estrelas'] = df['Estrelas']\n\n    train = new_df.values\n\n    target = df['Hotel'].values\n\n    return train,target\n\ndef main():\n    df 
= read_csv('hotel')\n    df = convert_dataset(df)\n    train,target = split_dataset(df)\n    pred = predict_tree(train,target,[[1,36,5,20,4,1,1,0,1,1,0,0,4]])\n    \n    print(pred)\n\n\nif __name__ == '__main__':\n    \n    main()\n    ","repo_name":"eriksonJAguiar/Data-Analytics","sub_path":"hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"13625449160","text":"import requests\nimport base64\nimport random\nfrom time import sleep\nfrom time import time\n\napi_url = 'http://127.0.0.1:5000'\n\n\ndef get_admin_token():\n    params = {'username': 'admin', 'password': 'password'}\n    resp = requests.get(url=api_url + '/api/v1/authenticate', params=params)\n    token_from_api = resp.json()['token']\n    return token_from_api\n\n\ndef get_header_bearer(token_for_auth):\n    header = {'Authorization': 'Bearer ' + token_for_auth}\n    return header\n\n\ndef list_devices(admin_token):\n    header = get_header_bearer(admin_token)\n    resp = requests.get(url=api_url + '/api/v1/device', headers=header)\n    devices_from_api = resp.json()\n    return devices_from_api\n\n\ndef add_device(admin_token):\n    header = get_header_bearer(admin_token)\n    resp = requests.post(url=api_url + '/api/v1/device/{}'.format('DemoDevice'),\n                         json={'data': {'password': '12345'},\n                               'properties': {'temperature': {'value': 0}}}, headers=header)\n    return resp.status_code\n\n\ndef update_property(device_properties, device_token):\n    header = get_header_bearer(device_token)\n    resp = requests.post(url=api_url + '/api/v1/device/DemoDevice/details', headers=header, json=device_properties)\n    return resp\n\n\ndef update_status(device_status, device_token):\n    header = get_header_bearer(device_token)\n    resp = requests.post(url=api_url + '/api/v1/device/DemoDevice/status', headers=header, json=device_status)\n    return resp\n\n\ndef get_token_device():\n    userpass = base64.b64encode(str.encode('{}:{}'.format('DemoDevice', '12345')))\n    header = {'Authorization': 'Basic ' + str(userpass)[2:-1]}\n    resp = requests.get(url=api_url + '/api/v1/device/DemoDevice/token', headers=header)\n    token_from_api = resp.json()['token']\n    return token_from_api\n\n\nprint('Logging in as an admin')\ntoken = get_admin_token()\nprint('Admin token: {}'.format(token))\n\nprint('Obtaining list of current devices from API')\ndevices = list_devices(token)\nprint('Devices:\\n{}'.format(devices))\n\n# We want to prevent attempting to add the same device twice if the script was run multiple times\ndevices_in_db = [item['name'] for item in devices]\nif 'DemoDevice' not in devices_in_db:\n    print('DemoDevice not found, adding to the database')\n    response_code = add_device(token)\n    print('Response code (200 if success): {}'.format(response_code))\nelse:\n    print('DemoDevice is already in the database! 
Skipping...')\n\nprint('Logging in as a device')\ntoken = get_token_device()\nprint('Device token: {}'.format(token))\n\n# we will put those into DemoDevice\nproperties_to_initialize = {\n    'temperature1': {\n        'value': 0,\n        'threshold': 50},\n    'temperature2': {\n        'value': 0,\n        'threshold': 50},\n    'temperature3': {\n        'value': 0,\n        'threshold': 50},\n    'temperature4': {\n        'value': 0,\n        'threshold': 50}\n}\n\nprint('Pushing blank properties into the device')\nresponse_code = update_property(properties_to_initialize, token)\nprint('Response code (200 if success): {}'.format(response_code))\n\n# Device needs to update all properties to easily accommodate the design\n# IAA currently does not care about the history\n# Note that if you want to set an old timestamp, you need to specify it:\nproperties_to_update = {\n    'temperature1': {\n        'value': 0},\n    'temperature2': {\n        'value': 0},\n    'temperature3': {\n        'value': 0},\n    'temperature4': {\n        'value': 0,\n        'timestamp': int(time()) - 60 * 60 * 24 * 1000}\n}\n\n# extract names of the properties\nkeys_to_update = list(properties_to_initialize.keys())\n# status list\ngeneral_status = [{'value': 'OK'},\n                  {'value': 'ERROR'},\n                  {'value': 'custom message'},\n                  {'value': 'OK', 'timestamp': int(time()) - 86400 * 365}]\ntry:\n    while True:\n        sleep(7)\n        # choose random property\n        key_to_update = keys_to_update[random.randint(0, len(keys_to_update) - 1)]\n        # choose random value\n        value = random.randint(0, 100)\n        # modify property dict\n        properties_to_update[key_to_update]['value'] = value\n        # update properties\n        update_property(properties_to_update, token)\n        # pick a new status\n        status = general_status[random.randint(0, len(general_status) - 1)]\n        # update status\n        update_status(status, token)\n        print('Property {} of DemoDevice was set to {} with general status: {}'.format(key_to_update, value, status))\nexcept KeyboardInterrupt:\n    pass\n","repo_name":"dabku/iamalive","sub_path":"client_example/client_example.py","file_name":"client_example.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31847049299","text":"from sqlalchemy import *\nfrom migrate import *\n\n\nfrom migrate.changeset import schema\npre_meta = MetaData()\npost_meta = MetaData()\nroom = Table('room', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('sortOrder', Integer),\n    Column('roomName', String(length=30)),\n)\n\ntime_slot = Table('time_slot', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('sortOrder', Integer),\n    Column('timeSlotName', String(length=30)),\n)\n\nschedule_slot = Table('schedule_slot', pre_meta,\n    Column('id', INTEGER, primary_key=True, nullable=False),\n    Column('talkId', INTEGER),\n    Column('timeSlot', VARCHAR(length=30)),\n    Column('room', VARCHAR(length=30)),\n)\n\nschedule_slot = Table('schedule_slot', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('talkId', Integer),\n    Column('timeSlotId', Integer),\n    Column('roomId', Integer),\n)\n\n\ndef upgrade(migrate_engine):\n    # Upgrade operations go here. 
Don't create your own engine; bind\n    # migrate_engine to your metadata\n    pre_meta.bind = migrate_engine\n    post_meta.bind = migrate_engine\n    post_meta.tables['room'].create()\n    post_meta.tables['time_slot'].create()\n    pre_meta.tables['schedule_slot'].columns['room'].drop()\n    pre_meta.tables['schedule_slot'].columns['timeSlot'].drop()\n    post_meta.tables['schedule_slot'].columns['roomId'].create()\n    post_meta.tables['schedule_slot'].columns['timeSlotId'].create()\n\n\ndef downgrade(migrate_engine):\n    # Operations to reverse the above upgrade go here.\n    pre_meta.bind = migrate_engine\n    post_meta.bind = migrate_engine\n    post_meta.tables['room'].drop()\n    post_meta.tables['time_slot'].drop()\n    pre_meta.tables['schedule_slot'].columns['room'].create()\n    pre_meta.tables['schedule_slot'].columns['timeSlot'].create()\n    post_meta.tables['schedule_slot'].columns['roomId'].drop()\n    post_meta.tables['schedule_slot'].columns['timeSlotId'].drop()\n","repo_name":"PghTechFest/pypghtechfest","sub_path":"db_repository/versions/005_migration.py","file_name":"005_migration.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21816886584","text":"def on_enter(event_data):\n    \"\"\" \"\"\"\n    pocs = event_data.model\n    pocs.next_state = 'sleeping'\n\n    pocs.say(\"Resetting the list of observations and doing some cleanup!\")\n\n    # Cleanup existing observations\n    try:\n        pocs.observatory.scheduler.reset_observed_list()\n    except Exception as e:  # pragma: no cover\n        pocs.logger.warning(f'Problem with cleanup: {e!r}')\n","repo_name":"panoptes/POCS","sub_path":"src/panoptes/pocs/state/states/default/housekeeping.py","file_name":"housekeeping.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"78"} +{"seq_id":"5952428597","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n    This implements the nwis data retrieval. \n    The data retrieval uses the already existing Python library from https://github.com/USGS-python/dataretrieval. \n    The original code is modified to collect hourly measurements and interact with the data retriever BMI. A validity check is added to inform BMI if there is a measurement. \n    This code is useful for extracting observations from the Nextgen framework.\n    \n    contact\n    ----------\n    fwolkeba@crimson.ua.edu\n    butlerz@oregonstate.edu\n    \n    Required inputs\n    ----------\n    site: USGS site ID e.g. 01123000\n    start: start of the requested data e.g. 2017-05-15\n    parameterCd:\"00060\", parameterCd is constant to collect only flow measurements \n    end: End of the requested data e.g. 
2017-05-15\n\n    outputs\n    ----------\n    Flow: Hourly flow measurement of the requested station\n    validity: Binary value indicating availability of measurement (0= no measurement, 1 there is a valid measurement)\n    Attributes\n\n    References\n    ----------\n    https://github.com/USGS-python/dataretrieval\n\n    \"\"\"\n\n\nimport dataretrieval.nwis as nwis\nimport pandas as pd\nimport numpy as np\npd.options.mode.chained_assignment = None\n\n\nclass USGS:\n\n    def __init__(self):\n        super(USGS, self).__init__()\n\n    def run_usgs(self, u):\n\n        # -------------------------------------------------------------------\n\n        sites = u.sites\n        start = u.start\n        end = u.end\n        site = nwis.get_record(sites=u.sites, parameterCd='00060',\n                               start=u.start, end=u.end)\n\n        # -------------------------------------------------------------------\n        # change the extracted values to data frame to do computation on\n        # -------------------------------------------------------------------\n\n        #site = pd.DataFrame(site_data[0])\n        site.reset_index(inplace=True)  # reset index to grab station date\n        site['datetime'] = pd.to_datetime(site['datetime'], utc=True,\n                format='%Y-%m-%d %H:%M:%S')  # transfer to utc so same time throughout\n        site_copy = site.copy()\n        site_copy['datetime'] = pd.to_datetime(site_copy['datetime'],\n                utc=True, format='%Y-%m-%d %H:%M:%S')  # convert datetime to average every hour\n        site_copy.index = site_copy['datetime']  # index so can pull date time in resample\n        site_avg = site_copy.select_dtypes(include=['float64', 'int64']).resample('H').mean()\n        #site_avg = site_copy.resample('H').mean()  # Average every hour based on datetime\n        site_avg.reset_index(inplace=True)  # reset index again to have datetime\n        site_avg.columns = ['Date', 'Flow']\n\n        # check validity of extracted data\n\n        site_avg.loc[site_avg['Flow'] >= 0, 'validity'] = 1  # if value positive, consider\n        site_avg.loc[site_avg['Flow'] < 0, 'validity'] = 0  # if less than zero, not realistic\n        site_avg.loc[site_avg['Flow'].isnull() == True, 'validity'] = 0  # if NaN not available\n\n        # Output results to csv file\n\n        #site_avg.to_csv('USGS_' + str(sites) + '_obs_streamflow.csv',\n        #                index=False)\n\n        # to check if the code runs on the framework\n\n        u.flow = site_avg['Flow']\n        u.validity = site_avg['validity']\n\n        return","repo_name":"NWC-CUAHSI-Summer-Institute/data_assimilation_with_bmi","sub_path":"observations/usgs.py","file_name":"usgs.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"27443925362","text":"import math\nimport os\nfrom math import floor\n\nimport pygame as pg\n\nfrom core.classes import WVec, BlockSelection, WBounds, PixVec\nfrom core.funcs import w_to_pix_shift, pix_to_w_shift, light_level_to_color_int\nfrom core.constants import BLOCK_PIX_SIZE, PLAYER_S_POS, FULLSCREEN, C_KEY, CAM_FPS, CAM_DEFAULT_SCALE, \\\n    CAM_SCALE_BOUNDS, DIR_TO_ANGLE, GUI_PATH, ACTION_MAX_DISTANCE, PIX_ORIGIN, HOTBAR_S_POS\n\n\nclass Camera:\n    _ZOOM_SPEED = 1.05\n    _VEL_DAMPING_FACTOR = 0.5\n    _POS_DAMPING_FACTOR = 0.1\n    _ZOOM_VEL_DAMPING_FACTOR = 0.1\n    _SCALE_COLLISION_DAMPING_FACTOR = 0.75\n\n    def __init__(self):\n        self._pos = WVec()\n        self._req_pos = WVec(self._pos)\n\n        self._vel = WVec()\n        self._req_vel = WVec(self._vel)\n\n        self._zoom_vel = 1.0\n        self._req_zoom_vel = 1.0\n\n        self._scale = CAM_DEFAULT_SCALE\n\n        if FULLSCREEN:\n            self._screen = pg.display.set_mode((0, 0), pg.FULLSCREEN)\n        else:\n            self._screen = pg.display.set_mode((1280, 720))\n        self._pix_size = 
PixVec(self._screen.get_size())\n\n self.selected_block_w_pos = WVec(self._pos)\n self.selected_space_w_pos = WVec(self._pos)\n self._block_selector_surf = pg.image.load(os.path.join(GUI_PATH, \"block_selector.png\")).convert()\n self._block_selector_space_only_surf = pg.image.load(os.path.join(GUI_PATH, \"block_selector_space_only.png\")).convert()\n\n # Surfs to reuse\n self._world_max_surf_scaled = pg.Surface((0, 0))\n self._player_surf_scaled = pg.Surface((0, 0))\n self._player_surf_scaled.set_colorkey(C_KEY)\n self._block_selector_surf_scaled = pg.Surface((0, 0))\n self._block_selector_surf_scaled.set_colorkey(C_KEY)\n\n self._clock = pg.time.Clock()\n self._font = pg.font.SysFont(pg.font.get_default_font(), 24)\n\n # ==== GET DATA ====\n\n @property\n def w_size(self):\n return self._pix_size / self._scale\n\n @property\n def w_view(self):\n \"\"\"World referred part of the world visible on screen. \"\"\"\n return WBounds(\n min=self._pos - self.w_size * PLAYER_S_POS,\n max=self._pos + self.w_size * (1-PLAYER_S_POS),\n )\n\n @property\n def _mouse_w_pos(self):\n mouse_pix_shift = PixVec(pg.mouse.get_pos())\n mouse_w_shift = pix_to_w_shift(\n mouse_pix_shift,\n PixVec(),\n self._pix_size,\n dest_pivot=self._pix_size * PLAYER_S_POS,\n scale=self._scale,\n )\n mouse_w_pos = mouse_w_shift + self._pos\n return mouse_w_pos\n\n @property\n def is_zooming(self):\n return not math.isclose(self._zoom_vel, 1)\n\n def _select_block(self, action_w_pos: WVec, world, *, substeps=5, max_rays=3) -> BlockSelection:\n \"\"\"Return selection based on player position and mouse position.\n Selection is one selected block and one selected space.\n \"\"\"\n\n if (self._mouse_w_pos - action_w_pos).norm() > ACTION_MAX_DISTANCE:\n return BlockSelection(None, None)\n\n selection: BlockSelection\n selection = world.get_block_pos_and_space_pos(\n action_w_pos,\n self._mouse_w_pos,\n ACTION_MAX_DISTANCE,\n substeps=substeps,\n max_rays=max_rays,\n )\n\n if selection.block_w_pos is not None:\n return selection\n\n selection = world.get_intersected_block_pos_and_space_pos(\n action_w_pos,\n self._mouse_w_pos,\n ACTION_MAX_DISTANCE,\n substeps=substeps,\n )\n\n return selection\n\n # ==== DRAW ====\n\n def draw_world(self, max_surf, max_view_pos: WVec):\n max_surf_scaled_pix_size = floor(PixVec(max_surf.get_size()) * (self._scale / BLOCK_PIX_SIZE))\n if self._world_max_surf_scaled.get_size() != max_surf_scaled_pix_size:\n self._world_max_surf_scaled = pg.transform.scale(self._world_max_surf_scaled, max_surf_scaled_pix_size)\n self._world_max_surf_scaled = pg.transform.scale(max_surf, max_surf_scaled_pix_size, self._world_max_surf_scaled)\n\n w_shift = max_view_pos - self._pos\n pix_shift = w_to_pix_shift(\n w_shift,\n max_surf_scaled_pix_size,\n self._pix_size,\n dest_pivot=self._pix_size * PLAYER_S_POS,\n scale=self._scale\n )\n\n self._screen.blit(self._world_max_surf_scaled, pix_shift)\n\n def draw_player(self, anim_surf, player_pos: WVec, sky_light):\n anim_surf.draw_and_tick(sky_light)\n\n surf_scaled_pix_size = floor(anim_surf.w_size * self._scale)\n if self._player_surf_scaled.get_size() != surf_scaled_pix_size:\n self._player_surf_scaled = pg.transform.scale(self._player_surf_scaled, surf_scaled_pix_size)\n self._player_surf_scaled = pg.transform.scale(anim_surf.surf, surf_scaled_pix_size, self._player_surf_scaled)\n\n w_shift = player_pos - self._pos\n pix_shift = w_to_pix_shift(\n w_shift,\n surf_scaled_pix_size,\n self._pix_size,\n source_pivot=surf_scaled_pix_size * PixVec(0.5, 0.0),\n 
dest_pivot=self._pix_size * PLAYER_S_POS,\n scale=self._scale\n )\n\n self._screen.blit(self._player_surf_scaled, pix_shift)\n\n def draw_block_selector(self, action_w_pos: WVec, world):\n selection: BlockSelection\n selection = self._select_block(action_w_pos, world)\n if selection.block_w_pos is None:\n self.selected_block_w_pos = None\n self.selected_space_w_pos = None\n return\n\n self.selected_block_w_pos = selection.block_w_pos\n self.selected_space_w_pos = self.selected_block_w_pos + selection.space_w_pos_shift\n\n surf_pix_size = floor(PixVec(self._scale, self._scale))\n if not selection.space_only:\n surf = self._block_selector_surf\n else:\n surf = self._block_selector_space_only_surf\n\n if self._block_selector_surf_scaled.get_size() != surf_pix_size:\n self._block_selector_surf_scaled = pg.transform.scale(self._block_selector_surf_scaled, surf_pix_size)\n self._block_selector_surf_scaled = pg.transform.scale(surf, surf_pix_size, self._block_selector_surf_scaled)\n self._block_selector_surf_scaled = pg.transform.rotate(self._block_selector_surf_scaled, DIR_TO_ANGLE[selection.space_w_pos_shift])\n\n w_shift = floor(self.selected_block_w_pos) - self._pos\n\n if selection.space_only:\n self.selected_block_w_pos = None\n\n pix_shift = w_to_pix_shift(\n w_shift,\n surf_pix_size,\n self._pix_size,\n source_pivot=PixVec(-1, 1),\n dest_pivot=self._pix_size * PLAYER_S_POS,\n scale=self._scale)\n\n self._screen.blit(self._block_selector_surf_scaled, pix_shift)\n\n def draw_hotbar(self, surf, pix_shift):\n self._screen.blit(surf, self._pix_size * (1-HOTBAR_S_POS) - pix_shift)\n\n def draw_debug_info(self):\n fps_surf = self._font.render(f\"{self._clock.get_fps():.1f}\", True, (255, 255, 255))\n self._screen.blit(fps_surf, (20, 20))\n\n def display_flip_and_clock_tick(self):\n pg.display.flip()\n self._clock.tick(CAM_FPS)\n\n # ==== REQUEST MOVEMENTS ====\n\n def req_zoom_in(self):\n self._req_zoom_vel = self._ZOOM_SPEED\n\n def req_zoom_out(self):\n self._req_zoom_vel = 1 / self._ZOOM_SPEED\n\n def req_zoom_stop(self):\n self._req_zoom_vel = 1.0\n\n def req_move(self, pos):\n self._req_vel = pos - self._pos\n\n # ==== APPLY MOVEMENTS ====\n\n def set_transforms(self, pos: WVec, vel: WVec = WVec()):\n self._pos = WVec(pos)\n self._req_pos = WVec(self._pos)\n\n self._vel = WVec(vel)\n self._req_vel = WVec(self._vel)\n\n def move(self, threshold=0.001):\n self._vel += (self._req_vel - self._vel) * self._VEL_DAMPING_FACTOR\n self._pos += self._vel * self._POS_DAMPING_FACTOR ** (1 / (1 + abs(self._vel)))\n # (1/(1+speed)) is 1 when speed is 0 and is 0 when speed is +inf.\n # This is so that the CAM_POS_DAMPING_FACTOR is only applied at low speeds.\n\n self._zoom_vel *= (self._req_zoom_vel / self._zoom_vel) ** self._ZOOM_VEL_DAMPING_FACTOR\n if CAM_SCALE_BOUNDS[0] > self._scale:\n self._zoom_vel = 1.0\n self._scale = CAM_SCALE_BOUNDS[0] * (1+threshold)\n if self._scale > CAM_SCALE_BOUNDS[1]:\n self._zoom_vel = 1.0\n self._scale = CAM_SCALE_BOUNDS[1] / (1+threshold)\n self._scale *= self._zoom_vel\n","repo_name":"AndreiToroplean/mineflat","sub_path":"graphics/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":8620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"20785570180","text":"import unittest as ut\n\ndef listNodeBuilder(numbers):\n nodes = [ListNode(x) for x in numbers]\n\n root = nodes[0]\n copy_root = root\n for x in range(1, len(nodes)):\n root.next = nodes[x]\n root = root.next\n\n return 
copy_root\n\nclass TestSolutionMethods(ut.TestCase):\n    def test_toNumber(self):\n        numbers = [ 2, 4, 3 ]\n        root = listNodeBuilder(numbers)\n        s = Solution()\n        self.assertEqual(342, s.toNumber(root))\n\n    def testAddTwoNumbers(self):\n        number1, number2 = listNodeBuilder([2, 4, 3]), listNodeBuilder([5, 6, 4])\n        result = listNodeBuilder([7, 0, 8])\n\n        s = Solution()\n\n        self.assertEqual(s.toNumber(result),\n                         s.toNumber(s.addTwoNumbers(number1, number2)))\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution:\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        total = str(self.toNumber(l1) + self.toNumber(l2))\n\n        node_element = [ListNode(x) for x in reversed(total)]\n        root = node_element[0]\n        copy_root = root\n        for x in range(1, len(node_element)):\n            root.next = node_element[x]\n            root = root.next\n\n        return copy_root\n\n    def toNumber(self, x):\n        number = 0\n        numbers = []\n        while x.next is not None:\n            numbers.append(x.val)\n            x = x.next\n        numbers.append(x.val) # add last one\n        numbers = reversed(numbers)\n        for x in numbers:\n            number = int(number) * 10 + int(x)\n\n        return number\n\nif __name__ == '__main__':\n    ut.main()","repo_name":"coding-and-trading/PracticeEveryDay","sub_path":"LeetCode/addTwoNumbers.py","file_name":"addTwoNumbers.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70180349694","text":"import pprint\n\nfrom pconf import Pconf\nimport os\n\nfrom ioflow.configure.get_configure_path_from_argv import get_configure_path_from_argv\n\n\ndef guess_configure_file_type(file_name):\n    file_extension_mapping = {\".json\": \"json\", \".yaml\": \"yaml\", \".yml\": \"yaml\"}\n\n    _, file_extension = os.path.splitext(file_name)\n\n    if file_extension in file_extension_mapping:\n        return file_extension_mapping[file_extension]\n\n    raise ValueError(file_extension)\n\n\ndef find_best_file_candidate(candidate_list):\n    for candidate in candidate_list:\n        if os.path.exists(candidate):\n            return candidate\n\n    return None\n\n\ndef read_configure(return_empty=False) -> dict:\n    # set return_empty to True to not read config from env\n    # which can prevent unexpected results\n    # e.g. './configure.json' is not for this app, but for some other use\n    if return_empty:\n        return {}\n\n    default_configure_candidate = [\n        \".\".join([\"./configure\", ext]) for ext in [\"yaml\", \"yml\", \"json\"]\n    ]\n    builtin_configure_candidate = [\n        \".\".join([\"./builtin_configure\", ext]) for ext in [\"yaml\", \"yml\", \"json\"]\n    ]\n\n    default_configure = find_best_file_candidate(default_configure_candidate)\n    builtin_configure = find_best_file_candidate(builtin_configure_candidate)\n\n    active_configure_file = get_configure_path_from_argv()\n    if not active_configure_file:\n        active_configure_file = os.getenv(\"_DEFAULT_CONFIG_FILE\", default_configure)\n\n    builtin_configure_file = os.getenv(\"_BUILTIN_CONFIG_FILE\", builtin_configure)\n\n    # Note: this is a safeguard, before using any Pconf function, do execute this\n    # In case former Pconf usage influences current usage\n    # which will lead to a hidden and weird bug\n    Pconf.clear()\n\n    # disable reading configure from environment\n    # Pconf.env()\n\n    active_configure_file_abs_path = os.path.realpath(active_configure_file)\n\n    if not os.path.exists(active_configure_file):\n        msg = \"default configure file is not found! 
CWD: {}; activate_config: {}; builtin_configure: {}\".format(\n            os.getcwd(), active_configure_file, builtin_configure_file\n        )\n        print(msg)\n        raise ValueError(msg)\n    else:\n        print(\n            \">>> Using configure read from file: {}\".format(\n                active_configure_file_abs_path\n            )\n        )\n\n    file_encoding = guess_configure_file_type(active_configure_file_abs_path)\n    Pconf.file(active_configure_file, file_encoding)\n\n    # try loading builtin configure file\n    if builtin_configure_file and os.path.exists(builtin_configure_file):\n        print(\"loading builtin configure from {}\".format(builtin_configure_file))\n        file_encoding = guess_configure_file_type(builtin_configure_file)\n        Pconf.file(builtin_configure_file, encoding=file_encoding)\n    else:\n        print(\">>> builtin configure file is not found!\")\n\n    # Get all the config values parsed from the sources\n    config = Pconf.get()\n\n    # NOTE: clean Pconf for later brand new use\n    Pconf.clear()\n\n    print(\"++\" * 8, \"configure\", \"++\" * 8)\n    pprint.pprint(config)\n\n    return config\n\n    # sys.exit(0)\n\n    # return {\n    #     'corpus': {\n    #         'train': './data/train.conllz',\n    #         'test': './data/test.conllz'\n    #     },\n    #     'model': {\n    #         'shuffle_pool_size': 10,\n    #         'batch_size': 32,\n    #         'epochs': 20,\n    #         'arch': {}\n    #     }\n    # }\n\n\nread_config = read_configure  # alias\n","repo_name":"hot-vs-cool/ioflow","sub_path":"ioflow/configure/read_configure.py","file_name":"read_configure.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70657497212","text":"import argparse\nimport os\nimport time\n\nimport nengo_dl\nfrom nengo.learning_rules import PES\nfrom nengo.params import Default\nfrom nengo.processes import WhiteSignal\nfrom sklearn.metrics import mean_squared_error\n\nfrom memristor_nengo.extras import *\nfrom memristor_nengo.learning_rules import mPES\n\nsetup()\n\n# Should not be useful for NengoDL>=3.3.0\n# tf.compat.v1.disable_eager_execution()\n# tf.compat.v1.disable_control_flow_v2()\n\nparser = argparse.ArgumentParser()\nparser.add_argument( \"-f\", \"--function\", default=\"x\",\n                     help=\"The function to learn. Default is x\" )\nparser.add_argument( \"-i\", \"--inputs\", default=[ \"sine\", \"sine\" ], nargs=\"*\", choices=[ \"sine\", \"white\" ],\n                     help=\"The input signals [learning, testing]. Default is sine\" )\nparser.add_argument( \"-t\", \"--timestep\", default=0.001, type=float )\nparser.add_argument( \"-S\", \"--simulation_time\", default=30, type=int )\nparser.add_argument( \"-N\", \"--neurons\", nargs=\"*\", default=[ 10 ], action=\"store\", type=int,\n                     help=\"The number of neurons used in the Ensembles [pre, post, error]. Default is 10\" )\nparser.add_argument( \"-D\", \"--dimensions\", default=3, type=int,\n                     help=\"The number of dimensions of the input signal\" )\nparser.add_argument( \"-n\", \"--noise\", nargs=\"*\", default=0.15, type=float,\n                     help=\"The noise on the simulated memristors [R_0, R_1, c, R_init] Default is 0.15\" )\nparser.add_argument( \"-g\", \"--gain\", default=1e4, type=float )  # default chosen by parameter search experiments\nparser.add_argument( \"-l\", \"--learning_rule\", default=\"mPES\", choices=[ \"mPES\", \"PES\" ] )\nparser.add_argument( \"-P\", \"--parameters\", default=Default, type=float,\n                     help=\"The parameters of simulated memristors. 
For now only the exponent c\" )\nparser.add_argument( \"-b\", \"--backend\", default=\"nengo_dl\", choices=[ \"nengo_dl\", \"nengo_core\" ] )\nparser.add_argument( \"-o\", \"--optimisations\", default=\"run\", choices=[ \"run\", \"build\", \"memory\" ] )\nparser.add_argument( \"-s\", \"--seed\", default=None, type=int )\nparser.add_argument( \"--plot\", default=0, choices=[ 0, 1, 2, 3 ], type=int,\n                     help=\"0: No visual output, 1: Show plots, 2: Save plots, 3: Save data\" )\nparser.add_argument( \"--verbosity\", default=2, choices=[ 0, 1, 2 ], type=int,\n                     help=\"0: No textual output, 1: Only numbers, 2: Full output\" )\nparser.add_argument( \"-pd\", \"--plots_directory\", default=\"../data/\",\n                     help=\"Directory where plots will be saved. Default is ../data/\" )\nparser.add_argument( \"-d\", \"--device\", default=\"/cpu:0\",\n                     help=\"/cpu:0 or /gpu:[x]\" )\nparser.add_argument( \"-lt\", \"--learn_time\", default=3 / 4, type=float )\nparser.add_argument( '--probe', default=1, choices=[ 0, 1, 2 ], type=int,\n                     help=\"0: probing disabled, 1: only probes to calculate statistics, 2: all probes active\" )\n\n# TODO read parameters from conf file https://docs.python.org/3/library/configparser.html\nargs = parser.parse_args()\nseed = args.seed\ntf.random.set_seed( seed )\nnp.random.seed( seed )\nfunction_string = \"lambda x: \" + args.function\nfunction_to_learn = eval( function_string )\nif len( args.inputs ) not in (1, 2):\n    parser.error( 'Either give no values for action, or two, not {}.'.format( len( args.inputs ) ) )\nif len( args.inputs ) == 1:\n    if args.inputs[ 0 ] == \"sine\":\n        input_function_train = input_function_test = Sines( period=4 )\n    if args.inputs[ 0 ] == \"white\":\n        input_function_train = input_function_test = WhiteSignal( period=60, high=5, seed=seed )\nif len( args.inputs ) == 2:\n    if args.inputs[ 0 ] == \"sine\":\n        input_function_train = Sines( period=4 )\n    if args.inputs[ 0 ] == \"white\":\n        input_function_train = WhiteSignal( period=60, high=5, seed=seed )\n    if args.inputs[ 1 ] == \"sine\":\n        input_function_test = Sines( period=4 )\n    if args.inputs[ 1 ] == \"white\":\n        input_function_test = WhiteSignal( period=60, high=5, seed=seed )\ntimestep = args.timestep\nsim_time = args.simulation_time\nif len( args.neurons ) not in range( 1, 4 ):\n    parser.error( 'Either give no values for action, or one, or three, not {}.'.format( len( args.neurons ) ) )\nif len( args.neurons ) == 1:\n    pre_n_neurons = post_n_neurons = error_n_neurons = args.neurons[ 0 ]\nif len( args.neurons ) == 2:\n    pre_n_neurons = error_n_neurons = args.neurons[ 0 ]\n    post_n_neurons = args.neurons[ 1 ]\nif len( args.neurons ) == 3:\n    pre_n_neurons = args.neurons[ 0 ]\n    post_n_neurons = args.neurons[ 1 ]\n    error_n_neurons = args.neurons[ 2 ]\ndimensions = args.dimensions\nnoise_percent = args.noise\ngain = args.gain\nexponent = args.parameters\nlearning_rule = args.learning_rule\nbackend = args.backend\noptimisations = args.optimisations\nprogress_bar = False\nprintlv1 = printlv2 = lambda *a, **k: None\nif args.verbosity >= 1:\n    printlv1 = print\nif args.verbosity >= 2:\n    printlv2 = print\n    progress_bar = True\nplots_directory = args.plots_directory\ndevice = args.device\nprobe = args.probe\ngenerate_plots = show_plots = save_plots = save_data = False\nif args.plot >= 1:\n    generate_plots = True\n    show_plots = True\n    probe = 2\nif args.plot >= 2:\n    save_plots = True\nif args.plot >= 3:\n    save_data = True\n\n# TODO give better names to folders or make hierarchy\nif save_plots or save_data:\n    dir_name, dir_images, 
dir_data = make_timestamped_dir( root=plots_directory + learning_rule + \"/\" )\n\nlearn_time = int( sim_time * args.learn_time )\nn_neurons = np.amax( [ pre_n_neurons, post_n_neurons ] )\nif optimisations == \"build\":\n    optimize = False\n    sample_every = timestep\n    simulation_discretisation = 1\nelif optimisations == \"run\":\n    optimize = True\n    sample_every = timestep\n    simulation_discretisation = 1\nelif optimisations == \"memory\":\n    optimize = False\n    sample_every = timestep * 100\n    simulation_discretisation = n_neurons\nprintlv2( f\"Using {optimisations} optimisation\" )\n\nmodel = nengo.Network( seed=seed )\nwith model:\n    nengo_dl.configure_settings( inference_only=True )\n    # Create an input node\n    input_node = nengo.Node(\n            output=SwitchInputs( input_function_train,\n                                 input_function_test,\n                                 switch_time=learn_time ),\n            size_out=dimensions\n            )\n    \n    # Shut off learning by inhibiting the error population\n    stop_learning = nengo.Node( output=lambda t: t >= learn_time )\n    \n    # Create the ensemble to represent the input, the learned output, and the error\n    pre = nengo.Ensemble( pre_n_neurons, dimensions=dimensions, seed=seed )\n    post = nengo.Ensemble( post_n_neurons, dimensions=dimensions, seed=seed )\n    error = nengo.Ensemble( error_n_neurons, dimensions=dimensions, radius=2, seed=seed )\n    \n    # Connect pre and post with a communication channel\n    # the matrix given to transform is the initial weights found in model.sig[conn][\"weights\"]\n    # the initial transform has no influence on learning because it is overwritten by mPES\n    # the only influence is on the very first timesteps, before the error becomes large enough\n    conn = nengo.Connection(\n            pre.neurons,\n            post.neurons,\n            transform=np.zeros( (post.n_neurons, pre.n_neurons) )\n            )\n    \n    # Apply the learning rule to conn\n    if learning_rule == \"mPES\":\n        conn.learning_rule_type = mPES(\n                noisy=noise_percent,\n                gain=gain,\n                seed=seed,\n                exponent=exponent )\n    if learning_rule == \"PES\":\n        conn.learning_rule_type = PES()\n    printlv2( \"Simulating with\", conn.learning_rule_type )\n    \n    # Provide an error signal to the learning rule\n    nengo.Connection( error, conn.learning_rule )\n    \n    # Compute the error signal (error = actual - target)\n    nengo.Connection( post, error )\n    \n    # Subtract the target (this would normally come from some external system)\n    nengo.Connection( pre, error, function=function_to_learn, transform=-1 )\n    \n    # Connect the input node to ensemble pre\n    nengo.Connection( input_node, pre )\n    \n    nengo.Connection(\n            stop_learning,\n            error.neurons,\n            transform=-20 * np.ones( (error.n_neurons, 1) ) )\n    \n    # essential ones are used to calculate the statistics\n    if probe > 0:\n        pre_probe = nengo.Probe( pre, synapse=0.01, sample_every=sample_every )\n        post_probe = nengo.Probe( post, synapse=0.01, sample_every=sample_every )\n    if probe > 1:\n        input_node_probe = nengo.Probe( input_node, sample_every=sample_every )\n        error_probe = nengo.Probe( error, synapse=0.01, sample_every=sample_every )\n        learn_probe = nengo.Probe( stop_learning, synapse=None, sample_every=sample_every )\n        weight_probe = nengo.Probe( conn, \"weights\", synapse=None, sample_every=sample_every )\n        post_spikes_probe = nengo.Probe( post.neurons, sample_every=sample_every )\n        if isinstance( conn.learning_rule_type, mPES ):\n            pos_memr_probe = nengo.Probe( conn.learning_rule, \"pos_memristors\", synapse=None,\n                                          sample_every=sample_every )\n            neg_memr_probe = nengo.Probe( conn.learning_rule, \"neg_memristors\", synapse=None,\n                                          sample_every=sample_every )\n\n# Create the 
Simulator and run it\nprintlv2( f\"Backend is {backend}, running on \", end=\"\" )\nif backend == \"nengo_core\":\n printlv2( \"CPU\" )\n cm = nengo.Simulator( model, seed=seed, dt=timestep, optimize=optimize, progress_bar=progress_bar )\nif backend == \"nengo_dl\":\n printlv2( device )\n cm = nengo_dl.Simulator( model, seed=seed, dt=timestep, progress_bar=progress_bar, device=device )\nstart_time = time.time()\nwith cm as sim:\n for i in range( simulation_discretisation ):\n printlv2( f\"\\nRunning discretised step {i + 1} of {simulation_discretisation}\" )\n sim.run( sim_time / simulation_discretisation )\nprintlv2( f\"\\nTotal time for simulation: {time.strftime( '%H:%M:%S', time.gmtime( time.time() - start_time ) )} s\" )\n\nif probe > 0:\n # essential statistics\n y_true = sim.data[ pre_probe ][ int( (learn_time / timestep) / (sample_every / timestep) ):, ... ]\n y_pred = sim.data[ post_probe ][ int( (learn_time / timestep) / (sample_every / timestep) ):, ... ]\n # MSE after learning\n printlv2( \"MSE after learning [f(pre) vs. post]:\" )\n mse = mean_squared_error( function_to_learn( y_true ), y_pred, multioutput='raw_values' )\n printlv1( mse.tolist() )\n # Correlation coefficients after learning\n correlation_coefficients = correlations( function_to_learn( y_true ), y_pred )\n printlv2( \"Pearson correlation after learning [f(pre) vs. post]:\" )\n printlv1( correlation_coefficients[ 0 ] )\n printlv2( \"Spearman correlation after learning [f(pre) vs. post]:\" )\n printlv1( correlation_coefficients[ 1 ] )\n printlv2( \"Kendall correlation after learning [f(pre) vs. post]:\" )\n printlv1( correlation_coefficients[ 2 ] )\n printlv2( \"MSE-to-rho after learning [f(pre) vs. post]:\" )\n printlv1( mse_to_rho_ratio( mse, correlation_coefficients[ 1 ] ) )\n\nif probe > 1:\n # Average\n printlv2( \"Weights average after learning:\" )\n printlv1( np.average( sim.data[ weight_probe ][ -1, ... 
] ) )\n \n # Sparsity\n printlv2( \"Weights sparsity at t=0 and after learning:\" )\n printlv1( gini( sim.data[ weight_probe ][ 0 ] ), end=\" -> \" )\n printlv1( gini( sim.data[ weight_probe ][ -1 ] ) )\n\nplots = { }\nif generate_plots and probe > 1:\n plotter = Plotter( sim.trange( sample_every=sample_every ), post_n_neurons, pre_n_neurons, dimensions,\n learn_time,\n sample_every,\n plot_size=(13, 7),\n dpi=300,\n pre_alpha=0.3\n )\n plots[ \"results_smooth\" ] = plotter.plot_results( sim.data[ input_node_probe ], sim.data[ pre_probe ],\n sim.data[ post_probe ],\n error=\n sim.data[ post_probe ] -\n function_to_learn( sim.data[ pre_probe ] ),\n smooth=True )\n plots[ \"results\" ] = plotter.plot_results( sim.data[ input_node_probe ], sim.data[ pre_probe ],\n sim.data[ post_probe ],\n error=\n sim.data[ post_probe ] -\n function_to_learn( sim.data[ pre_probe ] ),\n smooth=False )\n plots[ \"post_spikes\" ] = plotter.plot_ensemble_spikes( \"Post\", sim.data[ post_spikes_probe ],\n sim.data[ post_probe ] )\n plots[ \"weights\" ] = plotter.plot_weight_matrices_over_time( sim.data[ weight_probe ], sample_every=sample_every )\n \n plots[ \"testing_smooth\" ] = plotter.plot_testing( function_to_learn( sim.data[ pre_probe ] ),\n sim.data[ post_probe ],\n smooth=True )\n plots[ \"testing\" ] = plotter.plot_testing( function_to_learn( sim.data[ pre_probe ] ), sim.data[ post_probe ],\n smooth=False )\n if n_neurons <= 10 and learning_rule == \"mPES\":\n plots[ \"weights_mpes\" ] = plotter.plot_weights_over_time( sim.data[ pos_memr_probe ],\n sim.data[ neg_memr_probe ] )\n plots[ \"memristors\" ] = plotter.plot_values_over_time( sim.data[ pos_memr_probe ], sim.data[ neg_memr_probe ],\n value=\"resistance\" )\n\nif save_plots:\n assert generate_plots and probe > 1\n \n for fig in plots.values():\n fig.savefig( dir_images + str( i ) + \".pdf\" )\n # fig.savefig( dir_images + str( i ) + \".png\" )\n \n print( f\"Saved plots in {dir_images}\" )\n\nif save_data:\n save_weights( dir_data, sim.data[ weight_probe ] )\n print( f\"Saved NumPy weights in {dir_data}\" )\n \n save_results_to_csv( dir_data, sim.data[ input_node_probe ], sim.data[ pre_probe ], sim.data[ post_probe ],\n sim.data[ post_probe ] - function_to_learn( sim.data[ pre_probe ] ) )\n save_memristors_to_csv( dir_data, sim.data[ pos_memr_probe ], sim.data[ neg_memr_probe ] )\n print( f\"Saved data in {dir_data}\" )\n\n# TODO save output txt with metrics\n\nif show_plots:\n assert generate_plots and probe > 1\n \n for fig in plots.values():\n fig.show()\n","repo_name":"thomastiotto/Learning-to-approximate-functions-using-niobium-doped-strontium-titanate-memristors","sub_path":"experiments/mPES.py","file_name":"mPES.py","file_ext":"py","file_size_in_byte":14882,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"11985141024","text":"#!/usr/bin/env python\n\nimport math\nimport mpmath\nimport numpy as np\nfrom context import floripy\nfrom floripy.mathutils.xform import *\n\n\n#mpmath.mp.dps = 128\npi = math.pi\n\n\ndef get_force_torque_prolate(a, c, d, u_inf, omega_inf, E_inf):\n if c == 'zero':\n print('Reduction of prolate to a line not possible.')\n return float('nan'), float('nan')\n e = math.sqrt((a+c)*(a-c))/a\n e2 = e*e\n e3 = e2*e\n e5 = e3*e2\n one_m_e2 = 1.0 - e2\n one_p_e2 = 1.0 + e2\n two_m_e2 = 2.0 - e2\n two_e2_m_three = 2.0*e2 - 3.0\n three_m_e2 = 3.0 - e2\n three_e2_m_one = 3.0*e2 - 1.0\n three_m_five_e2 = 3.0 - 5.0*e2\n\n L = math.log1p(e) - math.log1p(-e)\n XA = 
(8.0/3)*e3/(-2*e + one_p_e2*L)\n YA = (16.0/3)*e3/(2*e + three_e2_m_one*L)\n XC = (4.0/3)*e3*one_m_e2/(2*e - one_m_e2*L)\n YC = (4.0/3)*e3*two_m_e2/(-2*e + one_p_e2*L)\n YH = (4.0/3)*e5/(-2*e + one_p_e2*L)\n\n epsilon = np.zeros((3,3,3))\n epsilon[1,2,0] = 1.0\n epsilon[2,1,0] = -1.0\n epsilon[0,2,1] = -1.0\n epsilon[2,0,1] = 1.0\n epsilon[0,1,2] = 1.0\n epsilon[1,0,2] = -1.0\n\n dd = np.outer(d,d)\n I = np.identity(3)\n A = 6*pi*a*(XA*dd + YA*(I-dd))\n force = np.dot(A, u_inf)\n C = 8*pi*(a**3)*(XC*dd + YC*(I-dd))\n torque = np.dot(C, omega_inf) - 8*pi*a**3*YH*np.einsum('ijl,l,k,jk',\n epsilon, d, d, E_inf)\n return force, torque\n\n\ndef get_force_torque_oblate(a, c, d, u_inf, omega_inf, E_inf):\n if c == 'zero':\n XA = 8/(3*pi)\n YA = 16/(9*pi)\n XC = 4/(3*pi)\n YC = 4/(3*pi)\n YH = -4/(3*pi)\n else:\n e = math.sqrt((a+c)*(a-c))/a\n e2 = e*e\n e3 = e2*e\n e5 = e2*e3\n one_m_e2 = 1.0 - e2\n two_m_e2 = 2.0 - e2\n sqrt_one_m_e2 = math.sqrt(one_m_e2)\n C = math.atan(e/sqrt_one_m_e2)\n XA = (4.0/3)*e3/((2*e2-1)*C + e*sqrt_one_m_e2)\n YA = (8.0/3)*e3/((2*e2+1)*C - e*sqrt_one_m_e2)\n XC = (2.0/3)*e3/(C - e*sqrt_one_m_e2)\n YC = (2.0/3)*e3*two_m_e2/(e*sqrt_one_m_e2 - (1-2*e2)*C)\n YH = (-2.0/3)*e5/(e*sqrt_one_m_e2 - (1-2*e2)*C)\n\n epsilon = np.zeros((3,3,3))\n epsilon[1,2,0] = 1.0\n epsilon[2,1,0] = -1.0\n epsilon[0,2,1] = -1.0\n epsilon[2,0,1] = 1.0\n epsilon[0,1,2] = 1.0\n epsilon[1,0,2] = -1.0\n\n dd = np.outer(d,d)\n I = np.identity(3)\n K = 6*pi*a*(XA*dd + YA*(I-dd))\n force = np.dot(K, u_inf)\n Omega_0 = 8*pi*(a**3)*(XC*dd + YC*(I-dd))\n torque = np.dot(Omega_0, omega_inf) - 8*pi*a**3*YH*np.einsum('ijl,l,k,jk',\n epsilon, d, d, E_inf)\n return force, torque\n\n\ndef get_force_torque_ellipsoid(a, b, c, dcm, u_inf, omega_inf, E_inf):\n if c == 'zero' and b == 'zero':\n print('RF and RJ ill-defined for b = c = 0.')\n return float('nan'), float('nan')\n asq = a**2\n bsq = b**2\n if c == 'zero':\n csq = 0.0\n else:\n csq = c**2\n\n asq_alpha_1 = (2*asq/3.0)*mpmath.elliprj(asq, bsq, csq, asq)\n bsq_alpha_2 = (2*bsq/3.0)*mpmath.elliprj(asq, bsq, csq, bsq)\n if c == 'zero':\n csq_alpha_3 = 0.0\n else:\n csq_alpha_3 = (2*csq/3.0)*mpmath.elliprj(asq, bsq, csq, csq)\n \n chi = 2*mpmath.elliprf(asq, bsq, csq)\n\n A = np.zeros((3,3))\n C = np.zeros((3,3))\n H_tilde = np.zeros((3,3,3))\n\n A[0,0] = 16*pi/(chi + asq_alpha_1)\n A[1,1] = 16*pi/(chi + bsq_alpha_2)\n A[2,2] = 16*pi/(chi + csq_alpha_3)\n A = shift_tensor2_dcm(A, dcm, forward=False)\n\n C[0,0] = (16*pi/3)*(bsq+csq)/(bsq_alpha_2 + csq_alpha_3)\n C[1,1] = (16*pi/3)*(csq+asq)/(asq_alpha_1 + csq_alpha_3)\n C[2,2] = (16*pi/3)*(asq+bsq)/(asq_alpha_1 + bsq_alpha_2)\n C = shift_tensor2_dcm(C, dcm, forward=False)\n\n H_tilde[0,1,2] = (16*pi/3)*bsq/(bsq_alpha_2 + csq_alpha_3)\n H_tilde[0,2,1] = -(16*pi/3)*csq/(bsq_alpha_2 + csq_alpha_3)\n H_tilde[1,2,0] = (16*pi/3)*csq/(asq_alpha_1 + csq_alpha_3)\n H_tilde[1,0,2] = -(16*pi/3)*asq/(asq_alpha_1 + csq_alpha_3)\n H_tilde[2,0,1] = (16*pi/3)*asq/(asq_alpha_1 + bsq_alpha_2)\n H_tilde[2,1,0] = -(16*pi/3)*bsq/(asq_alpha_1 + bsq_alpha_2)\n H_tilde = shift_tensor3_dcm(H_tilde, dcm, forward=False)\n\n force = np.dot(A, u_inf)\n torque = np.dot(C, omega_inf) + np.einsum('ijk,jk', H_tilde, E_inf)\n return force, torque\n\n\naxis, angle = get_rand_axis_angle()\ndcm = axis_angle_to_dcm(axis, angle)\n#dcm = np.identity(3)\n\n#Streaming Velocity\nu_inf = np.random.random((3,))\n#u_inf = np.array([1, 0, 0]) #np.random.random((3,))\n#omega_inf = np.zeros((3,))\n#E_inf = np.zeros((3,3))\nomega_inf = 
np.random.random((3,))\nE_inf = np.random.random((3,3))\nE_inf = (E_inf + E_inf.T)/2\n\nprint('Velocity field')\nprint('--------------')\nprint('Streaming velocity: ', u_inf)\nprint()\nprint('Angular velocity: ', omega_inf)\nprint()\nprint('Strain rate tensor: ', E_inf[0,:])\nprint(' ', E_inf[1,:])\nprint(' ', E_inf[2,:])\nprint()\n\nprint('Direction cosine matrix: ', dcm[0,:])\nprint(' ', dcm[1,:])\nprint(' ', dcm[2,:])\nprint()\n\n#Ellipsoid/sphere\nprint('Ellipsoid --> Sphere')\nprint('=============================\\n')\na = 0.5\nb = 0.5\nc = 0.5\n\nforce, torque = get_force_torque_ellipsoid(a, b, c, dcm, u_inf,\n omega_inf, E_inf)\nprint('Ellipsoid ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\nforce = 6*pi*a*u_inf\ntorque = 8*pi*a**3*omega_inf\nprint('Sphere ({0})'.format(a))\nprint('-------------------------')\nprint('Force (6*pi*r*u_inf): ', force)\nprint('Torque (8*pi*r^3*omega_inf): ', torque)\nprint()\n\n#Ellipsoid/oblate\nprint('Ellipsoid --> Oblate')\nprint('=============================\\n')\na = 0.5\nb = 0.5\nc = 0.15\n\nforce, torque = get_force_torque_ellipsoid(a, b, c, dcm, u_inf,\n omega_inf, E_inf)\nprint('Ellipsoid ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\nd = rotate_vector_dcm(np.array([0,0,1]), dcm)\nforce, torque = get_force_torque_oblate(a, c, d, u_inf,\n omega_inf, E_inf)\nprint('Oblate ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\n#Ellipsoid/prolate\nprint('Ellipsoid --> Prolate')\nprint('=============================\\n')\na = 1.0\nb = 0.5\nc = 0.5\n\nforce, torque = get_force_torque_ellipsoid(a, b, c, dcm, u_inf,\n omega_inf, E_inf)\nprint('Ellipsoid ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\nd = rotate_vector_dcm(np.array([1,0,0]), dcm)\nforce, torque = get_force_torque_prolate(a, c, d, u_inf,\n omega_inf, E_inf)\nprint('Prolate ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\n\n#Elliptic disc/circular disc\nprint('Elliptic disc --> Circular disc')\nprint('=============================\\n')\na = 0.5\nb = 0.5\nc = 'zero'\n\nforce, torque = get_force_torque_ellipsoid(a, b, c, dcm, u_inf,\n omega_inf, E_inf)\nprint('Elliptic disc ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\nd = rotate_vector_dcm(np.array([0,0,1]), dcm)\nforce, torque = get_force_torque_oblate(a, c, d, u_inf,\n omega_inf, E_inf)\nprint('Circular disc ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\n\n#Ellipsoid/needle\nprint('Ellipsoid --> Needle')\nprint('=============================\\n')\na = 1.0\nb = 'zero'\nc = 'zero'\n\nforce, torque = get_force_torque_ellipsoid(a, b, c, dcm, u_inf,\n omega_inf, E_inf)\nprint('Ellipsoid ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', torque)\nprint()\n\nd = rotate_vector_dcm(np.array([1,0,0]), dcm)\nforce, torque = get_force_torque_prolate(a, c, d, u_inf,\n omega_inf, E_inf)\nprint('Needle ({0}, {1}, {2})'.format(a,b,c))\nprint('-------------------------')\nprint('Force: ', force)\nprint('Torque: ', 
torque)\nprint()\n\n\n","repo_name":"saridut/FloriPy","sub_path":"hydrodynamics/ellipsoid_rf.py","file_name":"ellipsoid_rf.py","file_ext":"py","file_size_in_byte":8165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74755801212","text":"from pyrogram.types import (\r\n InlineKeyboardButton,\r\n InlineKeyboardMarkup,\r\n InlineQueryResultPhoto,\r\n)\r\nfrom youtubesearchpython.__future__ import VideosSearch\r\n\r\nfrom AnonXMusic import app\r\nfrom AnonXMusic.utils.inlinequery import answer\r\nfrom config import BANNED_USERS\r\n\r\n\r\n@app.on_inline_query(~BANNED_USERS)\r\nasync def inline_query_handler(client, query):\r\n text = query.query.strip().lower()\r\n answers = []\r\n if text.strip() == \"\":\r\n try:\r\n await client.answer_inline_query(query.id, results=answer, cache_time=10)\r\n except:\r\n return\r\n else:\r\n a = VideosSearch(text, limit=20)\r\n result = (await a.next()).get(\"result\")\r\n for x in range(15):\r\n title = (result[x][\"title\"]).title()\r\n duration = result[x][\"duration\"]\r\n views = result[x][\"viewCount\"][\"short\"]\r\n thumbnail = result[x][\"thumbnails\"][0][\"url\"].split(\"?\")[0]\r\n channellink = result[x][\"channel\"][\"link\"]\r\n channel = result[x][\"channel\"][\"name\"]\r\n link = result[x][\"link\"]\r\n published = result[x][\"publishedTime\"]\r\n description = f\"{views} | {duration} ᴍɪɴᴜᴛᴇs | {channel} | {published}\"\r\n buttons = InlineKeyboardMarkup(\r\n [\r\n [\r\n InlineKeyboardButton(\r\n text=\"ʏᴏᴜᴛᴜʙᴇ 🎄\",\r\n url=link,\r\n )\r\n ],\r\n ]\r\n )\r\n searched_text = f\"\"\"\r\n❄ ᴛɪᴛʟᴇ : {title}\r\n\r\n⏳ ᴅᴜʀᴀᴛɪᴏɴ : {duration} ᴍɪɴᴜᴛᴇs\r\n👀 ᴠɪᴇᴡs : {views}\r\n🎥 ᴄʜᴀɴɴᴇʟ : {channel}\r\n⏰ ᴘᴜʙʟɪsʜᴇᴅ ᴏɴ : {published}\r\n\r\n\r\n➻ ɪɴʟɪɴᴇ sᴇᴀʀᴄʜ ᴍᴏᴅᴇ ʙʏ {app.name}\"\"\"\r\n answers.append(\r\n InlineQueryResultPhoto(\r\n photo_url=thumbnail,\r\n title=title,\r\n thumb_url=thumbnail,\r\n description=description,\r\n caption=searched_text,\r\n reply_markup=buttons,\r\n )\r\n )\r\n try:\r\n return await client.answer_inline_query(query.id, results=answers)\r\n except:\r\n return\r\n","repo_name":"AnonymousX1025/AnonXMusic","sub_path":"AnonXMusic/plugins/bot/inline.py","file_name":"inline.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"78"} +{"seq_id":"26364810411","text":"class kontak:\n def __init__(self, name, nomer):\n self.name = name\n self.nomer = nomer\n\np1 = kontak(\"Nama : Johni\", \"nomer : 08123456789\")\np2 =kontak(\"Nama : rian\", \"nomer : 08987654321\")\n\nnamaKontak = ['Naufal', 'Hazim']\nnoTelepon = ['08123456789', '08987654321']\n\ndef tambahKontak(): \n namaKontak.append(input('Nama: '))\n noTelepon.append(input('No Telepon: '))\n print('Kontak berhasil ditambahkan')\n\n\nprint('Selamat datang!')\nwhile True:\n print('---Menu---')\n print('1. Daftar Kontak')\n print('2. Tambah Kontak')\n print('3. 
Keluar')\n menu = int(input('Pilih Menu: '))\n if menu == 1:\n print(p1.name)\n print(p1.nomer)\n print(p2.name)\n print(p2.nomer)\n elif menu == 2:\n tambahKontak()\n elif menu == 3:\n print('Program selesai, sampai jumpa!')\n break\n else:\n print('Masukkan yang bener dong saya pusing nih')","repo_name":"Mohammadiqbalmaulana2001/tugas-uts-mohammad-iqbal-maulana","sub_path":"uts.py","file_name":"uts.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"ms","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21735890692","text":"import pandas as pd\r\n\r\n\r\ndef get_number_of_samples(dataset):\r\n \"\"\"Get the number of samples held in a dataset.\"\"\"\r\n keys = list(dataset.inputs.keys())\r\n if not keys:\r\n raise AssertionError('Dataset has no inputs!')\r\n\r\n first_set = dataset.inputs[keys[0]]\r\n if hasattr(first_set, 'shape'):\r\n return first_set.shape[0]\r\n return len(first_set)\r\n\r\n\r\ndef get_number_of_outputs(output_data):\r\n \"\"\"Get the number of output variables for a given output array.\"\"\"\r\n if not hasattr(output_data, 'shape'):\r\n raise AttributeError(\r\n 'Output data types must have attribute \"shape\".'\r\n )\r\n\r\n if len(output_data.shape) == 1:\r\n return 1\r\n\r\n return output_data.shape[1]\r\n\r\n\r\ndef split_data(data, indices):\r\n \"\"\"Get the data only at the given indices.\r\n\r\n Args:\r\n data: array-like\r\n indices: list-like of int\r\n\r\n Returns:\r\n array-like\r\n \"\"\"\r\n if isinstance(data, pd.DataFrame):\r\n return data.iloc[indices, :]\r\n elif isinstance(data, list):\r\n return [data[i] for i in indices]\r\n return data[indices]\r\n\r\n\r\ndef split_data_dict(data_dict, indices):\r\n \"\"\"Get the data only at the give indices for a dict of arrays.\r\n\r\n Args:\r\n data_dict: a dict with array-like values\r\n indices: the indices to grab\r\n\r\n Returns:\r\n dict\r\n \"\"\"\r\n if data_dict is None:\r\n return None\r\n return {\r\n key: split_data(d, indices)\r\n for key, d in data_dict.items()\r\n }\r\n","repo_name":"lopez86/DataTools","sub_path":"data_tools/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15933966809","text":"# Tool to build streama project reading commands from file and copy to ftp once completed and notify user over mail\r\nfrom utils import *\r\nimport os, sys, getopt, re\r\nimport logging\r\n# To test /home/si-vm/poky/poky/build/tmp/log/cooker/qemux86-64/console-latest.log\r\nbitbake_str = \"bitbake\"\r\ncleanall_str = \" -c cleanall\"\r\ntmpfile = \"handle_errors.sh\"\r\nrecipe_file = \"recipes.txt\"\r\nsource_cmd = ''\r\nlogger = ''\r\nlog_file=\"log_file\"\r\n\r\ndef tail(f, window=1):\r\n \"\"\"\r\n Returns the last `window` lines of file `f` as a list of bytes.\r\n \"\"\"\r\n if window == 0:\r\n return b''\r\n BUFSIZE = 1024\r\n f.seek(0, 2)\r\n end = f.tell()\r\n nlines = window + 1\r\n data = []\r\n while nlines > 0 and end > 0:\r\n i = max(0, end - BUFSIZE)\r\n nread = min(end, BUFSIZE)\r\n\r\n f.seek(i)\r\n chunk = f.read(nread)\r\n data.append(chunk)\r\n nlines -= chunk.count(b'\\n')\r\n end -= nread\r\n return b'\\n'.join(b''.join(reversed(data)).splitlines()[-window:])\r\n\r\ndef handle_knowerror(filename,recipe_filename):\r\n global logger\r\n status = 0\r\n\r\n logger.info(\"handle_knowerror: Reading the file to grab the errors File: \" + filename )\r\n\r\n #Read last 100 lines for error lookup\r\n with 
open(filename, 'rb') as f:\r\n        last_lines = tail(f, 100).decode('utf-8')\r\n        logger.info(\"handle_knowerror: \" + last_lines )\r\n\r\n    #Find the errors to clean bitbake\r\n    error_list = []\r\n    for line in last_lines.splitlines():\r\n        match = re.search(\"ERROR: Task.* failed with exit code\",line)\r\n        if match != None:\r\n            fname = os.path.basename(line.split(\" \")[2])\r\n            error_list.append(fname.split(\".\")[0])\r\n\r\n\r\n    logger.info(\"handle_knowerror: Error list: \" + str(len(error_list)))\r\n    logger.info(error_list)\r\n\r\n    if len(error_list) != 0:\r\n        error_list = list(set(error_list))\r\n        #recipe_file_o = os.path.join(os.getcwd(), recipe_file)\r\n        recipe_file_o = recipe_filename\r\n        recipe_list = []\r\n        if os.path.exists(recipe_file_o):\r\n            recipe_list = extract_recipe_name(recipe_file_o,error_list)\r\n        else:\r\n            logger.info(\"handle_knowerror: Recipe file does not exist \" + recipe_file_o)\r\n\r\n        if len(recipe_list) > 0:\r\n            logger.info(\"handle_knowerror: Recipe filtering from the database\")\r\n            logger.info(recipe_list)\r\n\r\n            #delete if file exist\r\n            tmpfile_o = os.path.join(os.getcwd(), tmpfile)\r\n            if os.path.exists(tmpfile_o):\r\n                os.remove(tmpfile_o)\r\n            logger.info(\"handle_knowerror: Writing the bitbake clean commands to file : \" + tmpfile_o)\r\n            os.umask(0)\r\n            with open(os.open(tmpfile_o, os.O_CREAT | os.O_WRONLY, 0o755), \"w\") as f:\r\n                #f.write(source_cmd + os.linesep)\r\n                #f.write(\"source meta-rdk-mcg-hpr0a/setup-environment\" + os.linesep)\r\n                for error in recipe_list:\r\n                    command_to_execute = ''\r\n                    command_to_execute = bitbake_str + \" \" + error + cleanall_str + os.linesep\r\n                    f.write(command_to_execute)\r\n        else:\r\n            status = 1\r\n            logger.error(\"handle_knowerror: No match found for the comparison on the recipe.txt status: \"+ str(status))\r\n    else:\r\n        status = 1\r\n        logger.error(\"handle_knowerror: No match found for the logs file: \" + filename + \" Status: \" + str(status))\r\n\r\n    if 0 == status:\r\n        return status,tmpfile_o\r\n    else:\r\n        return status,\"\"\r\n\r\n\r\ndef usage():\r\n    print(\"Usage: ./main.py -f -ftp <0 or 1>\")\r\n    print(\"      |filename : error file to look for error\")\r\n    print(\"      |ftp      : 1 to upload the image to ftp, default is 0 \")\r\n    print(\"      |h        : print usage\")\r\n    exit(1)\r\n\r\ndef main():\r\n    global logger\r\n    error_filename =''\r\n    recipename_filename = ''\r\n    status_code = -1\r\n    marker = 0\r\n    #print(len(sys.argv))\r\n    if len(sys.argv) < 5 :\r\n        usage()\r\n\r\n    opts, args = getopt.getopt(sys.argv[1:], \"r:h:f:ftp:\")\r\n    for o, a in opts:\r\n        if o == '-f':\r\n            error_filename = a\r\n            marker = 1\r\n        elif o == '-ftp':\r\n            marker = 2\r\n        elif o == '-r':\r\n            recipename_filename = a\r\n\r\n    access = 0o755\r\n    #log folder\r\n    log_folder = os.path.join(os.getcwd(), \"logs\")\r\n    if not os.path.exists(log_folder):\r\n        os.mkdir(log_folder, access)\r\n\r\n    log_folder = os.path.join(log_folder, append_timestamp(log_file))\r\n    log_folder = log_folder + \".txt\"\r\n\r\n    # Create and configure logger\r\n    logging.basicConfig(filename= log_folder,\r\n                        format='%(asctime)s %(message)s',\r\n                        filemode='w')\r\n\r\n    # Creating an object\r\n    logger = logging.getLogger()\r\n\r\n    # Setting the threshold of logger to DEBUG\r\n    logger.setLevel(logging.DEBUG)\r\n    if os.path.exists(error_filename):\r\n        logger.info(\"main: error filename: \" + error_filename + \" Recipe filename: \" + recipename_filename)\r\n        status_code , command_file = handle_knowerror(error_filename,recipename_filename)\r\n        logger.info(\"main: handle_knowerror returncode: \" + str(status_code) + \" 
Filename: \" + \\\r\n \"None\" if command_file==\"\" else command_file)\r\n print(command_file)\r\n exit(status_code)\r\n else:\r\n logger.info(\"handle_knowerror: Error file not found \" + error_filename)\r\n\r\n exit(1)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"BharathSundaram/StreamaBT","sub_path":"pythonsrc/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20491316522","text":"import APIHandler\n\n\"\"\"\nFields:\n:id: course ID (eg: ENGL101)\n:name: course name\n:description: course description\n:credits: course credits\n:gen_ed: list of gen eds the course fulfills\n:dept_id: the first part of the course id (eg: ENGL)\n:prereqs: course prerequisites\n\"\"\"\nclass Course:\n def __init__(self, course_id):\n API = APIHandler.API()\n course_info = API.get_course_by_id(course_id)\n\n self.id = course_info[0]['course_id']\n self.name = course_info[0][\"name\"]\n self.description = course_info[0][\"description\"]\n self.credits = int(course_info[0]['credits'])\n self.gen_ed = course_info[0][\"gen_ed\"]\n self.dept_id = course_info[0][\"dept_id\"]\n self.prereqs = []\n if course_info[0]['relationships']['prereqs'] != None:\n raw_prereqs = course_info[0]['relationships']['prereqs']\n id_indices = []\n for i in range(len(raw_prereqs)):\n if raw_prereqs[i].isdigit() and raw_prereqs[i - 1].isalpha():\n id_indices.append(i)\n for id_index in id_indices:\n self.prereqs.append(raw_prereqs[id_index - 4 : id_index + 3])\n\n def __eq__(self, __o: object) -> bool:\n if type(__o) is Course:\n return self.id == __o.id\n elif type(__o) is str:\n return self.id == __o\n else:\n return False\n\n def __repr__(self) -> str:\n return self.id\n","repo_name":"abarkker/OpenSourcery2022","sub_path":"course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"31002348820","text":"import os\r\nfrom tkinter import *\r\nimport time\r\nfrom datetime import date\r\nimport math\r\nimport webbrowser\r\nimport random\r\n##\r\n\r\ndef movieslist():\r\n f=open(\"Project1.txt\")\r\n m=f.read()\r\n print(m)\r\n\r\n\r\n\r\n##\r\ndef seereview():\r\n f=open(\"addreview.txt\",'r')\r\n print(f.read())\r\n n=int(input(\"Enter 999 to go back\"))\r\n while n!=999:\r\n n=int(input(\"Plz enter a specific value(999 for back)\"))\r\n\r\n mrt()\r\n \r\n \r\n \r\n##\r\ndef addreview():\r\n f=open(\"addreview.txt\",'a')\r\n print(\"Please type your review about the movie:\")\r\n s=\"\"\r\n name=input(\"Enter your name:\")\r\n s+=(name+\":\\n\")\r\n m=input(\"Line 1: (999 for previous page)\\n\")\r\n \r\n i=2\r\n while m!='999':\r\n print(\"Line \",i,\": (999 for previous page)\")\r\n m=input()\r\n s+=(m+\"\\n\")\r\n i+=1\r\n s=s[0:len(s)-4]\r\n s+=\"\\n\"\r\n f.write(s)\r\n print(\"Review Added !!\\n\")\r\n print(\"Redirecting to HomePage ......\")\r\n time.sleep(2)\r\n home()\r\n\r\n##\r\n\r\n \r\n \r\n \r\n \r\n \r\n## \r\ns=[\"www.youtube.com/watch?v=vOCM9wztBYQ\",\"www.youtube.com/watch?v=zAGVQLHvwOY\",\"www.youtube.com/watch?v=KyhrrdpA2YA\",\"www.youtube.com/watch?v=GWxF84mWfxs\",\"www.youtube.com/watch?v=tsxemFX0a7k\"] \r\ndef trailer():\r\n a=int(input(\"1.Asuran (90%)\\n2.Joker (88%)\\n3.Sye Raa (85%)\\n4.Pailwaan (83%)\\n5.Chichchore (88%)\\n6.Back to Previous Page\\n\"))\r\n if a==1 or a==2 or a==3 or a==4 or a==5:\r\n 
webbrowser.open(s[a-1],new=0)#Trailer\r\n mrt()\r\n elif a==6:\r\n mrt()\r\n \r\n \r\n \r\n## \r\ndef mrt():\r\n print(\"MOVIES TRAILERS AND REVIEWS\\n\\n1.Movie Trailer\\n2.Movie Review\\n3.Back to HomePage\")\r\n n=int(input())\r\n if n==1:\r\n trailer()\r\n elif n==2:\r\n n2=int(input(\"1.User Reviews\\n2.Add Review\\n3.Back\"))\r\n if n2==1:\r\n seereview()\r\n elif n2==2:\r\n addreview()\r\n elif n2==3:\r\n mrt()\r\n elif n==3:\r\n home()\r\n \r\n \r\n \r\n \r\n## \r\ndef confirm():\r\n \r\n n=int(input(\"Enter Movie to book : \"))\r\n \r\n if n==1:\r\n print(\"Asuran (U/A)\\nAction, Drama\\nCast : Dhanush,Manju Warrier,Ken Karunas,Teejay,Prakash Raj,Pasupathy,Naren,Pawan\\nDirector : Vetri Maaran\\nMusic Composer : G V Prakash Kumar\\nLanguage : Tamil, Malayalam\\nDuration : 2 hrs 21 mins\")\r\n print(\"\\nTrailer - \")\r\n s=int(input(\"Enter 0 to confirm and 999 to previous page:\\n\"))\r\n if s==0:\r\n book(n)\r\n elif s==999:\r\n movieslist()\r\n confirm()\r\n elif n==2:\r\n print(\"Joker (A)\\nCrime,Fantasy,Thriller\\nCast : Joaquin, Robert, Zazie, Brian Tyree\\nDirector : Todd Philips\\nMusic Composer : Hildur\\nLanguage : English\\nDuration : 2 hrs 16 mins\")\r\n s=int(input(\"Enter 0 to confirm and 999 to previous page:\\n\"))\r\n if s==0:\r\n book(n)\r\n elif s==999:\r\n movieslist()\r\n confirm()\r\n elif n==3:\r\n print(\"SyeRaa Narasimha Reddy (U)\\nAction,Drama,Historical\\nCast : Chiranjeevi,Kicha Sudeep, Vijay Sethupathi,Jagapati Babu,Nayantara\\nDirector : Surender Reddy\\nMusic Composer : Amit Trivedi\\nLanguage : Telugu,Kannada,Tamil,Hindi\\nDuration : 3 hrs 5 mins\")\r\n s=int(input(\"Enter 0 to confirm and 999 to previous page:\\n\"))\r\n if s==0:\r\n book(n)\r\n elif s==999:\r\n movieslist()\r\n confirm()\r\n elif n==4:\r\n print(\"Pailwaan (U/A)\\nAction, Drama\\nCast : Kicha Sudeep,Sunil Shetty,Sushant Singh,Akansha Singh\\nDirector : S Krishna \\nMusic Composer : Arjun Janya\\nLanguage : Kannada, Telugu, Tamil, Malayalam\\nDuration : 2 hrs 46 mins\")\r\n s=int(input(\"Enter 0 to confirm and 999 to previous page:\\n\"))\r\n if s==0:\r\n book(n)\r\n elif s==999:\r\n movieslist()\r\n confirm()\r\n elif n==5:\r\n print(\"Chichhorre (U)\\nComedy,Romantic,Drama\\nCast : Sushant Singh, Shradda Kapoor, Varun Sharma, Tushar Pandey\\nDirector : Nithesh Tiwari\\nMusic Composer : Pritam\\nLanguage : Hindi\\nDuration : 2 hrs 29 mins\")\r\n s=int(input(\"Enter 0 to confirm and 999 to previous page:\"))\r\n if s==0:\r\n book(n)\r\n elif s==999:\r\n movieslist()\r\n confirm()\r\n elif n==999:\r\n home()\r\n \r\n \r\n##\r\nmlist=[\"Asuran\",\"Joker\",\"Sye Raa Narasimha Reddy\",\"Pailwaan\",\"Chichchore\"]\r\ntkprice=[250,450,300,300,400]\r\nslotlist=[\"7:00 am to 10:15 am\",\"11:00 am to 02:15 pm\",\"3:00 pm to 06:15 pm\",\"7:00 pm to 10:15 pm\"]\r\ndef book(temp):\r\n tseats=\"\"\r\n n=temp\r\n fprice=0\r\n pc=tkprice[n-1]+150\r\n gc=tkprice[n-1]\r\n sc=tkprice[n-1]-100\r\n slot=int(input(\"enter your slot :\\n1.07:00 am to 10:15 am\\n2.11:00 am to 02:15 pm\\n3.03:00 pm to 06:15 pm\\n4.07:00 pm to 10:15 pm\\n\"))\r\n nseats=int(input(\"Enter required number of seats :\"))\r\n print(\"Platinum Class -> Rs.\",pc,\"\\nGold Class -> Rs.\",gc,\"\\nSilver Class -> Rs.\",sc,\"\\nEnter P/G/S :\")\r\n classtype=input()\r\n if classtype in 'pP':\r\n fprice=pc\r\n elif classtype in 'gG':\r\n fprice=gc\r\n elif classtype in 'sS':\r\n fprice=sc\r\n seatselection(nseats)\r\n print(\"Please enter the seats you wish to book :\")\r\n seattemp=nseats\r\n while seattemp:\r\n sn=input(\"Enter 
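The per-movie branches in `confirm()` differ only in the details string they print, so the repetition can be collapsed into a lookup table. A minimal sketch of that refactor; the abbreviated detail strings here are placeholders drawn from the prompts above:

```python
# Table-driven variant of the confirm() menu; details abbreviated.
MOVIE_DETAILS = {
    1: "Asuran (U/A) | Tamil, Malayalam | 2 hrs 21 mins",
    2: "Joker (A) | English | 2 hrs 16 mins",
    3: "SyeRaa Narasimha Reddy (U) | Telugu/Kannada/Tamil/Hindi | 3 hrs 5 mins",
    4: "Pailwaan (U/A) | Kannada/Telugu/Tamil/Malayalam | 2 hrs 46 mins",
    5: "Chichhore (U) | Hindi | 2 hrs 29 mins",
}

def confirm_menu(choice):
    if choice not in MOVIE_DETAILS:
        return None  # caller falls back to the home screen
    print(MOVIE_DETAILS[choice])
    return choice

confirm_menu(2)
```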
Seat Number :\")\r\n tseats+=(sn+\" \")\r\n seattemp-=1\r\n \r\n fprice=fprice*nseats\r\n fpricetax=fprice*0.18\r\n total=fprice+fpricetax\r\n invoice(nseats,total,n,tseats,slot)\r\n\r\n\r\n##\r\n\r\ndef seatselection(nseats):\r\n n=nseats\r\n \r\n s=[' * ',' $ ']\r\n seatrow=['A','','B','','C','','D','','E','','F','','G','','H','','I','','J','']\r\n seatcolumn=range(1,21)\r\n seats=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]\r\n print(\"A to J - Rows 1 to 20 - Columns\")\r\n for i in range(20):\r\n for j in seatcolumn:\r\n if i%2==0:\r\n if j<10:\r\n seats[i].append(seatrow[i]+\" \"+str(j))\r\n else:\r\n seats[i].append(seatrow[i]+str(j))\r\n else:\r\n seats[i].append(random.choice(s))\r\n\r\n for i in range(len(seats)):\r\n if i%2==0:\r\n print(seats[i])\r\n else:\r\n print(seats[i],'\\n')\r\n\r\n print(\" * -> Unbooked\\n $ -> Booked\")\r\n\r\n \r\n\r\n \r\n##\r\ndef invoice(n,t,mvn,ts,sl):\r\n slot=sl\r\n tseats=ts\r\n nseats=n\r\n gtotal=t\r\n ntotal=gtotal/1.18\r\n gst=gtotal-ntotal\r\n moviename=mlist[mvn-1]\r\n print(\"\\n\"*10)\r\n \r\n print(\" RND CINEMAS\")\r\n print(\"Date/Time :\",time.ctime())\r\n print(\"Ticket No:\",random.randint(1,850),\" Screen No:\",random.randint(1,5))\r\n print(\"-----------------------------------\\n\")\r\n print(\"Movie : \",moviename)\r\n print(\"Slot : \",slotlist[slot-1])\r\n print(\"Number of seats : \",nseats)\r\n print(\"Seats : \",tseats)\r\n print(\"Net Total : Rs.\",round(ntotal,2))\r\n print(\"GST 18 % : Rs.\",round(gst,2))\r\n print(\"Grand Total : Rs.\",round(gtotal,2))\r\n print(\"-----------------------------------\")\r\n print(\"\\n\\n\")\r\n\r\n q=int(input(\"1.Print Receipt\\n2.Cancel (move to HomePage)\"))\r\n if q==1:\r\n time.sleep(1)\r\n print(\"Sending details to Printer - EPSON ML 380 Series\")\r\n time.sleep(1)\r\n print(\"Printing .....\")\r\n time.sleep(3)\r\n print(\"Thank you for Booking ! Enjoying watching the movie and dont forget to rate us !\")\r\n time.sleep(1)\r\n print(\"Redirecting to HomePage ......\\n\")\r\n time.sleep(2)\r\n home()\r\n elif q==2:\r\n print(\"Redirecting to HomePage ......\\n\")\r\n time.sleep(2)\r\n home()\r\n\r\n##\r\ndef snacks():\r\n pricesnacks=0\r\n popcorn=[160,180,190]\r\n pizza=[130,180]\r\n burger=[80,120]\r\n kfc=[300,650]\r\n beverage=[40,95,40,90]\r\n l=[]\r\n lp=[]\r\n \r\n print(\"Welcome to RND Food Corner !! 
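The ticket total in `book()` is price-per-seat times the seat count plus 18% GST. A small worked example of that arithmetic, factored into a helper (the helper name is mine, not the script's):

```python
# Ticket arithmetic from book(): net, tax and grand total for, e.g.,
# 3 Gold-class Joker seats at Rs. 450 each.
def ticket_total(price_per_seat, seats, gst_rate=0.18):
    net = price_per_seat * seats
    return net, net * gst_rate, net * (1 + gst_rate)

net, gst, total = ticket_total(450, 3)
print(net, gst, total)  # 1350, 243.0, 1593.0
```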
\\n\\n\")\r\n    print(\"What would you like to have ?\")\r\n    n=int(input(\"1.Popcorn\\n2.Pizza\\n3.Burger\\n4.Chicken(KFC)\\n5.Beverages\\n6.Back to homepage\"))\r\n    while n!=6:\r\n        if n==1:\r\n            n1=int(input(\"  Types      Small    Big\\n1.Salty - Rs.160   Rs.210\\n2.Cheesy - Rs.180   Rs.240\\n3.Caramel - Rs.190   Rs.270\\n4.Back\\n\"))\r\n            n1size=int(input(\"1.Big\\n2.Small\"))\r\n            if n1==1:\r\n                if n1size==1:\r\n                    pricesnacks+=(popcorn[0]+50)\r\n                    l.append(\"Big Salty Popcorn\")\r\n                    lp.append(popcorn[0]+50)\r\n                else:\r\n                    pricesnacks+=popcorn[0]\r\n                    l.append(\"Small Salty Popcorn\")\r\n                    lp.append(popcorn[0])\r\n            elif n1==2:\r\n                if n1size==1:\r\n                    pricesnacks+=(popcorn[1]+60)\r\n                    l.append(\"Big Cheesy Popcorn\")\r\n                    lp.append(popcorn[1]+60)\r\n                else:\r\n                    pricesnacks+=popcorn[1]\r\n                    l.append(\"Small Cheesy Popcorn\")\r\n                    lp.append(popcorn[1])\r\n            elif n1==3:\r\n                if n1size==1:\r\n                    pricesnacks+=(popcorn[2]+80)\r\n                    l.append(\"Big Caramel Popcorn\")\r\n                    lp.append(popcorn[2]+80)\r\n                else:\r\n                    pricesnacks+=popcorn[2]\r\n                    l.append(\"Small Caramel Popcorn\")\r\n                    lp.append(popcorn[2])\r\n            elif n1==4:\r\n                n=int(input(\"1.Popcorn\\n2.Pizza\\n3.Burger\\n4.Chicken(KFC)\\n5.Beverages\\n6.Back to homepage\"))\r\n\r\n\r\n        elif n==2:\r\n            n2=int(input(\"  Types         Small    Large \\n1.Veg Pizza - Rs.130   Rs.200\\n2.Chicken Pizza - Rs.180   Rs.240\\n3.Back\"))\r\n            n2size=int(input(\"1.Small\\n2.Large\"))\r\n            if n2==1:\r\n                if n2size==1:\r\n                    pricesnacks+=pizza[0]\r\n                    l.append(\"Veg Pizza Small\")\r\n                    lp.append(pizza[0])\r\n\r\n                else:\r\n                    pricesnacks+=(pizza[0]+70)\r\n                    l.append(\"Veg Pizza Big\")\r\n                    lp.append(pizza[0]+70)\r\n            elif n2==2:\r\n                if n2size==1:\r\n                    pricesnacks+=pizza[1]\r\n                    l.append(\"Chicken Pizza Small\")\r\n                    lp.append(pizza[1])\r\n                else:\r\n                    pricesnacks+=(pizza[1]+60)\r\n                    l.append(\"Chicken Pizza Big\")\r\n                    lp.append(pizza[1]+60)\r\n            elif n2==3:\r\n                snacks()\r\n\r\n        elif n==3:\r\n            n3=int(input(\"  Types          Small    Large \\n1.Veg Burger - Rs.80   Rs.100\\n2.Chicken Burger - Rs.120   Rs.150\\n3.Back\"))\r\n            n3size=int(input(\"1.Small\\n2.Large\"))\r\n            if n3==1:\r\n                if n3size==1:\r\n                    pricesnacks+=burger[0]\r\n                    l.append(\"Veg Burger Small\")\r\n                    lp.append(burger[0])\r\n                else:\r\n                    pricesnacks+=(burger[0]+20)\r\n                    l.append(\"Veg Burger Big\")\r\n                    lp.append(burger[0]+20)\r\n            elif n3==2:\r\n                if n3size==1:\r\n                    pricesnacks+=burger[1]\r\n                    l.append(\"Chicken Burger Small\")\r\n                    lp.append(burger[1])\r\n                else:\r\n                    pricesnacks+=(burger[1]+30)\r\n                    l.append(\"Chicken Burger Big\")\r\n                    lp.append(burger[1]+30)\r\n            elif n3==3:\r\n                snacks()\r\n\r\n        elif n==4:\r\n            n4=int(input(\"  Type      Rate  \\n1.4 legs - Rs.300\\n2.10 legs - Rs.650\"))\r\n            if n4==1:\r\n                pricesnacks+=kfc[0]\r\n                l.append(\"KFC Chicken 4 legs\")\r\n                lp.append(kfc[0])\r\n            else:\r\n                pricesnacks+=kfc[1]\r\n                l.append(\"KFC Chicken 10 legs\")\r\n                lp.append(kfc[1])\r\n\r\n        elif n==5:\r\n            n5=int(input(\"  Type      Rate  \\n1.Pepsi (200ml) - Rs.40\\n2.Pepsi (900ml) - Rs.95\\n3.Coke (250ml) - Rs.40\\n4.Coke (900ml) - Rs.90\\n5.Back\"))\r\n            if n5==1:\r\n                pricesnacks+=beverage[n5-1]\r\n                l.append(\"Pepsi (200ml)\")\r\n                lp.append(beverage[0])\r\n            elif n5==2:\r\n                pricesnacks+=beverage[n5-1]\r\n                l.append(\"Pepsi (900ml)\")\r\n                lp.append(beverage[1])\r\n            elif n5==3:\r\n                pricesnacks+=beverage[n5-1]\r\n                l.append(\"Coke (250ml)\")\r\n                lp.append(beverage[2])\r\n            elif n5==4:\r\n                pricesnacks+=beverage[n5-1]\r\n                l.append(\"Coke (900ml)\")\r\n                lp.append(beverage[3])\r\n            elif n5==5:\r\n                snacks()\r\n        print(\"Items added :\")\r\n        for i in range(len(l)):\r\n            print(i+1,\".\",l[i],\" - Rs.\",lp[i])\r\n
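The deeply nested if/elif ladders in `snacks()` can be flattened into a price table keyed by item, kind and size. A minimal sketch using prices taken from the prompts above (only a subset of the menu shown):

```python
# Data-driven variant of the snack menu; prices copied from the prompts.
SNACK_PRICES = {
    ("popcorn", "salty", "small"): 160, ("popcorn", "salty", "big"): 210,
    ("popcorn", "cheesy", "small"): 180, ("popcorn", "cheesy", "big"): 240,
    ("pizza", "veg", "small"): 130, ("pizza", "veg", "big"): 200,
    ("burger", "veg", "small"): 80, ("burger", "veg", "big"): 100,
}

def add_item(order, category, kind, size):
    price = SNACK_PRICES[(category, kind, size)]
    order.append((f"{kind} {category} ({size})", price))
    return price

order = []
add_item(order, "popcorn", "cheesy", "big")
add_item(order, "pizza", "veg", "small")
print(order, sum(p for _, p in order))  # total 370
```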
choice=int(input(\"1.Continue to buy further\\n2.Terminate\"))\r\n if choice==1:\r\n n=int(input(\"1.Popcorn\\n2.Pizza\\n3.Burger\\n4.Chicken(KFC)\\n5.Beverages\\n6.Back to homepage\"))\r\n continue\r\n else:\r\n break\r\n snacksbill(l,lp,pricesnacks)\r\n\r\n##\r\ndef snacksbill(templ,templp,temppricesnacks):\r\n l=templ\r\n lp=templp\r\n pricesnacks=temppricesnacks\r\n \r\n print(\" RND Food Corner\")\r\n print(\" taste never ends....\")\r\n print(\"--------------------------------------------------\")\r\n print(\"Date / Time:\",time.ctime())\r\n print(\"Invoice:\",random.randint(450,950))\r\n print(\"--------------------------------------------------\")\r\n for i in range(len(l)):\r\n print(i+1,\".\",l[i],\" - Rs.\",lp[i])\r\n print(\" Net : Rs.\",round(pricesnacks,2))\r\n print(\" GST(18%): Rs. \",round(pricesnacks*0.18,2))\r\n print(\" Total: Rs.\",round(pricesnacks*1.18,2),\"\\n\\n\")\r\n print(\"--------------------------------------------------\")\r\n \r\n choice=int(input(\"1.Print Receipt\\n2.Cancel and Re-order\\n3.Back to Homepage\"))\r\n if choice==1:\r\n time.sleep(1)\r\n print(\"Sending details to Printer - EPSON ML 380 Series\")\r\n time.sleep(1)\r\n print(\"Printing .....\")\r\n time.sleep(3)\r\n print(\"Thank you for Ordering ! Enjoying watching the movie and dont forget to rate us !\")\r\n time.sleep(1)\r\n print(\"Redirecting to HomePage ......\\n\")\r\n time.sleep(2)\r\n home()\r\n elif choice==2:\r\n print(\"Current order cancelled !\\nRedirecting to Snacks Page ...\\n\")\r\n time.sleep(2)\r\n snacks()\r\n elif choice==3:\r\n print(\"Redirecting to HomePage ......\\n\")\r\n time.sleep(3)\r\n home()\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n#### \r\ndef home():\r\n print(\" Welcome to RND CINEMAS !!\")\r\n print(\" get into reality.....\")\r\n print(\"\\n\"*2)\r\n n=int(input(\"1.Book a Movie Ticket\\n2.Movie Reviews and Trailers\\n3.Snacks and Refreshments\\n4.Exit\\n\"))\r\n if n==1:\r\n movieslist()\r\n confirm()\r\n elif n==2:\r\n mrt()\r\n elif n==3:\r\n snacks()\r\n elif n==4:\r\n print(\"Thank you :) Hope you enjoy using our app !\")\r\n \r\n\r\n\r\nhome()\r\n\r\n\r\n","repo_name":"dheerajsreddy/Movie-Booking","sub_path":"Project1.py","file_name":"Project1.py","file_ext":"py","file_size_in_byte":15952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1400418738","text":"import pandas\r\nimport turtle\r\nfrom quizz import StatesQuiz\r\n\r\nTITLE = \"U.S. 
States Game\"\r\nIMAGE_PATH = \"./blank_states_img.gif\"\r\nMISSED_STATES = \"./missed_states.csv\"\r\nRESOLUTION = (500, 725)\r\n\r\ndef screen_setup():\r\n screen.title(TITLE)\r\n screen.addshape(IMAGE_PATH)\r\n screen.setup(height = RESOLUTION[0], width = RESOLUTION[1])\r\n turtle.shape(IMAGE_PATH)\r\n\r\n\r\nscreen = turtle.Screen()\r\nscreen_setup()\r\nquiz = StatesQuiz()\r\nis_game_on = True\r\n\r\nwhile is_game_on:\r\n my_answer = screen.textinput(title = \"Guess the State\", \r\n prompt = f\"{quiz.no_correct_answers}/{quiz.number_of_all_states} States Correct\").title()\r\n if my_answer == \"Exit\":\r\n break\r\n quiz.answer(my_answer)\r\n is_game_on = quiz.continue_game()\r\n\r\nmissed_answers = pandas.Series(quiz.missed_states()) \r\nmissed_answers.to_csv(MISSED_STATES)\r\n\r\n#getting coordinates from a map\r\n#def get_mouse_click_cor(x,y):\r\n# print(x,y)\r\n#turtle.onscreenclick(get_mouse_click_cor)\r\n\r\nturtle.mainloop()\r\n\r\n","repo_name":"Aevise/Python-projects","sub_path":"PY_USStatesQuizz/PY_USStatesQuizz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5589313705","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 20 16:00:51 2020\r\n\r\n@author: nacer\r\n\"\"\"\r\n\r\nimport os\r\n\r\n## Global Vars\r\n\r\n#Path of input files\r\nPATH = \"./\"\r\n\r\n#List all input file names\r\nFILES = []\r\nfor r, d, f in os.walk(PATH):\r\n for file in f:\r\n if '.txt' in file and \"solution\" not in file:\r\n FILES.append(os.path.join(file))\r\n\r\n''' \r\n Choose the files we'll work with\r\n 1 - a_example.txt\r\n 2 - b_read_on.txt\r\n 3 - c_incunabula.txt\r\n 4 - d_tough_choices.txt\r\n 5 - e_so_many_books.txt\r\n 6 - f_libraries_of_the_world.txt\r\n'''\r\nfor i in range(len(FILES)):\r\n print(i+1,\"-\",FILES[i])\r\nchoice = int(input(\"Choose a file : \"))\r\nFILE = FILES[choice-1]\r\n\r\ndef read_input(file):\r\n f = open(file,\"r\")\r\n text = f.read()\r\n lines = text.split(\"\\n\")\r\n \r\n B,L,D = lines[0].split(\" \")\r\n BL = list( map(int, lines[1].split(\" \")))\r\n LL= []\r\n for i in range(int(L)):\r\n LL += [ list(map(int,lines[2*i+2].split(\" \"))) + [ list(map(int,lines[2*i+3].split(\" \"))) ] ]\r\n return int(B),int(L),int(D), BL, LL\r\n\r\nB,L,D,BL,LL = read_input(FILE)\r\n\r\n# Building a better BL list\r\nBS = [list(range(len(BL))),BL]\r\nBS = list(zip(*BS))\r\nBS.sort(key=lambda x: x[1])\r\nBS = list(zip(*BS))\r\n\r\ndef scorify_library(library):\r\n \"\"\"\r\n The aim is to give the libraries a score, that will enable to order them later on\r\n \"\"\"\r\n NB = library[0]\r\n BD = library[2]\r\n SB = library_total_book_score(library)\r\n DR = library[1]\r\n library_scoring = (D - DR) * BD * (SB/NB)\r\n return library_scoring\r\n\r\n\r\n\r\ndef library_total_book_score(library):\r\n book_ids = library[3]\r\n total_library_book_score = 0\r\n for id in book_ids:\r\n total_library_book_score += BL[id]\r\n return total_library_book_score\r\n\r\n\r\n# Scores --> list of tuple (id lib, score)\r\nscores = []\r\nfor i in range(len(LL)):\r\n scores += [( i, scorify_library(LL[i]) ) ]\r\nscores.sort(key=lambda tup: tup[1])\r\n\r\ndef compute_available_days():\r\n available_libraries = []\r\n availability_day = 0\r\n while len(scores)>0:\r\n library_id_score = scores.pop()\r\n library_id = library_id_score[0]\r\n DR = LL[library_id][1]\r\n availability_day += DR\r\n if availability_day > D:\r\n continue\r\n else:\r\n entry = 
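`scorify_library` above weights each library by days remaining after sign-up, ship rate, and mean book score: `(D - DR) * BD * (SB / NB)`. A worked numeric example of that heuristic with made-up inputs:

```python
# Worked example of the library score: (days left after signup) *
# (books shipped per day) * (average book score).
D = 100                       # total days available
signup_days, ship_rate = 7, 3 # DR and BD for one library
book_scores = [50, 40, 10]    # SB = 100 over NB = 3 books

score = (D - signup_days) * ship_rate * (sum(book_scores) / len(book_scores))
print(score)  # 93 * 3 * 33.33... = 9300.0
```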
(library_id,availability_day)\r\n            available_libraries.append(entry)\r\n    return available_libraries\r\n\r\nAvL = compute_available_days() # Availability of libraries following the scoring\r\n\r\ndef available_libs(d):\r\n    AvLD = []\r\n    day = AvL[0][1]\r\n    i = 0\r\n    ln = len(AvL)\r\n    while i<ln and AvL[i][1]<=d:\r\n        AvLD += [AvL[i]]\r\n        day = AvL[i][1]\r\n        i+=1\r\n    return AvLD\r\n\r\nScB = [] #Scanned books\r\nScBpL = [] #Scanned books per lib\r\nLL2 = LL.copy()\r\n\r\ndef available_books(n_lib):\r\n    return len(LL2[n_lib][3])\r\n\r\nfor d in range(D):\r\n    print(\"Day %d/%d\\t\\t%.2f%%\" % (d,D,d/D*100))\r\n    AvLD = available_libs(d)\r\n    for lib in AvLD:\r\n        id_lib = lib[0]\r\n        scan_rate = LL2[ id_lib ][2]\r\n\r\n        for book in LL2[id_lib][3]:\r\n            if scan_rate == 0:\r\n                continue\r\n\r\n            if book in ScB:\r\n                LL2[id_lib][3].remove(book)\r\n            else :\r\n                ScBpL += [ (book,id_lib) ]\r\n                ScB += [book]\r\n                scan_rate -= 1\r\n\r\nLIBS = available_libs(D)\r\nnbr_libraries_for_sign_up = len(LIBS)\r\nlibraries_submission = [ [] ]*nbr_libraries_for_sign_up\r\n\r\n\r\nlibrary_dict = {}\r\ntemp_lib = 0\r\n\r\nfor book in ScBpL:\r\n    if book[1] not in library_dict.keys(): \r\n        library_dict[book[1]]=[]\r\n    library_dict[book[1]].append(book[0])\r\n\r\nOUT = \"Solutions/\" + FILE[:-4] + \"_solution.txt\"\r\n\r\nf = open(OUT,\"w+\")\r\nf.write(str(nbr_libraries_for_sign_up))\r\nf.write(\"\\n\")\r\nfor lib in LIBS:\r\n    BOOKS = library_dict[lib[0]]\r\n    f.write(str(lib[0]))\r\n    f.write(\" \")\r\n    f.write(str(len(BOOKS)))\r\n    f.write(\"\\n\")\r\n    for b in BOOKS:\r\n        f.write(str(b))\r\n        f.write(\" \")\r\n    f.write(\"\\n\")\r\nf.close()","repo_name":"NacerSebtiMS/Google_Hashcode_2020_Qualification","sub_path":"SIN_solution.py","file_name":"SIN_solution.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23980189901","text":"def elemSum(list,sum):\n    if list == []:\n        return sum\n    else:\n        sum += list[0]\n        return elemSum(list[1:],sum)\n\nsum=0\nlista1=[1,2,3]\nlista2=[1,2]\nprint( elemSum(lista1, sum))\nprint( elemSum(lista2, sum))\n","repo_name":"lfgbs/IA","sub_path":"Guiaoprog/1/elemSum.py","file_name":"elemSum.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"829428580","text":"import json, os\nimport cPickle as pickle\n\nDBLog = \"data/db.txt\"\nDBPick = \"data/db.p\"\ndata = dict()\n\ndef load_data():\n\tglobal data\n\tif not os.path.exists(DBPick):\n\t\tdata = dict()\n\telse:\n\t\tdata = pickle.load(open(DBPick, \"rb\"))\n\tif not os.path.exists(DBLog):\n\t\topen(DBLog, 'w').close()\n\t\n\twith open(DBLog, \"r\") as myfile:\n\t\tfor l in myfile.readlines():\n\t\t\tdat = json.loads(l)\n\t\t\tact = dat[\"action\"]\n\t\t\tif act == \"add\":\n\t\t\t\tadd(dat[\"key\"], dat[\"value\"], False)\n\t\t\telif act == \"remove\":\n\t\t\t\tremove(dat[\"key\"], False)\n\t\n\tpickle.dump(data, open(DBPick, \"wb\"))\n\t\n\tos.remove(DBLog)\n\tmyfile = open(DBLog, \"w\").close()\n\treturn myfile\n\t\ndef write_f(mstr):\n\twith open(DBLog, 'a') as file:\n\t\tfile.write(mstr + \"\\n\")\n\ndef all():\n\treturn data\n\ndef get(key):\n\treturn data[key]\n\ndef add(key, value, store = True):\n\tdata[key] = value\n\tif store:\n\t\twrite_f(json.dumps({\"key\":key, \"action\":\"add\", \"value\":value }))\n\ndef remove(key, store = True):\n\tif key not in data:\n\t\treturn\n\tdel data[key]\n\tif store:\n\t\twrite_f(json.dumps({\"key\":key, \"action\":\"remove\"
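One performance note on the scanning loop above: `if book in ScB` tests membership in a plain list, which is O(len(ScB)) per lookup; with tens of thousands of books a set makes each test O(1). A sketch of the same dedup bookkeeping with a set (names mirror the originals):

```python
# Set-based dedup for the book-scanning loop; O(1) membership tests.
scanned_books = set()        # plays the role of ScB
scanned_by_library = []      # plays the role of ScBpL

def try_scan(book_id, library_id):
    if book_id in scanned_books:   # constant-time with a set
        return False
    scanned_books.add(book_id)
    scanned_by_library.append((book_id, library_id))
    return True

print(try_scan(7, 0), try_scan(7, 1))  # True, False — duplicate scan skipped
```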
}))\n\n\nfile = load_data()","repo_name":"hbirler/datalovr","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2010160970","text":"import argparse\nimport ast\nimport webbrowser\nfrom argparse import ArgumentParser, RawTextHelpFormatter\nimport math\nimport re\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta\nfrom urllib.request import urlopen\nfrom xml.etree import ElementTree\nimport unicodedata\n\n\nTABLE_TEMPLATE = '''\n\n {0}\n
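The `store.py` record above keeps a pickle snapshot plus an append-only JSON log and replays the log on startup — in effect a tiny write-ahead log with compaction. A compact, self-contained sketch of that replay-and-compact cycle (file names here are examples, not the module's):

```python
# Replay an append-only JSON op log into a pickle snapshot, then compact.
import json, os, pickle

LOG, SNAP = "db.log", "db.pickle"

def load():
    data = pickle.load(open(SNAP, "rb")) if os.path.exists(SNAP) else {}
    if os.path.exists(LOG):
        with open(LOG) as f:
            for line in f:
                op = json.loads(line)
                if op["action"] == "add":
                    data[op["key"]] = op["value"]
                elif op["action"] == "remove":
                    data.pop(op["key"], None)
        pickle.dump(data, open(SNAP, "wb"))  # fold the log into the snapshot
        os.remove(LOG)                       # compaction: log starts fresh
    return data

print(load())  # {} on a first run with no snapshot or log
```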
\n'''\n\nINSIDE_TABLE = '''\n\n Activitat\n\n\n {0.name} \n\n\n Lloc\n Direcció\n Numero\n Dia\n Hora\n Edat\n\n\n {0.place}\n {0.address}\n {0.number}\n {0.day}\n {0.hour}\n {0.age}\n\n{1}\n'''\n\nSTATIONS_ROW = '''\n\n Estació\n Distància (metres)\n'''\n\nSTATIONS_TEMPLATE = '''\n\n {0.name}\n {0.distance}\n'''\n\nHTML_DOC_TEMPLATE = '''\n\n\n \n Query Results\n \n\n\n{0}\n\n'''\n\nCSS_DOC = '''\n\ntable {\n color: #333;\n font-family: Helvetica, Arial, sans-serif;\n width: 640px; \n border-collapse: collapse; \n border-spacing: 0; \n}\nth, td {\n border: 1px solid #CCC; \n height: 30px;\n}\nth {\n background: #DFDFDF;\n font-weight: bold;\n}\ntd {\n background: #FAFAFA;\n text-align: center;\n}\n.activitat {\n background-color: #595959;\n color: white;\n width: 8%;\n}\n.info {\n background-color: #878787;\n color: black;\n width: 8%;\n}\n.stops {\n background-color: #adadad;\n color: black;\n width: 8%;\n}\n'''\n\nKEY_HELP = '''Either a string, a list, a tuple or a list with any combination\nof the three previous. All elements in a list will be satisfied. One of the \nelements in a tuple will be satisfied. Strings are case insensitive and \ndiacritics are ignored. Must go inside single quotes and each string surrounded\nby double quotes.\nE.g:\n'\"park\"'\n'[\"park\",\"clown\",\"chocolate\"]'\n'(\"dog\",\"cat\")'\n'[\"balloon\",(\"football\",\"basket\")]' '''\n\nDATE_HELP = '''dd/mm/yyyy format. Either a date, a tuple with a date a low range\nand an upper range or a list combining any of the previous. Must go inside single \nquotes.\nE.g:\n'01/06/2017'\n'(24/09/2017,-3,1)'\n'[03/01/2017,(06/01/2017,-1,1),(14/01/2017,0,1)]' '''\n\nMETRO_HELP = '''\"L#\" represents a metro line, being # any number. On its own, a \nlist, a tuple or a list combining any of the previous elements. All elements in \na list will be satisfied. One of the elements in a tuple will be satisfied. 
\nMust go inside single quotes.\nE.g:\n'[L1,L5]'\n'[L11]'\n'[(L3,L5),L4]' '''\n\nMetroStop = namedtuple('MetroStop', 'name point')\nPoint = namedtuple('Point', 'latitude longitude')\nMetroDistance = namedtuple('MetroDistance', 'name distance')\nActivity = namedtuple('Activity', 'name place address number day hour age')\nResult = namedtuple('Result', 'activity stations')\n\n\ndef interpret_key(string):\n    interpretation = ast.literal_eval(string)\n    if isinstance(interpretation, str) or isinstance(interpretation, tuple) or isinstance(interpretation, list):\n        return interpretation\n    msg = '%r is not a valid key' % string\n    raise argparse.ArgumentTypeError(msg)\n\n\ndef interpret_date(string):\n    dates = re.findall('[0-3][0-9]/[0-1][0-9]/[0-9]{4}(?!,-?[0-9]+,[0-9]+)', string)\n    date_tuples = re.findall('\\([0-3][0-9]/[0-1][0-9]/[0-9]{4},-?[0-9]+,[0-9]+\\)', string)\n    date_list = []\n    for date in dates:\n        date_list.append(datetime.strptime(date, '%d/%m/%Y'))\n    for date_tuple in date_tuples:\n        base_date = re.search('[0-3][0-9]/[0-1][0-9]/[0-9]{4}', date_tuple).group(0)\n        negative_delta = int(re.search('(?<=[0-3][0-9]/[0-1][0-9]/[0-9]{4},)-?[0-9]+', date_tuple).group(0))\n        positive_delta = int(re.search(\"[0-9]+(?=\\))\", date_tuple).group(0))\n        date_list.append(datetime.strptime(base_date, '%d/%m/%Y'))\n        for delta in range(1, positive_delta + 1):\n            date_list.append(datetime.strptime(base_date, '%d/%m/%Y') + timedelta(days=delta))\n        for delta in range(-1, negative_delta - 1, -1):\n            date_list.append(datetime.strptime(base_date, '%d/%m/%Y') + timedelta(days=delta))\n    if date_list:\n        return date_list\n    else:\n        msg = '%r is not a valid date' % string\n        raise argparse.ArgumentTypeError(msg)\n\n\ndef interpret_metro(string):\n    no_l = string.replace('L', '')\n    interpretation = ast.literal_eval(no_l)\n    if isinstance(interpretation, int) or isinstance(interpretation, tuple) or isinstance(interpretation, list):\n        return interpretation\n    msg = '%r is not a valid metro' % string\n    raise argparse.ArgumentTypeError(msg)\n\n\ndef haversine_distance(latitude1, longitude1, latitude2, longitude2):\n    radius = 6371000\n    phi1 = math.radians(latitude1)\n    phi2 = math.radians(latitude2)\n    delta_phi = math.radians(latitude2 - latitude1)\n    delta_lambda = math.radians(longitude2 - longitude1)\n\n    a = math.sin(delta_phi / 2) * math.sin(delta_phi / 2) \\\n        + math.cos(phi1) * math.cos(phi2) \\\n        * math.sin(delta_lambda / 2) * math.sin(delta_lambda / 2)\n\n    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n    d = radius * c\n    return d\n\n\ndef get_closest_stops(reference, stops, radius, size):\n    result = []\n    for stop in stops:\n        distance = int(haversine_distance(reference.latitude, reference.longitude, stop.point.latitude,\n                                          stop.point.longitude))\n        if distance <= radius:\n            result.append(MetroDistance(stop.name, distance))\n    sorted_result = sorted(result, key=lambda s: s.distance)\n    return sorted_result[0:size]\n\n\ndef output_results(results):\n    inside_content = ''\n    for result in results:\n        stations = ''\n        for station in result.stations:\n            stations += STATIONS_TEMPLATE.format(station)\n        if stations != '':\n            stations = STATIONS_ROW + stations\n        inside_content += INSIDE_TABLE.format(result.activity, stations)\n    if inside_content == '':\n        inside_content = 'No s\\'han trobat resultats'\n    table = TABLE_TEMPLATE.format(inside_content)\n    page = HTML_DOC_TEMPLATE.format(table)\n    css = CSS_DOC\n    f = open('style.css', 'w')\n    f.write(css)\n    f.close()\n    g = open('result.html', 'w')\n    g.write(page)\n    g.close()\n
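A quick sanity check on `haversine_distance`: one degree of latitude subtends roughly 111.2 km on a 6371 km sphere. A standalone copy of the same formula demonstrating that:

```python
# Haversine sanity check: 1 degree of latitude ~= 111.2 km.
import math

def haversine(lat1, lon1, lat2, lon2, r=6371000):
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp, dl = math.radians(lat2 - lat1), math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.atan2(math.sqrt(a), math.sqrt(1 - a))

print(round(haversine(0.0, 0.0, 1.0, 0.0)))  # 111195 metres
```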
webbrowser.open_new_tab('result.html')\n\n\ndef get_age(event):\n classificacions = event.find('classificacions')\n ages = []\n for nivell in classificacions:\n if 'anys' in nivell.text:\n text = nivell.text.replace('de', '')\n text = text.replace('anys', '')\n if text[0] == '+':\n ages.append(100)\n else:\n pos_a = text.find('a')\n ages.append(int(text[0:pos_a]))\n ages.append(int(text[pos_a+1:]))\n if not ages:\n return '-'\n elif min(ages) == max(ages):\n return '+12 anys'\n else:\n min_age = min(ages)\n max_age = max(ages)\n return '{0} a {1} anys'.format(min_age, '+12') if max_age == 100 else '{0} a {1} anys'.format(min_age, max_age)\n\n\ndef get_event_info(event):\n name = event.find('nom').text\n place = event.find('lloc_simple').find('nom').text\n address = event.find('lloc_simple').find('adreca_simple').find('carrer').text\n number = event.find('lloc_simple').find('adreca_simple').find('numero').text\n date = event.find('data').find('data_proper_acte').text\n day = date[0:10]\n hour = date[11:]\n age = get_age(event)\n return Activity(name, place, address, number, day, hour, age)\n\n\ndef get_event_coordinates(event):\n attributes = event.find('lloc_simple').find('adreca_simple').find('coordenades').find('googleMaps').attrib\n latitude = float(attributes.get('lat'))\n longitude = float(attributes.get('lon'))\n return Point(latitude, longitude)\n\n\ndef normalize(string):\n return ''.join((c for c in unicodedata.normalize('NFD', string) if unicodedata.category(c) != 'Mn')).casefold()\n\n\ndef get_encoding(raw_declaration):\n decoded_declaration = raw_declaration.decode('utf-8')\n split_declaration = decoded_declaration.split()\n for attribute in split_declaration:\n if attribute[0:8] == 'encoding':\n return attribute[10:-1]\n return 'utf-8' # default encoding\n\n\ndef follows_date_restrictions(date, date_mask):\n for mask in date_mask:\n if date == mask:\n return True\n return False\n\n\ndef follows_keys_restrictions(combined_keys, key_mask):\n if isinstance(key_mask, str):\n normalized_mask = normalize(key_mask)\n if normalized_mask in combined_keys:\n return True\n return False\n if isinstance(key_mask, list):\n for condition in key_mask:\n if not follows_keys_restrictions(combined_keys, condition):\n return False\n return True\n if isinstance(key_mask, tuple):\n for condition in key_mask:\n if follows_keys_restrictions(combined_keys, condition):\n return True\n return False\n\n\ndef follows_metro_restrictions(metro_number, metro_mask):\n if isinstance(metro_mask, int):\n if metro_number == metro_mask:\n return True\n return False\n if isinstance(metro_mask, list):\n for condition in metro_mask:\n if not follows_metro_restrictions(metro_number, condition):\n return False\n return True\n if isinstance(metro_mask, tuple):\n for condition in metro_mask:\n if follows_metro_restrictions(metro_number, condition):\n return True\n return False\n\n\ndef get_keys(acte):\n keys = ''\n if acte.find('nom').text:\n keys = keys + ' ' + acte.find('nom').text\n if acte.find('lloc_simple').find('nom').text:\n keys = keys + ' ' + acte.find('lloc_simple').find('nom').text\n if acte.find('lloc_simple').find('adreca_simple').find('barri').text:\n keys = keys + ' ' + acte.find('lloc_simple').find('adreca_simple').find('barri').text\n if acte.find('lloc_simple').find('adreca_simple').find('barri').text:\n keys = keys + ' ' + acte.find('lloc_simple').find('adreca_simple').find('carrer').text\n return keys\n\n\ndef has_coordinates(acte):\n coord = 
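`normalize` strips diacritics by decomposing strings to NFD and dropping combining marks (Unicode category `Mn`), then casefolds — which is what lets accented Catalan names match unaccented queries. A short demonstration:

```python
# Diacritic-insensitive matching via NFD decomposition + casefold.
import unicodedata

def normalize(s):
    return ''.join(c for c in unicodedata.normalize('NFD', s)
                   if unicodedata.category(c) != 'Mn').casefold()

print(normalize('Plaça Catalunya'))              # 'placa catalunya'
print(normalize('Güell') == normalize('GUELL'))  # True
```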
acte.find('lloc_simple').find('adreca_simple').find('coordenades').find('googleMaps').attrib\n try:\n float(coord.get('lat'))\n float(coord.get('lon'))\n return True\n except ValueError:\n return False\n\n\ndef is_infantil(acte):\n classificacions = acte.find('classificacions')\n for nivell in classificacions:\n if 'infants' in nivell.text or 'anys' in nivell.text:\n return True\n return False\n\n\nclass XMLScraperInterface:\n\n def __init__(self):\n self._page = None\n\n def set_page(self, url):\n content = urlopen(url).read()\n encoding = get_encoding(content[0:200])\n decoded = content.decode(encoding=encoding)\n root = ElementTree.fromstring(decoded)\n self._page = root\n\n\nclass XMLScraperFilterInterface(XMLScraperInterface):\n\n def get_filtered_elements(self):\n raise NotImplementedError\n\n def _has_restrictions(self, element):\n raise NotImplementedError\n\n\nclass XMLScraperKeysDates(XMLScraperFilterInterface):\n\n def __init__(self, keys, dates):\n super().__init__()\n self._key_mask = keys\n self._date_mask = dates\n\n def get_filtered_elements(self):\n actes = self._page.find('body').find('resultat').find('actes')\n return (acte for acte in actes if self._has_restrictions(acte) and has_coordinates(acte) and is_infantil(acte))\n\n def _has_restrictions(self, element):\n keys = get_keys(element)\n normalized_keys = normalize(keys)\n if follows_keys_restrictions(normalized_keys, self._key_mask):\n if element.find('data').find('data_proper_acte').text:\n date = element.find('data').find('data_proper_acte').text\n formatted_date = datetime.strptime(date[0:10], '%d/%m/%Y')\n if follows_date_restrictions(formatted_date, self._date_mask):\n return True\n return False\n\n\nclass XMLScraperKeys(XMLScraperFilterInterface):\n\n def __init__(self, keys):\n super().__init__()\n self._key_mask = keys\n\n def get_filtered_elements(self):\n actes = self._page.find('body').find('resultat').find('actes')\n return (acte for acte in actes if self._has_restrictions(acte) and has_coordinates(acte) and is_infantil(acte))\n\n def _has_restrictions(self, element):\n keys = get_keys(element)\n normalized_keys = normalize(keys)\n if follows_keys_restrictions(normalized_keys, self._key_mask):\n if element.find('data').find('data_proper_acte').text:\n return True\n return False\n\n\nclass XMLScraperDates(XMLScraperFilterInterface):\n\n def __init__(self, dates):\n super().__init__()\n self._date_mask = dates\n\n def get_filtered_elements(self):\n actes = self._page.find('body').find('resultat').find('actes')\n return (acte for acte in actes if self._has_restrictions(acte) and has_coordinates(acte) and is_infantil(acte))\n\n def _has_restrictions(self, element):\n if element.find('data').find('data_proper_acte').text:\n date = element.find('data').find('data_proper_acte').text\n formatted_date = datetime.strptime(date[0:10], '%d/%m/%Y')\n if follows_date_restrictions(formatted_date, self._date_mask):\n return True\n return False\n\n\nclass XMLScraperNoFilter(XMLScraperInterface):\n\n def get_filtered_elements(self):\n actes = self._page.find('body').find('resultat').find('actes')\n return (acte for acte in actes if has_coordinates(acte) and is_infantil(acte))\n\n\nclass XMLScraperMetro(XMLScraperInterface):\n\n def __init__(self):\n super().__init__()\n\n def get_metro_stops(self):\n ls = []\n punts = self._page.findall('Punt')\n for punt in punts:\n if punt.find('Coord').find('Latitud').text \\\n and punt.find('Coord').find('Longitud').text \\\n and punt.find('Tooltip').text and 
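The key-mask grammar consumed by these scraper classes treats a list as AND, a tuple as OR, and allows arbitrary nesting. A standalone evaluator mirroring `follows_keys_restrictions`, with a worked example (the event string is made up):

```python
# list = all must match, tuple = any may match, nesting allowed.
def matches(text, mask):
    if isinstance(mask, str):
        return mask.casefold() in text.casefold()
    if isinstance(mask, list):
        return all(matches(text, m) for m in mask)
    if isinstance(mask, tuple):
        return any(matches(text, m) for m in mask)
    return False

event = "Festa infantil al Parc de la Ciutadella"
print(matches(event, ["parc", ("clot", "ciutadella")]))  # True: parc AND (clot OR ciutadella)
print(matches(event, ["parc", "guell"]))                 # False: 'guell' missing
```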
self._has_restrictions(punt):\n lat = float(punt.find('Coord').find('Latitud').text)\n long = float(punt.find('Coord').find('Longitud').text)\n name = punt.find('Tooltip').text[0:-1]\n ls.append(MetroStop(name, Point(lat, long)))\n return ls\n\n def _has_restrictions(self, element):\n tooltip = element.find('Tooltip').text\n match = re.search('METRO', tooltip)\n if not match:\n return False\n else:\n return True\n\n\nclass XMLScraperMetroFilter(XMLScraperFilterInterface):\n\n def __init__(self, metro_mask):\n super().__init__()\n self._metro_mask = metro_mask\n\n def get_metro_stops(self):\n return self.get_filtered_elements()\n\n def get_filtered_elements(self):\n ls = []\n punts = self._page.findall('Punt')\n for punt in punts:\n if punt.find('Coord').find('Latitud').text \\\n and punt.find('Coord').find('Longitud').text \\\n and punt.find('Tooltip').text and self._has_restrictions(punt):\n lat = float(punt.find('Coord').find('Latitud').text)\n long = float(punt.find('Coord').find('Longitud').text)\n name = punt.find('Tooltip').text[0:-1]\n ls.append(MetroStop(name, Point(lat, long)))\n return ls\n\n def _has_restrictions(self, element):\n tooltip = element.find('Tooltip').text\n match_line = re.findall('L[0-9][0-9]?', tooltip)\n match_metro = re.findall('METRO', tooltip)\n if not match_metro or not match_line:\n return False\n else:\n for line in match_line:\n metro_number = int(line[1:])\n if follows_metro_restrictions(metro_number, self._metro_mask):\n return True\n return False\n\n\ndef main():\n cli_parser = ArgumentParser(formatter_class=RawTextHelpFormatter)\n cli_parser.add_argument('--key', type=interpret_key, help=KEY_HELP)\n cli_parser.add_argument('--date', type=interpret_date, help=DATE_HELP)\n cli_parser.add_argument('--metro', type=interpret_metro, help=METRO_HELP)\n arguments = vars(cli_parser.parse_args())\n\n if arguments.get('key') and arguments.get('date'):\n xml_scraper = XMLScraperKeysDates(arguments.get('key'), arguments.get('date'))\n elif arguments.get('key'):\n xml_scraper = XMLScraperKeys(arguments.get('key'))\n elif arguments.get('date'):\n xml_scraper = XMLScraperDates(arguments.get('date'))\n else:\n xml_scraper = XMLScraperNoFilter()\n\n xml_scraper.set_page('http://w10.bcn.es/APPS/asiasiacache/peticioXmlAsia?id=199')\n filtered_events = xml_scraper.get_filtered_elements()\n\n if arguments.get('metro'):\n metro_xml_scraper = XMLScraperMetroFilter(arguments.get('metro'))\n else:\n metro_xml_scraper = XMLScraperMetro()\n metro_xml_scraper.set_page('http://opendata-ajuntament.barcelona.cat/resources/bcn/TRANSPORTS%20GEOXML.xml')\n metro_stops = metro_xml_scraper.get_metro_stops()\n results = []\n for event in filtered_events:\n reference = get_event_coordinates(event)\n closest_stops = get_closest_stops(reference, metro_stops, 500, 5)\n activity = get_event_info(event)\n results.append(Result(activity, closest_stops))\n output_results(results)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Polo3cat/cerca","sub_path":"cerca/cerca.py","file_name":"cerca.py","file_ext":"py","file_size_in_byte":17843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10661900374","text":"import os\nimport shutil\nimport unittest\n\nfrom dask.distributed import LocalCluster\n\nfrom laserfarm.macro_pipeline import MacroPipeline\nfrom laserfarm.pipeline import Pipeline\n\nfrom .tools import ShortIOPipeline\n\n\nclass TestMacroPipelineObject(unittest.TestCase):\n\n _tmp_dask_worker_dir = 'dask-worker-space'\n\n 
def tearDown(self):\n if os.path.isdir(self._tmp_dask_worker_dir):\n shutil.rmtree(self._tmp_dask_worker_dir)\n\n def test_tasksDefault(self):\n mp = MacroPipeline()\n self.assertIsInstance(mp.tasks, list)\n self.assertTrue(len(mp.tasks) == 0)\n\n def test_setTasksNotValid(self):\n mp = MacroPipeline()\n pip = Pipeline()\n with self.assertRaises(TypeError):\n mp.tasks = 0\n with self.assertRaises(TypeError):\n mp.tasks = pip\n with self.assertRaises(AssertionError):\n mp.tasks = ['load']\n\n def test_addTaskNotValid(self):\n mp = MacroPipeline()\n with self.assertRaises(AssertionError):\n mp.add_task(0)\n with self.assertRaises(AssertionError):\n mp.add_task('load')\n with self.assertRaises(AssertionError):\n mp.add_task(['load'])\n\n def test_setLabels(self):\n mp = MacroPipeline()\n mp.tasks = [Pipeline(), Pipeline()]\n labels = ['a', 'b']\n mp.set_labels(labels)\n self.assertListEqual(labels, [task.label for task in mp.tasks])\n\n\nclass TestSetupClientMacroPipeline(unittest.TestCase):\n\n def test_localClusterFromInput(self):\n mp = MacroPipeline()\n cluster = LocalCluster(processes=True,\n n_workers=1,\n threads_per_worker=1)\n mp.setup_cluster(cluster=cluster)\n self.assertEqual(mp.client.status, 'running')\n mp.client.cluster.close()\n status = mp.client.cluster.status\n if hasattr(status, \"value\"):\n status = status.value\n self.assertEqual(status, 'closed')\n\n def test_localClusterFromMethod(self):\n mp = MacroPipeline()\n mp.setup_cluster(mode='local', processes=True, n_workers=1,\n threads_per_worker=1)\n self.assertEqual(mp.client.status, 'running')\n mp.client.cluster.close()\n status = mp.client.cluster.status\n if hasattr(status, \"value\"):\n status = status.value\n self.assertEqual(status, 'closed')\n\n def test_invalidCluster(self):\n mp = MacroPipeline()\n with self.assertRaises(RuntimeError):\n mp.setup_cluster(mode='newcluster')\n\n\nclass TestToyMacroPipeline(unittest.TestCase):\n\n _test_dir = 'test_tmp_dir'\n _tmp_dask_worker_dir = 'dask-worker-space'\n _outcome_file_path = os.path.join(_test_dir, 'outcome.out')\n\n def setUp(self):\n os.mkdir(self._test_dir)\n self.cluster = LocalCluster(processes=True,\n n_workers=1,\n threads_per_worker=1)\n\n def tearDown(self):\n shutil.rmtree(self._test_dir)\n self.cluster.close()\n if os.path.isdir(self._tmp_dask_worker_dir):\n shutil.rmtree(self._tmp_dask_worker_dir)\n\n def test_runValidPipelines(self):\n a, b = ShortIOPipeline(), ShortIOPipeline()\n file_a, file_b = [os.path.join(self._test_dir, 'file_{}.txt'.format(s))\n for s in 'ab']\n text = 'hello world'\n a.input = {'open': file_a,\n 'write': [text],\n 'close': {}}\n b.input = {'open': file_b,\n 'write': [text],\n 'close': {}}\n mp = MacroPipeline()\n mp.tasks = [a, b]\n mp.setup_cluster(cluster=self.cluster)\n mp.run()\n self.assertTrue(all([os.path.isfile(f) for f in [file_a, file_b]]))\n lines_a, lines_b = [open(f).readlines() for f in [file_a, file_b]]\n self.assertEqual(lines_a, lines_b)\n self.assertListEqual(mp.get_failed_pipelines(), [])\n mp.print_outcome(to_file=self._outcome_file_path)\n self.assertTrue(os.path.isfile(self._outcome_file_path))\n with open(self._outcome_file_path, 'r') as f:\n res = [line.split()[-1] for line in f.readlines()]\n self.assertListEqual(res, ['finished']*2)\n\n def test_runInvalidPipeline(self):\n a, b = ShortIOPipeline(), ShortIOPipeline()\n file = os.path.join(self._test_dir, 'file_a.txt')\n text = 'hello world'\n a.input = {'open': file,\n 'write': [text],\n 'close': {}}\n b.input = {'open': self._test_dir,\n 'write': 
[text],\n 'close': {}}\n mp = MacroPipeline()\n mp.tasks = [a, b]\n mp.setup_cluster(cluster=self.cluster)\n mp.run()\n self.assertListEqual(mp.get_failed_pipelines(), [b])\n self.assertIs(mp.errors[0], None)\n self.assertTrue(mp.errors[1][0], IsADirectoryError)\n mp.print_outcome(to_file=self._outcome_file_path)\n self.assertTrue(os.path.isfile(self._outcome_file_path))\n with open(self._outcome_file_path, 'r') as f:\n res = [line.split()[-1] for line in f.readlines()]\n self.assertEqual(res[0], 'finished')\n self.assertNotEqual(res[1], 'finished')\n","repo_name":"eEcoLiDAR/Laserfarm","sub_path":"tests/test_macro_pipeline.py","file_name":"test_macro_pipeline.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"25533659589","text":"# globalvariables.py is the module that contains the global variables for other files to access.\n# Additionally, all initializations (clientID, handles, coordinates, etc, etc.) are configured here.\nimport sys\nimport numpy as np\n\nimport sim\n\nkFinal = 0\n\n## Initialization ##\n\nclientID = sim.simxStart('127.0.0.1', 19990, True, True, 5000, 5) #Server IP, Port num, boolean wait-until-connected\n #boolean doNotReconnectOnceDisconnected, timeOutinMs,\n #commThreadCycleinMs (usually set as 5)\n\ndef connectionMessage(clientid):\n '''\n #################################### Setup #####################################\n '''\n #sim.simxFinish(-1) # just in case, close all opened connections ----- IF I NEED to reset sandboxscript on Vrep\n if clientid != -1:\n print('Connected to remote API server')\n else:\n print('connection not successful')\n sys.exit(\"could not connect\")\n\nPI = np.pi #For move_L calculations\n\n#Obtaining appropriate handles\nerrorCode, target = sim.simxGetObjectHandle(clientID, 'target', sim.simx_opmode_blocking) #target dummy\nerrorCode, j1 = sim.simxGetObjectHandle(clientID, 'ROBOTIQ_85_active1', sim.simx_opmode_blocking) #gripper joint 1\nerrorCode, j2 = sim.simxGetObjectHandle(clientID, 'ROBOTIQ_85_active2', sim.simx_opmode_blocking) #gripper joint 2\nerrorCode, connector = sim.simxGetObjectHandle(clientID, 'ROBOTIQ_85_attachPoint', sim.simx_opmode_blocking) #gripper connect point\n\n#Obtaining joint positions for the gripper to close & open\nerrorCode, p1 = sim.simxGetJointPosition(clientID, j1, sim.simx_opmode_streaming)\nerrorCode, p2 = sim.simxGetJointPosition(clientID, j2, sim.simx_opmode_streaming)\n\nreturnCode, pos = sim.simxGetObjectPosition(clientID, target, -1, sim.simx_opmode_streaming)\nreturnCode, orient = sim.simxGetObjectOrientation(clientID, target, -1, sim.simx_opmode_streaming)\n\n ## Coordinates ##\n#Initial + spawn point coordinates\ninitial_pos = [0.275, 0.646, 0.87, 0, 0, 0] #[x, y, z, alpha, beta, gamma]\nz_spawn = 0.765 # [x, y, z]\nz_pCup = 1.050 # height for with the end effector is before grasping 1.050\nz_gCup = 0.89 # height for with the end effector is for grasping motion\n\n\n","repo_name":"BurritoOverlord/SemesterProject-Stefan","sub_path":"Simulation/Simulation_globalvariables.py","file_name":"Simulation_globalvariables.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24003549857","text":"#!/usr/bin/python\nfrom challenge3 import xor_hex_strings_3, single_byte_xor_for_multi_byte_key\nfrom challenge5 import convert_char_string_to_hex_3\nfrom itertools import combinations\nfrom statistics import 
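The tests above exercise the Dask `LocalCluster` setup/teardown cycle; the same lifecycle in isolation looks like the sketch below. The `__main__` guard matters because `processes=True` forks worker processes:

```python
# One-worker LocalCluster lifecycle, matching the test configuration above.
from dask.distributed import Client, LocalCluster

if __name__ == "__main__":
    cluster = LocalCluster(processes=True, n_workers=1, threads_per_worker=1)
    client = Client(cluster)
    try:
        assert client.status == "running"
        print(client.submit(sum, [1, 2, 3]).result())  # 6
    finally:
        client.close()
        cluster.close()
```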
mean\nfrom binascii import hexlify\nfrom base64 import b64decode\n\nfrom collections import Counter\n\nimport time\n\nkeysize_values = range(2, 100)\n\ndef calculate_hamming_distance_between(s1, s2):\n assert len(s1) == len(s2)\n \n # Python 2.x\n # s1 = s1.encode(\"hex\")\n # s2 = s2.encode(\"hex\")\n\n # Python 3\n s1 = convert_char_string_to_hex_3(s1)\n s2 = convert_char_string_to_hex_3(s2)\n\n count = 0\n\n # z = xor_hex_strings_2(s1, s2)\n z = xor_hex_strings_3(s1, s2)\n z = bin(int(z, 16))[2:]\n\n return z.count(\"1\")\n\ndef decode_text(input_file, b64=True):\n f = open(input_file).read()\n if b64:\n binary_encoding = b64decode(f)\n else:\n return f\n \n # Python 2.x\n # hex_encoding = binary_encoding.encode(\"hex\")\n\n # Python 3.x\n hex_encoding = hexlify(binary_encoding).decode(\"utf-8\") # need to return a # hex string\n return hex_encoding\n\ndef get_keysize(input_file, b64=True):\n hex_encoding = decode_text(input_file, b64)\n keysize_candidates = []\n\n # Estimate with 2 blocks \n\n # for keysize in keysize_values:\n # # each byte = 2 characters in the encoding\n # st_1, st_2 = hex_encoding[0:2*keysize], \\\n # hex_encoding[2*keysize:4*keysize]\n # hamming_distance = calculate_hamming_distance_between(st_1, st_2)\n # keysize_candidates.append((keysize, \\\n # (float) (hamming_distance / keysize)))\n\n # # print(\"Keysize : \", keysize, \" Hamming Distance : \", hamming_distance, \" Normalized : \", \n # # ((float) (hamming_distance / keysize)), \"\\n\")\n\n # possible_keysizes = sorted(keysize_candidates, key=lambda x: x[1])[:5]\n # possible_keysizes = [k[0] for k in possible_keysizes]\n # return possible_keysizes\n\n\n # Estimate with 4 blocks : Always a better option\n\n for keysize in keysize_values:\n st_1, st_2 = hex_encoding[0:2*keysize], hex_encoding[2*keysize:4*keysize]\n st_3, st_4 = hex_encoding[4*keysize:6*keysize], hex_encoding[6*keysize:8*keysize]\n\n pairs = combinations([st_1, st_2, st_3, st_4], 2)\n \n # For some weird reason, pair[0] wasn't working. \n # Is pair a reserved word? \n hamming_distance = [calculate_hamming_distance_between(p[0], p[1]) for p in pairs] \n avg_hamming_distance = mean(hamming_distance)\n\n keysize_candidates.append((keysize, (float) (avg_hamming_distance / keysize)))\n\n possible_keysizes = sorted(keysize_candidates, key=lambda x: x[1])[:5]\n possible_keysizes = [k[0] for k in possible_keysizes]\n return possible_keysizes\n\ndef get_blocks_of_size(hex_code, keysize):\n blocks = []\n \n for i in range(0, len(hex_code) // (2 * keysize)):\n bytes_keysize = 2 * keysize\n start_index = i * bytes_keysize\n end_index = (i + 1) * bytes_keysize\n\n if end_index > len(hex_code):\n end_index = len(hex_code)\n\n block = hex_code[start_index:end_index]\n\n if len(block) == bytes_keysize:\n blocks.append(block)\n\n return blocks\n\ndef transpose(hex_code, keysize):\n bytes_keysize = 2 * keysize\n blocks = get_blocks_of_size(hex_code, keysize)\n transposed = []\n\n i = 0\n\n while i < bytes_keysize:\n t = ''.join([block[i:i+2] for block in blocks])\n transposed.append(t)\n i += 2\n\n return transposed\n\ndef crack_blocks(hex_code, keysize):\n transposed = transpose(hex_code, keysize)\n key_bytes = []\n\n for t in transposed:\n # Count the 13 most common bytes. With luck, they should \n # correspond to ETAOIN[space]SHRDLU. 
Now we only consider those single \n # byte keys which when xor-ed with each of these bytes gives\n # ETAOIN[space]SHRDLU, at least those which give the max percentage of\n # matches.\n\n split_t = [t[i:i+2] for i in range(0, len(t), 2)]\n most_common_bytes = [com[0] for com in Counter(split_t).most_common(13)]\n\n most_probable_block_crack_byte = \\\n single_byte_xor_for_multi_byte_key(most_common_bytes)\n key_bytes.append(most_probable_block_crack_byte)\n\n most_probable_key = \"\".join(key_bytes)\n return most_probable_key\n\n# The probable keysizes for this text are 2, 5, 29 on\n# analysis of hamming distance with 2 & 4 initial block sizes\n\n# Change b64 to False for picoCTF or any non-bas 64 encoding\ndef crack_multi_byte_repeated_xor(input_file, keysize=0, b64=True):\n hex_encoding = decode_text(input_file, b64)\n\n # Optional keysize argument to test with various keysizes\n if keysize != 0:\n most_probable_key = crack_blocks(hex_encoding, keysize)\n text_hex = xor_hex_strings_3(hex_encoding, most_probable_key) \n # text = text_hex.decode(\"hex\")\n text = bytes.fromhex(text_hex).decode(\"utf-8\", \"ignore\")\n\n print(\"\\n\\nKeysize\\t:\\t\", keysize, \"\\nKey\\t:\\t\", most_probable_key, \"\\nText\\t:\\t\", text)\n \n # the real deal\n else:\n # gotten from Hamming distance experiments\n possible_keysizes = (2, 5, 29) # Matasano Crypto\n # possible_keysizes = (3, 21, 28) # picoCTF 2014 \n\n for keysize in possible_keysizes:\n most_probable_key = crack_blocks(hex_encoding, keysize)\n text_hex = xor_hex_strings_3(hex_encoding, most_probable_key) \n # text = text_hex.decode(\"hex\")\n text = bytes.fromhex(text_hex).decode(\"utf-8\", \"ignore\")\n\n print(\"\\n\\nKeysize\\t:\\t\", keysize, \"\\nKey\\t:\\t\", most_probable_key, \"\\nDecoded Text\\t:\\t\", text)\n\n time.sleep(5)\n\n# The Matasano guys have an unhealthy obsession with Vanilla Ice.\n# The damn decryption is the lyrics of the song 'Play That Funky Music'.\n# Final decryption : Length = 29 bytes\n# Key = 5465726d696e61746f7220583a204272696e6720746865206e6f697365\n","repo_name":"varadgunjal/matasano-crypto","sub_path":"set1/challenge6.py","file_name":"challenge6.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23959070712","text":"from __future__ import annotations\n\nimport pandas as pd\nimport pandera as pa\n\nfrom lixinger import client\nfrom lixinger.config import settings\nfrom lixinger.utils import api, get_response_df\n\n\nclass Output(pa.DataFrameModel):\n date: pa.typing.Series[pa.typing.DateTime]\n bonus_shares_from_profit: pa.typing.Series[int]\n bonus_shares_from_capital_reserve: pa.typing.Series[int]\n dividend: pa.typing.Series[float]\n content: pa.typing.Series[str] = pa.Field(nullable=True)\n register_date: pa.typing.Series[pa.typing.DateTime] = pa.Field(nullable=True)\n ex_date: pa.typing.Series[pa.typing.DateTime] = pa.Field(nullable=True)\n payment_date: pa.typing.Series[pa.typing.DateTime] = pa.Field(nullable=True)\n status: pa.typing.Series[str]\n original_value: pa.typing.Series[float]\n split_ratio: pa.typing.Series[float] = pa.Field(nullable=True)\n\n\n@api\ndef get_dividend_and_alloment(\n start_date: str,\n stock_code: str,\n end_date: str | None = None,\n limit: int | None = None,\n) -> pa.typing.DataFrame[Output]:\n \"\"\"获取分红送配信息.\n\n 参考文档: https://www.lixinger.com/open/api/doc?api-key=cn/company/dividend-and-alloment\n \"\"\"\n payload = {\n \"token\": settings.token,\n \"startDate\": 
start_date,\n \"stockCode\": stock_code,\n }\n if end_date is not None:\n payload[\"endDate\"] = end_date\n if limit is not None:\n payload[\"limit\"] = limit\n\n response = client.post(\n f\"{settings.base_url}/cn/company/dividend-and-alloment\",\n json=payload,\n )\n df = get_response_df(response, Output)\n df[\"date\"] = pd.to_datetime(df[\"date\"]).dt.tz_localize(None) + pd.Timedelta(\"8h\")\n df[\"ex_date\"] = pd.to_datetime(df[\"ex_date\"]).dt.tz_localize(None) + pd.Timedelta(\n \"8h\"\n )\n return df\n","repo_name":"Chaoyingz/lixinger","sub_path":"lixinger/api/cn/company/dividend_and_alloment.py","file_name":"dividend_and_alloment.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30031611207","text":"from IEvent import IEvent\nimport ida_struct\nfrom constants import *\nclass CreateStructVariableEvent(IEvent):\n\tdef __init__(self, id_of_struct, offset, variable_type, value):\n\t\tsuper(CreateStructVariableEvent,self).__init__(CREATE_STRUCT_VARIABLE_ID, \"Create struct variable\", {\"id\": id_of_struct, \"offset\": offset, \"variable-type\": variable_type, \"value\": value})\t\n\t\tself._id = id_of_struct\n\t\tself._offset = offset\n\t\tself._variable_type = variable_type\n\t\tself._value = value\n\t\n\tdef implement(self):\n\t\tsturct_obj = ida_struct.get_struct_by_idx(self._id)\n\t\tval_type = \"\"\n\t\tif self._variable_type == \"db\":\n\t\t\tval_type = ida_sturct.FF_BYTE\n\t\telif self._variable_type == \"dw\":\n\t\t\tval_type = ida_sturct.FF_WORD\n\t\telif self._variable_type == \"dd\":\n\t\t\tval_type = ida_sturct.FF_DWORD\n\t\telif self._variable_type == \"dq\":\n\t\t\tval_type = ida_sturct.FF_QWORD\n\t\telse:\n\t\t\tval_type = ida_sturct.FF_STRUCT\n\t\tida_sturct.add_struc_member(sturct_obj, self._value, self._offset, val_type, None, 0 )","repo_name":"lolblat/IReal","sub_path":"events/CreateStructVariableEvent.py","file_name":"CreateStructVariableEvent.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"20277703623","text":"import traceback\n\nfrom PyQt5.QtCore import QRunnable, pyqtSignal, QObject\n\n\nclass WorkerSignal(QObject):\n error = pyqtSignal(str)\n success = pyqtSignal(str, float)\n\n\nclass Worker(QRunnable):\n def __init__(self, function):\n super(Worker, self).__init__()\n self.setAutoDelete(True)\n self.function = function\n self.signal = WorkerSignal()\n\n def run(self):\n try:\n output = self.function()\n if type(output) == str:\n out_path = output\n psnr = 0.0\n else:\n out_path, psnr = output\n self.signal.success.emit(out_path, psnr)\n except Exception as e:\n print(e)\n traceback.print_exc()\n self.signal.error.emit(str(e))\n","repo_name":"SteveImmanuel/multimedia-stegano","sub_path":"stegano/gui/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9883874362","text":"# coding=utf-8\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport input_data\n\nmnist = input_data.read_data_sets(\"./MNIST_data\", one_hot=True)\n\n\ndef show_picture(img):\n plt.imshow(np.reshape(img, [28, 28]))\n plt.show()\n\n\ndef show_mnist_info():\n print(mnist.train.images.shape)\n print(mnist.train.labels.shape)\n print(mnist.test.images.shape)\n print(mnist.test.labels.shape)\n for i in range(5):\n 
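The `tz_localize(None) + Timedelta("8h")` step above shifts the API's UTC timestamps to Beijing time by dropping the timezone and adding a fixed offset. `tz_convert` expresses the same intent more explicitly (a sketch with synthetic data; both give the same wall-clock result since China has no DST):

```python
# UTC -> Beijing wall-clock time via tz_convert instead of a fixed +8h.
import pandas as pd

s = pd.to_datetime(pd.Series(["2023-01-01T16:00:00Z"]))     # tz-aware UTC
beijing = s.dt.tz_convert("Asia/Shanghai").dt.tz_localize(None)
print(beijing[0])  # 2023-01-02 00:00:00
```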
show_picture(mnist.train.images[i])\n\n\n# show_mnist_info()\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\n# create model\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny = tf.placeholder(tf.float32, shape=[None, 10])\n\nw_fc1 = weight_variable([784, 200])\nb_fc1 = bias_variable([200])\nw_fc2 = weight_variable((200, 200))\nb_fc2 = bias_variable([200])\nw_out = weight_variable([200, 10])\nb_out = bias_variable([10])\n\nhidden1 = tf.nn.relu(tf.matmul(x, w_fc1) + b_fc1)\nhidden2 = tf.nn.relu(tf.matmul(hidden1, w_fc2) + b_fc2)\ny_pred = tf.nn.softmax(tf.matmul(hidden2, w_out) + b_out)\n\n\n# loss and optimizer\naccuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1)), tf.float32))\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_pred), reduction_indices=1))\nlearning_rate = 0.05\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n\n# session\ndisplay_step = 100\nbatch_size = 100\ntraining_iterations = 10000\n\ntf.summary.scalar(\"Accuracy\", accuracy)\ntf.summary.scalar(\"Cross entropy\", cross_entropy)\nmerged = tf.summary.merge_all()\n\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter(\"./network\", sess.graph)\n sess.run(tf.global_variables_initializer())\n\n for iteration in range(training_iterations):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n summary, current_accuracy, _ = sess.run([merged, accuracy, optimizer], feed_dict={x: batch_xs, y: batch_ys})\n writer.add_summary(summary, iteration)\n\n if iteration % display_step == 0:\n print(\"Iteration: %d | Accuracy: %.6f\" % (iteration, current_accuracy))\n\n test_accuracy = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})\n print(\"Test Accuracy: %.6f\" % test_accuracy)\n writer.close()\n\n\n","repo_name":"Uranium-Deng/machine_learning","sub_path":"mnist/1.full_connect.py","file_name":"1.full_connect.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39471418461","text":"import requests\nfrom django.conf import settings\nfrom rest_framework import status\nfrom structlog import get_logger\n\nfrom blockbuster_clone.movies.exceptions import (\n InvalidCredentialError,\n MovieNotFound,\n RequestFailedError,\n)\n\nlogger = get_logger()\n\nAPI_URL = \"http://www.omdbapi.com/\"\n\n\nclass OmdbApi:\n def __init__(self):\n self.api_key = settings.OMDB_API_KEY\n if self.api_key is None:\n raise InvalidCredentialError\n\n def fetch_movie_by_imdb_id(self, imdb_id):\n current_movie_response = requests.get(\n API_URL, params={\"i\": imdb_id, \"apiKey\": self.api_key}\n )\n if current_movie_response.status_code != status.HTTP_200_OK:\n raise RequestFailedError(current_movie_response.json())\n else:\n json_response = current_movie_response.json()\n if \"Error\" in json_response:\n raise MovieNotFound(\n message=json_response.get(\"Error\", \"Error from API\"),\n imdb_id=imdb_id,\n )\n item_type = json_response.get(\"Type\")\n if item_type != \"movie\":\n raise MovieNotFound(imdb_id)\n return 
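One numerical-stability note on the loss above: computing `-sum(y * log(softmax(x)))` by hand can hit `log(0) = -inf` when a probability underflows. The fused cross-entropy op is the stable form; a minimal TF1-style sketch (matching the graph API used in this record):

```python
# Numerically stable cross-entropy: pass logits, not softmax outputs.
import tensorflow as tf  # TF1-style graph API, as in the model above

logits = tf.placeholder(tf.float32, [None, 10])   # pre-softmax activations
labels = tf.placeholder(tf.float32, [None, 10])   # one-hot targets
stable_xent = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
```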
json_response\n","repo_name":"alexche77/blockbuster-api-clone","sub_path":"blockbuster_clone/movies/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3139030294","text":"# BOJ 2565\nimport sys\n\nsi = sys.stdin.readline\n\n\ndef solve(n):\n dp = [1] * n\n for i in range(1, n):\n for j in range(i):\n if arr[i][1] > arr[j][1]:\n dp[i] = max(dp[i], dp[j] + 1)\n return max(dp)\n\n\narr = []\nn = int(si())\nfor _ in range(n):\n a, b = map(int, si().split())\n arr.append((a, b))\n\narr.sort(key=lambda x: x[0])\n\n\nval = n - solve(n)\nprint(val)\n\n\"\"\"\nn = 8\n\narr = [(1, 8), (3, 9), (2, 2), (4, 1), (6, 4), (10, 10), (9, 7), (7, 6)]\n\"\"\"\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"BOJ/dp_boj/e_line.py","file_name":"e_line.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42844987744","text":"import pandas as pd\nimport math\nimport track_exp1308 as track\nimport config_exp1308 as cfg_exp\nimport numpy as np\nimport functions as fn\nfrom sklearn.ensemble import RandomForestClassifier\nimport matplotlib.pyplot as plt\n\n__author__ = 'YBeer'\n\n\ndef parse_rssi(rssi):\n rssi = rssi[1:-1].split(', ')\n rssi = map(lambda x: float(x), rssi)\n return rssi\n\npredictions = np.zeros(())\nap_timed_pred = np.zeros((len(track.time_frames), len(track.valid_ants)))\nap_timed_sd = np.zeros((len(track.time_frames), len(track.valid_ants)))\nfor i in track.valid_ants:\n \"\"\"\n Create training data\n \"\"\"\n # read database from file\n dataset = pd.read_csv('dataset_ap' + str(i) + '.csv',\n names=['V0', 'V1', 'V2', 'V3', 'H0', 'H1', 'H2', 'H3', 'V_m_H'])\n dataset_time = np.zeros((dataset.shape[0], 1))\n dataset = np.array(dataset)\n dataset = np.hstack((dataset_time, dataset))\n\n # filter noise below -10 dB\n dataset = fn.noise_filter(np.array(dataset))\n\n dataset_angle = pd.read_csv('dataset_angle_ap' + str(i) + '.csv', names=['Angle'])\n # filter angles out of range\n valid_angle = (cfg_exp.min_angle <= dataset_angle['Angle']) & (cfg_exp.max_angle >= dataset_angle['Angle'])\n\n dataset = fn.filter_rows(dataset, valid_angle)\n dataset_angle = dataset_angle.loc[valid_angle]\n\n dataset_angle = np.array(dataset_angle).ravel()\n\n # Fitting to RF\n clf = RandomForestClassifier()\n clf.fit(np.array(dataset[:, 1:]), dataset_angle)\n\n # creating predicted test set angles\n test_prediction = clf.predict(dataset[:, 1:]).tolist()\n\n # # plot test data\n # plt.plot(dataset_angle, test_prediction)\n # plt.show()\n\n # Fitting to RF\n clf = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1, min_samples_leaf=1,\n max_features=3, criterion=\"gini\", min_samples_split=2)\n clf.fit(dataset[:, 1:], dataset_angle)\n\n \"\"\"\n Get experiment data\n \"\"\"\n ap = pd.read_csv('ap' + str(i) + '.csv', index_col=0, names=['MAC', 'Time', 'RSSIs', 'channel'])\n ap = ap.loc[ap.index == cfg_exp.mac]\n ap['Time'] /= 1000\n ap['Time'] = ap['Time'] - track.t_0\n ap = ap.loc[ap['Time'] >= 0]\n ap = ap.loc[ap['Time'] <= track.time_stop]\n\n ap_rssis = list(ap['RSSIs'])\n\n for j in range(len(ap_rssis)):\n ap_rssis[j] = parse_rssi(ap_rssis[j])\n\n del ap['RSSIs']\n\n ap_rssis = pd.DataFrame(ap_rssis, columns=['V0', 'V1', 'V2', 'V3', 'H0', 'H1', 'H2', 'H3'], index=ap.index)\n ap = pd.concat([ap, ap_rssis], axis=1)\n ap_rssis = np.array(ap_rssis)\n\n # 
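A hypothetical usage of the `OmdbApi` wrapper above (it assumes `OMDB_API_KEY` is set in Django settings; the IMDb id is just an example):

```python
# Example call path for the wrapper; exceptions come from the module above.
api = OmdbApi()
try:
    movie = api.fetch_movie_by_imdb_id("tt0133093")  # example id
    print(movie["Title"])
except MovieNotFound as exc:
    print("not a movie:", exc)
```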
Arranging model data\n ap_max = np.apply_along_axis(np.max, 1, ap_rssis)\n\n ap_rssis = np.hstack((np.array(np.transpose(np.matrix(ap['Time']))), ap_rssis))\n\n ap_arranged = fn.arrange_data(ap_rssis)\n ap_arranged = fn.noise_filter(ap_arranged)\n\n # conditions\n not_sat_power = ap_max <= cfg_exp.rssi_max\n not_low_power = ap_max >= cfg_exp.rssi_min\n not_erroneous = ap_arranged[:, 9] > -10\n\n # Filtered\n ap_arranged_filtered = fn.filter_rows(ap_arranged, not_sat_power & not_low_power & not_erroneous)\n\n # Predicting basic model result\n ap_pred = clf.predict(ap_arranged_filtered[:, 1:])\n ap_pred = ap_pred.reshape((ap_pred.shape[0], 1))\n ap_pred = np.hstack((ap_arranged_filtered[:, [0]], ap_pred))\n\n # # plot predictions\n # plt.plot(ap_pred[:, 0], ap_pred[:, 1], 'go')\n # plt.show()\n\n # Building time frames\n time_frames = track.time_frames\n\n # calculate real angles from AP\n ap_angles = np.zeros((len(time_frames), 3))\n ap_angles[:, 0] = time_frames\n\n # global\n # Get global angles\n deltax = track.track_position[:, 1] - track.aps[i, 0]\n deltay = track.track_position[:, 2] - track.aps[i, 1]\n\n atan_vectorized = np.vectorize(math.atan)\n g_angle = 180 / np.pi * atan_vectorized(deltax / deltay)\n\n ap_angles[:, 1] = g_angle\n ap_angles[:, 2] = g_angle - track.aps[i, 2]\n\n # not valid prediction are saved as 100\n ap_timed_pred[:, i], ap_timed_sd[:, i] = fn.timed_predictions(ap_pred, time_frames)\n\n # # prediction vs angle\n # plt.plot(track.time_frames, ap_timed_pred[:, 0], 'go', track.time_frames, ap_angles[:, 1], 'r')\n # plt.show()\n\n # # deltax VS angle\n # plt.plot(track.track_position[:, 0], track.track_position[:, 1] - track.aps[i, 0],\n # ap_angles[:, 0], ap_angles[:, 1])\n # plt.show()\n #\n # # deltay VS angle\n # plt.plot(track.track_position[:, 0], track.track_position[:, 2] - track.aps[i, 1],\n # ap_angles[:, 0], ap_angles[:, 1])\n # plt.show()\n\nap_direction = np.zeros((ap_timed_pred.shape[0], len(track.valid_ants)))\n\nfor i in range(len(track.valid_ants)):\n ap_direction[:, i] = track.aps[track.valid_ants[i], 2]\n","repo_name":"yairbeer/my_repository","sub_path":"aps_doa.py","file_name":"aps_doa.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2353337856","text":"\nimport numpy as np\nimport tensorflow as tf\nfrom utils import *\nfrom distance import distance\nimport npdistance as nd\nfrom predict import predict_cv\nimport copy\nimport time\nfrom sklearn.preprocessing import normalize\nimport math\n\nclass SNEQ:\n\n def __init__(self, input,A, X, L,z, K=3, data_ = '',p_val=0.10, p_test=0.05, p_nodes=0.0, n_hidden=None,\n max_iter=50001, tolerance=100,batch =100, p_recom=0.1, p_semi=0.3,scale=False, seed=0, verbose=True):\n self.data_name= data_\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n self.M = 8\n self.K = 16\n self.num_hops = K\n self.input = input\n self.label_ratio = p_semi\n X = X.astype(np.float32)\n self.GT = copy.deepcopy(A).toarray().astype(np.int8)\n \n p = np.arange(A.shape[0])\n np.random.shuffle(p)\n p = p[:300]\n self.recom_edges_id = p\n self.classification_ratios = [0.02,0.10]\n \n if p_nodes > 0:\n A = self.__setup_inductive(A, X, p_nodes)\n else:\n self.X = X # tf.SparseTensor(*sparse_feeder(X))\n self.feed_dict = None\n self.labeled_nodes = self.sample_labeled_nodes(z,p_semi)\n self.N, self.D = X.shape\n self.L = L\n self.max_iter = max_iter\n self.tolerance = tolerance\n self.scale = scale\n 
self.verbose = verbose\n        self.batch_size = batch\n        if n_hidden is None:\n            n_hidden = [512]\n        self.n_hidden = n_hidden\n\n        if p_val + p_test > 0:\n            train_ones, val_ones, val_zeros, test_ones, test_zeros = train_val_test_split_adjacency(\n                A=A, p_val=p_val, p_test=p_test, seed=seed, neg_mul=1, every_node=True, connected=False,\n                undirected=False)\n\n            A_train = edges_to_sparse(train_ones, self.N)\n            hops = get_hops(A_train, K)\n        else:\n            hops = get_hops(A, K)\n        self.hops = hops\n        self.scale_terms = {h if h != -1 else max(hops.keys()) + 1:\n                                hops[h].sum(1).A1 if h != -1 else hops[1].shape[0] - hops[h].sum(1).A1\n                            for h in hops}\n        self.triplets = tf.placeholder(tf.int32, [None, None],'triplets')\n        self.batch_input_tf = tf.sparse_placeholder(tf.float32,name='sb')\n        self.neg_margin_tf = tf.placeholder(tf.float32,[None])\n        self.pos_margin_tf = tf.placeholder(tf.float32,[None])\n        self.lamb_weight = tf.placeholder(tf.float32)\n        self.similarity = tf.placeholder(tf.float32,[None,None])\n        self.mask = tf.placeholder(tf.float32,[None,None])\n        self.__build()\n        self.__dataset_generator(hops, self.scale_terms)\n        self.__build_loss()\n\n        if p_recom >= 0:\n            self.recommend_edges = self.X[self.recom_edges_id,:]\n\n        if p_val > 0:\n            val_edges = np.row_stack((val_ones, val_zeros))  # N x 2\n            print (val_edges.shape)\n            self.left_val = self.X[val_edges[:, 0], :]\n            self.right_val = self.X[val_edges[:, 1], :]\n            self.val_ground_truth = A[val_edges[:, 0], val_edges[:, 1]].A1\n\n\n        if p_test > 0:\n            test_edges = np.row_stack((test_ones, test_zeros))  # N x 2\n            self.left_test = self.X[test_edges[:, 0], :]\n            self.right_test = self.X[test_edges[:, 1], :]\n            self.test_ground_truth = A[test_edges[:, 0], test_edges[:, 1]].A1\n\n        if p_nodes > 0:\n            self.neg_ind_energy = -self.energy_kl(self.ind_pairs)\n\n    def sample_labeled_nodes(self,z,p):\n        len_ = self.X.shape[0]\n        u = np.arange(len_)\n        np.random.shuffle(u)\n        keep_ = int(len_ * (1. 
- p))\n t = set()\n for i in u[:keep_]:\n z[i] = t\n # z[u[:keep_]] = -9999\n return z\n\n def __build(self):\n w_init = tf.contrib.layers.xavier_initializer\n sizes = [self.D] + self.n_hidden\n for i in range(1, len(sizes)):\n W = tf.get_variable(name='W{}'.format(i), shape=[sizes[i - 1], sizes[i]], dtype=tf.float32,\n initializer=w_init())\n b = tf.get_variable(name='b{}'.format(i), shape=[sizes[i]], dtype=tf.float32, initializer=w_init())\n\n if i == 1:\n encoded = tf.sparse_tensor_dense_matmul(self.batch_input_tf, W) + b\n else:\n encoded = tf.matmul(encoded, W) + b\n\n encoded = tf.nn.tanh(encoded)\n\n W_mu = tf.get_variable(name='W_mu', shape=[sizes[-1], self.L], dtype=tf.float32, initializer=w_init())\n b_mu = tf.get_variable(name='b_mu', shape=[self.L], dtype=tf.float32, initializer=w_init())\n mu_ = tf.matmul(encoded, W_mu) + b_mu\n self.mu = tf.nn.tanh(mu_)\n\n self.codebooks = tf.cast(tf.get_variable(\"codebook\", [self.M * self.K, self.L]), tf.float32)\n logits = self.mu\n W_mu2 = tf.get_variable(name='W_mu2', shape=[self.L, self.L], dtype=tf.float32, initializer=w_init())\n b_mu2 = tf.get_variable(name='b_mu2', shape=[self.L], dtype=tf.float32, initializer=w_init())\n logits_a = tf.nn.tanh(tf.matmul(logits, W_mu2) + b_mu2)\n logits_a = tf.reshape(logits_a, [-1, self.M, self.K])\n logits_a = tf.nn.softmax(logits_a, dim=-1)\n self.atten_index = tf.cast(tf.argmax(logits_a, axis=-1), tf.int32)\n logits_a = tf.reshape(logits_a, [-1, self.M * self.K])\n\n logits = logits * logits_a\n logits = tf.reshape(logits, [-1, self.M, self.K], name=\"logits\")\n # D = self._gumbel_softmax(logits, self._TAU, sampling=True)\n D = tf.nn.softmax(logits,-1)\n _output_ = tf.reshape(D, [-1, self.M * self.K]) # ~ (B, M * K)\n # self.maxp = tf.reduce_mean(tf.reduce_max(D, axis=2))\n y_hat = self._decode(_output_, self.codebooks)\n loss = 0.5 * tf.reduce_sum((y_hat - self.mu) ** 2, axis=1)\n self.loss_quatization = tf.reduce_mean(loss, name=\"loss\")\n\n # recontruct rules\n self.max_index = max_index = tf.cast(tf.argmax(logits, axis=2), tf.int32)\n\n self.offset = offset = tf.range(self.M, dtype=\"int32\") * self.K\n self.codes_with_offset = codes_with_offset = max_index + offset[None, :]\n selected_vectors = tf.gather(self.codebooks, codes_with_offset) # ~ (B, M, H)\n self.reconstructed_embed = tf.reduce_sum(selected_vectors, axis=1) # ~ (B, H)\n\n def _decode(self, gumbel_output, codebooks):\n return tf.matmul(gumbel_output, codebooks)\n\n def __build_loss(self):\n anc_vect = tf.gather(self.mu, self.triplets[:, 0])\n hop_pos = tf.gather(self.mu, self.triplets[:, 1])\n hop_neg = tf.gather(self.mu, self.triplets[:,2])\n eng_pos = self.F2_distance(anc_vect,hop_pos)\n eng_neg = self.F2_distance(anc_vect,hop_neg)\n basic_loss = tf.maximum(eng_pos - eng_neg + (self.neg_margin_tf-self.pos_margin_tf)*15. 
, 0.0)\n        self.loss = tf.reduce_mean(basic_loss)\n        lab_dis = distance(self.mu, pair=True, dist_type='euclidean2')\n        self.class_margin = tf.reduce_mean(tf.abs(lab_dis - self.similarity)*self.mask)\n\n    def F2_distance(self, p1, p2):\n        return distance(p1, p2, pair=False, dist_type='euclidean2')\n\n    def sigmoid(self,x):\n        s = 1 / (1 + np.exp(-x))\n        return s\n\n\n    def construct_sim(self, label):\n        a = np.ones((label.shape[0], label.shape[0])) * ( (self.num_hops+5)*15.0)\n        b = np.ones((label.shape[0], label.shape[0]))\n\n        for i in range(label.shape[0]):\n            for j in range(label.shape[0]):\n                if len(label[i]) <= 0 or len(label[j]) <= 0:\n                    b[i][j] = 0\n                    b[j][i] = 0\n                    continue\n                O = len(label[i].intersection(label[j]))*1.0 / len(label[i].union(label[j]))\n                if O != 0:\n                    a[i][j] = 0\n        return a, b\n\n    def adaptation_factor(self, x):\n        if x >= 1.0:\n            return 1.0\n        den = 1.0 + math.exp(-10 * x)\n        lamb = 2.0 / den - 1.0\n        return lamb\n\n    def gen(self):\n        while True:\n            data,scal,neig_ty = to_triplets(sample_all_hops(self.hops), self.scale_terms)\n            num = data.shape[0]\n            if num >= self.batch_size:\n                its = int(num/self.batch_size)\n            else:\n                its = 1\n                self.batch_size = num\n            arr = np.arange(data.shape[0])\n            np.random.shuffle(arr)\n            np.random.shuffle(arr)\n            for i in range(its):\n                range_index = arr[(i*self.batch_size):(i+1)*self.batch_size]\n                triplet_batch =data[range_index]\n                scale_batch = scal[range_index]\n                neig_batch = neig_ty[range_index]\n                triplet_batch_ = triplet_batch.transpose().reshape(-1)\n                triplet_batch1 = np.unique(triplet_batch_)\n\n                c = np.array([np.where(triplet_batch1 == j)[0][0] for j in triplet_batch_])\n                c = c.reshape(3,self.batch_size).transpose()\n\n                yield self.X[triplet_batch1,:],c ,neig_batch.astype(np.float32), \\\n                      np.array(self.labeled_nodes[triplet_batch1])\n\n    def train(self, z, gpu_list='0'):\n\n        \n        train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(self.loss +\n                                                                       self.lamb_weight*self.loss_quatization+\n                                                                       (2-self.lamb_weight)*self.class_margin)\n        \n        sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list=gpu_list,\n                                                                          allow_growth=True)))\n        sess.run(tf.global_variables_initializer())\n        iterator = self.gen()\n        print ('start training ...')\n        for epoch in range(self.max_iter):\n            data, trplits, neg_type,labes = iterator.__next__()\n            _s, Mask = self.construct_sim(labes)\n\n            decay = self.adaptation_factor(epoch/5000.0)\n            codebook, codes,q_loss,struct_loss,class_loss, _ = sess.run([self.codebooks,self.max_index,self.loss_quatization,self.loss, self.class_margin, train_op], {self.batch_input_tf: sparse_feeder(data),\n                                                          self.triplets: trplits,\n                                                          self.neg_margin_tf: neg_type[:, 2],\n                                                          self.pos_margin_tf: neg_type[:, 1],\n                                                          self.similarity: _s, self.mask: Mask,\n                                                          self.lamb_weight:decay\n                                                          })\n\n            if epoch % 500 == 0:\n                val_left = []\n                val_right = []\n                \n                qus2 = []\n                qus1 = []\n                for i in range(0,self.right_val.shape[0] , 100):\n                    mu1,qu1 = sess.run([self.mu,self.reconstructed_embed], {self.batch_input_tf: sparse_feeder(self.left_val[i: i + 100, :])})\n                    mu2,qu2 = sess.run([self.mu, self.reconstructed_embed], {self.batch_input_tf: sparse_feeder(self.right_val[i: i + 100, :])})\n                    val_left.append(mu1)\n                    val_right.append(mu2)\n                    qus1.append(qu1)\n                    qus2.append(qu2)\n                qus2 = np.row_stack(qus2)\n                qus1 = np.row_stack(qus1)\n                val_left = np.row_stack(val_left)\n                val_right = np.row_stack(val_right)\n                score = nd.distance(val_left, val_right, pair=False, dist_type='euclidean2')\n                score2 = nd.distance(qus2, qus1, pair=False, dist_type='euclidean2')\n                
score = - score\n score2 = - score2\n val_auc1, val_ap1 = score_link_prediction(self.val_ground_truth, score)\n val_auc2, val_ap2 = score_link_prediction(self.val_ground_truth, score2)\n\n print('epoch: {:3d}, struct loss: {:.4f}, val_auc: {:.4f}, '\n 'val_ap: {:.4f}, c_loss: {:.4f}, quati:{:.5f}, qu_auc:{:.4f}, qu_ap:{:.4f},decay:{:.3f}'.format(epoch, struct_loss,\n val_auc1, val_ap1,class_loss, q_loss,val_auc2,val_ap2,decay))\n\n if epoch % 500 == 0 :\n quantization_index = []\n querys=[]\n reconstructed_vect = []\n for i in range(0,self.X.shape[0],1000):\n mu1, qu1, reco = sess.run([self.mu, self.max_index,self.reconstructed_embed],\n {self.batch_input_tf: sparse_feeder(self.X[i:i+1000, :])})\n quantization_index.append(qu1)\n querys.append(mu1)\n reconstructed_vect.append(reco)\n reconstructed_vect = np.row_stack( reconstructed_vect )\n querys = np.row_stack(np.array(querys))\n \n\n if epoch % 500 == -1:\n np.savez('./output/'+self.data_name +'_'+str(epoch).zfill(6)+'.npz',\n q=reconstructed_vect,e=querys,\n codebook=codebook,nodes=self.recom_edges_id,\n q_index=quantization_index)\n\n \n\n print('epoch: {:3d}, struct loss: {:.4f}, '\n ' c_loss: {:.4f}, quati:{:.5f}, decay:{:.3f}'.format(\n epoch, struct_loss, class_loss, q_loss,decay))\n\n return sess\n","repo_name":"ht014/SNEQ","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"26157680409","text":"import time\r\nfrom web3 import Web3\r\n \r\nPancakeABI = open('pcABI','r').read().replace('\\n','')\r\n \r\nbsc=\"https://bsc-dataseed.binance.org/\"\r\nweb3 = Web3(Web3.HTTPProvider(bsc))\r\nprint(web3.isConnected())\r\n \r\n#Mi direccion publica\r\nsender_address = \"Dirección de tu Wallet Publica\"\r\n \r\n#Direccion Pancake V2 Swap router address\r\nrouter_address = \"0x10ED43C718714eb63d5aA57B78B54704E256024E\"\r\n \r\n#Contrato de BNB\r\nspend = web3.toChecksumAddress(\"0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c\")\r\n \r\n#Clave privada de tu Wallet\r\nprivate=\"Tu clave privada\"\r\n\r\nbalance = web3.eth.get_balance(sender_address)\r\n#print(balance)\r\n \r\nhumanReadable = web3.fromWei(balance,'ether')\r\n#print(humanReadable)\r\n \r\n#Token que quieres recibir, en este caso BUSD\r\ncontract_id = web3.toChecksumAddress(\"0xe9e7cea3dedca5984780bafc599bd69add087d56\")\r\n\r\ncontract = web3.eth.contract(address=router_address, abi=PancakeABI)\r\n \r\nnonce = web3.eth.get_transaction_count(sender_address)\r\n \r\nstart = time.time()\r\nprint(web3.toWei('0.005','ether'))\r\n \r\npancakeswap2_txn = contract.functions.swapExactETHForTokens(\r\n 0, \r\n [spend,contract_id],\r\n sender_address,\r\n (int(time.time()) + 1000000)\r\n).buildTransaction({\r\n 'from': sender_address,\r\n 'value': web3.toWei(0.005,'ether'),#Cantidad en BNB\r\n 'gas': 250000,\r\n 'gasPrice': web3.toWei('5','gwei'),\r\n 'nonce': nonce,\r\n})\r\n \r\nsigned_txn = web3.eth.account.sign_transaction(pancakeswap2_txn, private_key=private)\r\ntx_token = web3.eth.send_raw_transaction(signed_txn.rawTransaction)\r\nprint(web3.toHex(tx_token))","repo_name":"WalletDatGH/Comprar-en-PancakeSwap-con-Python","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"4947807821","text":"# Imports\nimport nltk\nimport re\nimport string \nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import 
word_tokenize \nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import wordnet\n\n# from sklearn.feature_extraction.text import TfidfVectorizer # in case vectorizer is added here \n# import numpy as np # in case this is needed\nimport pandas as pd\n\n# Arguments to change based on data location and filename\n# Data for loading .csv\nREL_PATH_INPUT = \"../raw_data/all_the_news/\"\n\n# Main function\ndef pre_process(df, source='web', params=None, sample=None, printed=False):\n \"\"\"\n Main function to pre-process data. \n\n Parameters\n ----------\n df : DataFrame\n DataFrame containing the data to pre_process.\n\n source : string\n Describes the source of data to be pre_processed.\n The default is 'web. Can also be used 'prepared'.\n \n params : dict\n A dictionary contain some or all of the following keywords:\n - 'cat_mapping': True,\n - 'count_numbers': True,\n - 'check_emotions': True,\n - 'vocab_richness': True,\n - 'remove_digits': True,\n - 'remove_punctuation': True,\n - 'remove_emojis': True,\n - 'tokenize': True,\n - 'stopwords': True,\n - 'lemmatize': True,\n - 'stemming': False\n \n sample : float\n Default value is None. Returns a sample of the whole dataset.\n\n printed : boolean\n Turn on to receive information on the pre_processing data process.\n \"\"\"\n # Column name for pre_processed data\n pre_processed_text = \"pre_processed_text\"\n \n \n # For sampling\n if sample:\n df = df.sample(sample)\n \n pp_dict = {'cat_mapping': True,\n 'count_numbers': True,\n 'check_emotions': True,\n 'vocab_richness': True,\n 'remove_digits': True,\n 'remove_punctuation': True,\n 'remove_emojis': True,\n 'lower_case': True,\n 'tokenize': True,\n 'stopwords': True,\n 'lemmatize': True,\n 'stemming': False}\n \n #print(params)\n if params:\n for key, val in params.items():\n pp_dict[key] = val\n # pp_dict.update(params)\n \n # Prep columns for data pre_processing\n df = data_prep(df, source, printed)\n df = pp_lower_case(df, printed, execute=pp_dict['lower_case'])\n df['minor_preprocessing'] = df[pre_processed_text]\n df = pp_cat_mapping(df, printed, execute=pp_dict['cat_mapping'])\n df = df.dropna(subset=[pre_processed_text]).reset_index(drop=True)\n \n if printed:\n print(\"-------------------------\")\n print(\"Minor pre-processing done\")\n print(\"-------------------------\")\n \n # Add new columns for label and other features\n df = pp_count_numbers(df, printed, execute=pp_dict['count_numbers'])\n df = pp_check_emotions(df, printed, execute=pp_dict['check_emotions'])\n df = pp_vocab_richness(df, printed, execute=pp_dict['vocab_richness'])\n \n if printed:\n print(\"-------------------------\")\n print(\"New features added\")\n print(\"-------------------------\")\n \n # Remove excess data\n df = pp_remove_digits(df, printed, execute=pp_dict['remove_digits'])\n df = pp_remove_punctuation(df, printed, execute=pp_dict['remove_punctuation'])\n df = pp_remove_emojis(df, printed, execute=pp_dict['remove_emojis'])\n \n if printed:\n print(\"-------------------------\")\n print(\"Excess data removed\")\n print(\"-------------------------\")\n\n \n # Divide data and pre_process\n df = pp_tokenize(df, printed, execute=pp_dict['tokenize'])\n df = pp_stopwords(df, printed, execute=pp_dict['stopwords'])\n df = pp_lemmatizing(df, printed, execute=pp_dict['lemmatize'])\n df = pp_stemming(df, printed, execute=pp_dict['stemming'])\n\n df[pre_processed_text] = df[pre_processed_text].map(lambda x: \" \".join(x))\n \n if printed:\n 
print(\"-------------------------\")\n print(\"Data pre-processed\")\n print(\"-------------------------\")\n\n return df\n\n# Data cleaning\ndef data_prep(df, source, printed=False):\n \n if printed:\n print('Preparing pre_processed_text column')\n \n if source == 'web':\n CONTENT_COL = \"content\"\n DESCRIPTION_COL = \"short_description\"\n HEADLINE_COL = \"headline\"\n \n \n df = df.drop(columns=['Unnamed: 0', 'index'], errors='ignore')\n df = df[df['content'] != \"Invalid file\"].reset_index(drop=True)\n df[CONTENT_COL] = df[CONTENT_COL].replace(['\\n','\\r'],' ', regex=True)\n df['pre_processed_text'] = df[HEADLINE_COL] + \" \" + df[DESCRIPTION_COL] + \" \" + df[CONTENT_COL]\n\n \n return df\n \n elif source == 'prepared':\n HEADLINE_COL = \"title\"\n CONTENT_COL = \"content\"\n \n df[CONTENT_COL] = df[CONTENT_COL].replace(['\\n','\\r'],' ', regex=True)\n df['pre_processed_text'] = df[HEADLINE_COL] + \" \" + df['description'] + \" \" + df[CONTENT_COL]\n \n return df\n \n elif source == 'train':\n DESCRIPTION_COL = \"short_description\"\n HEADLINE_COL = \"headline\"\n\n df['pre_processed_text'] = df[HEADLINE_COL] + \" \" + df[DESCRIPTION_COL]\n \n return df\n\n\ndef pp_cat_mapping(df, printed=False, execute=False):\n\n my_dict = {'CRIME': 'Crime',\n 'ENTERTAINMENT': 'Entertainment',\n 'WORLD NEWS': 'World News',\n 'IMPACT': 'Other',\n 'POLITICS': 'Politics',\n 'WEIRD NEWS': 'Other',\n 'BLACK VOICES': 'Activism',\n 'WOMEN': 'Entertainment',\n 'COMEDY': 'Entertainment',\n 'QUEER VOICES': 'Activism',\n 'SPORTS': 'Sports',\n 'BUSINESS': 'Business',\n 'TRAVEL': 'Culture',\n 'MEDIA': 'Media',\n 'TECH': 'Technology',\n 'RELIGION': 'Religion',\n 'SCIENCE': 'Science',\n 'LATINO VOICES': 'Activism',\n 'EDUCATION': 'Education',\n 'COLLEGE': 'Education',\n 'PARENTS': 'Other',\n 'ARTS & CULTURE': 'Culture',\n 'STYLE': 'Trends',\n 'GREEN': 'Activism',\n 'TASTE': 'Culture',\n 'HEALTHY LIVING': 'Health',\n 'THE WORLDPOST': 'World News',\n 'GOOD NEWS': 'Other',\n 'WORLDPOST': 'World News',\n 'FIFTY': 'Other',\n 'ARTS': 'Culture',\n 'WELLNESS': 'Health',\n 'PARENTING': 'Other',\n 'HOME & LIVING': 'Trends',\n 'STYLE & BEAUTY': 'Trends',\n 'DIVORCE': 'Other',\n 'WEDDINGS': 'Other',\n 'FOOD & DRINK': 'Culture',\n 'MONEY': 'Other',\n 'ENVIRONMENT': 'Activism',\n 'CULTURE & ARTS': 'Culture'}\n \n if execute:\n df['label'] = df.category.map(lambda x: my_dict[x])\n\n if printed:\n print('Mapping labels')\n\n return df\n return df \n\n# Add features\ndef pp_count_numbers(df, printed=False, execute=False):\n '''Create number of \"news + headline\" decimals column'''\n \n if execute:\n df['nrs_count'] = df['pre_processed_text'].str.count('[+-]?([0-9]*[.])?[0-9]+')\n df['nrs_count'] = df['nrs_count'].fillna(0)\n df['nrs_count'] = df['nrs_count'].astype(float).astype(int)\n\n if printed:\n print('Counting numbers')\n\n return df\n return df\n\ndef pp_check_emotions(df, printed=False, execute=False):\n '''Counts types of entonation'''\n \n if execute:\n df['questions'] = df['pre_processed_text'].str.count('\\?')\n df['exclamations'] = df['pre_processed_text'].str.count('\\!')\n df['irony'] = df['pre_processed_text'].map(lambda x: len(re.findall('\\?!|\\!\\?',x)))\n\n if printed:\n print('Revealing emotions')\n\n return df\n return df \n\ndef pp_vocab_richness(df, printed=False, execute=False):\n\n def vocab_richness(text):\n tokens = word_tokenize(text)\n total_length = len(tokens)\n unique_words = set(tokens)\n unique_word_length = len(unique_words)\n \n if total_length > 0:\n return unique_word_length / 
total_length\n return 0\n \n if execute:\n df['vocab_richness'] = df['pre_processed_text'].apply(lambda x: vocab_richness(x))\n\n if printed:\n print('Analyzing vocabulary bank account!')\n\n return df\n return df\n\n# Pre_process data\ndef pp_lower_case(df, printed=False, execute=False):\n '''Lower cases the pre_processed_text column'''\n \n if execute:\n df['pre_processed_text'] = df['pre_processed_text'].str.lower()\n\n if printed:\n print('Lowering the cases')\n\n return df\n return df\n\ndef pp_tokenize(df, printed=False, execute=False):\n \n if execute:\n df['pre_processed_text'] = df['pre_processed_text'].apply(lambda x: word_tokenize(x))\n\n if printed:\n print('Tokenizing')\n\n return df\n return df\n\ndef pp_stopwords(df, printed=False, execute=False, language='english'):\n \n if execute:\n stop_words = set(stopwords.words(language))\n df['pre_processed_text'] = df['pre_processed_text']\\\n .apply(lambda x: [word for word in x if not word in stop_words])\n\n if printed:\n print('Checking for stopwords')\n\n return df\n return df \n\ndef pp_stemming(df, printed=False, execute=False, language='english'):\n \n if execute:\n stemmer = SnowballStemmer(language=language)\n \n df['pre_processed_text'] = df['pre_processed_text']\\\n .apply(lambda x: [stemmer.stem(word) for word in x])\n\n if printed:\n print('Generating branches')\n\n return df\n return df\n\ndef pp_lemmatizing(df, printed=False, execute=False, upgrade=False):\n \n def get_wordnet_pos(word):\n \"\"\"Map POS tag to first character lemmatize() accepts\"\"\"\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)\n \n if execute:\n lemmatizer = WordNetLemmatizer()\n \n if upgrade:\n nltk.download('popular')\n \n df['pre_processed_text'] = df['pre_processed_text']\\\n .map(lambda x: [lemmatizer.lemmatize(word, get_wordnet_pos(word)) for word in x])\n\n if printed:\n print('Lematizing, yummy!')\n\n return df\n return df\n\n# Remove excess data\ndef pp_remove_digits(df, printed=False, execute=False):\n '''Removes all the digits from the string'''\n \n if execute:\n df['pre_processed_text'] = df['pre_processed_text'].str.replace('[+-]?([0-9]*[.])?[0-9]+', '', regex=True)\n\n if printed:\n print('Removing digits')\n\n return df\n return df \n\ndef pp_remove_punctuation(df, printed=False, execute=False):\n \n if execute:\n real_string_punctuation = string.punctuation + \"—\" + '”' + \"’\" + \"‘\" + \"…\" + '“' + '´' + \"`\" + \"«\" + \"»\"\n df['pre_processed_text'] = df['pre_processed_text'].apply(lambda x: ''\\\n .join(word for word in x if word not in real_string_punctuation))\n\n if printed:\n print('Removing pesky dots')\n\n return df\n return df\n\ndef pp_remove_emojis(df, printed=False, execute=False):\n\n def deEmojify(text):\n regrex_pattern = re.compile(pattern = \"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002500-\\U00002BEF\" # chinese char\n u\"\\U00002702-\\U000027B0\"\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u\"\\U00010000-\\U0010ffff\"\n u\"\\u2640-\\u2642\" \n u\"\\u2600-\\u2B55\"\n u\"\\u200d\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\ufe0f\" # dingbats\n u\"\\u3030\"\n \"]+\", flags = re.UNICODE)\n return regrex_pattern.sub(r'',text)\n \n if 
execute:\n df['pre_processed_text'] = df['pre_processed_text'].apply(lambda x: deEmojify(x))\n\n if printed:\n print('Disabling emojis')\n\n return df\n return df \n\nif __name__ == \"__main__\":\n pass\n","repo_name":"archifreitas/big_picture","sub_path":"big_picture/pre_processor.py","file_name":"pre_processor.py","file_ext":"py","file_size_in_byte":13088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6355323482","text":" \nclass Account:\n\n def __init__(self,account_holder):\n self.balance = 0\n self.holder = account_holder\n self.tranactions = []\n\n def __saveHistory(self,action,amount):\n self.tranactions.append((action,amount))\n\n def deposit(self,amount):\n self.balance = self.balance + amount\n self.__saveHistory('deposit',amount)\n\n def withdraws(self,amount):\n if amount > self.balance:\n return 'Insufficient funds'\n else:\n self.balance = self.balance - amount\n self.__saveHistory('withdrawl',amount)\n\n def status(self):\n print(self.holder + ' : ',end ='')\n print(self.tranactions)\n\ndef main(): \n bob = Account('Bob')\n bob.deposit(1000000)\n bob.withdraws(100)\n bob.deposit(440)\n bob.status()\n\n tom = Account('Tom')\n tom.deposit(5000000)\n tom.withdraws(250)\n tom.withdraws(875)\n tom.status()\n\nmain()","repo_name":"yuchanmo/python1_class","sub_path":"과제/hw3_16a_3.py","file_name":"hw3_16a_3.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74697653371","text":"import random\n\n# O computador \"pensa\" em um número aleatório entre 0 e 5\nnumero_pensado = random.randint(0, 5)\n\n# Solicita ao usuário que tente adivinhar o número pensado pelo computador\nnumero_usuario = int(\n input(\"Tente adivinhar o número que estou pensando (entre 0 e 5): \")\n)\n\n# Verifica se o número fornecido pelo usuário é igual ao número pensado pelo computador\nif numero_usuario == numero_pensado:\n # Se os números forem iguais, exibe a mensagem de acerto\n print(\"Parabéns! Você acertou o número.\")\nelse:\n # Caso contrário, exibe a mensagem de erro e revela o número correto\n print(\"Você errou. 
O número correto era\", numero_pensado)\n","repo_name":"Wxskley/Python","sub_path":"Curso em vídeo - Python/Dia 5/ex028.py","file_name":"ex028.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1988689521","text":"import os\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport dash_bootstrap_components as dbc\nimport dash_daq as daq\nimport plotly.express as px\nimport pandas as pd\nimport plotly.graph_objects as go\n\nfrom app import app\ncases = pd.read_csv(\"indian_cases_confirmed_cases.csv\")\ndeaths = pd.read_csv(\"indian_cases_confirmed_deaths.csv\")\nimp_st = pd.read_csv('cases_deaths_india.csv')\nimp_st = imp_st.sort_values('date')\nindia_cases= pd.read_csv(\"india_cases_diff.csv\")\nindia_deaths= pd.read_csv(\"india_deaths_diff.csv\")\nstates_g = pd.read_csv(\"plot_states_g.csv\")\nstates_d = pd.read_csv(\"plot_states_d.csv\")\ndate_range = [\"2020-01-30\", \"2021-06-09\"]\n\nstate_dic = {'ap':'Andhra Pradesh',\n 'dl':'Delhi',\n 'mp':'Madhya Pradesh',\n 'kl':'Kerala',\n 'up':'Uttar Pradesh',\n 'mh':'Maharastra',\n 'br':'Bihar',\n 'wb':'West Bengal',\n 'tn':'Tamil Nadu',\n 'rj':'Rajesthan',\n 'ka':'Karnataka',\n 'gj':'Gujarat',\n 'or':'Odisha',\n 'tg':'Telangana',\n 'jh':'Jharkhand',\n 'as':'Assam',\n 'pb':'Punjab',\n 'ct':'Chattisgarh',\n 'hr':'Haryana',\n 'jk':'Jammu and Kashmir',\n 'ut':'Uttarakhand',\n 'hp':'Himachal Pradesh',\n 'tr':'Tripura',\n 'ml':'Meghalaya',\n 'mn':'Manipur',\n 'nl':'Nagaland',\n 'ga':'Goa',\n 'ar':'Arunachal Pradesh',\n 'py':'Puducherry',\n 'mz':'Mizoram',\n 'ch':'Chandigarh',\n 'sk':'Sikkim',\n 'dn_dd':'Daman and Diu',\n 'an':'Andaman and Nicobar',\n 'ld':'Ladakh',\n 'la':'Lakshdweep'}\ntotal_cases = cases.set_index('state')\ntotal_state_cases = total_cases.iloc[:,-1:]\ntotal_cases = total_state_cases.sum()\n\ntotal_deaths = deaths.set_index('state')\ntotal_state_deaths = total_deaths.iloc[:,-1:]\ntotal_deaths= total_state_deaths.sum()\n\ndef plot_cases(state,ca):\n sim_data = states_g[states_g['series'] == state]\n sim_data = sim_data.T\n sim_data = sim_data[2:].reset_index()\n sim_data.columns = ['date','G']\n sim_data['date'] = pd.to_datetime(sim_data['date'])\n dates = sim_data['date']\n if ca == False:\n st = cases.set_index('state')\n col1 = st['2020-01-30']\n st = st.diff(axis = 1)\n st['2020-01-30'] = col1\n st = st.reset_index()\n st = (st[st['state'] == state].T)\n sim_data = sim_data['G'].diff()\n sim_data[0] = 0\n sim_data = sim_data.to_frame()\n sim_data['date'] = dates\n sim_data.columns = ['G','date']\n else:\n st = (cases[cases['state'] == state].T)\n \n #sim_data1 = sim_data1[sim_data1['series'] == 'A'].T\n #sim_data1 = sim_data1[1:].reset_index()\n #sim_data1.columns = ['date','A']\n #sim_data1['date'] = pd.to_datetime(sim_data1['date'])\n dates = sim_data['date']\n st = st[1:].reset_index()\n st.columns = ['date','cases']\n st['date'] = pd.to_datetime(st['date'])\n \n fig = go.Figure()\n fig.add_trace(go.Bar(x=st['date'],y = st['cases'],name=\"Actual G\"))\n fig.update_traces(marker_color='rgb(0,128,0)',\n opacity=1)\n #fig.add_trace(go.Scatter(x=sim_data['date'],y = sim_data['G'],name=\"G\"))\n #fig.add_trace(go.Scatter(x=sim_data1['date'],y = sim_data1['A'],name=\"A\"))\n #fig = go.Figure()\n #fig.add_trace(go.Scatter(x=st['date'],y=st['cases'],mode= 'markers',name='Cases'))\n 
#fig.add_trace(go.Scatter(x=sim_data['date'],y=sim_data['infections'],mode= 'markers',name='I'))\n #fig = make_subplots(rows = 6, cols =6, start_cell = \"top-left\")\n #fig.add_trace(go.Scatter(x=st['date'],y=st['cases'],mode= 'markers'))\n #fig = px.scatter(st, x='date', y='cases')\n #fig = go.Figure()\n #fig.add_trace(go.scatter(x=sim_data['date'],y=sim_data['infections'],mode =\"lines\",name=\"infections\"))\n #fig.add_trace(go.scatter(x=st['date'],y=st['cases'],mode =\"lines\"))\n #fig.add_trace()\n fig.update_layout(\n autosize=True,\n #title = st_name,\n margin = dict(l=40, r=40, t=10, b=40 ),\n width=500,\n height=400,\n yaxis = dict(\n #range = [0,100] ,\n #rangemode=\"tozero\",\n autorange=True,\n title_text='Cases',\n titlefont=dict(size=10),\n ),\n xaxis=dict(\n title_text = \"date\",\n autorange=True,\n range=date_range,\n rangeslider=dict(\n autorange=True,\n range=date_range\n ),\n type=\"date\"\n ),\n )\n fig.update_yaxes(title=None)\n fig.update_xaxes(title=None)\n return fig\n\ndef plot_deaths(state,ca):\n sim_data = states_d[states_d['series'] == state]\n sim_data = sim_data.T\n sim_data = sim_data[2:].reset_index()\n sim_data.columns = ['date','D']\n sim_data['date'] = pd.to_datetime(sim_data['date'])\n dates = sim_data['date']\n if ca == False:\n st = deaths.set_index('state')\n col1 = st['2020-01-30']\n st = st.diff(axis = 1)\n st['2020-01-30'] = col1\n st = st.reset_index()\n st = (st[st['state'] == state].T)\n sim_data = sim_data['D'].diff()\n sim_data[0] = 0\n sim_data = sim_data.to_frame()\n sim_data['date'] = dates\n sim_data.columns = ['D','date']\n else:\n st = (deaths[deaths['state'] == state].T)\n st = st[1:].reset_index()\n st.columns = ['date','deaths']\n st['date'] = pd.to_datetime(st['date'])\n #fig = go.Figure()\n #fig.add_trace(go.Scatter(x=st['date'],y=st['deaths'],mode= 'markers',name=f'{state_dic[state]}'))\n #st_name = u'Deaths in {}'.format(state_dic[state])\n #fig = px.bar(st, x='date', y='deaths')\n fig = go.Figure()\n fig.add_trace(go.Bar(x=st['date'],y = st['deaths'],name=\"Actual D\"))\n fig.update_traces(marker_color='rgb(255,99,71)',\n opacity=1)\n #fig.add_trace(go.Scatter(x=sim_data['date'],y = sim_data['D'],name=\"D\"))\n fig.update_layout(\n autosize=True,\n #title = st_name,\n\n margin = dict(l=40, r=40, t=10, b=40 ),\n width=500,\n height=400,\n yaxis = dict(\n #range = [0,100] ,\n #rangemode=\"tozero\",\n autorange=True,\n title_text='deaths',\n titlefont=dict(size=10),\n ),\n xaxis=dict(\n title_text = \"date\",\n autorange=True,\n range=date_range,\n rangeslider=dict(\n autorange=True,\n range=date_range,\n \n \n \n ),\n type=\"date\",\n ),\n )\n fig.update_yaxes(title=None)\n fig.update_xaxes(title=None)\n return fig\ndef plot_total_cases(ca):\n if ca == False:\n st = cases.set_index('state')\n col1 = st['2020-01-30']\n st = st.diff(axis = 1)\n st['2020-01-30'] = col1\n st = st.reset_index()\n else:\n st = cases\n ind = st.sum(axis =0)[1:]\n ind = ind.to_frame()\n ind = ind.reset_index()\n ind.columns = ['date','sum']\n ind['date'] = pd.to_datetime(ind['date'])\n #ind = ind[ind['date'] > '2021-01-31']\n #tc = india_cases[india_cases['date'] > '2021-01-31']\n fig = go.Figure()\n #fig.add_trace(go.Scatter(x=ind['date'],y=ind['sum'],mode= 'markers'))\n #fig = px.bar(ind, x='date', y='sum')\n fig.add_trace(go.Bar(x=ind['date'],y=ind['sum'],name='Actual G'))\n fig.update_layout(\n autosize=True,\n title = \"Cases in India\",\n margin = dict(l=40, r=40, t=40, b=40 ),\n width=500,\n height=400,\n yaxis = dict(\n #range = [0,100] ,\n 
#rangemode=\"tozero\",\n autorange=True,\n title_text='cases',\n titlefont=dict(size=10),\n ),\n xaxis=dict(\n title_text = \"date\",\n autorange=True,\n range=date_range,\n rangeslider=dict(\n autorange=True,\n range=date_range,\n \n \n \n ),\n type=\"date\",\n ),\n )\n fig.update_layout(showlegend=False)\n fig.update_yaxes(title=None)\n fig.update_xaxes(title=None)\n return fig\n\ndef plot_total_deaths(ca):\n if ca == False:\n st = deaths.set_index('state')\n col1 = st['2020-01-30']\n st = st.diff(axis = 1)\n st['2020-01-30'] = col1\n st = st.reset_index()\n else:\n st = deaths\n ind = st.sum(axis =0)[1:]\n ind = ind.to_frame()\n ind = ind.reset_index()\n ind.columns = ['date','sum']\n ind['date'] = pd.to_datetime(ind['date'])\n #ind = ind[ind['date'] > '2021-01-31']\n #tc = india_deaths[india_deaths['date'] > '2021-01-31']\n fig = go.Figure()\n #fig.add_trace(go.Scatter(x=ind['date'],y=ind['sum'],mode= 'markers'))\n fig.add_trace(go.Bar(x=ind['date'],y=ind['sum'],name='Actual D'))\n #fig.add_trace(go.Scatter(x=cum_pro['date'],y=cum_pro['deaths'],name='D'))\n fig.update_layout(\n autosize=True,\n title = \"Deaths in India\",\n margin = dict(l=40, r=40, t=40, b=40 ),\n width=500,\n height=400,\n\n #style = {'color':'green'},\n yaxis = dict(\n #range = [0,100] ,\n #rangemode=\"tozero\",\n autorange=True,\n title_text='deaths',\n titlefont=dict(size=10),\n ),\n xaxis=dict(\n title_text = \"date\",\n autorange=True,\n range=date_range,\n rangeslider=dict(\n autorange=True,\n range=date_range,\n \n \n \n ),\n type=\"date\",\n ),\n )\n fig.update_layout(showlegend=False)\n fig.update_yaxes(title=None)\n fig.update_xaxes(title=None)\n #fig.update_yaxes(visible=True, showticklabels=True, title=False)\n #fig.update_xaxes(visible=False, showticklabels=True)\n return fig\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n\n\n\n\nbody = dbc.Container([ \n \n dbc.Row([html.P(\"This data on confirmed cases and deaths has been updated on 9th June, 2021\",style= {\"color\":\"#151516\",'font-size':'20px'}),]),\n dbc.Row([ \n html.Label(['Projections based on our model can be found on this link ---> ', \n html.A('here', href='https://sars-covid-tracker-india.herokuapp.com/Projections',style = {\"color\":\"#E60B1F\",'font-size':'20px'})],style={\"color\":\"#151516\",'font-size':'20px'})\n ]),\ndbc.Row(\n [html.Br()]),\n dbc.Row([\n dbc.Col([html.H3(id = \"tsc\", style = {'display': 'inline-block'}),\n html.Br(),\n html.P(\"Cummulative\",style = {'display': 'inline-block'}),\n daq.BooleanSwitch(\n id='cum-tc',\n on=False,\n style = {'display': 'inline-block','size':'20%'}\n ),\n dcc.Graph(id=\"fig3\",figure = plot_total_cases('Daily new cases'))]),\n \n \n dbc.Col([html.H3(id = \"tsd\", style = {'display': 'inline-block'}),\n html.Br(),\n html.P(\"Cummulative\",style = {'display': 'inline-block'}),\n daq.BooleanSwitch(\n id='cum-td',\n on=False,\n style = {'display': 'inline-block','size':'20%'}\n ),\n dcc.Graph(id=\"fig4\",figure = plot_total_deaths('Daily new cases'))])\n ,],align='center',justify = \"center\"),\n \ndbc.Row(\n [html.Br()]),\n\ndbc.Row(\n [\n dcc.Dropdown(\n id='st',\n options=[\n {'label':'Andaman and Nicobar','value':'an'},\n {'label': 'Andhra Pradesh', 'value': 'ap'},\n {'label':'Arunachal Pradesh','value':'ar'},\n {'label':'Assam','value':'as'},\n {'label':'Bihar','value':'br'},\n {'label':'Chandigarh','value':'ch'},\n {'label':'Chattisgarh','value':'ct'},\n {'label':'Daman and Diu','value':'dn_dd'},\n {'label':'Delhi','value':'dl'},\n 
{'label':'Goa','value':'ga'},\n {'label':'Gujarat','value':'gj'},\n {'label':'Haryana','value':'hr'},\n {'label':'Himachal Pradesh','value':'hp'},\n {'label':'Jammu and Kashmir','value':'jk'},\n {'label':'Jharkhand','value':'jh'},\n {'label':'Karnataka','value':'ka'},\n {'label': 'Kerala', 'value': 'kl'},\n {'label':'Ladakh','value':'ld'},\n {'label':'Lakshdweep','value':'la'},\n {'label': 'Madhya Pradesh', 'value': 'mp'},\n {'label':'Maharastra','value':'mh'},\n {'label':'Manipur','value':'mn'},\n {'label':'Meghalaya','value':'ml'},\n {'label':'Mizoram','value':'mz'},\n {'label':'Nagaland','value':'nl'},\n {'label':'Odisha','value':'or'},\n {'label':'Puducherry','value':'py'},\n {'label':'Punjab','value':'pb'},\n {'label':'Rajesthan','value':'rj'},\n {'label':'Sikkim','value':'sk'},\n {'label':'Tamil Nadu','value':'tn'},\n {'label':'Telangana','value':'tg'},\n {'label':'Tripura','value':'tr'},\n {'label':'Uttarakhand','value':'ut'},\n {'label':'Uttar Pradesh','value':'up'},\n {'label':'West Bengal','value':'wb'},\n \n ],\n value='dl',style = {'color':'black','width':'50%','display': 'inline-block','margin-left':'0.8%'}\n ),\n ]\n ),\n dbc.Row(\n [html.Br()]), \n dbc.Row([\n dbc.Col([html.H3(id = \"tc\", style = {'display': 'inline-block'}),\n html.Br(),\n html.P(\"Cummulative\",style = {'display': 'inline-block'}),\n daq.BooleanSwitch(\n id='cum-c',\n on=False,\n style = {'display': 'inline-block','size':'20%'}\n ),\n html.Br(),\n html.P(id = \"title1\", style = {'color':'green','display': 'inline-block'}),dcc.Graph(id='fig',figure = plot_cases('dl',True))] ),\n dbc.Col([\n html.H3(id = \"td\", style = {'display': 'inline-block'}),\n html.Br(),\n html.P(\"Cummulative\",style = {'display': 'inline-block'}),\n daq.BooleanSwitch(\n id='cum-d',\n on=False,\n style = {'display': 'inline-block','size':'20%'}\n ),\n html.Br(),\n html.P(id = \"title2\", style = {'color':'red','display': 'inline-block'}),\n dcc.Graph(id='fig2',figure = plot_deaths('dl',True))\n \n ])\n ,]),\n\ndbc.Row([\n dbc.Col(html.P(\"Data used in this site is taken from the below website...\", style={\"color\":\"#33068A\"}))]),\n dbc.Row([dbc.Col(html.P(dcc.Link(\"http://projects.datameet.org/covid19/\",href = \"http://projects.datameet.org/covid19/\",style={\"color\":\"#33068A\"})))\n ]), \n\n],style={\"height\": \"100vh\"}\n\n)\n\n#app = dash.Dash(__name__, external_stylesheets=[dbc.themes.SUPERHERO])\nserver = app.server\nlayout = html.Div([body])\n\n\napp.css.append_css({\n 'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'\n})\n\n@app.callback(\n Output('fig', 'figure'),\n Input('st', 'value'),\n Input('cum-c','on'))\ndef update_figure(st,ca):\n fig1 = plot_cases(st,ca)\n fig1.update_layout(transition_duration=500)\n return fig1\n\n@app.callback(\n Output('fig2', 'figure'),\n Input('st', 'value'),\n Input('cum-d','on'))\ndef update_figure2(st,ca):\n fig2 = plot_deaths(st,ca)\n fig2.update_layout(transition_duration=500)\n return fig2\n\n@app.callback(\n Output('fig3', 'figure'),\n Input('cum-tc','on'))\ndef update_figure3(ca):\n fig3 = plot_total_cases(ca)\n fig3.update_layout(transition_duration=500)\n return fig3\n\n@app.callback(\n Output('fig4', 'figure'),\n Input('cum-td','on'))\ndef update_figure4(ca):\n fig4 = plot_total_deaths(ca)\n fig4.update_layout(transition_duration=500)\n return fig4\n@app.callback(\n Output('tc','children'),\n Input('st','value')\n )\ndef update_output_div(st):\n return u'Total Cases in {}: {:,}'.format(state_dic[st],total_state_cases.loc[st].values[0]) \n\n@app.callback(\n 
Output('td','children'),\n Input('st','value')\n )\ndef update_output_div2(st):\n return u'Total Deaths in {}: {:,}'.format(state_dic[st],total_state_deaths.loc[st].values[0]) \n\n@app.callback(\n Output('tsc','children'),\n Input('st','value')\n )\ndef update_output_div3(st):\n return u'Total Cases in India: {:,}'.format(total_cases.values[0])\n\n@app.callback(\n Output('tsd','children'),\n Input('st','value')\n )\ndef update_output_div4(st):\n return u'Total Deaths in India: {:,}'.format(total_deaths.values[0])\n\n@app.callback(\n Output('title1','children'),\n Input('st','value')\n )\ndef update_output_div5(st):\n return u'Cases in {}'.format(state_dic[st]) \n\n@app.callback(\n Output('title2','children'),\n Input('st','value')\n )\ndef update_output_div6(st):\n return u'Deaths in {}'.format(state_dic[st]) \n\n#app.config.suppress_callback_exceptions = True\n\n'''if __name__ == '__main__':\n app.run_server(debug=True)'''","repo_name":"tnarravala/sars-covid19-project","sub_path":"apps/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":17137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"226075025","text":"\n# author lamyoung\n\nCONST_START_TIME=1;\nCONST_END_TIME=3;\nCONST_FILE_NAME=\"jump.mp4\";\nCONST_FILE_OUTPUT_vedio=\"jump_cmp_vedio.mp4\";\nCONST_FILE_OUTPUT_ffmpeg=\"jump_cmp_ffmpeg.gif\";\nCONST_FILE_OUTPUT_imageio=\"jump_cmp_imageio.gif\";\nCONST_FILE_LOGO=\"logo.png\";\nCONST_FPS_PERCENT=0.5;\nCONST_COLORS=128;\n\nfrom moviepy.editor import *\n\nvedioClip = VideoFileClip(CONST_FILE_NAME,audio=False)\n\nduration=CONST_END_TIME-CONST_START_TIME;\n \ndef logo_pos(t):\n\tif(t 0 and zero_proof[-2] > 0):\n pass\n else:\n counter_change += 1\n if counter_change == 0:\n x = x * 3\n x_neg = x_neg * 3\n zero_proof.clear()\n else:\n x = x * 2\n x_neg = x_neg * 2\n break\n iter_count += 1\n if iter_count == 6:\n messagebox.showerror(title=\"Error de Convergencia\", message=\"La ecuacion insertada no presenta \"\n \"raices reales.\\n\")\n return\n\n graph_x.set_xlim([x_neg, x])\n for value in range(x_neg, x, step):\n x_value.append(value)\n y_value.append(f(value))\n if math.isnan(y_value[-1]):\n x_value.pop()\n y_value.pop()\n\n # To get a smoothed curve for the graph\n x_lin = array(x_value)\n y_lin = array(y_value)\n\n x_smooth = linspace(min(x_lin), max(x_lin), 300) # Create 300? 
points between min and max (another values?)\n spline = make_interp_spline(x_lin, y_lin, k=3) # Make a splin, k is for grade of smoothness\n y_smooth = spline(x_smooth) # Need to verify, maybe smooth the y values for x smoothed values\n ################################################################\n\n # graph_x.plot(x_value, y_value, color=\"red\")\n graph_x.plot(x_smooth, y_smooth, color=\"red\")\n graph_x.axhline(0, color='black')\n graph_x.axvline(0, color='black')\n graph_x.set_xlabel('x', fontsize=16)\n graph_x.set_ylabel('F(x)', fontstyle=\"oblique\", fontsize=16)\n graph_x.grid()\n\n canvas.draw()\n\n except ZeroDivisionError:\n messagebox.showwarning(title=\"Advertencia: Fallo en Expresiones\",\n message=\"Por favor verifique lo siguiente:\\n\\n\"\n \"# Utilizar como unica variable la letra x (minuscula).\\n\"\n \"# Hay funciones o constantes mal indicadas.\")\n except ValueError:\n messagebox.showwarning(title=\"Advertencia: Error de Sintaxis\",\n message=\"La ecuacion esta mal representada, por favor verifique lo siguiente:\\n\\n\"\n \"# Uso incorrecto de los parentesis.\\n\"\n \"# Uso incorrecto de signos matematicos.\")\n return\n\n\ndef activate_Bisection(): # Call bisection method from M_Biseccion module\n Bisection_Results(root, to_centerx, to_centery, entry_equation, entry_tol)\n return\n\n\ndef activate_FalsePos(): # Call false position method from M_FalsaPos_M module\n FalsePos_Results(root, to_centerx, to_centery, entry_equation, entry_tol)\n return\n\n\ndef activate_FixPoint(): # Call false position method from M_PuntoFijo module\n FixedPoint_Results(root, to_centerx, to_centery, entry_equation, entry_tol)\n return\n\n\ndef help_text(): # Just a guide to input an equation in a correct way\n messagebox.showinfo(title=\"¿Como introducir una ecuacion?\",\n message=\"# La ecuacion a ingresar debe contener maximo una variable identificada como 'x'\"\n \" por lo que su expresion matematica debe estar en funcion de x.\\n\\n\"\n \"# Los decimales se representan mediante el punto (.).\\n\"\n \"# Las operaciones de multiplicacion debe representarse correctamente con el simbolo \"\n \"(*).\\n\\n \"\n \" Ejemplo: 2x (forma incorrecta) => 2*x (forma correcta).\\n\\n\"\n \"# La representacion de potencias viene dado por ** (doble signo de multiplicacion).\\n\"\n \" Ejemplo: x**2 => x^2 o como se leeria 'x al cuadrado'.\\n\\n\"\n \"# Las funciones trigometricas se escriben tal como se ha definido su abreviacion y su\"\n \"valor objetivo debe ir siempre entre parentesis.\\n\"\n \"Ejemplo:\\n\"\n \" sin(x) => Funcion Seno (se aplico su abreviacion como sin).\\n\"\n \" cos(x) => Funcion Coseno.\\n\"\n \" tan(x) => Funcion Tangente.\\n\"\n \" csc(x) => Funcion Cosecante.\\n\"\n \" sec(x) => Funcion Secante.\\n\"\n \" cot(x) => Funcion Cotangente.\\n\"\n \" Asi mismo es posible usar las funciones inversas e hiperbolicas:\\n\"\n \" asin, sinh, asinh, acos, cosh, acosh, atan, tanh, atanh,acot, coth,\\n\"\n \" acoth, asec, sech, asech, acsc, csch, acsch.\\n\\n\"\n \"# Las constantes pi y euler se expresan como (pi) y (E), respectivamente.\\n\\n\"\n \"# La raiz cuadrada se puede expresar mediante sqrt() o tambien es posible expresar \"\n \"cualquier tipo de radicacion en su forma de potencia.\\n \"\n \" Ejemplo:\\n\"\n \" sqrt(x) = x**(1/2) | sqrt(4) = 4**(1/2).\\n\"\n \" x**(1/3) => Raiz Cubica\")\n return\n\n\ndef change_limx():\n def assert_values():\n val = [[\"Limite Inferior\", lower_entry.get()], [\"Limite Superior\", higher_entry.get()],\n [\"Paso\", step_entry.get()]]\n i = 0\n\n # 
To avoid some error in input cells like exchange of limits, Step less than 0 and not integer\n try:\n\n for i in range(len(val)):\n val[i][1] = int(val[i][1])\n if val[0][1] >= val[1][1]:\n messagebox.showwarning(title=\"Limites Erroneos\",\n message=\"El Limite Inferior es mayor o igual al Limite Superior.\\n\"\n \"Cerciorese que los valores correspondan a lo indicado.\",\n parent=limit_window)\n return\n elif val[2][1] <= 0:\n messagebox.showwarning(title=\"Paso no Valido\",\n message=\"El valor de Paso no puede ser menor o igual a 0.\", parent=limit_window)\n return\n elif int(val[2][1]) > numpy.abs((int(val[1][1])) - (int(val[0][1]))) // 4 + 1:\n messagebox.showwarning(title=\"Fallo en Metodo Grafico\",\n message=f\"El valor de Paso ingresado no genera el numero de puntos minimo para \"\n f\"generar un grafico suavizado.\\n \"\n f\"Para los limites seleccionados, es posible utilizar un Paso \"\n f\"maximo de {numpy.abs((int(val[1][1])) - (int(val[0][1]))) // 4 + 1}.\",\n parent=limit_window)\n return\n\n graph_function(val[0][1], val[1][1] + val[2][1], val[2][1], True)\n\n except ValueError:\n if val[i][1] == \"\":\n messagebox.showwarning(title=\"Valor Vacio\", message=f\"No se digito valor alguno para {val[i][0]}.\",\n parent=limit_window)\n\n elif \" \" in val[i][1]:\n messagebox.showwarning(title=\"Error en Numero\",\n message=f\"El valor para {val[i][0]} contiene al menos un espacio \"\n f\"entre digitos.\",\n parent=limit_window)\n\n elif val[i][1].isupper() or val[i][1].islower() or val[i][1].count(\".\") >= 2 or \\\n val[i][1].count(\"+\", 1) >= 1 or val[i][1].count(\"-\", 1) >= 1:\n messagebox.showwarning(title=\"Valor Invalido\",\n message=f\"El valor para {val[i][0]} no es un numero real.\\n\"\n f\"Modifique el valor a un Numero Entero.\\n\",\n parent=limit_window)\n\n elif len(val[i][1].split(\".\", 1)) == 2 and (\n val[i][1].split(\".\", 1)[0].startswith(\n (\"+\", \"-\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"))) and \\\n val[i][1].split(\".\", 1)[1].isdigit():\n messagebox.showwarning(title=\"Numero no Valido\",\n message=f\"Se ha ingresado '{val[i][1]}' para {val[i][0]}.\\nRecuerde que debe \"\n f\"ingresar solo Numeros Enteros.\",\n parent=limit_window)\n\n elif not val[i][1].isalnum() and (val[i][1].count(\".\") <= 1) and (\n val[i][1].startswith((\"+\", \"-\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"))):\n messagebox.showwarning(title=\"Error Aritmetico\",\n message=f\"El valor para {val[i][0]} contiene uno o varios caracteres \"\n f\"desconcidos.\\n \"\n f\"Modifique el valor a un Numero Entero.\\n\",\n parent=limit_window)\n\n else:\n messagebox.showwarning(title=\"Error de Validacion\",\n message=f\"El valor de {val[i][0]} contiene datos que imposibilitan su \"\n f\"procesamiento.\\n \"\n \"Por favor verifique que los datos ingresados sea correcto.\",\n parent=limit_window)\n ################################################################\n return\n\n limit_window = Toplevel(root)\n limit_window.title(\"Dominio de la Funcion\")\n limit_window.configure(background=\"beige\")\n\n window_width = 500\n window_height = 150\n centerx = root.winfo_screenwidth() / 2 - window_width / 2\n centery = root.winfo_screenheight() / 2 - window_height / 2\n limit_window.geometry(f\"{window_width}x{window_height}+{int(centerx) - 280}+{int(centery) - 100}\")\n\n label_style = ttk.Style()\n label_style.configure(\"BW.TLabel\", background=\"beige\")\n label_style.map(\"BW.TLabel\", background=[(\"active\", \"beige\")])\n\n 
info_text = ttk.Label(limit_window,\n text=\"Establezca el dominio de la funcion digitando sus limites a continuacion :\\n\"\n \" (Todos los campos deben llenarse con numeros enteros)\",\n style=\"BW.TLabel\")\n info_text.grid(row=0, column=0, columnspan=5, padx=10, pady=5)\n\n lower_x = ttk.Label(limit_window, text=\"Limite Inferior =\", style=\"BW.TLabel\")\n lower_x.grid(row=1, column=0, pady=5)\n\n lower_entry = ttk.Entry(limit_window)\n lower_entry.grid(row=1, column=1, pady=5)\n\n higher_x = ttk.Label(limit_window, text=\"Limite Superior =\", style=\"BW.TLabel\")\n higher_x.grid(row=2, column=0, pady=5)\n\n higher_entry = ttk.Entry(limit_window)\n higher_entry.grid(row=2, column=1, pady=5)\n\n step_label = ttk.Label(limit_window, text=\"Paso =\", style=\"BW.TLabel\")\n step_label.grid(row=3, column=0)\n\n step_entry = ttk.Entry(limit_window)\n step_entry.grid(row=3, column=1)\n\n plot_button2 = ttk.Button(limit_window, text=\"Plot\", command=assert_values)\n plot_button2.grid(row=2, column=2)\n\n return\n\n\n# Initiation of tkinter window\nroot = themed_tk.ThemedTk()\nroot.get_themes()\nroot.set_theme(\"radiance\")\nroot.title(\"Raices de Ecuaciones\")\nroot.configure(bg=\"beige\")\n################################################################\n\n# A way to center tkinter window on screen\nroot_width = 850\nroot_height = 600\nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\nto_centerx = screen_width / 2 - root_width / 2\nto_centery = screen_height / 2 - root_height / 2\n\nroot.geometry(f\"{root_width}x{root_height}+{int(to_centerx)}+{int(to_centery) - 20}\")\n################################################################\n\n# root.minsize(800, 600)\n\nlabel_equation = ttk.Label(root, text=\" Escriba la ecuacion : \", font=(\"Helvetica\", 18, \"bold\"), borderwidth=5,\n relief=\"sunken\")\nlabel_equation.grid(row=0, column=0, pady=10)\n\nentry_equation = ttk.Entry(root, width=25, font=(\"Helvetica\", 14))\nentry_equation.grid(row=1, column=0, padx=10, pady=5)\n\n# To put background color of all ttk object to the same color as background color of window (radiance theme cause color\n# alteration)\nstyle_backgttk = ttk.Style()\nstyle_backgttk.configure(\"TButton\", background=\"beige\")\nstyle_backgttk.map(\"TButton\", background=[(\"active\", \"beige\")])\n################################################################\n\ngraph_Button = ttk.Button(root, text=\"Plot\", width=5, command=graph_function)\ngraph_Button.grid(row=2, column=0, padx=10)\n\nlabel_fx = ttk.Label(root, text=\"F(x) =\")\nlabel_fx.config(background=\"beige\", font=(\"font name\", 12, \"bold\"))\nlabel_fx.place(x=10, y=105)\n\n# The way to adjust an image (png,etc) to a specific size (this case on button frame)\nhelp_pic = Image.open(help_icon_dir)\nresized_help = help_pic.resize((30, 30), Image.ANTIALIAS)\nhelp_icon = ImageTk.PhotoImage(resized_help)\n\ngraph_pic = Image.open(graph_icon_dir)\nresized_graph = graph_pic.resize((65, 65), Image.ANTIALIAS)\ngraph_icon = ImageTk.PhotoImage(resized_graph)\n################################################################\n\nhelp_button = Button(root, image=help_icon, highlightthickness=5, bd=0, background=\"beige\", command=help_text,\n cursor=\"hand2\")\nhelp_button.place(x=27, y=130)\nfloat_help = Hovertip(help_button, \"Ayuda para ingresar \\nuna ecuacion\", hover_delay=100)\n\ngraphIcon_button = Button(root, image=graph_icon, highlightthickness=0, bd=1, background=\"beige\", command=change_limx,\n 
cursor=\"hand2\")\ngraphIcon_button.place(x=260, y=140)\nfloat_graph = Hovertip(graphIcon_button, \"Seleccionar limites\", hover_delay=100)\n\nlabel_note_eq = ttk.Label(root, text=\" Nota : La variable a utilizar en la ecuacion debe ser ' x '.\", borderwidth=10,\n relief=\"solid\", foreground=\"red\")\nlabel_note_eq.grid(row=3, column=0, padx=10, pady=5)\n\n# To create the two orange(coral) bars to enclose all method button\nsquare_line1 = Canvas(root, height=2, width=300, background=\"coral\", bd=0, highlightthickness=0, borderwidth=8,\n relief=\"raised\")\nsquare_line1.place(x=50, y=270) # Top bar\nsquare_line2 = Canvas(root, height=2, width=300, background=\"coral\", bd=0, highlightthickness=0, borderwidth=8,\n relief=\"raised\")\nsquare_line2.place(x=50, y=550) # Bottom bar\n################################################################\n\nlabel_MSelection = ttk.Label(root, text=\"Seleccione el metodo para encontrar la raiz:\", background=\"beige\")\nlabel_MSelection.grid(row=4, column=0)\n\nlabel_tol = ttk.Label(root, text=\"(Tolerancia(%)= )\", background=\"beige\")\nlabel_tol.place(x=115, y=312)\nentry_tol = ttk.Entry(root, width=6)\nentry_tol.place(x=225, y=312)\n\n# From here all the method button are placed\nm_Bisection = ttk.Button(root, text=\"Metodo de Biseccion\", command=activate_Bisection, width=19)\nm_Bisection.grid(row=5, column=0)\n\nm_FalsaPos = ttk.Button(root, text=\"M. Falsa Posicion Mod.\", command=activate_FalsePos, width=19)\nm_FalsaPos.place(x=108, y=375)\n\nm_FixPoint = ttk.Button(root, text=\"Metodo Punto Fijo\", command=activate_FixPoint, width=19)\nm_FixPoint.place(x=108, y=410)\n################################################################\n\n# Code for add a graph draw on window mode --Figure,FigureCanvasTkAgg,NavigationToolbar2Tk are the most important--\nx_value = []\ny_value = []\n# Figure is responsible of the size white frame where graph is placed (figsize is the size frame, dpi relate to zoom?¿)\nfig = Figure(figsize=(6, 6), dpi=70, edgecolor=\"tomato\", facecolor=\"azure\", linewidth=4,\n subplotpars=SubplotParams(0.15))\ngraph_x = fig.add_subplot(1, 1, 1) # Add the graph in the frame\ngraph_x.axhline(0, color='black')\ngraph_x.axvline(0, color='black')\ngraph_x.set_xlabel('x', fontsize=16)\ngraph_x.set_ylabel('F(x)', fontstyle=\"oblique\", fontsize=16)\ngraph_x.grid()\n\n# I'm not sure about this but it allows to draw the graph in the white frame\ncanvas = FigureCanvasTkAgg(fig, master=root)\ncanvas.get_tk_widget().configure(highlightcolor=\"red\")\ncanvas.draw()\ncanvas.get_tk_widget().grid()\n\n# This allows to place the toolbar buttons to handle the graph\ntoolbar_frame = tkinter.Frame(root)\ntoolbar = NavigationToolbar2Tk(canvas, toolbar_frame)\ntoolbar.update()\ntoolbar.configure(background=\"beige\")\ntoolbar._message_label.config(background=\"beige\")\n# toolbar._message_label.pack_configure(side=tkinter.LEFT)\n# for button in toolbar.winfo_children():\n# button.configure(background=\"dark gray\")\ncanvas.get_tk_widget().grid(row=0, column=2, rowspan=8, columnspan=8, padx=10, pady=10)\ntoolbar_frame.grid(row=8, column=2, rowspan=8, columnspan=8)\n################################################################\n\nroot.mainloop()\n","repo_name":"HansJP96/Raices-Ecuaciones","sub_path":"GUI_User.py","file_name":"GUI_User.py","file_ext":"py","file_size_in_byte":19523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5321440748","text":"from django.contrib.auth.models import 
User\nfrom .models import Board,Topic,Post\nfrom django.views import View\n# Create your views here.\nfrom django.shortcuts import render, get_object_or_404,redirect,HttpResponseRedirect,reverse\nfrom django.views.decorators.http import require_http_methods\nfrom rest_framework.decorators import api_view\n\nfrom rest_framework.views import APIView\nfrom .serializers import BoardSerializ\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .forms import BoardForm\nfrom django.contrib.auth import authenticate,login,logout\nfrom rest_framework.permissions import IsAuthenticated\n\n\ndef user_login(request):\n context={}\n if request.method==\"POST\":\n username=request.POST['username']\n password=request.POST['password']\n user=authenticate(request,username=username,password=password)\n if user:\n login(request,user)\n return HttpResponseRedirect(reverse('home'))\n\n else:\n context[\"error\"]=\"provide valid credentials!\"\n return render(request, 'login.html', context)\n\n else:\n return render(request,'login.html',context)\n\n\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('login'))\n\n\n\nclass HelloView(APIView):\n permission_classes = (IsAuthenticated,) # <-- And here\n def get(self, request):\n content = {'message': 'Hello, World!'}\n return Response(content)\n@require_http_methods(['GET'])\ndef home(request):\n boards=Board.objects.all()\n return render(request,'home.html',{'boards':boards})\n\n\n\n\ndef board_topics(request,pk):\n board = get_object_or_404(Board, pk=pk)\n return render(request, 'topics.html', {'board': board})\n\n\n\n\n\n\ndef new_topic(request,pk):\n board=get_object_or_404(Board,pk=pk)\n user=User.objects.first()\n if request.method=='POST':\n subject=request.POST['subject']\n message=request.POST['message']\n\n topic=Topic.objects.create(\n subject=subject,\n board=board,\n starter=user\n )\n post=Post.objects.create(\n message=message,\n topic=topic,\n created_by=user\n )\n\n return render(request,'new_topic.html',{'board':board})\n\n\ndef update(request):\n board=Board.objects.all().update( description='This ....')\n return redirect('home')\n\n\n\n@api_view(['GET'])\ndef boardList(request):\n board=Board.objects.all()\n serializer=BoardSerializ(board,many=True)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef boardCreate(request):\n serializer=BoardSerializ(data=request.data)\n serializer.validate_name(request.data['name'])\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n\n return Response (serializer.data)\n\n\n\n@api_view(['PUT'])\ndef boardUpdate(request,pk):\n board=Board.objects.get(id=pk)\n serializer=BoardSerializ(instance=board,data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response (serializer.data)\n\n@api_view(['delete'])\ndef boardDelete(request,pk):\n board=Board.objects.filter(id=pk).first()\n board.delete()\n return Response ('Deleted!')\n\n\n\n\n\n\n\n\n\n\n\n\ndef creat_board(request):\n form = BoardForm()\n if request.method=='POST':\n form = BoardForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n context={'form':form}\n return render(request,'new_board.html',context)\n\ndef UpdateBoard(request,pk):\n board=Board.objects.get(id=pk)\n form = BoardForm(instance=board)\n if request.method=='POST':\n form = BoardForm(request.POST,instance=board)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n context={'form':form}\n\n return render(request, 'new_board.html', 
context)\n\n\ndef DeleteBoard(request,pk):\n board = Board.objects.get(id=pk)\n if request.method=='POST':\n board.delete()\n return redirect('/')\n\n context={'item':board}\n return render(request,'delete.html',context)","repo_name":"yaqoot9/djano_project","sub_path":"boards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3801329818","text":"import logging\nfrom sys import getsizeof\nfrom concurrent.futures.thread import ThreadPoolExecutor\n\nimport boto3\nfrom boto3.dynamodb.conditions import Key\nfrom boto3.exceptions import ResourceNotExistsError\nfrom typing import List, Optional, Any, Dict, Tuple\n\nfrom botocore.exceptions import ClientError\n\nfrom StructNoSQL.tables_clients.backend.dynamodb_utils import DynamoDBUtils\nfrom StructNoSQL.tables_clients.backend.models import GlobalSecondaryIndex, PrimaryIndex, CreateTableQueryKwargs, \\\n GetItemResponse, Response, EXPRESSION_MAX_BYTES_SIZE\nfrom StructNoSQL.models import DatabasePathElement, FieldPathSetter, MapItemInitializer, \\\n MapItemInitializerContainer, QueryMetadata\nfrom StructNoSQL.practical_logger import message_with_vars\nfrom StructNoSQL.utils.data_processing import navigate_into_data_with_field_path_elements\n\n\nclass DynamoDbCoreAdapter:\n _EXISTING_DATABASE_CLIENTS = {}\n PAY_PER_REQUEST = \"PAY_PER_REQUEST\"\n PROVISIONED = \"PROVISIONED\"\n\n def __init__(\n self, table_name: str, region_name: str, primary_index: PrimaryIndex,\n create_table: bool = True, billing_mode: str = PAY_PER_REQUEST,\n global_secondary_indexes: List[GlobalSecondaryIndex] = None,\n boto_session: Optional[boto3.Session] = None\n ):\n self.table_name = table_name\n self.primary_index = primary_index\n self.create_table = create_table\n self.billing_mode = billing_mode\n self.global_secondary_indexes = global_secondary_indexes\n self._global_secondary_indexes_hash_keys = []\n if self.global_secondary_indexes is not None:\n for secondary_index in self.global_secondary_indexes:\n self._global_secondary_indexes_hash_keys.append(secondary_index.hash_key_name)\n\n # We store the database clients in a static variable, so that if we init the class with\n # the same region_name, we do not need to wait for a new initialization of the client.\n if region_name in self._EXISTING_DATABASE_CLIENTS.keys():\n self.dynamodb = self._EXISTING_DATABASE_CLIENTS[region_name]\n # print(f\"Re-using the already created dynamodb client for region {region_name}\")\n elif \"default\" in self._EXISTING_DATABASE_CLIENTS.keys():\n self.dynamodb = self._EXISTING_DATABASE_CLIENTS[\"default\"]\n # print(f\"Re-using the already created dynamodb client for the default region\")\n else:\n # print(f\"Initializing the {self}. For local development, make sure that you are connected to internet.\"\n # f\"\\nOtherwise the DynamoDB client will get stuck at initializing the {self}\")\n\n used_boto_session: boto3.Session = boto3.Session() if boto_session is None else boto_session\n dynamodb_regions = used_boto_session.get_available_regions('dynamodb')\n if region_name in dynamodb_regions:\n self.dynamodb = used_boto_session.resource('dynamodb', region_name=region_name)\n self._EXISTING_DATABASE_CLIENTS[region_name] = self.dynamodb\n else:\n self.dynamodb = used_boto_session.resource('dynamodb')\n self._EXISTING_DATABASE_CLIENTS['default'] = self.dynamodb\n logging.debug(\n f\"Warning ! 
The specified dynamodb region_name {region_name} is not a valid region_name.\"\n f\"The dynamodb client has been initialized without specifying the region.\")\n\n self._create_table_if_not_exists()\n\n def _create_table_if_not_exists(self) -> None:\n \"\"\"\n Creates table in Dynamodb resource if it doesn't exist and create_table is set as True.\n :raises: PersistenceException: When `create_table` fails on dynamodb resource.\n \"\"\"\n if self.create_table:\n create_table_query_kwargs = CreateTableQueryKwargs(table_name=self.table_name, billing_mode=self.billing_mode)\n create_table_query_kwargs.add_hash_key(\n key_name=self.primary_index.hash_key_name,\n key_python_variable_type=self.primary_index.hash_key_variable_python_type\n )\n if self.primary_index.sort_key_name is not None and self.primary_index.sort_key_variable_python_type is not None:\n create_table_query_kwargs.add_hash_key(\n key_name=self.primary_index.sort_key_name,\n key_python_variable_type=self.primary_index.sort_key_variable_python_type\n )\n\n if self.global_secondary_indexes is not None:\n create_table_query_kwargs.add_all_global_secondary_indexes(global_secondary_indexes=self.global_secondary_indexes)\n try:\n self.dynamodb.create_table(**create_table_query_kwargs.data)\n except Exception as e:\n if e.__class__.__name__ != \"ResourceInUseException\":\n raise Exception(f\"Request to create the table if not exist failed: Exception of type {type(e).__name__} occurred {str(e)}\")\n\n def put_record(self, item_dict: dict) -> bool:\n serialized_item_dict = DynamoDBUtils.python_to_dynamodb(python_object=item_dict)\n try:\n table = self.dynamodb.Table(self.table_name)\n response = table.put_item(Item=serialized_item_dict)\n return True if response is not None else False\n except ResourceNotExistsError:\n raise Exception(f\"DynamoDb table {self.table_name} doesn't exist. Failed to save attributes to DynamoDb table.\")\n except Exception as e:\n print(f\"Failed to save attributes to DynamoDb table. Exception of type {type(e).__name__} occurred: {str(e)}\")\n return False\n\n def delete_record(self, indexes_keys_selectors: dict) -> bool:\n try:\n table = self.dynamodb.Table(self.table_name)\n response_data: Optional[dict] = table.delete_item(Key=indexes_keys_selectors)\n return True if response_data is not None else False\n except ResourceNotExistsError:\n raise Exception(f\"DynamoDb table {self.table_name} doesn't exist. Failed to delete_record in DynamoDb table.\")\n except Exception as e:\n print(e)\n return False\n\n def remove_record(self, indexes_keys_selectors: dict) -> Optional[dict]:\n try:\n table = self.dynamodb.Table(self.table_name)\n response_data: dict = table.delete_item(Key=indexes_keys_selectors, ReturnValues='ALL_OLD')\n if response_data is None:\n return None\n response_attributes: Optional[dict] = response_data.get('Attributes', None)\n if response_attributes is None:\n return None\n return DynamoDBUtils.dynamodb_to_python(response_attributes)\n except ResourceNotExistsError:\n raise Exception(f\"DynamoDb table {self.table_name} doesn't exist. 
Failed to remove_record in DynamoDb table.\")\n except Exception as e:\n print(e)\n return None\n\n def get_item_by_primary_key(self, index_name: str, key_value: any, fields_path_elements: Optional[List[List[DatabasePathElement]]]) -> Optional[GetItemResponse]:\n if fields_path_elements is not None:\n kwargs = self._fields_paths_elements_to_expressions(fields_path_elements=fields_path_elements)\n else:\n kwargs = {}\n kwargs['Key'] = {index_name: key_value}\n kwargs['ConsistentRead'] = True\n\n try:\n table = self.dynamodb.Table(self.table_name)\n response = table.get_item(**kwargs)\n if 'Item' in response:\n processed_item = DynamoDBUtils.dynamodb_to_python(dynamodb_object=response['Item'])\n return GetItemResponse(item=processed_item, success=True)\n else:\n return GetItemResponse(item=None, success=False)\n except ResourceNotExistsError:\n raise Exception(f\"DynamoDb table {self.table_name} do not exist or in the process of being created. Failed to get attributes from DynamoDb table.\")\n except Exception as e:\n print(f\"Failed to retrieve attributes from DynamoDb table. Exception of type {type(e).__name__} occurred: {str(e)}\")\n return None\n\n def check_if_item_exist_by_primary_key(self, index_name: str, key_value: str, fields_path_elements: Optional[List[str]]) -> Optional[bool]:\n raise Exception(\"Not implemented\") # todo: implement\n\n def _execute_update_query(self, query_kwargs_dict: dict, allow_validation_exception: bool = False) -> Optional[Response]:\n try:\n table = self.dynamodb.Table(self.table_name)\n response = table.update_item(**query_kwargs_dict)\n return Response(response)\n except ResourceNotExistsError:\n raise Exception(f\"DynamoDb table {self.table_name} do not exist or in the process of being created. Failed to get attributes from DynamoDb table.\")\n except ClientError as e:\n print(f\"{e} - No element has been found for the update query : {query_kwargs_dict}\")\n if allow_validation_exception and e.response['Error']['Code'] == 'ValidationException':\n return Response({})\n return None\n except Exception as e:\n print(f\"Failed to update attributes in DynamoDb table. 
Exception of type {type(e).__name__} occurred: {str(e)}\")\n return None\n\n def add_data_elements_to_list(self, index_name: str, key_value: Any, object_path: str, element_values: List[dict]) -> Optional[Response]:\n serialized_elements_values: List[Any] = DynamoDBUtils.python_to_dynamodb(python_object=element_values)\n kwargs = {\n 'TableName': self.table_name,\n 'Key': {index_name: key_value},\n 'ReturnValues': \"UPDATED_NEW\",\n 'UpdateExpression': f\"SET {object_path} = list_append(if_not_exists({object_path}, :emptyList), :newItems)\",\n # The if_not_exists inside the list_append, will create an empty\n # list before adding the newItems, only if the field do not exist.\n 'ExpressionAttributeValues': {\n ':newItems': serialized_elements_values,\n ':emptyList': []\n }\n }\n return self._execute_update_query(query_kwargs_dict=kwargs)\n\n def remove_data_elements_from_list(self, index_name: str, key_value: Any, list_object_path: str, indexes_to_remove: list) -> Optional[Response]:\n kwargs = {\n 'TableName': self.table_name,\n 'Key': {index_name: key_value},\n 'ReturnValues': \"UPDATED_NEW\"\n }\n update_expression = \"REMOVE \"\n for i, index_in_database_list in enumerate(indexes_to_remove):\n update_expression += f\"{list_object_path}[{index_in_database_list}]\"\n if i + 1 < len(indexes_to_remove):\n update_expression += \", \"\n kwargs['UpdateExpression'] = update_expression\n\n return self._execute_update_query(query_kwargs_dict=kwargs)\n\n def remove_data_elements_from_map(\n self, index_name: str, key_value: Any,\n targets_path_elements: List[List[DatabasePathElement]],\n retrieve_removed_elements: bool = False\n ) -> Optional[Dict[str, Any]]:\n\n consumed_targets_path_elements: List[List[DatabasePathElement]] = []\n expression_attribute_names_dict = {}\n update_expression = \"REMOVE \"\n\n for i_target, target in enumerate(targets_path_elements):\n current_target_num_path_elements = len(target)\n current_setter_attribute_names = {}\n current_remover_update_expression = \"\"\n\n for i_path_element, path_element in enumerate(target):\n current_path_key = f\"#target{i_target}_pathKey{i_path_element}\"\n current_remover_update_expression += current_path_key\n current_setter_attribute_names[current_path_key] = path_element.element_key\n if i_path_element + 1 < current_target_num_path_elements:\n current_remover_update_expression += \".\"\n\n complete_update_expression_bytes_size = getsizeof(update_expression)\n current_setter_update_expression_bytes_size = getsizeof(current_remover_update_expression)\n update_expression_bytes_size_if_setter_is_added = complete_update_expression_bytes_size + current_setter_update_expression_bytes_size\n if update_expression_bytes_size_if_setter_is_added < EXPRESSION_MAX_BYTES_SIZE:\n if i_target > 0:\n update_expression += \", \"\n expression_attribute_names_dict = {**expression_attribute_names_dict, **current_setter_attribute_names}\n update_expression += current_remover_update_expression\n consumed_targets_path_elements.append(target)\n else:\n break\n\n update_query_kwargs = {\n 'TableName': self.table_name,\n 'Key': {index_name: key_value},\n 'ReturnValues': \"UPDATED_OLD\" if retrieve_removed_elements is True else \"NONE\",\n 'UpdateExpression': update_expression,\n 'ExpressionAttributeNames': expression_attribute_names_dict,\n }\n response = self._execute_update_query(query_kwargs_dict=update_query_kwargs, allow_validation_exception=True)\n if response is None:\n return None\n\n # Even if we return an empty output_response_attributes dict, we do 
not want to return None instead of a dict, because since this function only returns a dict, a return\n # value of None indicates that the operation failed. Where as, for example in delete operation, we will not request the removed attributes from the database, which will\n # give us an empty output_response_attributes dict, but the delete operation will base itself on the presence of the dict to judge if the operation failed or not.\n output_response_attributes: dict = DynamoDBUtils.dynamodb_to_python(response.attributes) if response.attributes is not None else {}\n if len(targets_path_elements) == len(consumed_targets_path_elements):\n return output_response_attributes\n else:\n for current_target_path_elements in consumed_targets_path_elements:\n targets_path_elements.remove(current_target_path_elements)\n\n request_continuation_response_attributes = self.remove_data_elements_from_map(\n index_name=index_name, key_value=key_value,\n targets_path_elements=targets_path_elements,\n retrieve_removed_elements=retrieve_removed_elements\n )\n combined_output_response_attributes = {\n **(output_response_attributes or {}),\n **(request_continuation_response_attributes or {})\n }\n return combined_output_response_attributes\n\n @staticmethod\n def _add_database_path_element_to_string_expression(\n base_string: Optional[str], database_path_element: DatabasePathElement, path_key: str\n ) -> Tuple[str, Dict[str, str]]:\n\n output_string: str = base_string or str()\n output_expression_attribute_names: Dict[str, str] = {}\n\n if isinstance(database_path_element.element_key, str):\n if len(output_string) > 0:\n output_string += \".\"\n output_string += path_key\n output_expression_attribute_names[path_key] = database_path_element.element_key\n elif isinstance(database_path_element.element_key, int):\n # If the element_key is an int, it means we try to access an index of a list or set. We can right away use the index access\n # quotation ( [$index] instead of .$attributeName ) and not need to pass the index as an attribute name. 
Note that, anyway,\n            # boto3 will only accept strings as attribute names, and trying to pass an int or float as attribute name will crash the request.\n            output_string += f\"[{database_path_element.element_key}]\"\n\n        return output_string, output_expression_attribute_names\n\n    @staticmethod\n    def _add_database_path_element_to_string_path(base_string: Optional[str], database_path_element: DatabasePathElement) -> str:\n        output_string: str = base_string or str()\n        if isinstance(database_path_element.element_key, str):\n            if len(output_string) > 0:\n                output_string += \".\"\n            output_string += database_path_element.element_key\n        elif isinstance(database_path_element.element_key, int):\n            # See the comment on the similar code block in _add_database_path_element_to_string_expression\n            output_string += f\"[{database_path_element.element_key}]\"\n        return output_string\n\n    @staticmethod\n    def _prepare_map_initialization(\n            i: int, current_path_element: DatabasePathElement, overriding_init_value: Any,\n            previous_element_initializer_container: Optional[MapItemInitializerContainer] = None\n    ) -> MapItemInitializer:\n\n        current_path_key = f\"#pathKey{i}\"\n        current_path_target = str() if previous_element_initializer_container is None else previous_element_initializer_container.item.path_target\n\n        current_path_target, new_expression_attribute_names = DynamoDbCoreAdapter._add_database_path_element_to_string_expression(\n            base_string=current_path_target, database_path_element=current_path_element, path_key=current_path_key\n        )\n        expression_attribute_names = (\n            {**previous_element_initializer_container.item.expression_attribute_names, **new_expression_attribute_names}\n            if previous_element_initializer_container is not None else new_expression_attribute_names\n        )\n\n        item_default_value = overriding_init_value or current_path_element.get_default_value()\n        return MapItemInitializer(\n            path_target=current_path_target, last_item_element_key=current_path_element.element_key,\n            item_default_value=item_default_value, expression_attribute_names=expression_attribute_names\n        )\n\n    def initialize_element_in_map_target(\n            self, index_name: str, key_value: Any, initializer: MapItemInitializer, is_last_path_element: bool = False\n    ) -> Optional[Response]:\n\n        base_query_kwargs = {\n            'TableName': self.table_name,\n            'Key': {index_name: key_value},\n            'ExpressionAttributeNames': initializer.expression_attribute_names,\n            'ExpressionAttributeValues': {\n                ':item': initializer.item_default_value\n            },\n            'ReturnValues': 'UPDATED_NEW' if is_last_path_element else 'NONE'\n            # We do not need to waste retrieving the UPDATED_NEW value when we know we\n            # will use it only for the last path element (we use it right after the loop ends)\n        }\n        current_update_expression = f\"SET {initializer.path_target} = if_not_exists({initializer.path_target}, :item)\"\n        current_set_potentially_missing_object_query_kwargs = {**base_query_kwargs, 'UpdateExpression': current_update_expression}\n        response = self._execute_update_query(query_kwargs_dict=current_set_potentially_missing_object_query_kwargs)\n        if is_last_path_element is True:\n            # If the last item's attribute value in the database (retrieved thanks to the \"UPDATED_NEW\" ReturnValues)\n            # does not equal its default_value (which is either the field type default value, or the\n            # last_item_custom_overriding_init_value parameter), we want to initialize the item again, because this means\n            # that the attribute already existed, so the SET update expression with if_not_exists did not perform, and we do\n            # not currently have the value we want in the database. We perform this operation only for the last path\n            # element in the request, to avoid overriding and scratching lists or dictionaries that are expected to exist.\n
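            # A minimal illustration of the two expressions this method alternates between\n            # (the #pathKey names below are hypothetical placeholders of the kind built by _prepare_map_initialization):\n            #   1) SET #pathKey0.#pathKey1 = if_not_exists(#pathKey0.#pathKey1, :item)   -- only fills a missing attribute\n            #   2) SET #pathKey0.#pathKey1 = :item                                       -- forces the value when 1) was a no-op\n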
            field_existing_attribute_in_database = response.attributes.get(initializer.last_item_element_key) if response is not None else None\n            if field_existing_attribute_in_database is None or field_existing_attribute_in_database != initializer.item_default_value:\n                # It is possible for the field to already exist and yet not have the value we want to set.\n                current_update_expression = f\"SET {initializer.path_target} = :item\"\n                current_override_existing_object_value_query_kwargs = {**base_query_kwargs, 'UpdateExpression': current_update_expression}\n                override_object_value_response = self._execute_update_query(query_kwargs_dict=current_override_existing_object_value_query_kwargs)\n                return override_object_value_response\n        return response\n\n    def _construct_update_data_element_to_map_query_kwargs(\n            self, index_name: str, key_value: Any, field_path_elements: List[DatabasePathElement], value: Any, return_old_value: bool = False\n    ) -> dict:\n\n        expression_attribute_names_dict: Dict[str, str] = {}\n        update_expression = \"SET \"\n\n        for i, path_element in enumerate(field_path_elements):\n            current_path_key = f\"#pathKey{i}\"\n            update_expression += current_path_key\n            expression_attribute_names_dict[current_path_key] = path_element.element_key\n            if i + 1 < len(field_path_elements):\n                update_expression += \".\"\n            else:\n                update_expression += \" = :item\"\n\n        serialized_value = DynamoDBUtils.python_to_dynamodb(python_object=value)\n        update_query_kwargs = {\n            \"TableName\": self.table_name,\n            \"Key\": {index_name: key_value},\n            \"ReturnValues\": \"NONE\" if return_old_value is not True else \"UPDATED_OLD\",\n            \"UpdateExpression\": update_expression,\n            \"ExpressionAttributeNames\": expression_attribute_names_dict,\n            \"ExpressionAttributeValues\": {\n                \":item\": serialized_value\n            }\n        }\n        return update_query_kwargs\n\n    def set_update_data_element_to_map_with_default_initialization(\n            self, index_name: str, key_value: Any, field_path_elements: List[DatabasePathElement], value: Any, return_old_value: bool = False\n    ) -> Optional[Response]:\n        update_query_kwargs = self._construct_update_data_element_to_map_query_kwargs(\n            index_name=index_name, key_value=key_value, field_path_elements=field_path_elements, value=value, return_old_value=return_old_value\n        )\n        return self._execute_update_query_with_initialization_if_missing(\n            index_name=index_name, key_value=key_value, update_query_kwargs=update_query_kwargs,\n            setters=[FieldPathSetter(field_path_elements=field_path_elements, value_to_set=value)],\n        )\n\n    def set_update_data_element_to_map_without_default_initialization(\n            self, index_name: str, key_value: Any, field_path_elements: List[DatabasePathElement], value: Any\n    ) -> Optional[Response]:\n        update_query_kwargs = self._construct_update_data_element_to_map_query_kwargs(\n            index_name=index_name, key_value=key_value, field_path_elements=field_path_elements, value=value\n        )\n        return self._execute_update_query(query_kwargs_dict=update_query_kwargs)\n\n    @staticmethod\n    def _setters_to_tidied_initializers(setters: List[FieldPathSetter]) -> Dict[str, MapItemInitializerContainer]:\n        root_initializers_containers: Dict[str, MapItemInitializerContainer] = {}\n        all_initializers_containers: Dict[str, MapItemInitializerContainer] = {}\n\n        for setter in setters:\n            current_absolute_target_path: str = \"\"\n            last_map_initializer_container: 
Optional[MapItemInitializerContainer] = None\n setter_serialized_value_to_set = DynamoDBUtils.python_to_dynamodb(python_object=setter.value_to_set)\n\n for i_path, path_element in enumerate(setter.field_path_elements):\n current_absolute_target_path = DynamoDbCoreAdapter._add_database_path_element_to_string_path(\n base_string=current_absolute_target_path, database_path_element=path_element\n )\n existing_container: Optional[MapItemInitializerContainer] = all_initializers_containers.get(current_absolute_target_path, None)\n if existing_container is None:\n overriding_init_value: Optional[Any] = setter_serialized_value_to_set if i_path + 1 >= len(setter.field_path_elements) else None\n current_map_initializer = DynamoDbCoreAdapter._prepare_map_initialization(\n i=i_path, current_path_element=path_element, overriding_init_value=overriding_init_value,\n previous_element_initializer_container=last_map_initializer_container\n )\n current_map_initializer_container = MapItemInitializerContainer(item=current_map_initializer, nexts_in_line=dict())\n all_initializers_containers[current_absolute_target_path] = current_map_initializer_container\n # We must use the current_absolute_target_path as key, because the all_initializers_containers dict is a flattened dict.\n\n # We can use the element_key instead of the absolute path as key for both the root_initializers_containers and last_map_initializer_container\n # dict's since they are layered dictionaries, unlike the all_initializers_containers dict which is a flattened dict.\n if last_map_initializer_container is None:\n # If the last_map_initializer_container is None, this means that we are in the first iteration of the\n # field_path_elements loop, which means that the current_map_initializer_container is a root initializer.\n root_initializers_containers[path_element.element_key] = current_map_initializer_container\n else:\n # If the last_map_initializer_container is not None, we know that our current_map_initializer_container is not\n # a root initializer, and needs to be set in the nexts_in_line variable of the last_map_initializer_container.\n last_map_initializer_container.nexts_in_line[path_element.element_key] = current_map_initializer_container\n\n last_map_initializer_container = current_map_initializer_container\n else:\n last_map_initializer_container = existing_container\n logging.info(message_with_vars(\n message=\"Successfully tidied duplicate DatabasePathElement initializer\",\n vars_dict={\n 'trimmedPathElementKey': path_element.element_key,\n 'trimmedSetterSerializedValueToSet': setter_serialized_value_to_set,\n 'existingContainerItemPathTarget': existing_container.item.path_target,\n 'existingContainerItemDefaultValue': existing_container.item.item_default_value\n }\n ))\n return root_initializers_containers\n\n def _initializer_task_executor(self, index_name: str, key_value: str, initializer_container: MapItemInitializerContainer) -> Optional[Response]:\n # todo: group fields initialization in single request\n is_last_path_element = not len(initializer_container.nexts_in_line) > 0\n initialization_response = self.initialize_element_in_map_target(\n index_name=index_name, key_value=key_value,\n initializer=initializer_container.item,\n is_last_path_element=is_last_path_element\n )\n if initialization_response is None:\n logging.error(message_with_vars(\n message=\"Initialized a field after a set/update multiple data elements in map request had failed.\",\n vars_dict={'initializer_container': initializer_container}\n ))\n\n if 
len(initializer_container.nexts_in_line) > 0:\n            self._run_initializers_in_executors(\n                index_name=index_name, key_value=key_value,\n                initializers=initializer_container.nexts_in_line\n            )\n        return initialization_response\n\n    def _run_initializers_in_executors(self, index_name: str, key_value: str, initializers: Dict[str, MapItemInitializerContainer]) -> Optional[List[Optional[Response]]]:\n        initializers_values = initializers.values()\n        num_initializers = len(initializers_values)\n        with ThreadPoolExecutor(max_workers=num_initializers) as executor:\n            # Submit every initializer before collecting any result, so that they actually run concurrently\n            # (calling .result() right after each submit would serialize the pool).\n            futures = [\n                executor.submit(self._initializer_task_executor, index_name, key_value, item)\n                for item in initializers_values\n            ]\n            results: List[Optional[Response]] = [future.result() for future in futures]\n            return results\n\n    def _execute_update_query_with_initialization_if_missing(\n            self, index_name: str, key_value: Any, update_query_kwargs: dict, setters: List[FieldPathSetter]\n    ) -> Optional[Response]:\n\n        response = self._execute_update_query(query_kwargs_dict=update_query_kwargs)\n        if response is None:\n            # If the response is None, it means that one of the elements of the target path has not been found and needs to be initialized.\n            database_paths_initializers = self._setters_to_tidied_initializers(setters=setters)\n            results = self._run_initializers_in_executors(\n                index_name=index_name, key_value=key_value,\n                initializers=database_paths_initializers\n            )\n            last_response = results[-1]\n            # This function can only be triggered if there is at least one item in the setters list, so we do not need to worry about\n            # accessing the last element of the results list without checking that the len of the setters or of the results is greater than zero.\n            return last_response\n        return response\n\n    def set_update_multiple_data_elements_to_map(\n            self, index_name: str, key_value: Any, setters: List[FieldPathSetter], return_old_values: bool\n    ) -> Optional[Response]:\n\n        if not len(setters) > 0:\n            # If we tried to run the query with no object setter,\n            # it would crash when executed. 
So we return None.\n return None\n\n update_query_kwargs = {\n 'TableName': self.table_name,\n 'Key': {index_name: key_value},\n 'ReturnValues': \"UPDATED_OLD\" if return_old_values is True else \"NONE\"\n }\n update_expression = \"SET \"\n expression_attribute_names_dict, expression_attribute_values_dict = {}, {}\n\n consumed_setters: List[FieldPathSetter] = []\n for i_setter, current_setter in enumerate(setters):\n current_setter_update_expression = \"\"\n current_setter_attribute_names, current_setter_attribute_values = {}, {}\n setter_serialized_value_to_set = DynamoDBUtils.python_to_dynamodb(python_object=current_setter.value_to_set)\n\n for i_path, current_path_element in enumerate(current_setter.field_path_elements):\n current_path_key = f\"#setter{i_setter}_pathKey{i_path}\"\n current_setter_update_expression, new_expression_attribute_names = DynamoDbCoreAdapter._add_database_path_element_to_string_expression(\n base_string=current_setter_update_expression, database_path_element=current_path_element, path_key=current_path_key\n )\n current_setter_attribute_names = {**current_setter_attribute_names, **new_expression_attribute_names}\n\n if i_path >= (len(current_setter.field_path_elements) - 1):\n current_setter_update_expression += f\" = :item{i_setter}\"\n current_setter_attribute_values[f\":item{i_setter}\"] = setter_serialized_value_to_set\n\n complete_update_expression_bytes_size = getsizeof(update_expression)\n # complete_update_query_attribute_names_bytes_size = getsizeof(expression_attribute_names_dict)\n # complete_update_query_attribute_values_bytes_size = getsizeof(expression_attribute_values_dict)\n current_setter_update_expression_bytes_size = getsizeof(current_setter_update_expression)\n # current_setter_attribute_names_bytes_size = getsizeof(current_setter_attribute_names)\n # current_setter_attribute_values_bytes_size = getsizeof(current_setter_attribute_values)\n update_expression_bytes_size_if_setter_is_added = complete_update_expression_bytes_size + current_setter_update_expression_bytes_size\n # attributes_names_bytes_size_if_setter_is_added = complete_update_query_attribute_names_bytes_size + current_setter_attribute_names_bytes_size\n # attributes_values_bytes_size_if_setter_is_added = complete_update_query_attribute_values_bytes_size + current_setter_attribute_values_bytes_size\n if update_expression_bytes_size_if_setter_is_added < EXPRESSION_MAX_BYTES_SIZE:\n if i_setter > 0:\n update_expression += \", \"\n update_expression += current_setter_update_expression\n expression_attribute_names_dict = {**expression_attribute_names_dict, **current_setter_attribute_names}\n expression_attribute_values_dict = {**expression_attribute_values_dict, **current_setter_attribute_values}\n consumed_setters.append(current_setter)\n else:\n print(message_with_vars(\n message=\"Update operation expression size has reached over 4kb. 
\"\n \"The operation will be divided in a secondary operation (which could also be divided)\",\n vars_dict={\n 'index_name': index_name, 'key_value': key_value,\n 'setters': setters, 'update_expression': update_expression,\n 'current_setter_update_expression': current_setter_update_expression,\n 'current_setter_attribute_names': current_setter_attribute_names,\n 'current_setter_attribute_values': current_setter_attribute_values\n }\n ))\n break\n\n update_query_kwargs['UpdateExpression'] = update_expression\n update_query_kwargs['ExpressionAttributeValues'] = expression_attribute_values_dict\n if len(expression_attribute_names_dict) > 0:\n update_query_kwargs['ExpressionAttributeNames'] = expression_attribute_names_dict\n\n response = self._execute_update_query_with_initialization_if_missing(\n index_name=index_name, key_value=key_value,\n update_query_kwargs=update_query_kwargs, setters=consumed_setters\n )\n if len(consumed_setters) == len(setters):\n return response\n else:\n for setter in consumed_setters:\n setters.remove(setter)\n return self.set_update_multiple_data_elements_to_map(\n index_name=index_name, key_value=key_value,\n setters=setters, return_old_values=return_old_values\n )\n\n def query_response_by_key(\n self, index_name: str, key_value: Any,\n fields_path_elements: Optional[List[List[DatabasePathElement]]] = None,\n filter_expression: Optional[Any] = None, pagination_records_limit: Optional[int] = None,\n exclusive_start_key: Optional[str] = None, **additional_kwargs\n ) -> Response:\n if fields_path_elements is not None:\n kwargs = self._fields_paths_elements_to_expressions(fields_path_elements=fields_path_elements)\n else:\n kwargs = {}\n if additional_kwargs is not None:\n kwargs = {**kwargs, **additional_kwargs}\n\n kwargs['KeyConditionExpression'] = Key(index_name).eq(key_value)\n if index_name != self.primary_index.hash_key_name:\n # If the queried index is the primary_index, it must\n # not be specified, otherwise the request will fail.\n kwargs['IndexName'] = index_name\n\n if filter_expression is not None:\n kwargs['FilterExpression'] = filter_expression\n\n if pagination_records_limit is not None:\n kwargs['Limit'] = pagination_records_limit\n\n if exclusive_start_key is not None:\n kwargs['ExclusiveStartKey'] = exclusive_start_key\n\n try:\n table = self.dynamodb.Table(self.table_name)\n response = table.query(**kwargs)\n return Response(DynamoDBUtils.dynamodb_to_python(response))\n except ResourceNotExistsError:\n raise Exception(f\"DynamoDb table {self.table_name} do not exist or in the process\"\n \"of being created. 
Failed to get attributes from DynamoDb table.\")\n        except Exception as e:\n            raise Exception(f\"Failed to retrieve attributes from DynamoDb table. \"\n                            f\"Exception of type {type(e).__name__} occurred: {str(e)}\")\n\n    def query_items_by_key(\n            self, index_name: str, key_value: Any, fields_path_elements: List[List[DatabasePathElement]],\n            filter_expression: Optional[Any] = None, pagination_records_limit: Optional[int] = None,\n            exclusive_start_key: Optional[str] = None, **additional_kwargs\n    ) -> Tuple[Optional[List[Any]], QueryMetadata]:\n        response = self.query_response_by_key(\n            index_name=index_name, key_value=key_value, fields_path_elements=fields_path_elements,\n            filter_expression=filter_expression, pagination_records_limit=pagination_records_limit,\n            exclusive_start_key=exclusive_start_key, **additional_kwargs\n        )\n        if response is None:\n            return None, QueryMetadata(count=0, has_reached_end=True, last_evaluated_key=None)\n        query_metadata = QueryMetadata(\n            count=response.count,\n            has_reached_end=response.has_reached_end,\n            last_evaluated_key=response.last_evaluated_key\n        )\n        return response.items, query_metadata\n\n    def query_single_item_by_key(\n            self, index_name: str, key_value: Any,\n            fields_path_elements: Optional[List[List[DatabasePathElement]]] = None,\n            filter_expression: Optional[Any] = None\n    ) -> Optional[dict]:\n        # Yes, a query request is heavier than a get request that we could do with the _get_item_by_primary_key function.\n        # Yet, in a get request, we cannot specify an index_name to query on. So, the _query_single_item_by_key should be\n        # used when we want to get an item based on another index than the primary one. Otherwise, use _get_item_by_primary_key.\n        response = self.query_response_by_key(\n            index_name=index_name, key_value=key_value,\n            fields_path_elements=fields_path_elements,\n            filter_expression=filter_expression, pagination_records_limit=1\n        )\n        if response.count == 1:\n            return response.items[0]\n        elif not response.count > 0:\n            print(\"No item has been found\")\n            return None\n        else:\n            # response.count > 1 : the previous branches already handled the one-item and no-item cases.\n            print(\"More than one item has been found. Returning first item.\")\n            return response.items[0]\n
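    # A hedged usage sketch of the paginated query above (the index and key values are\n    # illustrative only, they are not part of this module):\n    #   records, query_metadata = adapter.query_items_by_key(\n    #       index_name='accountId', key_value='x42',\n    #       fields_path_elements=fields, pagination_records_limit=100\n    #   )\n    #   while not query_metadata.has_reached_end:\n    #       records, query_metadata = adapter.query_items_by_key(\n    #           index_name='accountId', key_value='x42', fields_path_elements=fields,\n    #           pagination_records_limit=100, exclusive_start_key=query_metadata.last_evaluated_key\n    #       )\n\n    def get_or_query_single_item(self, index_name: str, key_value: str, fields_path_elements: List[List[DatabasePathElement]]) -> Optional[dict]:\n        if self.primary_index.hash_key_name == index_name:\n            response: Optional[GetItemResponse] = self.get_item_by_primary_key(\n                index_name=index_name, key_value=key_value, fields_path_elements=fields_path_elements\n            )\n            return response.item if response is not None else None\n        else:\n            if index_name not in self._global_secondary_indexes_hash_keys:\n                print(message_with_vars(\n                    message=\"An index_name was not the primary_index index_name, and was not found in the \"\n                            \"global_secondary_indexes hash_keys. 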
Database query not executed, and None is being returned.\",\n vars_dict={\n 'primary_index.hash_key_name': self.primary_index.hash_key_name,\n '_global_secondary_indexes_hash_keys': self._global_secondary_indexes_hash_keys,\n 'index_name': index_name, 'key_value': key_value, \"fields_path_elements\": fields_path_elements\n }\n ))\n return None\n else:\n response_items: Optional[List[dict]] = self.query_response_by_key(\n index_name=index_name, key_value=key_value,\n fields_path_elements=fields_path_elements, pagination_records_limit=1\n ).items\n if isinstance(response_items, list) and len(response_items) > 0:\n return response_items[0]\n else:\n return None\n\n def get_data_in_path_target(\n self, index_name: str, key_value: str,\n field_path_elements: List[DatabasePathElement],\n num_keys_to_navigation_into: int\n ) -> Optional[any]:\n\n response_item = self.get_or_query_single_item(\n index_name=index_name, key_value=key_value,\n fields_path_elements=[field_path_elements]\n )\n return navigate_into_data_with_field_path_elements(\n data=response_item, field_path_elements=field_path_elements,\n num_keys_to_navigation_into=num_keys_to_navigation_into\n )\n\n def get_value_in_path_target(self, index_name: str, key_value: str, field_path_elements: List[DatabasePathElement]) -> Optional[any]:\n return self.get_data_in_path_target(\n index_name=index_name, key_value=key_value,\n field_path_elements=field_path_elements,\n num_keys_to_navigation_into=len(field_path_elements)\n )\n\n def get_item_in_path_target(self, index_name: str, key_value: str, field_path_elements: List[DatabasePathElement]) -> Optional[dict]:\n return self.get_data_in_path_target(\n index_name=index_name, key_value=key_value,\n field_path_elements=field_path_elements,\n num_keys_to_navigation_into=len(field_path_elements) - 1\n )\n\n @staticmethod\n def _unpack_multiple_retrieved_fields(\n item_data: dict, fields_path_elements: Dict[str, List[DatabasePathElement]],\n num_keys_to_stop_at_before_reaching_end_of_item: int, metadata: bool = False\n ):\n output: Dict[str, Any] = {}\n for field_path_key, field_item_path_elements in fields_path_elements.items():\n # All the values of each requested items will be inside the response_item dict. 
We just need\n # to navigate inside of the response_item with the field_path_elements for each requested\n # item, and package that in an output dict that will use the key of the requested items.\n if len(field_item_path_elements) > 0:\n num_keys_to_navigation_into: int = len(field_item_path_elements) - num_keys_to_stop_at_before_reaching_end_of_item\n navigated_item: Optional[Any] = navigate_into_data_with_field_path_elements(\n data=item_data, field_path_elements=field_item_path_elements,\n num_keys_to_navigation_into=num_keys_to_navigation_into\n )\n output[field_path_key] = navigated_item if metadata is not True else {\n 'value': navigated_item, 'field_path_elements': field_item_path_elements\n }\n return output\n\n def get_data_from_multiple_fields_in_path_target(\n self, key_value: str, fields_path_elements: Dict[str, List[DatabasePathElement]],\n num_keys_to_stop_at_before_reaching_end_of_item: int, index_name: Optional[str] = None,\n metadata: bool = False\n ) -> Optional[Dict[str, Any]]:\n\n response_item: Optional[dict] = self.get_or_query_single_item(\n index_name=index_name, key_value=key_value,\n fields_path_elements=list(fields_path_elements.values())\n )\n if response_item is None:\n return None\n\n return self._unpack_multiple_retrieved_fields(\n item_data=response_item, fields_path_elements=fields_path_elements,\n num_keys_to_stop_at_before_reaching_end_of_item=num_keys_to_stop_at_before_reaching_end_of_item,\n metadata=metadata\n )\n\n def get_values_in_multiple_path_target(\n self, index_name: str, key_value: str,\n fields_path_elements: Dict[str, List[DatabasePathElement]],\n metadata: bool = False\n ):\n return self.get_data_from_multiple_fields_in_path_target(\n index_name=index_name, key_value=key_value,\n fields_path_elements=fields_path_elements,\n num_keys_to_stop_at_before_reaching_end_of_item=0, metadata=metadata\n )\n\n def get_items_in_multiple_path_target(\n self, index_name: str, key_value: str,\n fields_path_elements: Dict[str, List[DatabasePathElement]],\n metadata: bool = False\n ):\n return self.get_data_from_multiple_fields_in_path_target(\n index_name=index_name, key_value=key_value,\n fields_path_elements=fields_path_elements,\n num_keys_to_stop_at_before_reaching_end_of_item=1, metadata=metadata\n )\n\n @staticmethod\n def _add_to_filter_expression(expression, condition):\n if expression is None:\n return condition\n return expression & condition\n\n @staticmethod\n def _fields_paths_elements_to_expressions(fields_path_elements: List[List[DatabasePathElement]]) -> dict:\n output_kwargs = {}\n expression_attribute_names = {}\n projection_expression = \"\"\n\n # In DynamoDB, when trying to get some fields, certain type of values (like values with - in them,\n # or big numbers like an UUID), can not work and provoke in error while executing the query. In\n # order to fix that, we need to pass the variable name in the ExpressionAttributeNames instead of\n # putting it directly in the ProjectionExpression. Yet, when using a map path like myMap.data.item1\n # we must declare each path element (myMap, data, item1) as separate ExpressionAttributeNames,\n # otherwise it will not work. 
So, we define an 'id' for each path of each attribute name\n # (f\"#f{i_field}_{i_path}\") we add our path as the dict value, and then we build the condition\n # expression to use the ids of our attributes names.\n for i_field, field_elements in enumerate(fields_path_elements):\n for i_path_element, path_element in enumerate(field_elements):\n if i_path_element > 0:\n last_field_path_element = field_elements[i_path_element - 1]\n if last_field_path_element.default_type == list:\n projection_expression += f'[{path_element.element_key}]'\n # We set the index of a list directly inside the projection expression, because a variable can\n # be passed the expression_attribute_names only if it is a str, where as our index will be an int.\n continue\n elif last_field_path_element.default_type == set:\n raise Exception(\"Selection by index in set type's not yet supported\")\n\n # If we reach this point, the continue keyword has not been triggered, which means that the current\n # path_element has not yet been handled by a special case handler, like an list index handler.\n current_field_path_expression_name = f\"#f{i_field}_{i_path_element}\"\n expression_attribute_names[current_field_path_expression_name] = path_element.element_key\n projection_expression += f'{\".\" if i_path_element > 0 else \"\"}{current_field_path_expression_name}'\n\n if i_field + 1 < len(fields_path_elements):\n projection_expression += \", \"\n\n if len(expression_attribute_names) > 0:\n output_kwargs[\"ExpressionAttributeNames\"] = expression_attribute_names\n if projection_expression.replace(\" \", \"\") != \"\":\n output_kwargs[\"ProjectionExpression\"] = projection_expression\n\n return output_kwargs\n\n","repo_name":"inoftrobinson/StructNoSQL","sub_path":"StructNoSQL/tables_clients/backend/dynamodb_core.py","file_name":"dynamodb_core.py","file_ext":"py","file_size_in_byte":49644,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"21884289854","text":"from abc import ABC, abstractmethod\nfrom collections.abc import Sequence\nfrom dataclasses import dataclass\nfrom typing import (\n Any,\n Generic,\n Iterable,\n Iterator,\n Optional,\n TypeVar,\n cast,\n no_type_check,\n)\n\n\nclass LinkedListError(Exception):\n pass\n\n\nclass EmptyLinkedList(LinkedListError):\n \"\"\"Is raised when the operation is not allowed on an empty linked list\"\"\"\n\n def __str__(self):\n return \"LinkedList is empty\"\n\n\nclass NotFoundDataError(LinkedListError):\n def __init__(self, data: Any):\n self.data = data\n super().__init__(data)\n\n def __str__(self):\n return f\"Data: {self.data} is not found in here!\"\n\n\nT = TypeVar(\"T\")\n\n\nclass LinkedList(Generic[T], ABC):\n def __init__(self, head: Optional[\"T\"] = None):\n if head is not None:\n self.head: \"Node[T]\" = (\n _ensure_node(head)\n if isinstance(self, SinglyLinkedList)\n else _ensure_dnode(head)\n )\n self._size = 1\n else:\n self.head = head if isinstance(self, SinglyLinkedList) else head\n self._size = 0\n self.tail: Optional[\"Node\"] = self.head\n\n def __iter__(self) -> Iterator[T]:\n if self.head is None:\n raise EmptyLinkedList()\n\n data, next_item = self.head.data, self.head.next_item\n yield data\n while next_item is not None:\n data, next_item = next_item.data, next_item.next_item\n yield data\n\n def iternodes(self) -> Iterator[\"Node[T]\"]:\n if self.head is None:\n raise EmptyLinkedList()\n\n node, next_node = self.head, self.head.next_item\n yield node\n while next_node is not None:\n node, next_node 
= next_node, next_node.next_item\n            yield node\n\n    @abstractmethod\n    def pop(self, index: Optional[int] = None) -> \"T\":\n        pass\n\n    @abstractmethod\n    def popleft(self) -> \"T\":\n        pass\n\n    def remove(self, index: Optional[int] = None) -> None:\n        self.pop(index)\n\n    def removeleft(self) -> None:\n        self.popleft()\n\n\n# Singly LinkedList implementation\n@dataclass\nclass Node(Generic[T]):\n    \"\"\"Simple node representation\n\n    each node is like:\n    --------------------\n    | data | next_item |\n    --------------------\n    in which the data is the actual data the node is containing and next_item\n    is a reference to the next node.\n    \"\"\"\n\n    data: Any\n    next_item: Optional[\"Node\"] = None\n\n    def __hash__(self):\n        return hash((self.data, self.next_item))\n\n\nclass SinglyLinkedList(LinkedList[T], Sequence):\n    \"\"\"Singly Linked List implementation\n\n    Attributes:\n        head: is the head node of the linked list, defaults to None.\n        tail: is the tail node of the linked list, defaults to `head`.\n        _size: holds the size of the linked list, i.e. the number of nodes which are connected to each other.\n\n    Methods:\n        append: adds a node to the end of the linked list, the newest node becomes the `tail`.\n        appendleft: adds a node to the head of the linked list, the newest node becomes the `head`.\n        pop: deletes and returns the tail, or a desired index\n        popleft: deletes and returns the head of the linked list.\n        remove: just like pop but does not return.\n        removeleft: deletes the head of the linked list.\n        insert: inserts data at a specific index\n        extend: just like list.extend\n\n    Behaviours:\n        '__eq__', '__ne__'\n        '__hash__', hash(obj)\n        '__iter__', iter(obj) (for i in obj)\n        '__getitem__', obj[0], obj[:10]\n        '__contains__', ob in obj, ob not in obj\n        '__init__', SLL(...)\n        '__len__', len(obj)\n        '__repr__', print, repr\n        '__reversed__', reversed(obj)\n        'count', obj.count(ob)\n        'index', obj.index(ob)\n    \"\"\"\n\n    @no_type_check\n    def __getitem__(self, item: int | slice) -> \"T\":\n        if not self:\n            raise EmptyLinkedList()\n\n        self_iter = iter(self)\n        data = None\n\n        if isinstance(item, int):\n            item = item if item >= 0 else item + self._size\n            if item == 0:\n                return self.head.data\n\n            for _ in range(item + 1):\n                try:\n                    data = next(self_iter)\n                except StopIteration:\n                    raise IndexError(\"Index out of range\") from None\n            else:\n                return data\n\n        # item is a slice now; slice.indices() normalizes None and negative bounds\n        # (the previous item.start arithmetic crashed on open slices such as obj[:10])\n        start, stop, step = item.indices(self._size)\n        return [self[index] for index in range(start, stop, step)]\n\n    def __len__(self):\n        return self._size\n\n    def append(self, data: Any):\n        \"\"\"Append a node to the end of SinglyLinkedList\n\n        If `data` is not a node: wrap it in a node.\n        \"\"\"\n\n        # Check if we have a head or not\n        # if no head is available -> head is None and tail is None too\n        data = _ensure_node(data)\n        if self.head is None:\n            self.tail = self.head = data\n        else:\n            self.tail.next_item = data\n            self.tail = data\n            assert self.tail.next_item is None\n        self._size += 1\n\n    def appendleft(self, data: Any):\n        \"\"\"Append a node to the start of the SinglyLinkedList\n\n        This node will be the `head`\n        \"\"\"\n\n        # Check if we have a head or not\n        # if no head is available -> head is None and tail is None too\n        # then, we just have to append the data to our SinglyLinkedList :)\n        if self.head is None:\n            self.append(data)\n            return\n\n        data = _ensure_node(data)\n        former_head = self.head\n        self.head = data\n        self.head.next_item = former_head\n        self._size += 1\n
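    # A small usage sketch of the API above (illustrative only):\n    #   sll = SinglyLinkedList(1); sll.append(2); sll.appendleft(0)\n    #   list(sll) -> [0, 1, 2];  sll[1] -> 1;  len(sll) -> 3\n\n    def pop(self, index: Optional[int] = None) -> \"T\":\n        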
\"\"\"Pop the tail, or a desired index of sll\"\"\"\n if len(self) == 0:\n raise EmptyLinkedList()\n\n if index is None:\n if len(self) == 1:\n return self.popleft()\n\n for node in self.iternodes():\n if node.next_item == self.tail:\n # this is the node before the tail:\n to_ret = self.tail\n self.tail = node\n self.tail.next_item = None\n self._size -= 1\n return to_ret.data\n\n if isinstance(index, int):\n if index == 0:\n return self.popleft()\n\n to_ret = self[index] # may raise error :)\n\n before_and_self_nodes = []\n\n for idx, node in enumerate(self.iternodes()):\n if idx == index - 1:\n before_and_self_nodes.append(node)\n elif idx == index:\n before_and_self_nodes.append(node)\n\n before_node, self_node = before_and_self_nodes\n after_node = self_node.next_item\n before_node.next_item = after_node\n self._size -= 1\n return to_ret # type: ignore\n\n def popleft(self) -> \"T\":\n \"\"\"Pop the head\"\"\"\n if self.head is None:\n raise EmptyLinkedList()\n\n former_head = self.head\n newer_head = self.head.next_item\n self.head = newer_head\n self._size -= 1\n return former_head.data\n\n def insert(self, index: int, data: Any):\n \"\"\"Insert a node at the given\n\n >>> a = [1]\n >>> a.insert(0, 0)\n >>> # a -> [0, 1]\n \"\"\"\n\n if self._size == 0:\n self.append(data)\n return\n\n # if index == 0: it will be inserted at the head\n if index == 0:\n self.appendleft(data)\n return\n\n where_data = self[index]\n former_node = self.head\n\n while True:\n if former_node.next_item.data == where_data:\n _ = _ensure_node(data)\n later_node = former_node.next_item\n former_node.next_item = _\n _.next_item = later_node\n self._size += 1\n return\n former_node = former_node.next_item\n\n def extend(self, data: Iterable):\n for item in data:\n self.append(_ensure_node(item))\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(head={self.head})\"\n\n\n# TODO: finish CLL\n# Circularly LinkedList implementation\nclass CircularlyLinkedList(SinglyLinkedList[\"T\"], Sequence):\n def __init__(self, data: Optional[T] = None):\n super().__init__(data)\n if data is not None:\n self.tail.next_item = self.head\n\n def append(self, data: Any):\n super().append(data)\n self.tail.next_item = self.head\n\n def appendleft(self, data: Any):\n super().appendleft(data)\n self.tail.next_item = self.head\n\n def pop(self, index: Optional[int] = None) -> \"T\":\n \"\"\"Pop the tail, or a desired index of sll\"\"\"\n if len(self) == 0:\n raise EmptyLinkedList()\n\n if index is None:\n got = super().pop(None)\n self.tail.next_item = self.head\n return got\n\n if isinstance(index, int):\n if index == 0:\n return self.popleft()\n\n to_ret = self[index] # may raise error :)\n\n before_and_self_nodes = []\n\n for idx, node in enumerate(self.iternodes()):\n if idx == len(self) + 1:\n break\n if idx == index - 1:\n before_and_self_nodes.append(node)\n elif idx == index:\n before_and_self_nodes.append(node)\n\n before_node, self_node = before_and_self_nodes\n after_node = self_node.next_item\n before_node.next_item = after_node\n self._size -= 1\n self.tail.next_item = self.head\n\n return to_ret\n\n def popleft(self) -> \"T\":\n got = super().popleft()\n self.tail.next_item = self.head\n return got\n\n\n# Doubly LinkedList implementation\n\n\nclass DNode(Node[\"T\"]):\n \"\"\"Class to create a two-way node\n\n each d-node is like:\n --------------------------------\n | prev_item | data | next_item |\n -------------------------------\n\n But the API is designed to instantiate a DNode as:\n dnode = DNode(data, 
prev_item=None, next_item=None)\n This is much better than DNode(prev_item, data, next_item=None)\n \"\"\"\n\n prev_item: Optional[\"DNode\"] = None\n\n def __hash__(self):\n return hash((self.data, self.prev_item, self.next_item))\n\n\nclass DoublyLinkedList(LinkedList, Sequence, Generic[T]):\n \"\"\"Doubly Linked List implementation\n\n Attributes:\n head: the head of dll -> prev_item is always None, Default: None.\n tail: the tail of dll -> next_item is always None, Default: `head`\n _size holds the size of the dll\n\n Methods:\n append: adds a dnode to the end dll, the newest dnode becomes the tail\n appendleft: adds a dnode to the head, the newest dnode becomes the head\n\n \"\"\"\n\n def append(self, data: Any):\n \"\"\"Append a Dnode to DoublyLinkedList\n\n Ensure data is a Dnode.\n \"\"\"\n\n data = _ensure_dnode(data)\n if self.head is None:\n self.tail = self.head = data\n else:\n self.tail.next_item = data\n data.prev_item = self.tail\n self.tail = data\n assert self.tail.next_item is None\n self._size += 1\n\n def appendleft(self, data: Any):\n \"\"\"Append at the head of DoublyLinkedList\n\n Ensure data is a Dnode\n \"\"\"\n\n if self.head is None:\n self.append(data)\n return\n\n data = _ensure_dnode(data)\n former_head = cast(\"DNode\", self.head)\n self.head = data\n self.head.next_item = former_head\n former_head.prev_item = cast(\"DNode\", self.head)\n self._size += 1\n\n def pop(self, index: Optional[int] = None) -> \"T\":\n \"\"\"Pop the tail\"\"\"\n if len(self) == 0:\n raise EmptyLinkedList()\n\n if len(self) == 1:\n # one dnode in out dll\n to_ret = self.tail\n self.head = self.tail = None\n self._size -= 1\n return to_ret.data\n\n former_tail = self.tail\n newest_tail = cast(DNode, self.tail).prev_item\n newest_tail.next_item = None\n self.tail = newest_tail\n self._size -= 1\n return former_tail.data\n\n @no_type_check\n def popleft(self) -> \"T\":\n \"\"\"Pop the head\"\"\"\n if len(self) == 0:\n raise EmptyLinkedList()\n\n if len(self) == 1:\n return self.pop()\n\n former_head = self.head\n newest_head = self.head.next_item\n newest_head.prev_item = None\n self.head = newest_head\n self._size -= 1\n return former_head.data\n\n @no_type_check\n def __getitem__(self, index: int | slice) -> \"T\":\n pass\n\n def __len__(self):\n return self._size\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(head={self.head})\"\n\n\nclass StrSinglyLinkedList(SinglyLinkedList[str]):\n pass\n\n\nclass IntSinglyLinkedList(SinglyLinkedList[int]):\n pass\n\n\nclass StrDoublyLinkedList(DoublyLinkedList[str]):\n pass\n\n\nclass IntDoublyLinkedList(DoublyLinkedList[int]):\n pass\n\n\ndef _ensure_node(data: Any) -> \"Node\":\n if isinstance(data, Node):\n return data\n return Node(data)\n\n\ndef _ensure_dnode(data: Any) -> \"DNode\":\n if isinstance(data, DNode):\n return data\n return DNode(data)\n\n\n# TODO: fix no_type_check mypy errors\n","repo_name":"mahdihaghverdi/DrFatemiDataStructureClass","sub_path":"dsp/datastructures/linkedlists.py","file_name":"linkedlists.py","file_ext":"py","file_size_in_byte":13785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"34511192958","text":"import cv2\nimport os\nimport numpy as np\nfrom scipy.ndimage import maximum_filter, minimum_filter\nimport argparse\nimport time\nimport math\n\nfrom HeadMaskGenerator import load_mask\n\n# debug draw\nimport matplotlib.pyplot as plt\n\n'''\ncalculate all MRI image score in the dataset\nreturn individual score for each image\n\ninput => 
masks_img, foreground_img\noutput => average score of current dataset\n'''\ndef mriqa(masks, imgs):\n _, _, n = masks.shape\n scores = []\n num_mask = len(masks)\n cnt = 0\n for i in range(n):\n img = imgs[:,:,i]\n mask = masks[:,:,i]\n if (np.sum(mask) / (mask.shape[0] * mask.shape[1]) < 0.2):\n scores.append(0.0)\n cnt += 1\n continue\n # most images are polluted\n if cnt > num_mask / 4:\n return [0.0]\n # normalize img\n norm_img = cv2.normalize(img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n\n gray = norm_img\n # contrast feature img\n window = (13, 13)\n maximg = maximum_filter(gray, size = window)\n minimg = minimum_filter(gray, size = window)\n contrast_img = maximg - minimg\n # image moment\n grayscale_moment = cv2.moments(gray)['nu20']\n contrast_moment = cv2.moments(contrast_img)['nu20']\n # binary image\n fgmg = (gray > grayscale_moment).astype(np.int)\n fcmg = (gray > contrast_moment).astype(np.int)\n fcmc = (contrast_img > contrast_moment).astype(np.int)\n fgmc = (contrast_img > grayscale_moment).astype(np.int)\n\n # print(np.sum(fgmg))\n # print(np.sum(fcmg))\n # print(np.sum(fcmc))\n # print(np.sum(fgmc))\n # fig = plt.figure(figsize=(10,10))\n # fig.add_subplot(2,2,1)\n # plt.imshow(fgmg, cmap=plt.cm.bone)\n # fig.add_subplot(2,2,2)\n # plt.imshow(fcmg, cmap=plt.cm.bone)\n # fig.add_subplot(2,2,3)\n # plt.imshow(fcmc, cmap=plt.cm.bone)\n # fig.add_subplot(2,2,4)\n # plt.imshow(fgmc, cmap=plt.cm.bone)\n # plt.show()\n # luminance contrast quality score\n q11 = fcmg & fgmg\n q1 = np.sum(mask * q11) / max(np.sum(fcmg), np.sum(fgmg)) if max(np.sum(fcmg), np.sum(fgmg)) != 0 else 0\n\n # texture score\n q22 = fgmc & fcmc\n q2 = np.sum(mask * q22) / max(np.sum(fgmc), np.sum(fgmg)) if max(np.sum(fgmc), np.sum(fgmg)) != 0 else 0\n\n # texture contrast quality score\n q33 = fgmc & fcmc\n q3 = np.sum(mask * q33) / np.sum(mask) if np.sum(mask) != 0 else 0\n\n # lightness quality score\n q44 = fcmg & fgmg\n q4 = np.sum(mask * q44) / np.sum(mask) if np.sum(mask) != 0 else 0\n\n # print(f\"{q1},{q2},{q3},{q4}\")\n # weight\n w1 = w2 = w4 = 0.1\n w3 = 0.7\n Q = w1 * q1 + w2 * q2 + w3 * q3 + w4 * q4\n\n if not math.isnan(Q):\n scores.append(Q)\n return scores\n\n# load mask img and dicom img file name\ndef fileImgLoad(Opath):\n dcms_path = []\n masks_path = []\n for file in os.listdir(Opath):\n exten = file.split('.')[1]\n if exten == \"dcm\":\n dcms_path.append(Opath + file)\n elif exten == \"dcmactImg\":\n masks_path.append(Opath + file)\n \n return masks_path, dcms_path\n\n# read image\ndef ImgLoader(masks_path, dcms_path):\n masks = []\n fgs = []\n first = True\n for i,mask in enumerate(masks_path):\n mask_img = cv2.imread(mask, cv2.IMREAD_GRAYSCALE)\n # norm_image = cv2.normalize(mask_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if first:\n first = False\n w, h = mask_img.shape\n masks = np.zeros((w, h, len(masks_path)))\n masks[:,:,i] = mask_img\n \n first = True\n for i,fg in enumerate(dcms_path):\n fg_img = cv2.imread(fg, cv2.IMREAD_GRAYSCALE)\n # norm_image = cv2.normalize(fg_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if first:\n first = False\n w, h = fg_img.shape\n fgs = np.zeros((w, h, len(dcms_path)))\n fgs[:,:,i] = fg_img\n \n return masks, fgs\n\n\n# load generated mask image and dicomimg\ndef load_existing_mask(Opath):\n masks_path, dcms_path = fileImgLoad(Opath)\n # read \n masks, imgs = ImgLoader(masks_path, dcms_path)\n return masks, imgs\n\n# slice property\n# from f(x) = 
e^(-pi*(x-1)^2)\ndef slice_score(slice_thickness, slice_spacing):\n p = slice_thickness / slice_spacing\n return np.exp(-np.pi * (p - 1)**2)\n\n# weight control for final score\ndef final_score(img_score, slice_score):\n return img_score * 0.7 + slice_score * 0.3\n\ndef directory_score(directory, hdr):\n \"\"\"\n :param directory: Input directory containg MRI images\n :return: metric (img_quality_score, img_slice_score, num_slices, total_score, time_consuming)\n \"\"\"\n print(f\"Starting process directory {directory}\")\n masks = []\n imgs = []\n thickness = 0\n\n masks, imgs, distance, thickness = load_mask(directory, hdr)\n scores = mriqa(masks, imgs)\n img_score = sum(scores) / len(scores)\n s_score = slice_score(float(thickness), float(distance)) if distance != 0 and thickness != 0 else 0\n score = final_score(img_score, s_score)\n\n print(f\"Processed the directory {directory}\")\n return (img_score, s_score, len(scores), score)\n\n\n# Unit Test\nif __name__ == \"__main__\":\n # parsing command line arguments \n # Input path of MRI image dataset\n # Output result of foreground image \n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--hdr\", type=bool)\n parser.add_argument(\"-i\", \"--input\")\n parser.add_argument(\"-o\", \"--output\")\n args = parser.parse_args()\n \n if args.input is None or args.output is None:\n print(\"Please python mriqa.py -i -o for calculate dataset quality score\")\n\n Ipath = args.input\n Opath = args.output\n HDREnable = args.hdr\n # Ipath = \"../data/series_216_SGE_fs_ax_113_2.55_256x256/\"\n # Ipath = \"../pydicom-playground/data/\"\n # Opath = \"../bfg/OutputImg-Larry-8/\"\n # Opath = \"./OutputImg-sample\"\n\n\n\n masks = []\n imgs = []\n thickness = 0\n\n if not os.path.exists(Opath):\n os.makedirs(Opath)\n\n masks, imgs, distance, thickness = load_mask(Ipath, Opath)\n\n scores = mriqa(masks, imgs)\n img_score = sum(scores) / len(scores)\n s_score = slice_score(float(thickness), float(distance))\n score = final_score(img_score, s_score)\n print(f\"img_score is: {img_score}, slice_score is: {s_score}\")\n print(f\"The dataset quality is {score}\")\n print(f\"The number of slice is {len(scores)}\")\n\n # check difference between new generated img and existing img\n\n # masks_e, imgs_e = load_existing_mask(Opath)\n # masks_n, imgs_n = load_mask(Ipath, Opath)\n # _,_,n = masks_e.shape\n # for i in range(n):\n # m_e = masks_e[:,:,i]\n # m_e = cv2.normalize(m_e, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n # m_n = masks_n[:,:,i]\n # m_d = m_e - m_n\n\n # i_e = imgs_e[:,:,i]\n # i_n = imgs_n[:,:,i]\n # i_d = i_e - i_n","repo_name":"davidzhangxm/3DMIP-MRIQC","sub_path":"mriqa.py","file_name":"mriqa.py","file_ext":"py","file_size_in_byte":7165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37572550211","text":"\n# 定义不可达距离\n_ = float('inf')\n\n\ndef read(dir):\n lst = []\n with open(dir) as f:\n while True:\n line = f.readline().split()\n if not line:\n break\n l = [int(i) for i in line]\n lst.append(l)\n # lst = lst[1:]\n return lst\n\n\nlst = [[6, 3, 2], [4, 5, 6], [5, 3], [3, 1]]\nans = [_ for i in range(lst[0][0])]\n\nfor fix in lst[2:]:\n ans[fix[1]-1] = fix[0]\n\nranked = lst[1].copy()\nwhile ranked:\n rank = 0\n if len(ranked) == 1:\n for i in range(len(ans)):\n if ans[-(i+1)] == _:\n ans[-(i+1)] = ranked[rank]\n ranked.remove(ranked[rank])\n break\n break\n if ranked[rank] not in ans:\n for pos in range(len(ans)):\n if pos+len(ranked) == 
len(ans)+1:\n # [0, 1, 2, 3, _, _], [5, 6]\n ans[pos] = ranked[rank]\n ranked.remove(ranked[rank])\n rank = 0\n break\n if ans[pos] == _ and ans[pos+1] == ranked[rank+1]:\n print('ans[pos+1]:', ans[pos+1])\n print('ranked[rank+1]:', ranked[rank+1])\n print('len(ans)-(pos+1):', len(ans)-(pos+1))\n # for i in range(len(ans)-(pos+1)):\n # if ans[-i] == _:\n # ans[-(i+1)] = ranked[rank]\n # ranked.remove(ranked[rank])\n # rank = 0\n # break\n break\n else:\n ranked.remove(ranked[rank])\n\n\nprint(ans)","repo_name":"saflhsajkhbgr/My-journey-on-preparing-for-USACO","sub_path":"MilkingOrder.py","file_name":"MilkingOrder.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6712472026","text":"import os\nimport tensorflow as tf\nimport pandas as pd\nimport nltk as nl\nimport numpy as np\n\ncwd = os.getcwd()\nos.chdir('/Users/clementmanger/Desktop/Thesis/Word2Vec')\n# cwd\n\ndf = pd.DataFrame.from_csv('ReviewsFiction.csv', sep = '|', header=0)\nlines = df['Review Text']\n\n#figure out document lengths\nlength = []\nfor f in df['Review Text']:\n length.append(len(nl.word_tokenize(f)))\nMAX_DOCUMENT_LENGTH = max(length)\n\n# create vocabulary\n\nvocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)\n\nx = np.array(list(vocab_processor.fit_transform(lines)))\n\nvocabulary = vocab_processor.vocabulary_\n\nvocab_size = len(vocabulary)\n\n#check if the vocabulary actually has any words in it. it feels like vocab may have the length, but no actual words in it\n\n#this is the idea, and it comes from https://github.com/cahya-wirawan/cnn-text-classification-tf/blob/master/data_helpers.py\n# with open('GoogleNews-vectors-negative300.bin', 'rb') as f:\n# header = f.readline()\n# vocab_size, vector_size = map(int, header.split())\n# print(vocab_size, vector_size)\n\n\ndef load_embedding_vectors_word2vec(vocabulary, filename, binary):\n # load embedding_vectors from the word2vec\n encoding = 'utf-8'\n with open(filename, \"rb\") as f:\n header = f.readline()\n vocab_size, vector_size = map(int, header.split())\n # initial matrix with random uniform\n embedding_vectors = np.random.uniform(-0.25, 0.25, (len(vocabulary), vector_size))\n if binary:\n binary_len = np.dtype('float32').itemsize * vector_size\n for line_no in range(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == b' ':\n break\n if ch == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n if ch != b'\\n':\n word.append(ch)\n word = str(b''.join(word), encoding=encoding, errors='strict')\n idx = vocabulary.get(word)\n if idx != 0:\n embedding_vectors[idx] = np.fromstring(f.read(binary_len), dtype='float32')\n else:\n f.seek(binary_len, 1)\n else:\n for line_no in range(vocab_size):\n line = f.readline()\n if line == b'':\n raise EOFError(\"unexpected end of input; is count incorrect or file otherwise damaged?\")\n parts = str(line.rstrip(), encoding=encoding, errors='strict').split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % (line_no))\n word, vector = parts[0], list(map('float32', parts[1:]))\n idx = vocabulary.get(word)\n if idx != 0:\n embedding_vectors[idx] = vector\n f.close()\n return embedding_vectors\n\ninitW = load_embedding_vectors_word2vec(vocabulary, 'GoogleNews-vectors-negative300.bin', True)\n\nW = tf.Variable(tf.random_uniform([vocab_size, 300], -1.0, 
1.0),name=\"W\")\n\nW.assign(initW)\n\ntf.nn.embedding_lookup(W, features)\n\n#\n#\n# def load_embedding_vectors_glove(vocabulary, filename, vector_size):\n# # load embedding_vectors from the glove\n# # initial matrix with random uniform\n# embedding_vectors = np.random.uniform(-0.25, 0.25, (len(vocabulary), vector_size))\n# f = open(filename)\n# for line in f:\n# values = line.split()\n# word = values[0]\n# vector = np.asarray(values[1:], dtype=\"float32\")\n# idx = vocabulary.get(word)\n# if idx != 0:\n# embedding_vectors[idx] = vector\n# f.close()\n# return embedding_vectors\n","repo_name":"CjemGit/FictionNon","sub_path":"W2Vtest.py","file_name":"W2Vtest.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36296637483","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass PoetryModel(nn.Module):\n def __init__(self, vocab_size, embedding_dim, hidden_dim):\n super(PoetryModel, self).__init__()\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.embedding = nn.Embedding(self.vocab_size, self.embedding_dim)\n self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim, num_layers=2)\n self.linear = nn.Linear(self.hidden_dim, self.vocab_size)\n\n def forward(self, input, hidden=None):\n sentence_length, batch_size = input.size()\n if hidden == None:\n h_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float()\n c_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float()\n else:\n h_0, c_0 = hidden\n \n embeds = self.embedding(input)\n output, hidden = self.lstm(embeds, (h_0, c_0))\n output = self.linear(output.view(sentence_length * batch_size, -1))\n return output, hidden","repo_name":"swimmtest/deep-learning-assignments","sub_path":"Auto-writingPoem/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"418525635","text":"import sys\nimport os\nimport time\n\nsys.path.append(os.getcwd()[:os.getcwd().index('implementations')])\n\nfrom keras import backend as K\nfrom keras import optimizers\nfrom keras.layers import Conv2D, Activation, BatchNormalization, UpSampling2D, Lambda\nfrom keras.models import Sequential\nfrom random import shuffle\n\nfrom keras.utils import HDF5Matrix\n\nfrom implementations.support_scripts.common import data_to_onehot, H5Choose\nfrom implementations.support_scripts.image_processing import ImageDownloader\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\nb_size = 8\ndir_name = \"../small_dataset\"\nlist_dir = os.listdir(dir_name)\nshuffle(list_dir)\nlist_dir = list_dir\nnum_classes = 400\nn_epochs = 1000\n\nmodel = Sequential()\n\n# conv1_1\nmodel.add(Conv2D(64, (3, 3), padding=\"same\", input_shape=(256, 256, 1)))\nmodel.add(Activation(\"relu\"))\n\n# conv1_2\nmodel.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(BatchNormalization(axis=3)) # todo: check if really axis 1 since data has last axis for chanel\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(BatchNormalization(axis=3)) # todo: check if really axis 1 since data has last axis for chanel\nmodel.add(Activation(\"relu\"))\n\n# conv2_1\nmodel.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(BatchNormalization(axis=3)) # todo: check if really axis 1 since data has last axis for 
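# Aside on the word2vec loader above (W2Vtest.py): in the non-binary branch,
# list(map('float32', parts[1:])) raises TypeError because 'float32' is a
# string, not a callable, and tf.nn.embedding_lookup(W, features) refers to a
# name `features` that is never defined in the file. A hedged one-line fix for
# the vector parse, assuming numpy is imported as np:
#     vector = np.asarray(parts[1:], dtype="float32")
# Self-contained check of the corrected parsing:
import numpy as np
parts = ["word", "0.1", "-0.2", "0.3"]
vector = np.asarray(parts[1:], dtype="float32")
assert vector.shape == (3,) and abs(float(vector[1]) + 0.2) < 1e-6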
chanel\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(BatchNormalization(axis=3)) # todo: check if really axis 1 since data has last axis for chanel\nmodel.add(Activation(\"relu\"))\n\n# conv2_2\nmodel.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(BatchNormalization(axis=3)) # todo: check if really axis 1 since data has last axis for chanel\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(BatchNormalization(axis=3)) # todo: check if really axis 1 since data has last axis for chanel\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Conv2D(400, (1, 1), padding=\"same\"))\n\n\n# multidimensional softmax\ndef custom_softmax(x):\n x = K.reshape(x, (b_size * 256 * 256, num_classes))\n x = K.softmax(x)\n x = K.reshape(x, (b_size, 256, 256, num_classes))\n return x\n\nmodel.add(Activation(custom_softmax))\n\n\n# sgd = optimizers.SGD(lr=10, momentum=0.0, decay=0, nesterov=False)\nopt = optimizers.Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\nmodel.compile(optimizer=opt,\n loss=\"mean_squared_error\")\n\nmodel.summary()\n\n\nsave_every_n_epoch = 50\nstart_from = 100\n\n# start image downloader\nid = ImageDownloader(\"../h5_data\", \"imp5_\", mode=\"common\")\nid.setDaemon(True) # thread die when main thread die\nid.start()\n\nfile_picker = H5Choose(dir=\"../h5_data\")\n\ntry:\n for epoch in range(n_epochs):\n # Instantiating HDF5Matrix for the training set, which is a slice of the first 150 elements\n file = file_picker.pick_next(id)\n X_train = HDF5Matrix(file, 'grayscale')\n y_train = HDF5Matrix(file, 'ab_hist')\n\n print(\"Epoch \" + str(epoch) + \"/\" + str(n_epochs))\n start = time.time()\n for b in range(len(y_train) // b_size):\n i, j = b * b_size, (b+1) * b_size\n\n a = data_to_onehot(y_train[i:j])\n model.train_on_batch(X_train[i:j], a)\n print(\"Spent: \" + str(time.time() - start))\n if epoch % 5 == 4:\n print(model.evaluate(X_train[:8], data_to_onehot(y_train[:8]), batch_size=8))\n if epoch % 10 == 9:\n model.save_weights(\"../weights/implementation5-\" + str(epoch) + \".h5\")\n\nexcept (KeyboardInterrupt, SystemExit):\n id.stop()\n sys.exit()\n\nid.stop()","repo_name":"PrimozGodec/DeepColorization","sub_path":"implementations/implementation5.py","file_name":"implementation5.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11719826258","text":"from lxml import etree\nfrom urllib import parse\nimport requests\nimport json\nimport re\nfrom interval import Interval\n\nbaseUrl = 'http://121.194.213.115/swyt/jxcdkbcx.php' #基于校园内网地址进行爬取\nua = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36'\n\n# http://121.194.213.72/ 教务处网站 获取实时教学周\nweek = 10\n\nurlDic = {\n 'xnxq':\"'2020-20211'\"#查询日期,默认为2020-2021第一学期\n #'jxcdmc'为教室get请求头\n # \"%CD%C5%D6%FD%BD%A3%C2%A5\"是由\"团铸剑楼\"gbk编码得来\n}\nclassRoomNumZhuJian = ['101','102','104','105','106','108','110','111','112',\n '201','202','204','205','206','207','208','210',\n '301','302','303','304','305','306','307','308','309',\n '401','402','403','404','405','406','407','408','409','410','411',\n '501','502','503','504','505','506','507','508','509','510','511']#铸剑楼教室\n\nclassRoomNumZhong = ['103','104','107','110','112','113',\n '203','204','205','206','207','208','210','211',\n '303','304','305','306','307','308',\n '407','408',\n '503','504','505','506','507','510',\n '603','607',\n 
'703','704','705','707','708']#中楼教室\n\nclassRoomNumXi = ['102','103','104','105','106','109',\n '202','203','204','205','206','209',\n '302','303','304','305','306','309',\n '402','403','404','405','406','409',\n '502','503','504','505','506','509',]#西配楼教室\n\n#测试用教室号\n# classRoomNumZhuJian = ['101']#铸剑楼教室\n# classRoomNumZhong = ['103']#中楼教室\n# classRoomNumXi = ['102']#西配楼教室\n\npathPool = [#上午1、2节\n \"//table[@class='table table-bordered table-striped table-condensed']//tr[1]//td[2]/text()\",\n \"//table[@class='table table-bordered table-striped table-condensed']//tr[1]//td[3]/text()\",\n \"//table[@class='table table-bordered table-striped table-condensed']//tr[1]//td[4]/text()\",\n \"//table[@class='table table-bordered table-striped table-condensed']//tr[1]//td[5]/text()\",\n \"//table[@class='table table-bordered table-striped table-condensed']//tr[1]//td[6]/text()\",\n #上午3、4节\n \"//body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[2]/td[2]/text()\",\n \"//body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[2]/td[3]/text()\",\n \"//body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[2]/td[4]/text()\",\n \"//body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[2]/td[5]/text()\",\n \"//body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[2]/td[6]/text()\",\n #下午1、2节\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[3]/td[2]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[3]/td[3]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[3]/td[4]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[3]/td[5]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[3]/td[6]/text()\",\n #下午3、4节\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[4]/td[2]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[4]/td[3]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[4]/td[4]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[4]/td[5]/text()\",\n \"/html[1]/body[1]/div[3]/div[1]/div[1]/div[3]/div[1]/div[2]/table[1]/tbody[1]/tr[4]/td[6]/text()\",\n]\n\n# 定义一个变量\njsontext = {'data':[]}\n\n#生成当前教学楼所有教室的url池,以便下一步爬取\ndef creatUrlPool(roomNumSelect):\n urlPool = [] #url池, 存放需要遍历的url \n if roomNumSelect is classRoomNumZhuJian:\n for roomNum in roomNumSelect:\n urlPool.append(\"{}?{}&jxcdmc=%27%CD%C5%D6%FD%BD%A3%C2%A5{}%27\".format(baseUrl,parse.urlencode(urlDic),roomNum))#铸剑楼拼接字符串\n elif roomNumSelect is classRoomNumZhong:\n for roomNum in roomNumSelect:\n urlPool.append(\"{}?{}&jxcdmc=%27%CD%C5%D3%FD%BE%AF%D6%D0%C2%A5{}%27\".format(baseUrl,parse.urlencode(urlDic),roomNum))#中楼拼接字符串\n elif roomNumSelect is classRoomNumXi:\n for roomNum in roomNumSelect:\n urlPool.append(\"{}?{}&jxcdmc=%27%CD%C5%D3%FD%BE%AF%CE%F7%C2%A5{}%27\".format(baseUrl,parse.urlencode(urlDic),roomNum))#西配楼拼接字符串\n return urlPool\n\ndef RegStr(string):\n global week\n Reg1 = r'\\d-\\d\\d'\n Reg2 = r'\\d-\\d'\n if re.search(Reg1,string) is None:\n it = re.search(Reg2,string)\n else:\n it = re.search(Reg1,string)\n\n numberList = str(it.group()).split('-')\n for i in range(len(numberList)):\n numberList[i] = int(numberList[i])\n \n #若有课,返回1;无课,返回0\n return 1 if week in 
Interval(numberList[0],numberList[1]) else 0\n\n\n#开始爬取\ndef getResponse(roomNumSelect):\n am12, am34, pm12, pm34 = [], [], [], []\n urlPool = creatUrlPool(roomNumSelect)\n\n for classRoomIndex,url in enumerate(urlPool):\n with requests.get(url,headers={'User-agent':ua}) as response:\n content = response.text #HTML内容\n html = etree.HTML(content)\n\n count = 0\n for path in pathPool:\n pathTemp = html.xpath(path)\n count+=1\n pathFlag = 1#默认置1\n\n # max(list, key=len, default='')\n #检测文本\n if len(pathTemp)==0:\n pathFlag=0\n else:\n pathTemp = max(pathTemp, key=len, default='')\n pathFlag = RegStr(pathTemp)\n\n #结果检测完毕\n #append(0)为占位符,表示有课\n if count<=5:\n if pathFlag==0:#pathFlag为0,代表无课\n am12.append(int(roomNumSelect[classRoomIndex]))\n else:#pathFlag不为0,代表有课。向结果数组添加0,以示占位。\n am12.append(0)\n elif 5 maybe RCE? :D)\n count2 (int): second counter, max: 999 (buffer overflow, if more! -> maybe RCE? :D)\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n bytearray_count1 = struct.pack(\"!h\", int(count1))\n bytearray_count2 = struct.pack(\"!h\", int(count2))\n return bytearray(\n [\n 8,\n 0,\n 10,\n 128,\n int(bytearray_count1[1]) % 256,\n int(bytearray_count1[0]) % 256,\n int(bytearray_count2[1]) % 256,\n int(bytearray_count2[0]) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not update the scoreboard: {}\".format(error))\n","repo_name":"derkalle4/python3-idotmatrix-client","sub_path":"core/idotmatrix/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"74527277052","text":"import os\nimport cv2\nfrom zipfile import ZipFile\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom itertools import combinations\n\ndef imagePreprocess(image,size):# Size in format img_width,img_height\n image=cv2.resize(image, size) \n #(thresh, image) = cv2.threshold(image, 150, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) # grayscale to binary using threshold\n image = image/255\n return image\ndef getData(loc,name_file,size,dic):\n img_list = []\n l = len(name_file)\n counter = 0\n for name in name_file:\n if counter==int(l/4):\n print(\"25% Completed..\")\n elif counter==int(l/2):\n print(\"50% Completed..\")\n elif counter==int(3*l/2):\n print(\"75% Completed..\")\n counter+=1\n\n try:\n img = cv2.imread(os.path.join(loc,name),0)\n img = imagePreprocess(img,size)\n img = img.reshape((size[0],size[1],1))\n img_list.append(name)\n dic[name] = img\n except:\n print(\"Couldn't import \",name,\"in Location:\",loc)\n continue\n print(\"100% Completed\")\n return img_list \n\n \n# Dataset 1\ndef getNames1(loc):\n real_loc = os.path.join(loc,'genuine')\n forge_loc= os.path.join(loc,'forged')\n real_names = os.listdir(real_loc)\n forge_names= os.listdir(forge_loc)\n # Sorting forged list because it is not in order of elements\n # Sort it in order of \"Last 2 Digits\" (Excluding \".png\") which denote who's sign it is \n forge_names = sorted(forge_names,key= lambda x: int(x[-6:-4]))\n return real_names,forge_names\n\ndef getImages(loc,size,dic):\n print(\"Getting Dataset-1 Data and Saving inside the Dictionary..\")\n real_names,forge_names = getNames1(loc)\n print(\"Getting Genuine Images..\")\n real_img = getData(os.path.join(loc,'genuine'),real_names,size,dic)\n print(\"Getting Forged Images..\")\n forge_img= 
getData(os.path.join(loc,'forged'),forge_names,size,dic)\n return np.asarray(real_img),np.asarray(forge_img)\n\n\n# Dataset 2\ndef getImages2(loc,size,dic):\n print(\"Getting Dataset2 Data..\")\n real_names = os.listdir(os.path.join(loc,'full_org'))\n forg_names = os.listdir(os.path.join(loc,'full_forg'))\n img_real = getData(os.path.join(loc,'full_org'),real_names,size,dic)\n img_forg = getData(os.path.join(loc,'full_forg'),forg_names,size,dic)\n print(\"Data Import Complete!\")\n return (np.asarray(img_real),np.asarray(img_forg))\n\n\ndef getDataset3(data,zipobject,dic,size):\n lis = []\n for c in range(0,len(data)):\n if c == len(data)//2:print(\"50% Complete\")\n if c == len(data)//4:print(\"25% Complete\")\n if c == 3*len(data)//4:print(\"75% Complete\")\n i = data[c]\n img = np.asarray(Image.open(zipobject.open('BHSig260/Hindi/'+ i)))\n img = imagePreprocess(img ,size)\n img = img.reshape((size[0],size[1],1))\n lis.append(i)\n dic[i] = img\n print('100% Complete')\n return lis\n\n# Dataset 3\ndef returnPairList(pairfile):\n x1,x2,y=[],[],[]\n for i in pairfile:\n t = i.split(' ')\n x1.append(t[0])\n x2.append(t[1])\n y.append(int(t[2]))\n return x1,x2,y\n\ndef getHindi(path,size,images_dictionary):\n real_list=[]\n forge_lis=[]\n with ZipFile(path, 'r') as z: \n Fdata = z.read('BHSig260/Hindi/list.forgery').decode(\"utf-8\").split(\"\\n\")\n Gdata = z.read('BHSig260/Hindi/list.genuine').decode(\"utf-8\").split(\"\\n\")\n Fdata = Fdata[0:-1]\n Gdata = Gdata[0:-1]\n pairs = z.read('BHSig260/Hindi/Hindi_pairs.txt').decode(\"utf-8\").split(\"\\n\")\n pairs=pairs[0:-1]\n print(\"Getting Genuine Data..\")\n real_list=getDataset3(Gdata,z,images_dictionary,size)\n print(\"Getting Forged Data..\")\n forge_list=getDataset3(Fdata,z,images_dictionary,size)\n return real_list,forge_list\n\ndef makeHindiPairs(real,forged):\n x1,x2,y = [],[],[]\n for i in range(0,160):\n fstart = i*30\n gstart = i*24\n for j in range(gstart,gstart+24):\n for k in range(j+1,gstart+24):\n x1.append(real[j])\n x2.append(real[k])\n y.append(1)\n for k in range(fstart,fstart+30):\n x1.append(real[j])\n x2.append(forged[k])\n y.append(0)\n return x1,x2,y\n\n\ndef makePairs(real_img,forged_img,no_of_writers):\n y=[]\n x1=[]\n x2=[]\n length = len(real_img) # Length of both is supposed to be same\n for i in range(0,length,no_of_writers): # Real-Real samples\n combs = list(combinations(range(i,i+no_of_writers),2))\n for each in combs:\n x1.append(real_img[each[0]])\n x2.append(real_img[each[1]])\n y.append(1)\n x1.append(real_img[each[0]])\n x2.append(forged_img[each[1]])\n y.append(0)\n return [np.asarray(x1),np.asarray(x2),np.asarray(y)]\n","repo_name":"SiddheshSingh/Signature-Verification","sub_path":"data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"41836170488","text":"import os\nfrom subprocess import Popen, PIPE\n\nimport schedule\nimport time\nfrom threading import Thread\n\nimport json\nfrom flask import Flask, request, abort\nfrom flask_mail import Mail, Message\n\napp = Flask(__name__)\n\n# Webhook, Scheduled\napp.config[\"MODE\"] = os.environ.get(\"MODE\", None)\n\nif app.config[\"MODE\"] == \"cron\":\n print(\"[+] Starting in cron mode\")\n # If no interval is passed, it will be executed hourly\n app.config[\"INTERVAL\"] = int(os.environ.get(\"INTERVAL\", 3600))\n\n# Notifications\napp.config[\"NOTIFY\"] = os.environ.get(\"NOTIFY\", None)\nif 
app.config[\"NOTIFY\"] == \"email\":\n # Mail configurations\n app.config[\"MAIL_TITLE\"] = os.environ[\"MAIL_TITLE\"]\n app.config[\"MAIL_SERVER\"] = os.environ[\"MAIL_SERVER\"]\n app.config[\"MAIL_PORT\"] = int(os.environ[\"MAIL_PORT\"])\n app.config[\"MAIL_USE_SSL\"] = bool(os.environ[\"MAIL_USE_SSL\"])\n app.config[\"MAIL_USERNAME\"] = os.environ[\"MAIL_USERNAME\"]\n app.config[\"MAIL_PASSWORD\"] = os.environ[\"MAIL_PASSWORD\"]\n\n mail = Mail(app)\n\n sender = os.environ[\"MAIL_SENDER\"]\n recipient = os.environ[\"MAIL_RECIPIENT\"]\n mailTitle = os.environ[\"MAIL_TITLE\"]\n \nif not os.path.isfile(\"/task.sh\"):\n print(\"[+] /task.sh or /task.py not found, please specify a task\")\n exit(-1)\n\ndef scheduler():\n # Run for the very first time, so we get a result\n print(\"[+] Starting the cron scheduller\")\n task()\n schedule.every(app.config[\"INTERVAL\"]).seconds.do(task)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\ndef task():\n print(\"[+] Executing the task\")\n proc = Popen([\"/task.sh\"], stdout=PIPE, stderr=PIPE)\n stdout, stderr = proc.communicate()\n exitcode = proc.returncode\n print(\"[+] Task done, output: \")\n print(stdout)\n print(stderr)\n body = stdout + \"\\n\\n\\n\" + stderr\n sendEmail(mailTitle, body)\n \ndef sendEmail(title, body):\n msg = Message(title,\n sender=sender,\n recipients=[recipient])\n msg.body = body \n mail.send(msg)\n\n@app.route(\"/\")\ndef index():\n return \"This is a service, and you are using it wrong !\"\n\nif __name__ == \"__main__\":\n schedulerThread = Thread(target=scheduler)\n schedulerThread.daemon = True\n schedulerThread.start()\n #app.run(host='0.0.0.0', port=8080,threaded=True)\n schedulerThread.join()\n","repo_name":"opsxcq/docker-task","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"17780896602","text":"import os\nimport json\n\nimport luigi\nimport numpy as np\nimport z5py\n\nfrom cluster_tools.copy_volume import CopyVolumeLocal, CopyVolumeSlurm\nfrom paintera_tools import convert_to_paintera_format, set_default_roi, set_default_block_shape\nfrom mmpb.default_config import write_default_global_config\nfrom common import RAW_PATH, RAW_KEY, PAINTERA_PATH, PAINTERA_KEY, TMP_PATH, ROI_PATH, LABEL_MAPPING_PATH\n\n\ndef write_max_id(path, key, max_id):\n with z5py.File(path) as f:\n ds = f[key]\n ds.attrs['maxId'] = max_id\n\n\ndef copy_watersheds(input_path, input_key,\n output_path, output_key,\n copy_ids, tmp_folder, target, max_jobs,\n offset=None, insert_mode=False):\n task = CopyVolumeLocal if target == 'local' else CopyVolumeSlurm\n config_dir = os.path.join(tmp_folder, 'configs')\n os.makedirs(config_dir, exist_ok=True)\n\n config = task.default_task_config()\n config.update({'insert_mode': insert_mode, 'offset': offset})\n\n if copy_ids is None:\n with z5py.File(PAINTERA_PATH, 'r') as f:\n max_id = f[PAINTERA_KEY].attrs['maxId']\n else:\n config.update({'value_list': copy_ids.tolist()})\n max_id = int(copy_ids.max())\n\n with open(os.path.join(config_dir, 'copy_volume.config'), 'w') as f:\n json.dump(config, f)\n\n t = task(tmp_folder=tmp_folder, max_jobs=max_jobs, config_dir=config_dir,\n input_path=input_path, input_key=input_key,\n output_path=output_path, output_key=output_key,\n prefix='copy-ws')\n ret = luigi.build([t], local_scheduler=True)\n assert ret, \"Copy failed\"\n\n write_max_id(output_path, output_key, max_id)\n\n\ndef 
make_proofreading_project(project_folder, tmp_folder,\n assignments, block_labels, block_roi,\n target, max_jobs):\n\n if len(block_labels) == 0:\n return\n # don't do anything if we have a paintera project already\n if os.path.exists(os.path.join(project_folder, 'attributes.json')):\n return\n\n os.makedirs(project_folder, exist_ok=True)\n config_dir = os.path.join(tmp_folder, 'configs')\n\n roi_begin, roi_end = block_roi\n write_default_global_config(config_dir, roi_begin, roi_end)\n with open(os.path.join(config_dir, 'global.config'), 'r') as f:\n block_shape = json.load(f)['block_shape']\n\n data_path = os.path.join(project_folder, 'data.n5')\n f = z5py.File(data_path)\n f.require_group('volumes')\n\n # make a link to the raw data\n raw_out_key = 'volumes/raw'\n if raw_out_key not in f:\n print(\"Make raw symlink\")\n raw_in = os.path.join(RAW_PATH, RAW_KEY)\n raw_out = os.path.join(data_path, raw_out_key)\n os.symlink(raw_in, raw_out)\n\n # get the relevant fragment segment assignments for this block\n print(\"Get assignment mask\")\n assignment_mask = np.isin(assignments[:, 1], block_labels)\n assert assignment_mask.sum() > 0\n block_assignments = assignments[assignment_mask]\n assert block_assignments.shape[0] == assignment_mask.sum()\n assert block_assignments.shape[1] == 2\n print(\"Sub assignments have the shape:\", block_assignments.shape)\n\n # copy the relevant part of the fragment segment assignment\n print(\"Copy the assignments\")\n g_out = f.require_group('volumes/paintera')\n save_assignments = block_assignments.T\n ds_ass = g_out.require_dataset('fragment-segment-assignment', shape=save_assignments.shape,\n chunks=save_assignments.shape, compression='gzip',\n dtype='uint64')\n ds_ass[:] = save_assignments\n\n # copy the relevant parts of the watersheds\n print(\"Copy the watersheds\")\n ws_ids = block_assignments[:, 0]\n copy_watersheds(PAINTERA_PATH, os.path.join(PAINTERA_KEY, 'data/s0'),\n data_path, 'volumes/watershed',\n ws_ids, tmp_folder, target, max_jobs)\n\n # make the paintera data\n res = [0.025, 0.01, 0.01]\n restrict_sets = [-1, -1, 5, 4,\n 4, 3, 3, 1]\n print(\"Make new paintera data\")\n set_default_roi(roi_begin, roi_end)\n set_default_block_shape(block_shape)\n convert_to_paintera_format(data_path, raw_out_key,\n 'volumes/watershed', 'volumes/paintera',\n label_scale=1, resolution=res,\n tmp_folder=tmp_folder, target=target, max_jobs=max_jobs,\n max_threads=16, convert_to_label_multisets=True,\n restrict_sets=restrict_sets)\n\n\n# make the appropriate sub-volume and paintera project for each block\ndef make_proofreading_projects(root, labels_to_blocks, rois_to_blocks, target, max_jobs):\n os.makedirs(root, exist_ok=True)\n tmp_root = './tmps'\n os.makedirs(tmp_root, exist_ok=True)\n\n with z5py.File(TMP_PATH, 'r') as f:\n assignments = f['node_labels/fragment-segment-assignment2'][:]\n\n n_blocks = len(labels_to_blocks)\n # block_ids = range(1, n_blocks + 1)\n block_ids = range(3, 17)\n\n for block_id in block_ids:\n print(\"Make project\", block_id, \"/\", n_blocks + 1)\n project_folder = os.path.join(root, 'project%02i' % block_id)\n tmp_folder = os.path.join(tmp_root, 'tmp_project%i' % block_id)\n make_proofreading_project(project_folder, tmp_folder,\n assignments, labels_to_blocks[block_id],\n rois_to_blocks[block_id], target, max_jobs)\n\n\ndef make_subdivision(root, target, max_jobs):\n with open(LABEL_MAPPING_PATH, 'r') as f:\n labels_to_blocks = json.load(f)\n labels_to_blocks = {int(k): v for k, v in labels_to_blocks.items()}\n\n with 
open(ROI_PATH, 'r') as f:\n rois_to_blocks = json.load(f)\n rois_to_blocks = {int(k): v for k, v in rois_to_blocks.items()}\n\n make_proofreading_projects(root, labels_to_blocks, rois_to_blocks, target, max_jobs)\n\n\nif __name__ == '__main__':\n root = '/g/arendt/EM_6dpf_segmentation/corrections_and_proofreading/paintera_projects'\n target = 'local'\n max_jobs = 48\n make_subdivision(root, target, max_jobs)\n","repo_name":"mobie/platybrowser-project","sub_path":"segmentation/correction/make_proofreading_projects.py","file_name":"make_proofreading_projects.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"12802282395","text":"'''\ncopy('story.txt', 'story_copy.txt') # None\n# expect the contents of story.txt and story_copy.txt to be the same\n'''\n\ndef copy(file, cpy_file):\n with open(file) as filehandle:\n data = filehandle.read()\n\n with open(cpy_file, 'w') as copyfile:\n copyfile.write(data)\n\n\ncopy('cf1.txt', 'cf2.txt')\n","repo_name":"lavkushsingh31/Python_Programs","sub_path":"cpy.py","file_name":"cpy.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38282443774","text":"import sys\nimport lib.definition as proc\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\n\nimport processes.foundation as fd\nimport processes.fitModel as fM\nimport processes.histogramAction as hA\n\nfrom lmfit import Model\n\n\ndef fit_energy(energy_df):\n counts, bins, bars = plt.hist(energy_df['cal_energy'], histtype='step', bins=1000)\n\n i = np.argmax(counts)\n\n lower = hA.find_nearest_bin(bins, bins[i]-5)\n upper = hA.find_nearest_bin(bins, bins[i]+5)\n ydata = counts[lower:upper]\n xdata = bins[lower:upper]\n\n\n \n gmodel = Model(fM.lingaus)\n #gmodel = Model(gaus)\n i = np.argmax(ydata)\n #params = gmodel.make_params(A=700, m1=315.5, s1=0.5, H_tail=-0.000001, H_step=1, tau=-0.5, slope=-6, intrcpt=180)\n params = gmodel.make_params(a1=1700, m1=xdata[i], s1=1.5, slope=0.0, intrcpt=0.0)\n #params['s1'].vary = False\n result = gmodel.fit(ydata,params, x=xdata)\n\n sigma1 = result.params['s1'].value\n fw1 = 2.355*sigma1\n err = result.params['s1'].stderr\n err1 = err*2.355\n energy = result.params['m1'].value\n print(fw1)\n\n return sigma1, err, np.abs(fw1), err1, result.params['m1'].value\n\ndef main():\n base_min = 1800\n base_max = 10000\n base_lis = np.asarray([x for x in np.arange(base_min, base_max, 200)])\n l=0\n sig_lis1 = []\n fw_lis1 = []\n sig_error_lis1 = []\n fw_error_lis1 = []\n energy_lis1 = []\n sig_lis2 = []\n fw_lis2 = []\n sig_error_lis2 = []\n fw_error_lis2 = []\n energy_lis2 = []\n sig_lis3 = []\n fw_lis3 = []\n sig_error_lis3 = []\n fw_error_lis3 = []\n energy_lis3 = []\n sig_lis4 = []\n fw_lis4 = []\n sig_error_lis4 = []\n fw_error_lis4 = []\n energy_lis4 = []\n for base_line in base_lis:\n energy_df = pd.read_csv(r'baseVsNoiseCSV1723SubWave/energyArr' + str(base_line) + '.csv')\n energy_df = energy_df[energy_df[\"trapEmax\"]<200]\n energy_df[\"cal_energy\"] = energy_df[\"trapEmax\"]\n energy_df[\"true_energy\"] = 5.812\n energy_df[\"base_line\"] = base_line\n\n sig, sig_error, fw, fw_error, energy = fit_energy(energy_df)\n sig_lis1.append(sig)\n sig_error_lis1.append(sig_error)\n fw_lis1.append(fw)\n fw_error_lis1.append(fw_error)\n energy_lis1.append(energy-(5.812))\n\n\n for 
base_line in base_lis:\n energy_df = pd.read_csv(r'baseVsNoiseCSV1723SubTrap/energyArr' + str(base_line) + '.csv')\n energy_df = energy_df[energy_df[\"trapEmax\"]<20]\n energy_df[\"cal_energy\"] = energy_df[\"trapEmax\"]\n energy_df[\"true_energy\"] = 5.812\n energy_df[\"base_line\"] = base_line\n\n sig, sig_error, fw, fw_error, energy = fit_energy(energy_df)\n sig_lis2.append(sig)\n sig_error_lis2.append(sig_error)\n fw_lis2.append(fw)\n fw_error_lis2.append(fw_error)\n energy_lis2.append(energy-(5.812))\n \n for base_line in base_lis:\n energy_df = pd.read_csv(r'baseVsNoiseCSV1723WaveletSubWave/energyArr' + str(base_line) + '.csv')\n energy_df = energy_df[energy_df[\"trapEmax\"]<20]\n energy_df[\"cal_energy\"] = energy_df[\"trapEmax\"]\n energy_df[\"true_energy\"] = 5.812\n energy_df[\"base_line\"] = base_line\n\n sig, sig_error, fw, fw_error, energy = fit_energy(energy_df)\n sig_lis3.append(sig)\n sig_error_lis3.append(sig_error)\n fw_lis3.append(fw)\n fw_error_lis3.append(fw_error)\n energy_lis3.append(energy-(5.812))\n \n \"\"\"\n for base_line in base_lis:\n energy_df = pd.read_csv(r'baseVsNoiseCSV1726/energyArr' + str(base_line) + '.csv')\n energy_df = energy_df[energy_df[\"trapEmax\"]<20]\n energy_df[\"cal_energy\"] = energy_df[\"trapEmax\"]*0.09458385296-0.09174633738\n energy_df[\"true_energy\"] = 5.812*0.09458385296-0.09174633738\n energy_df[\"base_line\"] = base_line\n\n sig, sig_error, fw, fw_error, energy = fit_energy(energy_df)\n sig_lis4.append(sig)\n sig_error_lis4.append(sig_error)\n fw_lis4.append(fw)\n fw_error_lis4.append(fw_error)\n energy_lis4.append(energy-(5.812*0.09458385296-0.09174633738))\n \"\"\"\n\n df1 = pd.DataFrame({\n \"Base_line\":pd.Series(base_lis),\n \"sigma\":pd.Series(sig_lis1),\n \"sigma_error\":pd.Series(sig_error_lis1),\n \"FWHM\":pd.Series(fw_lis1),\n \"FWHM_error\":pd.Series(fw_error_lis1),\n \"Energy_diff\":pd.Series(energy_lis1)\n })\n\n df2 = pd.DataFrame({\n \"Base_line\":pd.Series(base_lis),\n \"sigma\":pd.Series(sig_lis2),\n \"sigma_error\":pd.Series(sig_error_lis2),\n \"FWHM\":pd.Series(fw_lis2),\n \"FWHM_error\":pd.Series(fw_error_lis2),\n \"Energy_diff\":pd.Series(energy_lis2)\n })\n\n df3 = pd.DataFrame({\n \"Base_line\":pd.Series(base_lis),\n \"sigma\":pd.Series(sig_lis3),\n \"sigma_error\":pd.Series(sig_error_lis3),\n \"FWHM\":pd.Series(fw_lis3),\n \"FWHM_error\":pd.Series(fw_error_lis3),\n \"Energy_diff\":pd.Series(energy_lis3)\n })\n\n \"\"\"\n df4 = pd.DataFrame({\n \"Base_line\":pd.Series(base_lis),\n \"sigma\":pd.Series(sig_lis4),\n \"sigma_error\":pd.Series(sig_error_lis4),\n \"FWHM\":pd.Series(fw_lis4),\n \"FWHM_error\":pd.Series(fw_error_lis4),\n \"Energy_diff\":pd.Series(energy_lis4)\n })\n \"\"\"\n\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(x=df1[\"Base_line\"], y=df1[\"FWHM\"], name=\"Subtract Before Trap\", \n error_y=dict(\n type='data',\n array=np.asarray(df1[\"FWHM_error\"]),\n visible=True\n )))\n fig.add_trace(go.Scatter(x=df2[\"Base_line\"], y=df2[\"FWHM\"], name=\"Subtract After Trap\", \n error_y=dict(\n type='data',\n array=np.asarray(df2[\"FWHM_error\"]),\n visible=True\n )))\n fig.add_trace(go.Scatter(x=df3[\"Base_line\"], y=df3[\"FWHM\"], name=\"Wavelet Filtered Subtract \", \n error_y=dict(\n type='data',\n array=np.asarray(df3[\"FWHM_error\"]),\n visible=True\n )))\n \"\"\"\n fig.add_trace(go.Scatter(x=df4[\"Base_line\"], y=df4[\"FWHM\"], name=\"Det1726\", \n error_y=dict(\n type='data',\n array=np.asarray(df4[\"FWHM_error\"]),\n visible=True\n )))\n \"\"\"\n\n \"\"\"\n fig.add_vline(x=8500, 
annotation_text=\"Max Baseline 9 Channels\", annotation_font_size=20)\n fig.add_vline(x=15000, annotation_text=\"Max Baseline 5 Channels\", annotation_font_size=20)\n fig.update_xaxes(title=\"Length of BaseLine [Clocks]\", title_font_size=20)\n fig.update_yaxes(title=\"FWHM [keV]\", title_font_size=20)\n \"\"\"\n fig.update_xaxes(title=\"Length of BaseLine [Clocks]\", title_font_size=20)\n fig.update_yaxes(title=\"FWHM [ADC]\", title_font_size=20)\n\n fig.update_layout(\n title=\"FWHM Vs Length of Baseline For Different Methods\",\n title_font_size=20,\n font=dict(\n family=\"Courier New, monospace\",\n size=20\n )\n )\n fig.show()\n\n\n\nif __name__==\"__main__\":\n main()","repo_name":"jbrowni2/geminiAnalysis","sub_path":"baseline/plotFWHMVsBaseline.py","file_name":"plotFWHMVsBaseline.py","file_ext":"py","file_size_in_byte":6951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41459635181","text":"import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\n\ndef get_data():\n train_x = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779,\n 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997,\n 5.654, 9.27, 3.1])\n train_y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366,\n 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904,\n 2.42, 2.94, 1.3])\n dtype = torch.FloatTensor\n X = Variable(torch.from_numpy(train_x).type(dtype), requires_grad=False).view(17, 1)\n Y = Variable(torch.from_numpy(train_y).type(dtype), requires_grad=False)\n return X, Y\n\n\ndef get_weights():\n w = Variable(torch.randn(1), requires_grad=True)\n b = Variable(torch.randn(1), requires_grad=True)\n return w, b\n\n\ndef simple_network(x, w, b):\n y_pred = torch.matmul(x, w) + b\n return y_pred\n\n\ndef loss_fn(y, y_pred, w, b):\n loss = (y_pred - y).pow(2).sum()\n for param in [w, b]:\n if not param.grad is None:\n param.grad.data.zero_()\n loss.backward()\n return loss.data[0]\n\n\n# 优化器\ndef optimize(learning_rate, w, b):\n w.data -= learning_rate * w.grad.data\n b.data -= learning_rate * b.grad.data\n\n\n","repo_name":"sunyinggang/dailyCode","sub_path":"python/PyTorch-DeepLearning/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"17512491161","text":"# -*- coding: utf-8 -*-\n\"\"\"Goal: determine the fairness of the games.\n\nCompute win percentage for true and for the starter such that:\n t_win_% == 1 - f_win_%\n start_win_% == 1 - second_win_%\nand decide if they are fair.\n\nDetailed approach:\nFor every config file, play a bunch of games.\nCollect game ending data.\nCompute intermediate data so mean and standard dev can be computed.\n\nCreated on Sun Jul 23 11:29:10 2023\n@author: Ann\"\"\"\n\nimport argparse\nimport enum\nimport math\nimport os\nimport random\nimport sys\nimport time\n\nimport pandas as pd\nimport scipy\nimport tqdm\n\nfrom context import ai_player\nfrom context import man_config\n\nfrom game_log import game_log\nfrom game_interface import WinCond\n\n\n\n# %%\n\nALL = 'all'\n\n\nWIN_SCORE = 4\nTIE_SCORE = 2\n\nEXPECTED_VAL = WIN_SCORE * 0.5\n\n\n# %% column names\n\n# raw tally collumns\n\nMAX_TURNS = 'max_turns'\n\n# W_starter_winner\nW_F_F = 'w_f_f'\nW_F_T = 'w_f_t'\nW_T_F = 'w_t_f'\nW_T_T = 'w_t_t'\n\n# in order by starter, winner\nWINNER_COL = [W_F_F, W_F_T, W_T_F, W_T_T]\n\n# TIE_starter\nTIE_T = 'tie_t'\nTIE_F = 'tie_f'\n\n# in order by starter\nTIE_COL = [TIE_F, TIE_T]\n\n# two sum 
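# The SCORE/SCORE_2 pairs defined just below implement the classic "two sum"
# bookkeeping: keeping sum(x) and sum(x^2) is enough to recover the mean and
# sample standard deviation later without storing every game's score. A quick
# self-contained check of that identity against the stdlib:
import math, statistics
xs = [4, 0, 2, 4, 4, 0, 2]                    # made-up per-game scores
s, s2, n = sum(xs), sum(x * x for x in xs), len(xs)
mean = s / n
stdev = math.sqrt((s2 - s * s / n) / (n - 1))
assert abs(mean - statistics.mean(xs)) < 1e-12
assert abs(stdev - statistics.stdev(xs)) < 1e-9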
columns\nSCORE = 'score'\nSCORE_2 = 'score_2' # squared\n\nST_SCORE = 'st_score'\nST_SCORE_2 = 'st_score_2'\n\nINT_COLUMNS = [MAX_TURNS] + WINNER_COL + TIE_COL + \\\n [SCORE, SCORE_2, ST_SCORE, ST_SCORE_2]\n\nMEAN = 'mean'\nSTDEV = 'stddev'\n\nWIN_PCT = 'win_pct'\nSTR_PCT = 'str_pct'\nTIE_PCT = 'tie_pct'\nFLT_COLUMNS = [WIN_PCT, STR_PCT, TIE_PCT]\n\n# fair columsn\nWIN_FAIR = 'win_fair'\nSTARTER_FAIR = 'starter_fair'\n\nBOOL_COLUMNS = [WIN_FAIR, STARTER_FAIR]\n\n\n# %% files, fields and defaults\n\nPATH = '../GameProps/'\nBAD_CFG = 'all_params.txt'\n\nINDEX = [fname[:-4] for fname in os.listdir(PATH) if fname != BAD_CFG]\n\nFIELDS = {\n 'allow_rule': 0,\n 'blocks': False,\n 'capsamedir': False,\n 'capt_max': 0,\n 'capt_min': 0,\n 'capt_on': '',\n 'capt_rturn': False,\n 'capttwoout': False,\n 'child_cvt': 0,\n 'child_rule': 0,\n 'child_type': 0,\n 'crosscapt': False,\n 'evens': False,\n 'goal': 0,\n 'gparam_one': 0,\n 'grandslam': 0,\n 'min_move': 1,\n 'mlaps': 0,\n 'move_one': False,\n 'moveunlock': False,\n 'multicapt': False,\n 'mustpass': False,\n 'mustshare': False,\n 'no_sides': False,\n 'nocaptfirst': False,\n 'oppsidecapt': False,\n 'pickextra': 0,\n 'prescribed': 0,\n 'round_fill': 0,\n 'round_starter': 0,\n 'rounds': False,\n 'skip_start': False,\n 'sow_direct': 1,\n 'sow_own_store': False,\n 'sow_rule': 0,\n 'sow_start': False,\n 'start_pattern': 0,\n 'stores': False,\n 'udir_holes': '',\n 'visit_opp': False,\n 'xc_sown': False,\n 'xcpickown': 0,\n }\n\n\n# %% score and collect\n\nclass GameResult(enum.Enum):\n \"\"\"Game results.\"\"\"\n\n WIN = WinCond.WIN.value\n TIE = WinCond.TIE.value\n MAX_TURNS = enum.auto()\n\n\ndef score_game(data, gname, starter, result, winner):\n \"\"\"Collect sum and sum squared for scores for winner\n and for starter's wins.\"\"\"\n\n score = 0\n if result == GameResult.WIN.value and winner:\n score = WIN_SCORE\n elif result == GameResult.TIE.value:\n score = TIE_SCORE\n data.loc[gname, SCORE] += score\n data.loc[gname, SCORE_2] += score * score\n\n score = 0\n if result == GameResult.WIN.value and winner == starter:\n score = WIN_SCORE\n elif result == GameResult.TIE.value:\n score = TIE_SCORE\n data.loc[gname, ST_SCORE] += score\n data.loc[gname, ST_SCORE_2] += score * score\n\n\ndef result_name(starter, result, winner):\n \"\"\"Lookup the column name for the result.\"\"\"\n\n if result == GameResult.WIN.value:\n return WINNER_COL[starter * 2 + winner]\n\n if result == GameResult.TIE.value:\n return TIE_COL[starter]\n\n if result == GameResult.MAX_TURNS.value:\n return MAX_TURNS\n\n assert f'Unexpedect game result {result}.'\n\n\n# %% build data frame\n\n\ndef build_data_frame():\n \"\"\"Build a data frame with all the desired columns,\n Include key game data (optional), raw game result tallys, two sum values\n for win percents, and fairness columns.\"\"\"\n\n if cargs.no_params:\n data = pd.DataFrame(index=cargs.game)\n\n else:\n ginfo = dict()\n for gname in cargs.game:\n gdict = man_config.read_game(PATH + gname + '.txt')\n cons_gd = dict(**gdict['game_constants'], **gdict['game_info'])\n del cons_gd['about']\n del cons_gd['name']\n ginfo[gname] = cons_gd\n data = pd.DataFrame.from_dict(ginfo).transpose()\n\n for name, dval in FIELDS.items():\n if name in data:\n data[name] = data[name].fillna(dval)\n\n dlen = len(cargs.game)\n\n for col in INT_COLUMNS:\n data[col] = [0] * dlen\n for col in FLT_COLUMNS:\n data[col] = [0.0] * dlen\n for col in BOOL_COLUMNS:\n data[col] = [False] * dlen\n\n return data\n\n\n# %% fairness tests\n\ndef 
std_dev(xsum, x_sqr_sum, nbr):\n \"\"\"Use two sum formula to compute standard deviation.\"\"\"\n\n return math.sqrt((x_sqr_sum - ((xsum * xsum) / nbr)) / (nbr - 1))\n\n\ndef fail_to_reject(data, gname, tag, confidence=0.95):\n \"\"\"H0: prob of win = 0.5 Ha: prob of win != 0.5\n\n Failing to reject - means we didn't reject H0\n -- not enough evidence to prove it false\"\"\"\n\n nbr_games = cargs.nbr_runs - data.loc[gname, MAX_TURNS]\n if nbr_games <= 1:\n return False\n\n mean = data.loc[gname, tag] / nbr_games\n stdev = std_dev(data.loc[gname, tag], data.loc[gname, tag + '_2'],\n nbr_games)\n if abs(stdev) < 0.00001:\n return False\n\n test_stat = (mean - EXPECTED_VAL) / (stdev / math.sqrt(nbr_games))\n crit_value = scipy.stats.norm.ppf(1 - (1 - confidence) / 2)\n\n return test_stat < crit_value\n\n\ndef evals(gname, data):\n \"\"\"Compute win_fair and starter_fair.\"\"\"\n\n data.loc[gname, WIN_FAIR] = fail_to_reject(data, gname, SCORE)\n data.loc[gname, STARTER_FAIR] = fail_to_reject(data, gname, ST_SCORE)\n\n nbr_games = cargs.nbr_runs - data.loc[gname, MAX_TURNS]\n data.loc[gname, WIN_PCT] = data.loc[gname, SCORE] / (WIN_SCORE * nbr_games)\n data.loc[gname, STR_PCT] = data.loc[gname, ST_SCORE] / (WIN_SCORE * nbr_games)\n data.loc[gname, TIE_PCT] = \\\n (data.loc[gname, TIE_T] + data.loc[gname, TIE_F]) / nbr_games\n\n\n# %% play and collect\n\ndef test_one_game(game, pdict):\n \"\"\"Play one game, return the result as\n outcome (win, tie, or max turns) and winner (if one)\"\"\"\n\n if cargs.ai_player:\n tplayer = ai_player.AiPlayer(game, pdict)\n fplayer = ai_player.AiPlayer(game, pdict)\n\n for _ in range(5000 if game.info.rounds else 500):\n\n if not cargs.ai_player:\n moves = game.get_moves()\n assert moves, \"Game didn't end right.\"\n move = random.choice(moves)\n else:\n game_log.active = False\n if game.turn:\n move = tplayer.pick_move()\n else:\n move = fplayer.pick_move()\n game_log.active = cargs.save_logs\n\n cond = game.move(move)\n if cond in (WinCond.WIN, WinCond.TIE):\n break\n if cond in (WinCond.ROUND_WIN, WinCond.ROUND_TIE):\n if game.new_game(cond, new_round_ok=True):\n return cond.value, game.turn\n game_log.turn(0, 'Start Game', game)\n\n if game.info.mustpass:\n game.test_pass()\n\n else:\n return GameResult.MAX_TURNS.value, None\n\n return cond.value, game.turn\n\n\ndef play_one_config(gname, data):\n \"\"\"For one game configuration, play cargs.nbr_runs number of games.\n Start half with True and half with False.\n Tally raw results and accumualte data for mean/std_dev (score).\"\"\"\n\n for cnt in tqdm.tqdm(range(cargs.nbr_runs)):\n\n game, pdict = man_config.make_game(PATH + gname + '.txt')\n if cnt < cargs.nbr_runs // 2:\n starter = game.turn = True\n else:\n starter = game.turn = False\n\n result, winner = test_one_game(game, pdict)\n\n # raw tally\n col = result_name(starter, result, winner)\n data.loc[gname, col] += 1\n\n score_game(data, gname, starter, result, winner)\n if cargs.save_logs:\n game_log.save(f'Fair Games Simulated.\\n{cargs}\\n'\n + f'Starter: {starter}\\n\\n'\n + game.params_str())\n game_log.new()\n time.sleep(1)\n\n # print(game.params_str())\n\n\ndef play_them_all(data):\n\n for gname in cargs.game:\n print(gname)\n play_one_config(gname, data)\n evals(gname, data)\n\n data.to_csv(f'data/{cargs.output}.csv')\n\n\n# %% command line args\n\ndef define_parser():\n \"\"\"Define the command line arguements.\"\"\"\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--game', action='append',\n choices=list(INDEX) + [ALL],\n 
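# The fail_to_reject test above compares a z statistic against a two-sided
# normal critical value (scipy.stats.norm.ppf is the inverse normal CDF).
# Note the original compares test_stat < crit_value without abs(), so a
# strongly negative statistic would also "fail to reject"; a symmetric
# two-sided test would use abs(test_stat). Standalone illustration:
import math
import scipy.stats
confidence = 0.95
crit = scipy.stats.norm.ppf(1 - (1 - confidence) / 2)   # ~1.96 at 95%
assert abs(crit - 1.9599639845) < 1e-6
# e.g. mean score 2.1 over 400 games, stdev 1.8, expected value 2.0:
z = (2.1 - 2.0) / (1.8 / math.sqrt(400))                # ~1.11
print(z < crit)                                         # True -> fail to reject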
help=\"\"\"Select the games to simulate. Use multiple\n options to select multiple games.\"\"\")\n\n parser.add_argument('--nbr_runs', action='store',\n default=10, type=int,\n help=\"\"\"Select the number of games to simulate.\n Default: %(default)s\"\"\")\n\n parser.add_argument('--ai_player', action='store_true',\n help=\"\"\"Use the minimaxer ai_player.\n Default: %(default)s\"\"\")\n\n parser.add_argument('--no_params', action='store_true',\n help=\"\"\"Don't game parameters in output file.\n Default: %(default)s\"\"\")\n\n parser.add_argument('--save_logs', action='store_true',\n help=\"\"\"Save the game logs. Only one game maybe\n selected and nbr_games must be < 50.\n Games will be slowed to 1 per second.\n Default: %(default)s\"\"\")\n\n parser.add_argument('--output', action='store',\n default='junk',\n help=\"\"\"Output file. Default: %(default)s\"\"\")\n\n # TODO add confidence\n\n return parser\n\n\ndef process_command_line():\n\n global cargs\n\n parser = define_parser()\n try:\n cargs = parser.parse_args()\n except argparse.ArgumentError:\n parser.print_help()\n sys.exit()\n\n if not cargs.game:\n cargs.game = INDEX\n if cargs.save_logs and (len(cargs.game) > 1 or cargs.nbr_runs > 50):\n print(\"save_logs only valid for <= 1 game and <= 50 runs.\")\n sys.exit()\n\n game_log.active = cargs.save_logs\n game_log.level = game_log.STEP\n\n print(cargs)\n\n# %%\n\nprocess_command_line()\n\ndata = build_data_frame()\nplay_them_all(data)\n","repo_name":"StoneShark/MancalaGames","sub_path":"analysis/fair_games.py","file_name":"fair_games.py","file_ext":"py","file_size_in_byte":10936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36523557746","text":"# Open the file with the list of strings\nwith open('3.in') as f:\n # Read all the lines from the file\n lines = f.readlines()\n\n# Initialize the total score to 0\nscore = 0\n\n# Loop through each group of three strings\nfor i in range(0, len(lines), 3):\n # Get the current group of three strings\n s1, s2, s3 = lines[i:i+3]\n\n # Strip whitespace from the strings\n s1 = s1.strip()\n s2 = s2.strip()\n s3 = s3.strip()\n\n # Find the common characters in the three strings\n common_chars = set(s1).intersection(s2, s3)\n\n # Loop through each common character\n for c in common_chars:\n # Convert the character to a number\n if c.islower():\n # Convert lowercase letters to numbers between 1 and 26\n num = ord(c) - ord('a') + 1\n elif c.isupper():\n # Convert uppercase letters to numbers between 27 and 53\n num = ord(c) - ord('A') + 27\n\n # Add the number to the total score\n score += num\n\n# Print the total score\nprint(score)","repo_name":"msturm/aoc2022","sub_path":"3/gpt3.py","file_name":"gpt3.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4453114193","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nimport torch\nimport onnx\nimport pycuda\nimport tensorrt\nimport pycuda.driver as cuda\nimport pycuda.autoinit\n\n\n# Simple helper data class that's a little nicer to use than a 2-tuple.\nclass HostDeviceMem(object):\n def __init__(self, host_mem, device_mem):\n self.host = host_mem\n self.device = device_mem\n\n def __str__(self):\n return \"Host:\\n\" + str(self.host) + \"\\nDevice:\\n\" + str(self.device)\n\n def __repr__(self):\n return self.__str__()\n\n\ndef allocate_buffers(engine, batch_size):\n inputs = []\n outputs = []\n bindings = []\n stream = 
cuda.Stream()\n for binding in engine:\n size = tensorrt.volume(engine.get_binding_shape(binding)) * batch_size\n dtype = tensorrt.nptype(engine.get_binding_dtype(binding))\n # Allocate host and device buffers\n host_mem = cuda.pagelocked_empty(size, dtype)\n device_mem = cuda.mem_alloc(host_mem.nbytes)\n # Append the device buffer to device bindings.\n bindings.append(int(device_mem))\n # Append to the appropriate list.\n if engine.binding_is_input(binding):\n inputs.append(HostDeviceMem(host_mem, device_mem))\n else:\n outputs.append(HostDeviceMem(host_mem, device_mem))\n return inputs, outputs, bindings, stream\n\n","repo_name":"YonghaoHe/LFD-A-Light-and-Fast-Detector","sub_path":"lfd/deployment/tensorrt/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":408,"dataset":"github-code","pt":"78"} +{"seq_id":"40082503135","text":"import numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom scipy.integrate import solve_ivp\r\nimport os,time\r\nimport Lagrange, Lyapunov_init, Lyapunov_diff\r\n\r\n#共通変数,関数\r\nfrom util import (\r\n mu,\r\n dirnum,\r\n dirgraph,\r\n func,\r\n event_y0pm,\r\n event_y0mp,\r\n Lyapunov_half,\r\n Lyapunov\r\n)\r\n\r\n#エネルギー計算\r\ndef Energy(mu,x):\r\n r1 = np.sqrt((x[0]+mu)**2+x[1]**2)\r\n r2 = np.sqrt((x[0]-1+mu)**2+x[1]**2)\r\n #エネルギー\r\n E = (x[2]**2+x[3]**2)/2-(x[0]**2+x[1]**2)/2 -(1-mu)/r1 -mu/r2 -mu*(1-mu)/2\r\n return E\r\n\r\n#Contination\r\ndef Contination(mu,x1,x2,energy):\r\n energy_init = Energy(mu, x1)\r\n energy_re = Energy(mu, x2)\r\n delta = x2 - x1\r\n print(energy_re)\r\n if abs(energy_re - energy)<1e-10:\r\n return x2\r\n elif (energy_init - energy)*(energy_re - energy)<0:\r\n delta = delta*0.5\r\n x2 = Lyapunov_diff.diff_corr(mu, x1+delta)\r\n return Contination(mu, x1, x2, energy)\r\n else:\r\n x1 = x2\r\n x2 = Lyapunov_diff.diff_corr(mu, x1+delta)\r\n return Contination(mu, x1, x2, energy)\r\n\r\n#L1,数値の保存も含めたcontination\r\ndef Con_init(mu, xe, energy):\r\n #ラグランジュ点の判別\r\n if xe>1-mu:\r\n file_name = 'L2_init.npy'\r\n elif xe>0:\r\n file_name = 'L1_init.npy'\r\n else:\r\n file_name = 'L3_init.npy'\r\n #値の検索\r\n if file_name in os.listdir(dirnum):\r\n initdata = np.load(dirnum+file_name, allow_pickle=True).item()\r\n if energy in initdata.keys():\r\n return initdata[energy]\r\n else:\r\n initdata = dict()\r\n\r\n init_x1 = Lyapunov_init.lyap_init(mu, 0.001, xe)\r\n init_x2 = Lyapunov_init.lyap_init(mu, 0.005, xe)\r\n x_diff1 = Lyapunov_diff.diff_corr(mu, init_x1)\r\n x_diff2 = Lyapunov_diff.diff_corr(mu, init_x2)\r\n x_contination = Contination(mu, x_diff1, x_diff2, energy)\r\n #値の保存\r\n initdata[energy] = x_contination\r\n np.save(dirnum+file_name,initdata)\r\n\r\n return x_contination\r\n\r\ndef main():\r\n t1 = time.time()\r\n L = Lagrange.Lagrangepoint(mu)\r\n E = [-1.59, -1.57, -1.55, -1.53, -1.51]\r\n color = ['b', 'c', 'g', 'k', 'm']\r\n\r\n #図の設定\r\n fig = plt.figure(figsize=(6,5))\r\n ax = fig.add_subplot()\r\n for i in range(len(E)):\r\n #初期条件の計算\r\n L1_con = Con_init(mu, L[0][0], E[i])\r\n L2_con = Con_init(mu, L[0][1], E[i])\r\n #積分計算\r\n y1 = Lyapunov(L1_con)\r\n y2 = Lyapunov(L2_con)\r\n ax.plot(y1[0], y1[1], color=color[i], label='E='+str(E[i]))\r\n ax.plot(y2[0], y2[1], color=color[i])\r\n ax.plot(L[0][0],L[1][0],'rx')\r\n ax.plot(L[0][1],L[1][1],'rx')\r\n ax.plot(1-mu, 0, marker='.', markersize=10, color='y')\r\n ax.set_xlabel('$\\it{x}$',fontsize=25,fontstyle='italic')\r\n ax.set_ylabel('y',fontsize=25)\r\n 
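# The Contination routine above is a step-halving continuation toward a
# target energy. A minimal scalar analogue of the same control flow, with a
# plain function f standing in for the differential-corrected orbit energy
# (illustrative only, not the original numerics):
def continue_to_target(f, x1, x2, target, tol=1e-10):
    while abs(f(x2) - target) > tol:
        if (f(x1) - target) * (f(x2) - target) < 0:   # overshot: halve the step
            x2 = x1 + (x2 - x1) * 0.5
        else:                                          # advance by the same step
            x1, x2 = x2, x2 + (x2 - x1)
    return x2

assert abs(continue_to_target(lambda x: x * x, 1.0, 1.1, 2.0) - 2 ** 0.5) < 1e-9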
fig.gca().set_aspect('equal')\r\n ax.set_title('Continuation')\r\n ax.legend(bbox_to_anchor=(1, 1), loc='upper left')\r\n plt.tight_layout()\r\n\r\n # save the figure\r\n fig.savefig(dirgraph+\"Contination.png\")\r\n\r\n t2 = time.time()\r\n print(f'Elapsed time: {t2-t1}s')\r\n plt.show()\r\n\r\nif __name__=='__main__':\r\n main()","repo_name":"MendyTaka/graduation-thesis","sub_path":"PCR3BP_Tube_ver2/Lyapunov/Lyapunov_con.py","file_name":"Lyapunov_con.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2344649092","text":"def commaDelimited(l):\n # Join the items as 'a, b, c, and d' without mutating the caller's list.\n if not l:\n return ''\n if len(l) == 1:\n return l[0]\n return ', '.join(l[:-1]) + ', and ' + l[-1]\n\nspam = ['apples', 'bananas', 'tofu', 'cats']\nprint(commaDelimited(spam))\n","repo_name":"rickymccallum87/automate-the-boring-stuff","sub_path":"commaList.py","file_name":"commaList.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74125215612","text":"from math import tanh,cosh\nfrom numpy import linspace\nfrom pylab import plot,show\n\naccuracy = 1e-16\n\ndef arctanh(u):\n x = 0.0\n delta = 1.0\n while abs(delta)>accuracy:\n delta = (tanh(x)-u)*cosh(x)**2\n x -= delta\n return x\n\n# upoints = linspace(-0.99,0.99,100)\n# xpoints = []\n# for u in upoints:\n# xpoints.append(arctanh(u))\n# plot(upoints,xpoints)\n# show()\n\na=0\nb=100\nx = 0.3\nN = 300\nh = (b-a)/N\n\n# abs() keeps the loop from exiting early if Newton's method overshoots past zero\nwhile abs(arctanh(x))>accuracy:\n\tdf=(arctanh(x+h)-arctanh(x))/h\n\tx=x-arctanh(x)/df\nprint(x)","repo_name":"dgrin1/p304_2022_examples","sub_path":"petra_atanh.py","file_name":"petra_atanh.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"9587054383","text":"import mysql.connector\n\ndef insertCustomer(line):\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"Python@12\",\n database=\"ecomm\")\n mycursor = mydb.cursor()\n word_list = line.split(\",\")\n sql = \"INSERT INTO customers (name, address, phone_no, pwd, data) VALUES (%s, %s, %s, %s, %s)\"\n values = (word_list[1].strip(),word_list[2].strip(),word_list[0].strip(),word_list[3].strip(),word_list[4].strip())\n mycursor.execute(sql, values)\n mydb.commit()\n print(mycursor.rowcount, \"record inserted.\")\n\nwith open(\"customer_data.txt\",\"r\") as myfile:\n line = myfile.readline()\n while(line):\n try:\n insertCustomer(line)\n except:\n print(\"Could not insert line : {}\".format(line))\n continue\n finally:\n line = myfile.readline()\n","repo_name":"rahusriv/python_tutorial","sub_path":"python_batch_2/project1/load_customer_1.py","file_name":"load_customer_1.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37753541486","text":"\nimport gevent\nfrom gevent import monkey\nfrom gevent.queue import Queue, Empty\nfrom gevent.event import Event\nmonkey.patch_all(thread=False)\nfrom time import sleep\nimport json\nimport cv2\nfrom threading import Thread, Event\nimport signal\nimport numpy as np\nfrom flask import Flask, render_template, Response, request\nfrom flask_sockets import Sockets\nimport logging\nimport os\nimport humanize\nfrom collections import deque\nfrom recorder import Recorder\nfrom gameplay.stateful import Gameplay\nfrom datetime import 
datetime, timedelta\nfrom visualization import Visualizer\nfrom image_recognition import ImageRecognizer, ImageRecognition\nfrom grabber import PanoramaGrabber\nfrom remoterf import RemoteRF\nfrom config_manager import ConfigManager\n\n\n\n# Get Gevent websocket patched for Python3 here:\n# https://bitbucket.org/noppo/gevent-websocket/\n# hg update python3-support\n# sudo python3 setup.py install\n\nlogger = logging.getLogger(\"flask\")\n\n# Queue messages from bootstrap\nlog_queue = deque(maxlen=1000)\nwebsockets = set()\n\napp = Flask(__name__)\ntry:\n with open(\"/etc/machine-id\", \"r\") as fh:\n app.config['SECRET_KEY'] = fh.read()\nexcept:\n app.config['SECRET_KEY'] = 'secret!'\nsockets = Sockets(app)\n\n\n\n\n# Build pipeline\ngrabber = PanoramaGrabber() # config read from ~/.robovision/grabber.conf\nimage_recognizer = ImageRecognizer(grabber)\ngameplay = Gameplay(image_recognizer)\nrf = RemoteRF(gameplay, \"/dev/ttyACM0\")\nvisualizer = Visualizer(image_recognizer, framedrop=1)\nrecorder = Recorder(grabber)\n\ndef generator():\n visualizer.enable()\n queue = visualizer.get_queue()\n while True:\n try:\n buf, resized, frame, r = queue.get_nowait()\n except Empty:\n sleep(0.001) # Fix this stupid thingie\n continue\n else:\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n'\n yield buf\n yield b'\\r\\n\\r\\n'\n\n@app.route('/combined/<type_str>')\ndef video_combined(type_str):\n TYPES = ['VIDEO', 'DEBUG', 'COMBO']\n return Response(generator(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@app.route('/')\ndef group():\n return render_template(\n 'group.html',\n )\n\n@app.route('/logging')\ndef logging_view():\n return render_template('logging.html')\n\n@sockets.route('/')\ndef command(websocket):\n x = 0\n y = 0\n w = 0\n\n for buf in log_queue:\n websocket.send(buf)\n\n game_config = ConfigManager.instance(\"game\")\n\n def get_game_options():\n return [\n (\"field_id\", [game_config.get_value(\"global\", \"field_id\"),\"A\",\"B\",\"Z\"]),\n (\"robot_id\", [game_config.get_value(\"global\", \"robot_id\"),\"A\",\"B\"]),\n (\"target goal color\", [game_config.get_value(\"global\", \"target goal color\"),\"yellow\",\"blue\"]),\n (\"gameplay status\", [game_config.get_value(\"global\", \"gameplay status\"),\"disabled\", \"enabled\"]),\n ]\n \n game_options = get_game_options()\n\n settings_packet = json.dumps(dict(\n action = \"settings-packet\",\n sliders = ConfigManager.as_list(\"imgrec\"),\n options = game_options\n ))\n websocket.send(settings_packet)\n\n while not websocket.closed:\n websockets.add(websocket)\n\n gevent.sleep(0.01)\n\n msg = websocket.receive()\n\n if not msg:\n websockets.remove(websocket)\n logger.info(\"WebSocket connection presumably closed, %d left connected\" % len(websockets))\n break\n\n response = json.loads(msg)\n action = response.pop(\"action\", None)\n if not action:\n logger.info(\"Unknown action\")\n continue\n\n\n if action == \"gamepad\":\n controls = response.pop(\"data\")\n x = controls.pop(\"controller0.axis0\", x) * 0.99\n y = controls.pop(\"controller0.axis1\", y) * 0.99\n w = controls.pop(\"controller0.axis2\", w) * 0.7\n\n # Kick the ball\n if controls.get(\"controller0.button7\", None):\n gameplay.arduino.kick()\n\n # Toggle autonomy\n if controls.get(\"controller0.button4\", None):\n gameplay.toggle()\n\n # Manual control of the robot\n if not gameplay.alive:\n gameplay.arduino.set_xyw(x,-y,-w)\n\n # TODO: sliders\n elif action == \"record_toggle\":\n print(\"TOGGLING RECORDER\")\n recorder.toggle()\n elif action == 
\"record_enable\":\n recorder.enable()\n elif action == \"record_disable\":\n recorder.disable()\n elif action == \"set_settings\":\n for k, v in response.items():\n ConfigManager.set_config_value(k, v)\n print(response.items())\n elif action == \"set_options\":\n for k, v in response.items():\n game_config.get_option(\"global\", k).set_value(v)\n game_config.save()\n print(response.items())\n else:\n logger.error(\"Unhandled action: %s\", action)\n websockets.remove(websocket)\n logger.info(\"WebSocket connection closed, %d left connected\", len(websockets))\n return b\"\"\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime):\n return obj.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3] + \"Z\"\n if isinstance(obj, timedelta):\n f = obj.total_seconds()\n mins = f // 60\n return \"%02d:%06.03f\" % (mins, f % 60)\n return json.JSONEncoder.default(self, obj)\n\n\nclass WebsocketLogHandler(logging.Handler):\n def __init__(self):\n logging.Handler.__init__(self)\n self.started = datetime.utcnow()\n\n def emit(self, record):\n timestamp = datetime.utcfromtimestamp(record.created)\n buf = json.dumps(dict(\n action = \"log-entry\",\n created = timestamp,\n uptime = timestamp - self.started,\n message = record.msg % record.args,\n severity = record.levelname.lower()), cls=MyEncoder)\n log_queue.append(buf)\n for websocket in websockets:\n websocket.send(buf)\n\ndef main():\n logger.info(\"Starting robovision\")\n\n logging.basicConfig(\n filename=\"/tmp/robovision.log\",\n level=logging.INFO)\n\n ws_handler = WebsocketLogHandler()\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n handler.setFormatter(formatter)\n for facility in \"grabber\", \"recognition\", \"cli\", \"flask\", \"arduino\", \"gameplay\", \"threading\", \"recorder\":\n logging.getLogger(facility).addHandler(handler)\n logging.getLogger(facility).addHandler(ws_handler)\n logging.getLogger(facility).setLevel(logging.DEBUG)\n\n from gevent import pywsgi\n from geventwebsocket.handler import WebSocketHandler\n\n ip, port = ('0.0.0.0', 5000)\n if os.getuid() == 0:\n port = 80\n\n\n server = pywsgi.WSGIServer((ip, port), app, handler_class=WebSocketHandler)\n logger.info(\"Started server at http://{}:{}\".format(ip, port))\n\n # Quick'n'dirty hacks\n image_recognizer.grabber = grabber\n image_recognizer.websockets = websockets\n\n # Start all threads\n image_recognizer.start()\n gameplay.start()\n grabber.start()\n recorder.start()\n visualizer.start()\n rf.start()\n\n # Register threads for monitoring\n from managed_threading import ThreadManager\n manager = ThreadManager()\n manager.register(gameplay)\n manager.register(recorder)\n manager.register(grabber)\n manager.register(visualizer)\n manager.register(image_recognizer)\n manager.start()\n\n # Enable some threads\n image_recognizer.enable()\n visualizer.enable()\n #if gameplay.is_enabled:\n #gameplay.enable()\n server.serve_forever()\n\nif __name__ == '__main__':\n main()\n\n#start_server = websockets.serve(time, '0.0.0.0', 5001)\n#asyncio.get_event_loop().run_until_complete(start_server)\n#asyncio.get_event_loop().run_forever()\n","repo_name":"iplayfast/zoidberg","sub_path":"robovision/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2950749132","text":"
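# A small Flask front-end that exposes four interchangeable search indexers\n# (BM25, Word2Vec, FastText, SBERT) behind a single query form.\n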
from flask import Flask, render_template, request, redirect, url_for\nfrom indexers import BM25Indexer, Word2VecIndexer, FastTextIndexer, BERTIndexer\nimport time\n\napp = Flask(__name__)\n\n# Paths to the index files and models\nBM25_PICKLE = \"data/bm25_index.pkl\"\nW2V_MODEL_PATH = \"data/word2vec.bin\"\nW2V_EMBEDDINGS = \"data/w2v_embeddings.npy\"\nFT_MODEL_PATH = \"data/fasttext.model\"\nFT_EMBEDDINGS = \"data/ft_embeddings.npy\"\nBERT_MODEL_PATH = \"ai-forever/sbert_large_nlu_ru\"\nBERT_EMBEDDINGS = \"data/sbert_embeddings.npy\"\nCORPUS_PATH = \"data/corpus.txt\"\n\n# Initialize and load the indexes when the module is imported\nbm25_indexer = BM25Indexer()\nbm25_indexer.load_index(BM25_PICKLE)\n\nword2vec_indexer = Word2VecIndexer()\nword2vec_indexer.load_index(W2V_EMBEDDINGS, W2V_MODEL_PATH, CORPUS_PATH)\n\nfasttext_indexer = FastTextIndexer()\nfasttext_indexer.load_index(FT_EMBEDDINGS, FT_MODEL_PATH, CORPUS_PATH)\n\nbert_indexer = BERTIndexer(BERT_MODEL_PATH)\nbert_indexer.load_index(BERT_EMBEDDINGS, CORPUS_PATH)\n\n# Dictionary mapping indexer names to their objects\nindexers = {\n 'bm25': bm25_indexer,\n 'word2vec': word2vec_indexer,\n 'fasttext': fasttext_indexer,\n 'bert': bert_indexer\n}\n\n@app.route('/')\ndef index():\n return render_template('index.html') # the main description and a button leading to the search page go here\n\n@app.route('/search')\ndef search():\n return render_template('search.html') # the search-query form and, optionally, the indexer selector go here\n\n@app.route('/results', methods=['POST'])\ndef results():\n search_query = request.form['query']\n selected_indexer_key = request.form['indexer'] # a string such as \"bm25\", \"word2vec\", etc.\n\n # Fetch the matching indexer object from the dictionary\n selected_indexer = indexers[selected_indexer_key]\n\n start_time = time.time()\n # The selected indexer can now be used for the search\n results = selected_indexer.search(search_query)\n end_time = time.time()\n\n search_time = end_time - start_time\n\n return render_template('results.html', query=search_query, results=results, time=search_time)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"elinkamaeva/Search-Engine","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36257349903","text":"import serial\nimport pynput\n\nfrom serial.tools import list_ports\nfrom pynput.keyboard import Key, Listener, KeyCode\n\nVALID_JOINTS = {'LOW', 'UP', 'JAWR', 'JAWW'}\nisInAutomation = 0\n\nports = [p.device for p in list_ports.comports()]\n\nprint(\"Available ports for this session:\")\nfor i in range(len(ports)):\n print(\"[\"+str(i+1)+\"] \"+str(ports[i]))\n\nport_selection = int(input(\"Enter a number for the port. (Defaults at 1) >> \") or 1)\nactive_port = ports[port_selection-1]\nactive_baudrate = int(input(\"Enter a baudrate for serial communication. (Defaults at 115200) >> \") or \"115200\")\n\nsession = serial.Serial(active_port, baudrate = active_baudrate)\nsession.write(\"PING\".encode(\"utf-8\"))\n#while 1:\n# if session.read_until(\"PONG\"):\n# break\n\nprint(\"Connection established with... 
[\" + str(ports[port_selection-1]) + \"] ...with a baudrate of \" + str(active_baudrate))\n\nlow = 0\nup = 0\njawr = 0\njaww = 0\n\n# Resets the arm to default angles; invoke when a connection is established or when a new set of actions is applied.\ndef reset_rotation():\n global low, up, jawr, jaww\n session.write(\"RST\".encode(\"utf-8\"))\n print(\"RST\")\n low = 90\n up = 90\n jawr = 90\n jaww = 90\n\ndef single_rotation(joint, operator):\n global low, up, jawr, jaww\n i = 0\n\n if joint not in VALID_JOINTS:\n raise ValueError(\"Joint \" + str(joint) + \" is not a VALID_JOINT\")\n if int(operator) not in {0, 1}:\n raise ValueError(\"Operator can only be 0 (for -) or 1 (for +)\")\n if int(operator) == 0:\n i = -1\n elif int(operator) == 1:\n i = 1\n\n if joint == 'LOW':\n if low + i > 180 or low + i < 0:\n return 0\n else:\n low += i\n print(\"ROT \" + joint + \":\" + str(low))\n if joint == 'UP':\n if up + i > 180 or up + i < 0:\n return 0\n else:\n up += i\n print(\"ROT \" + joint + \":\" + str(up))\n if joint == 'JAWR':\n if jawr + i > 180 or jawr + i < 0:\n return 0\n else:\n jawr += i\n print(\"ROT \" + joint + \":\" + str(jawr))\n if joint == 'JAWW':\n if jaww + i > 180 or jaww + i < 0:\n return 0\n else:\n jaww += i\n print(\"ROT \" + joint + \":\" + str(jaww))\n\n session.write((\"ROT \" + joint + \":\" + str(operator)).encode(\"utf-8\"))\n \ndef pose_rotation(*argpos):\n global low, up, jawr, jaww\n if len(argpos) != 4:\n raise ValueError(\"Pose rotation takes only 4 arguments.\")\n session.write((\"ACT \" + str(argpos[0]) + \",\" + str(argpos[1]) + \",\" + str(argpos[2]) + \",\" + str(argpos[3])).encode(\"utf-8\"))\n print(\"ACT \" + str(argpos[0]) + \",\" + str(argpos[1]) + \",\" + str(argpos[2]) + \",\" + str(argpos[3]))\n\n# Stops the arm's current actions immediately, e.g. so it can respond to manual movement by the user.\ndef stop_rotation():\n session.write(\"STP\".encode(\"utf-8\"))\n print(\"STP\")\n\nreset_rotation()\n\ndef on_press(key):\n try:\n if key.char == 'w':\n single_rotation('UP', 1)\n if key.char == 's':\n single_rotation('UP', 0)\n if key.char == 'a':\n single_rotation('JAWR', 0)\n if key.char == 'd':\n single_rotation('JAWR', 1)\n if key.char == 'q':\n single_rotation('JAWW', 0)\n if key.char == 'e':\n single_rotation('JAWW', 1)\n except AttributeError: \n if key is Key.up:\n single_rotation('LOW', 1)\n if key is Key.down:\n single_rotation('LOW', 0)\n \ndef on_release(key):\n if key is Key.esc:\n return False\n\nwith Listener(on_press=on_press, on_release=on_release, suppress=True) as Listener:\n Listener.join()","repo_name":"GToidZ/portfolio","sub_path":"02-Mechanical-Arm-Prototyping/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"40368692168","text":"#!/usr/bin/env python3\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\n__author__ = 'Boris Wachtmeister'\n\nimport argparse\nimport glob\nimport os\n\nfrom os import path\n\nHIDDEN_FILE_PREFIX = '.'\nKMAIL_SUBDIR_PREFIX = '.'\nKMAIL_SUBDIR_SUFFIX = '.directory'\nDEFAULT_HIERARCHY_SEPARATOR = '.'\nMAILDIR_SPECIAL_DIRS = ('cur', 'new', 'tmp')\n\n\nclass Maildir:\n \"\"\"\n This class represents a maildir with the information about parent directories etc\n \"\"\"\n def __init__(self, options, directory):\n self.args = options\n self.name = path.basename(directory)\n self.directory = directory\n path_components = path.relpath(directory, options.folder).split(path.sep)\n # remove the leading dot and the trailing .directory\n self.path_list = list()\n for p in path_components:\n if p.startswith(KMAIL_SUBDIR_PREFIX) and p.endswith(KMAIL_SUBDIR_SUFFIX):\n p = p[len(KMAIL_SUBDIR_PREFIX):len(p) - len(KMAIL_SUBDIR_SUFFIX)]\n self.path_list.append(p)\n\n def get_folder_path(self):\n \"\"\" Get the path of the folder in maildir-format \"\"\"\n folder_path = HIDDEN_FILE_PREFIX + self.args.hierarchy_separator.join(self.path_list)\n return path.join(self.args.folder, folder_path)\n\n def get_parent_maildir(self):\n \"\"\" Get the parent maildir directory of this maildir \"\"\"\n if len(self.path_list) == 1:\n return None # this was a root folder\n return path.dirname(self.directory)\n\n\nclass FileSystemAction:\n \"\"\"\n A helper class that does all os.* operations and allows doing a \"dry run\" which doesn't actually modify anything\n \"\"\"\n def __init__(self, dry_run, quiet=False):\n self.dry_run = dry_run\n self.quiet = quiet\n\n def rename(self, src, dst):\n \"\"\" Rename a folder from src to dst. This always checks if dst already exists, even in \"dry runs\" \"\"\"\n if path.exists(dst):\n raise Exception(\"Destination %s already exists\" % dst)\n self.__run('Moving %s -> %s' % (src, dst), lambda: os.rename(src, dst))\n\n def delete(self, file):\n \"\"\" Delete a file \"\"\"\n self.__run('Removing file %s' % file, lambda: os.remove(file))\n\n def rmdir(self, directory):\n \"\"\" remove a directory \"\"\"\n self.__run('Removing folder %s' % directory, lambda: os.rmdir(directory))\n\n def __run(self, message, function):\n if not self.quiet:\n print(message)\n if not self.dry_run:\n function()\n\n\nclass Kmail2Maildir:\n def __init__(self, args, fs_action):\n self.args = args\n self.fs_action = fs_action\n\n def move_kmail_folders(self):\n subdir_containers = self.__get_subfolders_containers_recursive(self.args.folder)\n maildirs_paths = self.__get_maildirs_from_subfoldercontainers(subdir_containers)\n maildirs = [Maildir(self.args, p) for p in maildirs_paths]\n maildirs.sort(key=lambda d: d.get_folder_path(), reverse=True)\n for maildir in maildirs:\n self.fs_action.rename(maildir.directory, maildir.get_folder_path())\n\n parent_maildir = maildir.get_parent_maildir()\n\n if self.args.remove_index_files:\n self.remove_index_files(maildir)\n\n # these will only be deleted if --remove-index-files is used, because otherwise the folder will not be empty\n if parent_maildir and self.__is_empty_dir(parent_maildir):\n self.fs_action.rmdir(parent_maildir)\n\n # kmail treats the inbox as yet another folder, but that's not how dovecot works -> move them to the base folder\n kmail_inbox = path.join(self.args.folder, HIDDEN_FILE_PREFIX + 'inbox')\n for maildir_dir in MAILDIR_SPECIAL_DIRS:\n self.fs_action.rename(path.join(kmail_inbox, maildir_dir), path.join(self.args.folder, maildir_dir))\n self.fs_action.rmdir(kmail_inbox)\n\n @staticmethod\n def 
__is_empty_dir(directory):\n \"\"\"\n checks if a directory is empty\n\n :param directory: the directory to check\n :return: True if the directory is empty\n \"\"\"\n return not os.listdir(directory)\n\n @staticmethod\n def __is_maildir(directory):\n \"\"\"\n checks if a directory is (most likely) a maildir.\n\n This basically checks if all special subdirectories of a maildir are present (\"cur\", \"tmp\", \"new\")\n\n :param directory: the directory to check\n :return: True if the directory seems to be a maildir\n \"\"\"\n dirs_exist = [path.isdir(path.join(directory, d)) for d in MAILDIR_SPECIAL_DIRS]\n return all(dirs_exist)\n\n @staticmethod\n def __get_subfolders_containers_recursive(directory):\n \"\"\"\n Recursively gets all of kmail's containers for subdirectories.\n\n Kmail puts subfolders into a special directory named \".foo.directory\".\n This method gets every directory that matches this description\n\n :param directory: The root of the tree that should be searched\n :return: a list of the directories that were found\n \"\"\"\n result = [directory]\n directories = glob.glob(path.join(directory, KMAIL_SUBDIR_PREFIX + '*' + KMAIL_SUBDIR_SUFFIX))\n maildir_subdirectory_containers = [d for d in directories if path.isdir(d)]\n for subdir in maildir_subdirectory_containers:\n result.extend(Kmail2Maildir.__get_subfolders_containers_recursive(subdir))\n return result\n\n @staticmethod\n def __get_maildirs_from_subfoldercontainers(subfolder_containers):\n \"\"\"\n This gets all maildirs from a list of containers as described in \"get_subfolders_containers_recursive\"\n\n :param subfolder_containers: The list of container directories that should be searched\n :return: a list of maildirs\n \"\"\"\n result = list()\n for subdir_container in subfolder_containers:\n files = glob.glob(path.join(subdir_container, \"*\"))\n directories = [f for f in files if path.isdir(f)]\n potential_maildirs = [d for d in directories if d[0] != HIDDEN_FILE_PREFIX]\n maildirs = [d for d in potential_maildirs if Kmail2Maildir.__is_maildir(d)]\n result.extend(maildirs)\n return result\n\n def remove_index_files(self, maildir):\n \"\"\"\n This removes all index files that were written by kmail for a maildir\n\n :param maildir: The maildir that should be cleaned\n \"\"\"\n index_file_glob = HIDDEN_FILE_PREFIX + maildir.name + '.index*'\n index_files = glob.glob(path.join(path.dirname(maildir.directory), index_file_glob))\n for index_file in index_files:\n self.fs_action.delete(index_file)\n\n\ndef kmail2maildir(args):\n # first make a dry run to check if every dir can be moved (the maildir shouldn't be changed concurrently of course)\n if not args.dry_run:\n print('Checking if everything should work')\n Kmail2Maildir(args, FileSystemAction(dry_run=True, quiet=True)).move_kmail_folders()\n\n Kmail2Maildir(args, FileSystemAction(args.dry_run)).move_kmail_folders()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Convert from kmail\\'s maildir variant to plain maildir')\n parser.add_argument('--dry-run', action='store_true', default=False,\n help='only print what would be done, don\\'t change anything yet')\n parser.add_argument('--remove-index-files', action='store_true', default=False,\n help='remove kmail\\'s index files')\n # courierimap's maildir++ documentation is vague about this separator.\n # It uses ':', but only as an example. dovecot uses '.' 
which is the default here\n parser.add_argument('--hierarchy-separator', default=DEFAULT_HIERARCHY_SEPARATOR,\n help='Separator that should be used for maildir++ subfolders '\n '(defaults to \"%s\")' % DEFAULT_HIERARCHY_SEPARATOR)\n parser.add_argument('folder')\n\n kmail2maildir(parser.parse_args())\n","repo_name":"Boris-de/kmail2maildir","sub_path":"kmail2maildir.py","file_name":"kmail2maildir.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11499332409","text":"from django.shortcuts import render\n\n\nexample_posts = [\n {\n 'author': 'admin',\n 'title': 'The beginning',\n 'content': 'post content',\n 'date_posted': ' Today'\n },\n{\n 'author': 'user',\n 'title': 'The ending',\n 'content': 'post content',\n 'date_posted': ' Yesterday'\n }\n]\n\ndef home(request):\n context = {\n 'posts': example_posts\n }\n return render(request, 'blog/home.html', context)\n\n\ndef about(request):\n return render(request, 'blog/about.html', {'title': 'About'})","repo_name":"ptrpl4/django-project","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18336462370","text":"import struct\n\nfrom dubbo.common.constants import MIN_INT_32, MAX_INT_32, DEFAULT_REQUEST_META\nfrom dubbo.common.exceptions import HessianTypeError\nfrom dubbo.common.util import double_to_long_bits, get_invoke_id\n\n\nclass Object(object):\n \"\"\"\n Create a Java object.\n \"\"\"\n\n def __init__(self, path, values=None):\n \"\"\"\n :param path: path of the Java class, e.g. java.lang.Object\n :param values: field values may already be assigned when the object is created\n \"\"\"\n if not isinstance(path, str):\n raise ValueError('Object path {} should be string type.'.format(path))\n self.__path = path\n if not isinstance(values, dict):\n values = {}\n self.__values = values\n\n def __getitem__(self, key):\n return self.__values[key]\n\n def __setitem__(self, key, value):\n if not isinstance(key, str):\n raise ValueError('Object key {} should be string type.'.format(key))\n self.__values[key] = value\n\n def __delitem__(self, key):\n del self.__values[key]\n\n def __repr__(self):\n return ''.format(self.__path, hex(id(self)), self.__values)\n\n def __contains__(self, key):\n return key in self.__values\n\n def keys(self):\n return list(self.__values.keys())\n\n def get_path(self):\n return self.__path\n\n\nclass Request(object):\n \"\"\"\n A class for dumping dubbo request body.\n All types can be dumped:\n * boolean\n * int\n * long\n * double\n * string\n * object\n \"\"\"\n\n def __init__(self, request):\n self.__body = request\n self.__classes = []\n self.types = [] # generic element types seen so far\n self.invoke_id = get_invoke_id()\n\n def encode(self):\n \"\"\"\n Serialize the request into a byte array.\n :return:\n \"\"\"\n request_body = self._encode_request_body()\n invoke_id = list(bytearray(struct.pack('!q', self.invoke_id)))\n request_head = DEFAULT_REQUEST_META + invoke_id + get_request_body_length(request_body)\n return bytearray(request_head + request_body)\n\n def _get_parameter_types(self, arguments):\n \"\"\"\n Build the parameter-type descriptor string for all arguments.\n :param arguments:\n :return:\n \"\"\"\n parameter_types = ''\n # determine the type descriptor of each argument\n for argument in arguments:\n parameter_types += self._get_class_name(argument)\n return parameter_types\n\n def _get_class_name(self, _class):\n \"\"\"\n Get the JVM type descriptor of the Java class corresponding to a field's type.\n Conversion rules: https://stackoverflow.com/a/3442100/4614538\n :param _class:\n :return:\n \"\"\"\n
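 # e.g. an int maps to \"I\", a str to \"Ljava/lang/String;\", and a list of float to \"[D\"\n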
 if isinstance(_class, bool): # the bool check must come before the int check (bool is a subclass of int)\n return 'Z'\n elif isinstance(_class, int):\n if MIN_INT_32 <= _class <= MAX_INT_32:\n return 'I'\n else:\n return 'J'\n elif isinstance(_class, float):\n return 'D'\n elif isinstance(_class, str):\n return 'L' + 'java/lang/String' + ';'\n elif isinstance(_class, Object):\n path = _class.get_path()\n path = 'L' + path.replace('.', '/') + ';'\n return path\n elif isinstance(_class, list):\n if len(_class) == 0:\n raise HessianTypeError('Method parameter {} is a list but length is zero'.format(_class))\n return '[' + self._get_class_name(_class[0])\n else:\n raise HessianTypeError('Unknown argument type: {0}'.format(_class))\n\n def _encode_request_body(self):\n \"\"\"\n Encode all known parameters according to the dubbo protocol.\n :return:\n \"\"\"\n dubbo_version = self.__body['dubbo_version']\n path = self.__body['path']\n version = self.__body['version']\n method = self.__body['method']\n arguments = self.__body['arguments']\n\n body = []\n body.extend(self._encode_single_value(dubbo_version))\n body.extend(self._encode_single_value(path))\n body.extend(self._encode_single_value(version))\n body.extend(self._encode_single_value(method))\n body.extend(self._encode_single_value(self._get_parameter_types(arguments)))\n for argument in arguments:\n body.extend(self._encode_single_value(argument))\n\n attachments = {\n 'path': path,\n 'interface': path,\n 'version': version\n }\n # the attachments map starts with 'H' and ends with 'Z'\n body.append(ord('H'))\n for key in list(attachments.keys()):\n value = attachments[key]\n body.extend(self._encode_single_value(key))\n body.extend(self._encode_single_value(value))\n body.append(ord('Z'))\n\n # the logic above never clamps values to byte size, so normalize all of them here\n for i in range(len(body)):\n body[i] = body[i] & 0xff\n return body\n\n @staticmethod\n def _encode_bool(value):\n \"\"\"\n Encode a bool value.\n :param value:\n :return:\n \"\"\"\n result = []\n if value:\n result.append(ord('T'))\n else:\n result.append(ord('F'))\n return result\n\n @staticmethod\n def _encode_int(value):\n \"\"\"\n Encode an integer.\n :param value:\n :return:\n \"\"\"\n result = []\n # values outside the int range are encoded as long\n # the catch: for numbers inside the int range we cannot tell whether they were long or int, so they are always treated as int\n if value > MAX_INT_32 or value < MIN_INT_32:\n result.append(ord('L'))\n result.extend(list(bytearray(struct.pack('!q', value))))\n return result\n\n if -0x10 <= value <= 0x2f:\n result.append(value + 0x90)\n elif -0x800 <= value <= 0x7ff:\n result.append(0xc8 + (value >> 8))\n result.append(value)\n elif -0x40000 <= value <= 0x3ffff:\n result.append(0xd4 + (value >> 16))\n result.append(value >> 8)\n result.append(value)\n else:\n result.append(ord('I'))\n result.append(value >> 24)\n result.append(value >> 16)\n result.append(value >> 8)\n result.append(value)\n return result\n\n @staticmethod\n def _encode_float(value):\n \"\"\"\n Encode a floating-point value.\n :param value:\n :return:\n \"\"\"\n result = []\n int_value = int(value)\n if int_value == value:\n if int_value == 0:\n result.append(0x5b)\n return result\n elif int_value == 1:\n result.append(0x5c)\n return result\n elif -0x80 <= int_value < 0x80:\n result.append(0x5d)\n result.append(int_value)\n return result\n elif -0x8000 <= int_value < 0x8000:\n result.append(0x5e)\n result.append(int_value >> 8)\n result.append(int_value)\n return result\n\n mills = int(value * 1000)\n if 0.001 * mills == value and MIN_INT_32 <= mills <= MAX_INT_32:\n result.append(0x5f)\n result.append(mills >> 24)\n result.append(mills >> 16)\n result.append(mills >> 8)\n result.append(mills)\n return result\n\n bits = double_to_long_bits(value)\r\n 
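# no compact form fits here: emit tag 'D' followed by the 8 raw IEEE-754 bytes, most significant first\n 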
result.append(ord('D'))\n result.append(bits >> 56)\n result.append(bits >> 48)\n result.append(bits >> 40)\n result.append(bits >> 32)\n result.append(bits >> 24)\n result.append(bits >> 16)\n result.append(bits >> 8)\n result.append(bits)\n return result\n\n @staticmethod\n def _encode_utf(value):\n \"\"\"\n Encode a string as UTF-8.\n Reference: com.alibaba.com.caucho.hessian.io.Hessian2Output#printString\n :param value:\n :return:\n \"\"\"\n result = []\n for v in value:\n ch = ord(v)\n if ch < 0x80:\n result.append(ch & 0xff)\n elif ch < 0x800:\n result.append((0xc0 + ((ch >> 6) & 0x1f)) & 0xff)\n result.append((0x80 + (ch & 0x3f)) & 0xff)\n else:\n result.append((0xe0 + ((ch >> 12) & 0xf)) & 0xff)\n result.append((0x80 + ((ch >> 6) & 0x3f)) & 0xff)\n result.append((0x80 + (ch & 0x3f)) & 0xff)\n return result\n\n def _encode_str(self, value):\n \"\"\"\n Encode a single string.\n :param value:\n :return:\n \"\"\"\n result = []\n # always work with unicode when doing network transfers\n # if isinstance(value, str):\n # value = value.decode('utf-8')\n length = len(value)\n if length <= 0x1f:\n result.append(0x00 + length)\n elif length <= 0x3ff:\n result.append(0x30 + (length >> 8))\n result.append(length)\n else:\n result.append(ord('S'))\n result.append(length >> 8)\n result.append(length)\n\n result.extend(self._encode_utf(value))\n return result\n\n def _encode_object(self, value):\n \"\"\"\n Encode an object.\n :param value:\n :return:\n \"\"\"\n result = []\n path = value.get_path()\n field_names = list(value.keys())\n\n if path not in self.__classes:\n result.append(ord('C'))\n result.extend(self._encode_single_value(path))\n\n result.extend(self._encode_single_value(len(field_names)))\n\n for field_name in field_names:\n result.extend(self._encode_single_value(field_name))\n self.__classes.append(path)\n class_id = self.__classes.index(path)\n if class_id <= 0xf:\n class_id += 0x60\n class_id &= 0xff\n result.append(class_id)\n else:\n result.append(ord('O'))\n result.extend(self._encode_single_value(class_id))\n for field_name in field_names:\n result.extend(self._encode_single_value(value[field_name]))\n return result\n\n def _encode_list(self, value):\n \"\"\"\n Encode a list.\n :param value:\n :return:\n \"\"\"\n result = []\n length = len(value)\n if length == 0:\n # with no elements the element type cannot be determined, so always return null\n return self._encode_single_value(None)\n if isinstance(value[0], bool):\n _type = '[boolean'\n elif isinstance(value[0], int):\n _type = '[int'\n elif isinstance(value[0], float):\n _type = '[double'\n elif isinstance(value[0], str):\n _type = '[string'\n elif isinstance(value[0], Object):\n _type = '[object'\n else:\n raise HessianTypeError('Unknown list type: {}'.format(value[0]))\n if length < 0x7:\n result.append(0x70 + length)\n if _type not in self.types:\n self.types.append(_type)\n result.extend(self._encode_single_value(_type))\n else:\n result.extend(self._encode_single_value(self.types.index(_type)))\n else:\n result.append(0x56)\n if _type not in self.types:\n self.types.append(_type)\n result.extend(self._encode_single_value(_type))\n else:\n result.extend(self._encode_single_value(self.types.index(_type)))\n result.extend(self._encode_single_value(length))\n for v in value:\n if type(value[0]) != type(v):\n raise HessianTypeError('All elements in list must be the same type, first type'\n ' is {0} but current type is {1}'.format(type(value[0]), type(v)))\n result.extend(self._encode_single_value(v))\n return result\n\n def _encode_single_value(self, value):\n \"\"\"\n Encode a single value according to the hessian protocol.\n :param value:\n :return:\n \"\"\"\n # boolean\n if 
isinstance(value, bool):\n return self._encode_bool(value)\n # integer (including long)\n elif isinstance(value, int):\n return self._encode_int(value)\n # floating point\n elif isinstance(value, float):\n return self._encode_float(value)\n # string\n elif isinstance(value, str):\n return self._encode_str(value)\n # object\n elif isinstance(value, Object):\n return self._encode_object(value)\n # list type; a tuple cannot be used instead\n elif isinstance(value, list):\n return self._encode_list(value)\n # null\n elif value is None:\n return [ord('N')]\n else:\n raise HessianTypeError('Unknown argument type: {}'.format(value))\n\n\ndef get_request_body_length(body):\n \"\"\"\n Get the length of the body and convert it into a 4-byte array.\n :param body:\n :return:\n \"\"\"\n return list(bytearray(struct.pack('!i', len(body))))\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"BSTester/dubbo-python","sub_path":"dubbo/codec/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":13444,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"11236320346","text":"from django.conf.urls import url\nfrom django.contrib.auth.views import login, logout\nfrom . import views\n\nurlpatterns = [\n url(r'^app/?$', views.base, name='base'),\n url(r'^foo/?$', views.foo, name='foo'),\n url(r'^new_user/?$', views.new_user, name='new_user'),\n url(r'^logs/?$', views.history_view, name='history'),\n url(r'^clear_logs/?$', views.clear_logs, name='clear_logs'),\n url(r'^unauthorized/?$', views.unauthorized, name='unauthorized'),\n url(r'^user_profile/(?P[0-9]+)$', views.user_profile, name='user_profile'),\n url(r'^$', login, {\n 'template_name': 'app/login.html'\n }, name='login')\n]\n","repo_name":"rkruszynski/behave_presentation","sub_path":"django-app/training-app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"589606940","text":"class DFA:\n def __init__(self, states, transitionTable, acceptTable, sigma ):\n self.states = states\n self.transitionTable = transitionTable\n self.acceptTable = acceptTable\n self.sigma = sigma\n \n # abbaaab\n def process(self, inputD):\n try:\n state = 0\n for c in inputD:\n state = self.transitionTable[state][self.sigma.index(c)]\n return self.acceptTable[state]\n except:\n return False\n\n @classmethod\n def inputDataForm(cls):\n n, m = map(int, input().split()) # [5, 2]\n states = list(map(int, input().split())) #[0, 1, 2, 3, 4]\n sigma = input().split() #[a, b]\n endStates = list(map(int, input().split())) #[2,4]\n acceptTable = [False for i in range(n)] # [False, False, False, False, False]\n for end in endStates:\n acceptTable[end] = True\n # [False, False, True, False, True]\n transitionTable = {}\n for i in range(n):\n transitionTable[i] = [0] * m # one entry per input symbol, not just two\n \n for i in range(n*m):\n start, inpData, des = input().split()\n # 0 a 1\n start = int(start) # 0\n des = int(des) # 1\n transitionTable[start][sigma.index(inpData)] = des\n\n return cls(states, transitionTable, acceptTable, sigma)\n\ndfa = DFA.inputDataForm()\ninputData = input()\n\nif dfa.process(inputData):\n print(\"YES\")\nelse:\n print(\"NO\")","repo_name":"minhkhacoder/ct287","sub_path":"DFA.py","file_name":"DFA.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17780479652","text":"import os\nimport json\nimport luigi\nimport numpy as np\n\nfrom cluster_tools.mutex_watershed import 
MwsWorkflow\nfrom cluster_tools.postprocess import SizeFilterWorkflow\nfrom cluster_tools.thresholded_components.threshold import ThresholdLocal, ThresholdSlurm\nfrom cluster_tools.workflows import MulticutStitchingWorkflow\n\nimport elf.parallel\nfrom elf.io import open_file\nfrom z5py.util import copy_dataset\n\nfrom mmpb.default_config import get_default_shebang\nfrom mmpb.segmentation.network.prediction import prefilter_blocks\n\n\ndef make_global_config(mask_path, mask_key, shape, block_shape, tmp_folder, n_threads):\n config_folder = os.path.join(tmp_folder, 'configs')\n os.makedirs(config_folder, exist_ok=True)\n\n config = MwsWorkflow.get_config()['global']\n shebang = get_default_shebang()\n block_list_path = os.path.join(tmp_folder, 'blocks.json')\n prefilter_blocks(mask_path, mask_key,\n shape, block_shape,\n block_list_path, n_threads=n_threads)\n config.update({'shebang': shebang,\n 'block_shape': block_shape,\n 'block_list_path': block_list_path})\n\n with open(os.path.join(config_folder, 'global.config'), 'w') as f:\n json.dump(config, f)\n\n\ndef run_mws(offsets, path, fg_key, aff_key, out_key,\n tmp_folder, target, max_jobs,\n strides=[6, 6, 6], halo=[16, 32, 32]):\n\n config_folder = os.path.join(tmp_folder, 'configs')\n configs = MwsWorkflow.get_config()\n\n config = configs['mws_blocks']\n config.update({'strides': strides, 'time_limit': 360, 'mem_limit': 12,\n 'randomize_strides': True, 'noise_level': 1e-4})\n with open(os.path.join(config_folder, 'mws_blocks.config'), 'w') as f:\n json.dump(config, f)\n\n task = MwsWorkflow(tmp_folder=tmp_folder, max_jobs=max_jobs,\n target=target, config_dir=config_folder,\n input_path=path, input_key=aff_key,\n output_path=path, output_key=out_key,\n mask_path=path, mask_key=fg_key,\n offsets=offsets, halo=halo)\n ret = luigi.build([task], local_scheduler=True)\n if not ret:\n raise RuntimeError(\"MWS failed\")\n\n\ndef make_fg_mask(path, fg_key, fg_mask_out_key, tmp_folder, target, max_jobs):\n task = ThresholdLocal if target == 'local' else ThresholdSlurm\n\n threshold = .5\n config_folder = os.path.join(tmp_folder, 'configs')\n t = task(tmp_folder=tmp_folder, config_dir=config_folder, max_jobs=max_jobs,\n input_path=path, input_key=fg_key,\n output_path=path, output_key=fg_mask_out_key,\n threshold=threshold, threshold_mode='greater')\n ret = luigi.build([t], local_scheduler=True)\n if not ret:\n raise RuntimeError(\"Threshold failed\")\n\n\ndef find_bounding_boxes(seg_path, seg_key, n_threads, scale_factor):\n with open_file(seg_path, 'r') as f:\n ds = f[seg_key]\n ds.n_threads = n_threads\n chunks = ds.chunks\n seg = ds[:]\n\n unique_segs = elf.parallel.unique(seg, block_shape=chunks,\n n_threads=n_threads, verbose=True)[1:]\n\n bbs = []\n # could use more efficient impl from scipy/skimage\n for seg_id in unique_segs:\n where_seg = np.where(seg == seg_id)\n bb = tuple(slice(int(ws.min()) * sf,\n (int(ws.max()) + 1) * sf) for ws, sf in zip(where_seg, scale_factor))\n bbs.append(bb)\n\n return bbs\n\n\ndef set_bounding_box(tmp_folder, bb):\n config_path = os.path.join(tmp_folder, 'configs', 'global.config')\n with open(config_path, 'r') as f:\n config = json.load(f)\n\n # clear block list path\n config['block_list_path'] = None\n\n # set the bounding box\n bb_start = [b.start for b in bb]\n bb_stop = [b.stop for b in bb]\n config.update({'roi_begin': bb_start, 'roi_end': bb_stop})\n with open(config_path, 'w') as f:\n json.dump(config, f)\n\n\ndef stitching_multicut(offsets, path, aff_key, ws_key,\n tmp_folder, config_folder, 
target, max_jobs):\n task = MulticutStitchingWorkflow\n\n exp_path = os.path.join(tmp_folder, 'data.n5')\n assignment_key = 'node_labels/cilia'\n out_key = 'volumes/stitched_cilia'\n\n configs = task.get_config()\n config = configs['probs_to_costs']\n config.update({'weight_edges': True})\n with open(os.path.join(config_folder, 'probs_to_costs.config'), 'w') as f:\n json.dump(config, f)\n\n config = configs['block_edge_features']\n config.update({'offsets': offsets[:6]})\n with open(os.path.join(config_folder, 'block_edge_features.config'), 'w') as f:\n json.dump(config, f)\n\n task_names = ['merge_sub_graphs', 'merge_edge_features']\n for tname in task_names:\n config = configs[tname]\n config.update({'time_limit': 120, 'mem_limit': 32})\n with open(os.path.join(config_folder, '%s.config' % tname), 'w') as f:\n json.dump(config, f)\n\n config = configs['stitching_multicut']\n config.update({'time_limit': 600, 'mem_limit': 128, 'threads_per_job': 8})\n with open(os.path.join(config_folder, 'stitching_multicut.config'), 'w') as f:\n json.dump(config, f)\n\n t = task(tmp_folder=tmp_folder, config_dir=config_folder,\n max_jobs=max_jobs, target=target,\n input_path=path, input_key=aff_key,\n labels_path=path, labels_key=ws_key,\n assignment_path=exp_path, assignment_key=assignment_key,\n problem_path=exp_path,\n output_path=exp_path, output_key=out_key)\n ret = luigi.build([t], local_scheduler=True)\n if not ret:\n raise RuntimeError(\"Multicut stitching failed\")\n\n\nclass CopyAndOffset(luigi.Task):\n out_path = luigi.Parameter()\n exp_path = luigi.Parameter()\n path = luigi.Parameter()\n seg_out_key = luigi.Parameter()\n offset = luigi.IntParameter()\n out_key = luigi.Parameter()\n n_threads = luigi.IntParameter()\n bb_start = luigi.ListParameter()\n bb_stop = luigi.ListParameter()\n\n @staticmethod\n def add_non_zero(arr, val):\n arr[arr != 0] += val\n return arr\n\n def run(self):\n bb = tuple(slice(sta, sto) for sta, sto in zip(self.bb_start, self.bb_stop))\n with open_file(self.exp_path) as fin, open_file(self.path) as fout:\n ds_in, ds_out = fin[self.seg_out_key], fout[self.out_key]\n\n # apply offset to the segmentation if it is not 0\n if self.offset > 0:\n print(\"Add offsets ...\")\n # FIXME something with parallel chunk access is off and leads to segfaults,\n # so for now we just run this single threaded\n elf.parallel.apply_operation(ds_in, self.offset, self.add_non_zero,\n n_threads=1, roi=bb, verbose=True)\n\n # copy to the output\n print(\"Copy dataset ...\")\n copy_dataset(self.exp_path, self.path, self.seg_out_key, self.out_key,\n n_threads=self.n_threads, roi=bb)\n\n # find new offset\n print(\"Find new offset ...\")\n offset = elf.parallel.max(ds_out, n_threads=self.n_threads, roi=bb,\n verbose=True)\n\n with open(self.out_path, 'w') as f:\n json.dump({'offset': int(offset)}, f)\n print(\"Copy and offset done ...\")\n\n def output(self):\n return luigi.LocalTarget(self.out_path)\n\n\ndef postprocess_and_write(path, out_key, bb, tmp_folder, config_dir,\n target, max_jobs, n_threads, offset, min_size):\n task = SizeFilterWorkflow\n\n exp_path = os.path.join(tmp_folder, 'data.n5')\n seg_in_key = 'volumes/stitched_cilia'\n seg_out_key = 'volumes/filtered_cilia'\n\n # filter the segmentation objects by size\n t = task(tmp_folder=tmp_folder, config_dir=config_dir,\n max_jobs=max_jobs, target=target,\n input_path=exp_path, input_key=seg_in_key,\n output_path=exp_path, output_key=seg_out_key,\n size_threshold=min_size)\n ret = luigi.build([t], local_scheduler=True)\n if not ret:\n 
raise RuntimeError(\"Size filtering failed\")\n\n out_path = os.path.join(tmp_folder, 'copy_and_offset.json')\n t = CopyAndOffset(out_path=out_path, exp_path=exp_path, path=path,\n seg_out_key=seg_out_key, out_key=out_key, offset=offset,\n n_threads=n_threads,\n bb_start=[b.start for b in bb],\n bb_stop=[b.stop for b in bb])\n ret = luigi.build([t], local_scheduler=True)\n if not ret:\n raise RuntimeError(\"Copy and offset failed\")\n\n with open(out_path) as f:\n offset = json.load(f)['offset']\n\n return offset\n\n\ndef get_scale_factor(path, key, mask_path, mask_key):\n with open_file(path, 'r') as f, open_file(mask_path, 'r') as fm:\n shape = f[key].shape\n mask_shape = fm[mask_key].shape\n scale_factor = [int(round(sh / float(ms), 0)) for sh, ms in zip(shape, mask_shape)]\n return scale_factor\n\n\ndef cilia_segmentation_workflow(offsets, path,\n fg_key, aff_key, fg_mask_out_key, out_key,\n mask_path, mask_key,\n tmp_folder, target, max_jobs, n_threads):\n\n size_threshold = 1000\n # preparation: find blocks we need to segment and write the global config\n with open_file(path, 'r') as f:\n shape = f[fg_key].shape\n block_shape = [64, 256, 256]\n make_global_config(mask_path, mask_key, shape, block_shape, tmp_folder, n_threads)\n\n # make mask for the foreground\n print(\"Make foreground mask ...\")\n make_fg_mask(path, fg_key, fg_mask_out_key, tmp_folder, target, max_jobs)\n\n # run block-wise mws\n print(\"Run mutex watershed ...\")\n run_mws(offsets, path, fg_mask_out_key, aff_key, out_key,\n tmp_folder, target, max_jobs)\n\n # find bounding box(es) of the current segment mask and set it\n scale_factor = get_scale_factor(path, fg_key, mask_path, mask_key)\n bbs = find_bounding_boxes(mask_path, mask_key, n_threads, scale_factor)\n\n offset = 0\n # run multicut for the bounding boxes\n config_folder = os.path.join(tmp_folder, 'configs')\n print(\"Run stitching multicuts ...\")\n for ii, bb in enumerate(bbs):\n print(\"for bounding box\", ii, \":\", bb)\n set_bounding_box(tmp_folder, bb)\n tmp_folder_mc = os.path.join(tmp_folder, 'tmps_mc', 'tmp_%i' % ii)\n os.makedirs(tmp_folder_mc, exist_ok=True)\n stitching_multicut(offsets, path, aff_key, out_key,\n tmp_folder_mc, config_folder, target, max_jobs)\n\n print(\"Postprocess and write result ...\")\n # run filters and update offset\n offset = postprocess_and_write(path, out_key, bb,\n tmp_folder_mc, config_folder,\n target, max_jobs, n_threads,\n offset, size_threshold)\n","repo_name":"mobie/platybrowser-project","sub_path":"mmpb/segmentation/cilia/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":11063,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"1058401381","text":"from datetime import datetime\nfrom pathlib import Path\nfrom threading import Thread\n\nfrom flaky import flaky\n\nfrom compiler_gym.util.runfiles_path import create_user_logs_dir\nfrom tests.test_main import main\n\npytest_plugins = [\"tests.pytest_plugins.common\"]\n\n\n@flaky # Unlikely event that timestamps change\ndef test_create_user_logs_dir(temporary_environ, tmpdir):\n tmpdir = Path(tmpdir)\n temporary_environ[\"COMPILER_GYM_LOGS\"] = str(tmpdir)\n\n dir = create_user_logs_dir(\"foo\")\n now = datetime.now()\n\n assert dir.parent.parent == tmpdir / \"foo\"\n\n year, month, day = dir.parent.name.split(\"-\")\n assert int(year) == now.year\n assert int(month) == now.month\n assert int(day) == now.day\n\n hour, minute, second = dir.name.split(\"-\")\n assert int(hour) == now.hour\n 
assert int(minute) == now.minute\n assert int(second) == now.second\n\n\ndef test_create_user_logs_dir_multithreaded(temporary_environ, tmpdir):\n tmpdir = Path(tmpdir)\n temporary_environ[\"COMPILER_GYM_LOGS\"] = str(tmpdir)\n\n class MakeDir(Thread):\n def __init__(self):\n super().__init__()\n self.dir = None\n\n def run(self):\n self.dir = create_user_logs_dir(\"foo\")\n\n def join(self):\n super().join()\n return self.dir\n\n threads = [MakeDir() for _ in range(5)]\n for t in threads:\n t.start()\n\n dirs = [t.join() for t in threads]\n\n # Every directory should be unique.\n print(dirs)\n assert len(set(dirs)) == len(dirs)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"facebookresearch/CompilerGym","sub_path":"tests/util/runfiles_path_test.py","file_name":"runfiles_path_test.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":821,"dataset":"github-code","pt":"78"} +{"seq_id":"40539571659","text":"import json\nfrom collections import namedtuple, Counter\nimport string, base64, binascii, zlib\nimport datetime\n\n#\n# Helpers\n#\n\nclass cached_property(property):\n def __get__(self, instance, owner):\n if instance is None:\n return super().__get__(instance, owner)\n\n s = '_cached_' + self.fget.__name__\n try: return getattr(instance, s)\n except AttributeError: pass\n\n ret = super().__get__(instance, owner)\n setattr(instance, s, ret)\n return ret\n\n def __set__(self, instance, value):\n raise AttributeError()\n\n def __delete__(self, instance):\n if instance is not None:\n s = '_cached_' + self.fget.__name__\n try: delattr(instance, s)\n except AttributeError: pass\n super().__delete__(instance)\n\n\nclass count_trues(object):\n \"\"\"Kinda like sum(), but only for the specific case of taking a sum()\n of booleans and then applying comparison operators. 
The advantage is\n that it doesn't go through the whole iterator if it doesn't need to.\n For example, in `t = count_trues(i%2 for i in range(99999))`, comparing\n `t` with 8 using any comparison operator will take 16 to 18 iterations.\n \"\"\"\n def __init__(self, iterable):\n self.count = 0\n self.iterable = iterable\n\n def __gt__(self, other):\n if self.count > other:\n return True\n for i in self.iterable:\n if i:\n self.count += 1\n if self.count > other:\n return True\n return False\n\n def __ge__(self, other):\n if self.count >= other:\n return True\n for i in self.iterable:\n if i:\n self.count += 1\n if self.count >= other:\n return True\n return False\n\n def __lt__(self, other):\n return not self.__ge__(other)\n\n def __le__(self, other):\n return not self.__gt__(other)\n\n def __eq__(self, other):\n return self.__ge__(other) and self.__le__(other)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\ndef maybe_decode_base64(s):\n \"\"\"Returns None if s doesn't look like base64 (even if it's decodable).\n Otherwise, return the decoded string and the altchars used.\"\"\"\n s = s.strip()\n charset = frozenset(s)\n if len(s) < 4:\n return None\n if not charset.issubset(string.ascii_letters + string.digits + '+/-_.=\\n'):\n return None\n if len(charset.intersection('+/-_.=')) > 3:\n return None\n\n # Looks decodable, let's decode it.\n altchars = ''.join(charset.intersection('+/_-.'))[:2]\n altchars = ''.join(sorted(altchars)) # keep altchars a string; a bare list would break the dict lookup and b64decode below\n if len(altchars) == 0:\n altchars = '+/'\n elif len(altchars) == 1:\n altchars = {\n '+': '+/',\n '/': '+/',\n '-': '-_',\n '_': '-_',\n '.': '._',\n }[altchars]\n\n try:\n decoded = base64.b64decode(s + '===', altchars)\n except:\n return None\n\n # Ending in the right number of '='s is a good tell.\n if s.endswith('=') and not s.endswith('===') and ('=' not in s.rstrip('=')):\n return decoded, altchars\n\n # So is decoding to all printables or mostly letters.\n if len(decoded) > 10 and charset.issubset(string.printable):\n return decoded, altchars\n if len(decoded) >= 4:\n good_chars = frozenset(string.ascii_letters + string.digits + ' _.,')\n if sum(c in good_chars for c in decoded) > len(decoded) * 0.8:\n return decoded, altchars\n\n # Super good compressibility is also a sign.\n if float(len(zlib.compress(decoded))) / len(decoded) <= 0.40:\n return decoded, altchars\n\n # decoded is apparently binary junk. Make sure the input really\n # \"looks like\" base64 before returning it. 
Specifically, we\n # check that the breakdown of digits to lowercase to capitals looks\n # roughly even.\n if not charset.intersection(string.digits): return None\n if not charset.intersection(string.ascii_lowercase): return None\n if not charset.intersection(string.ascii_uppercase): return None\n\n count = Counter(s)\n del count['\\n']\n del count['=']\n del count['A'] # Strings of null bytes are often more common, that's fine.\n grand_total = sum(count.values())\n digits = sum(count[c] for c in string.digits)\n lower = sum(count[c] for c in string.ascii_lowercase)\n upper = sum(count[c] for c in string.ascii_uppercase)\n total = digits + lower + upper\n if total < grand_total * 0.9: # Too many other chars to be base64!\n return None\n\n # Multiplied by inverse expected probability of seeing that letter.\n digits = digits / 10.0 / total * 63\n lower = lower / 26.0 / total * 63\n upper = upper / 25.0 / total * 63\n if all(i > 0.5 for i in (digits, lower, upper)):\n return decoded, altchars\n\n\n#\n# Classes for representing the \"inferred type\" of a single string.\n#\n\nclass StrTypeBase:\n __slots__ = ()\n\nclass StrBool(namedtuple('StrBool', 'true false'), StrTypeBase):\n def invert(self, val):\n return self.true if val else self.false\n __slots__ = ()\n\nclass StrDate(namedtuple('StrDate', 'kind'), StrTypeBase):\n def invert(self, val):\n return val.isoformat()\n __slots__ = ()\n\nclass StrJSON(namedtuple('StrJSON', 'type'), StrTypeBase):\n def invert(self, val):\n return json.dumps(val)\n __slots__ = ()\n\nclass StrHex(namedtuple('StrHex', 'upper'), StrTypeBase):\n def invert(self, val):\n if isinstance(val, str):\n val = val.encode('utf-8')\n return binascii.hexlify(val).decode('utf-8')\n __slots__ = ()\n\nclass StrB64(namedtuple('StrB64', 'altchars'), StrTypeBase):\n def invert(self, val):\n # Lots of utf-8 hax. We don't expect to handle bytes well anyways.\n if isinstance(val, str):\n val = val.encode('utf-8')\n return base64.b64encode(val, altchars=self.altchars.encode('utf-8')).decode('utf-8')\n __slots__ = ()\n\nclass StrNum(namedtuple('StrNum', ''), StrTypeBase):\n def invert(self, val):\n return str(val)\n __slots__ = ()\n\nclass StrList(namedtuple('StrList', 'before delimiter after'), StrTypeBase):\n def invert(self, val):\n return self.before + self.delimiter.join(val) + self.after\n __slots__ = ()\n\nclass StrStr(namedtuple('StrStr', ''), StrTypeBase):\n def invert(self, val):\n return val\n __slots__ = ()\n\nclass StringGuesser:\n \"\"\"Class for staring really hard at strings and deciding what they are.\"\"\"\n def __init__(self, s):\n self.s = s\n self.stripped = s.strip()\n self.stripped_lower = self.stripped.lower()\n\n # Currently, the properties are non-None if the value can be interpreted\n # as the given type and **is likely to be the given type**. (For example,\n # \"abcd\" will currently return None on .hexbytes).\n # This is kinda bad cuz interpretation should be separate from guessing,\n # but whatever, not going to change this right now.
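\n #\n # Illustrative usage (values follow from the properties defined below):\n # StringGuesser(\"2021-03-04T05:06:07\").best\n # -> (StrDate(kind='iso'), datetime.datetime(2021, 3, 4, 5, 6, 7))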
\n\n @cached_property\n def isodate(self):\n # Parse ISO dates, ignoring fractional seconds or timezones cuz I'm lazy.\n stripped = self.stripped\n\n if ':' not in stripped:\n return None, None\n\n # Find end before fractional seconds or timezone\n colon = stripped.index(':')\n end = len(stripped)\n for c in '.Z+- ':\n try:\n end = min(end, stripped.index(c, colon))\n except (IndexError, ValueError):\n continue\n # Sanity check that after-the-end looks close enough.\n # Probably gonna be just a colon left or timezone name.\n if len(stripped[end:].strip(' .0123456789Z+-')) <= 4:\n try:\n val = datetime.datetime.strptime(stripped[:end], \"%Y-%m-%dT%H:%M:%S\")\n except ValueError:\n return None, None\n return StrDate('iso'), val\n\n return None, None\n\n @cached_property\n def boolean(self):\n if self.stripped_lower not in (\"true\", \"yes\", \"false\", \"no\"):\n return None, None\n\n info = (\n (\"true\", \"false\"),\n (\"True\", \"False\"),\n (\"TRUE\", \"FALSE\"),\n (\"yes\", \"no\"),\n (\"Yes\", \"No\"),\n (\"YES\", \"NO\"),\n )\n for i in info:\n if self.stripped in i:\n return StrBool(*i), self.stripped == i[0]\n else:\n # weird, couldn't match case right, just use lowercase.\n if self.stripped_lower in i:\n return StrBool(*i), self.stripped_lower == i[0]\n assert False, \"Unreachable\"\n\n @cached_property\n def hexbytes(self):\n cset = frozenset(self.stripped_lower)\n if self.stripped_lower.count(' ') > len(self.stripped_lower)/8:\n return None, None\n if len(cset) >= 3 and cset.issubset('0123456789abcdef _-') and not cset.issubset('abcdef _-'):\n try:\n decoded = binascii.unhexlify(self.stripped_lower.replace(' ', '').replace('_','').replace('-',''))\n except:\n return None, None\n\n # StrHex doesn't represent any space/underscore/dash patterns\n upper = 0\n for c in self.stripped:\n if c in 'abcdef':\n upper -= 1\n elif c in 'ABCDEF':\n upper += 1\n return StrHex(upper > 0), decoded\n\n return None, None\n\n @cached_property\n def base64bytes(self):\n ret = maybe_decode_base64(self.stripped)\n if ret is None:\n return None, None\n decoded, altchars = ret\n return StrB64(altchars), decoded\n\n @cached_property\n def json(self):\n try:\n x = json.loads(self.s)\n except:\n return None, None\n else:\n typename = type(x).__name__\n # We handle non-compound json types with the other things.\n if typename not in ('dict', 'list'):\n return None, None\n return StrJSON(type=typename), x\n\n @cached_property\n def number(self):\n asint, asfloat = None, None\n try:\n asint = int(self.stripped)\n except ValueError:\n pass\n try:\n asfloat = float(self.stripped)\n except ValueError:\n pass\n\n if asint is not None:\n return StrNum(), asint\n elif asfloat is not None:\n return StrNum(), asfloat\n else:\n return None, None\n\n @cached_property\n def list(self):\n def comma_list(s, delimiter):\n if delimiter not in s:\n return None\n before, after = '', ''\n if s[:1] in '([<' and s[-1:] in ')]>':\n before, after = s[:1], s[-1:]\n s = s[1:-1]\n\n res = [StringGuesser(i) for i in s.split(delimiter)]\n if len(res) < 4 and count_trues(i.nonbasic for i in res) < 2:\n # If there's not many elements and less than two look like anything,\n # it's probably just a string with the delimiter in it.\n return None\n return StrList(before, delimiter, after), res\n\n def newline_list(s):\n after = '\\n' if 
\n            after = '\\n' if s.endswith('\\n') else ''\n            s = s.strip('\\n')\n            if '\\n' not in s:\n                return None\n            res = [StringGuesser(i) for i in s.split('\\n')]\n            if len(res) >= 4 and count_trues(i.basic for i in res) >= len(res)/2:\n                # If over half the lines seem like just strings, it's\n                # probably just a string with newlines in it.\n                return None\n            return StrList('', '\\n', after), res\n\n        def whitespace_list(s):\n            after = '\\n' if s.endswith('\\n') else ''\n            s = s.strip().split()\n            if len(s) <= 1:\n                return None\n            res = [StringGuesser(i) for i in s]\n            if count_trues(i.basic for i in res) >= len(res)/2:\n                # If over half the tokens seem like just strings, it's\n                # probably just a string with spaces in it.\n                return None\n            # fixme: ...we just assume space was the delimiter. Could do better.\n            return StrList('', ' ', after), res\n\n        # Trying them serially like this seems to make sense; we could\n        # return multiple possibilities, but it doesn't seem worth it.\n        s = self.s\n        return newline_list(s) or comma_list(s, '\\t') or comma_list(s, ':') or comma_list(s, ',') or whitespace_list(s) or (None, None)\n\n    @property\n    def nonbasic(self):\n        \"\"\"True if we get any sort of inference result beyond 'just some string'.\"\"\"\n        return not self.basic\n\n    @property\n    def basic(self):\n        \"\"\"True if there's no inference result besides 'just some string'.\"\"\"\n        return isinstance(self.best_type, StrStr)\n\n    @property\n    def best(self):\n        if self.boolean[0] is not None: return self.boolean\n        if self.number[0] is not None: return self.number\n        if self.isodate[0] is not None: return self.isodate\n        if self.hexbytes[0] is not None: return self.hexbytes\n        if self.base64bytes[0] is not None: return self.base64bytes\n        if self.json[0] is not None: return self.json\n        if self.list[0] is not None: return self.list\n        return StrStr(), self.s\n\n    @property\n    def best_type(self):\n        return self.best[0]\n","repo_name":"mruck/athena","sub_path":"fuzzer/guesstype/guessstring.py","file_name":"guessstring.py","file_ext":"py","file_size_in_byte":12114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5839346383","text":"import cv2\nimport numpy as np\nimport time\n\npeople=0\n\n# DOWNLOAD THE BELOW FILES FROM http://bluerabbit.me/yolov3.zip\nclass args:\n    image = 'opencv_frame.png'\n    config = 'yolov3.cfg'\n    weights = 'yolov3.weights'\n    classes = 'yolov3.txt'\n\n\n\ndef get_output_layers(net):\n    layer_names = net.getLayerNames()\n    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n    return output_layers\n\ndef draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):\n    global people\n    label = str(classes[class_id])\n    color = COLORS[class_id]\n    cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)\n    cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n    #print('Label:',label, 'Confidence:',confidence, 'Location:',(x, y, x_plus_w, y_plus_h))\n    if(label=='person'):\n        people = people + 1\n\ncam = cv2.VideoCapture(0)\ncv2.namedWindow(\"CamCapture\")\nimg_counter = 0\nwhile True:\n    ret, frame = cam.read()\n    cv2.imshow(\"CamCapture\", frame)\n    if not ret:\n        break\n    k = cv2.waitKey(1)\n\n    if k%256 == 27:\n        # WHEN ESC KEY PRESSED\n        print(\"Escape hit, closing...\")\n        break\n    else:\n        # OTHERWISE, CAPTURE AND ANALYZE A FRAME EVERY 5 SECONDS\n        time.sleep(5)\n        img_name = \"opencv_frame.png\"\n        cv2.imwrite(img_name, frame)\n        print(\"{} written!\".format(img_name))\n        img_counter += 1\n        image = cv2.imread(args.image)\n        Width = image.shape[1]\n        Height = 
image.shape[0]\n scale = 0.00392\n classes = None\n with open(args.classes, 'r') as f:\n classes = [line.strip() for line in f.readlines()]\n\n COLORS = np.random.uniform(0, 255, size=(len(classes), 3))\n net = cv2.dnn.readNet(args.weights, args.config)\n blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(get_output_layers(net))\n class_ids = []\n confidences = []\n boxes = []\n conf_threshold = 0.5\n nms_threshold = 0.4\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.5:\n center_x = int(detection[0] * Width)\n center_y = int(detection[1] * Height)\n w = int(detection[2] * Width)\n h = int(detection[3] * Height)\n x = center_x - w / 2\n y = center_y - h / 2\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)\n\n for i in indices:\n i = i[0]\n box = boxes[i]\n x = box[0]\n y = box[1]\n w = box[2]\n h = box[3]\n draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))\n\n cv2.imshow(\"object detection\", image)\n print(people)\n people = 0\n\n cv2.waitKey(1000)\n cv2.imwrite(\"object-detection.jpg\", image)\n cv2.destroyAllWindows()\n time.sleep(5)\n\ncam.release()\n\n","repo_name":"shelwyn/Detect_objects_with_YOLO","sub_path":"Detect.py","file_name":"Detect.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8865319378","text":"import flet as ft\nfrom repath import match\n\nfrom views import MainView, BookView\n\n\ndef main(page: ft.Page):\n page.title = \"Library Manager\"\n page.vertical_alignment = ft.MainAxisAlignment.CENTER\n # page.client_storage.clear()\n\n page.window_width = 900\n page.window_max_width = 900\n page.window_min_width = 900\n\n page.window_height = 600\n page.window_max_height = 600\n page.window_min_height = 600\n\n page.window_maximizable = False\n page.padding = 0\n\n def route_change(route: ft.RouteChangeEvent):\n page.views.clear()\n router = ft.TemplateRoute(page.route)\n\n if route.data == \"/main\":\n page.views.append(\n MainView(page=page, route=\"/main\")\n )\n\n if router.match(\"/book/:uuid\"):\n book_info = list(filter(lambda b: b['uuid'] == router.uuid, page.client_storage.get(\"books\")))[0]\n page.views.append(\n BookView(page=page, route=\"/book\", book_info=book_info)\n )\n\n page.update()\n\n page.on_route_change = route_change\n page.go(\"/main\")\n\n\nif __name__ == \"__main__\":\n ft.app(\n target=main\n )\n","repo_name":"DenisZhmakin/LibraryManager","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"976910519","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n# --------------------------------------------------------------------------\nchrome_driver_path = \"C:\\devlopment\\chromedriver_win32\\chromedriver\"\ndriver = webdriver.Chrome(executable_path=chrome_driver_path)\n# driver.get(\"https://en.wikipedia.org/wiki/Main_Page\")\ndriver.get(\"http://secure-retreat-92358.herokuapp.com/\")\n# --------------------------------------------------------------------------\n# 
click\n# article_numbers = driver.find_element(By.CSS_SELECTOR, \"#articlecount > a:nth-child(1)\")\n# # or\n# click_portal = driver.find_element(By.LINK_TEXT, \"All portals\")\n# click_portal.click()\n\n# test\n# type_in = driver.find_element(By.CSS_SELECTOR, \"#searchInput\")\n# type_in.send_keys(\"Python\")\n# type_in.send_keys(Keys.ENTER)\n\n# Sign up\nfname = driver.find_element(By.CSS_SELECTOR, \"body > form > input.form-control.top\")\nlname = driver.find_element(By.CSS_SELECTOR, \"body > form > input.form-control.middle\")\nemail = driver.find_element(By.CSS_SELECTOR, \"body > form > input.form-control.bottom\")\nsubmit = driver.find_element(By.CSS_SELECTOR, \"body > form > button\")\nfname.send_keys(\"Priyansh\")\nlname.send_keys(\"Sharma\")\nemail.send_keys(\"piyuindia4@gmail.com\")\nsubmit.click()\n","repo_name":"Priyanshsharma21/Python","sub_path":"web_dev_py/Selanium/intraction.py","file_name":"intraction.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71497996733","text":"from __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nfrom google.protobuf import field_mask_pb2 # type: ignore\nfrom google.protobuf import timestamp_pb2 # type: ignore\nimport proto # type: ignore\n\n__protobuf__ = proto.module(\n package=\"google.cloud.resourcemanager.v3\",\n manifest={\n \"Folder\",\n \"GetFolderRequest\",\n \"ListFoldersRequest\",\n \"ListFoldersResponse\",\n \"SearchFoldersRequest\",\n \"SearchFoldersResponse\",\n \"CreateFolderRequest\",\n \"CreateFolderMetadata\",\n \"UpdateFolderRequest\",\n \"UpdateFolderMetadata\",\n \"MoveFolderRequest\",\n \"MoveFolderMetadata\",\n \"DeleteFolderRequest\",\n \"DeleteFolderMetadata\",\n \"UndeleteFolderRequest\",\n \"UndeleteFolderMetadata\",\n },\n)\n\n\nclass Folder(proto.Message):\n r\"\"\"A folder in an organization's resource hierarchy, used to\n organize that organization's resources.\n\n Attributes:\n name (str):\n Output only. The resource name of the folder. Its format is\n ``folders/{folder_id}``, for example: \"folders/1234\".\n parent (str):\n Required. The folder's parent's resource name. Updates to\n the folder's parent must be performed using\n [MoveFolder][google.cloud.resourcemanager.v3.Folders.MoveFolder].\n display_name (str):\n The folder's display name. A folder's display name must be\n unique amongst its siblings. For example, no two folders\n with the same parent can share the same display name. The\n display name must start and end with a letter or digit, may\n contain letters, digits, spaces, hyphens and underscores and\n can be no longer than 30 characters. This is captured by the\n regular expression:\n ``[\\p{L}\\p{N}]([\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?``.\n state (google.cloud.resourcemanager_v3.types.Folder.State):\n Output only. The lifecycle state of the folder. Updates to\n the state must be performed using\n [DeleteFolder][google.cloud.resourcemanager.v3.Folders.DeleteFolder]\n and\n [UndeleteFolder][google.cloud.resourcemanager.v3.Folders.UndeleteFolder].\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. Timestamp when the folder was\n created.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. Timestamp when the folder was\n last modified.\n delete_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. Timestamp when the folder was\n requested to be deleted.\n etag (str):\n Output only. 
A checksum computed by the\n server based on the current value of the folder\n resource. This may be sent on update and delete\n requests to ensure the client has an up-to-date\n value before proceeding.\n \"\"\"\n\n class State(proto.Enum):\n r\"\"\"Folder lifecycle states.\n\n Values:\n STATE_UNSPECIFIED (0):\n Unspecified state.\n ACTIVE (1):\n The normal and active state.\n DELETE_REQUESTED (2):\n The folder has been marked for deletion by\n the user.\n \"\"\"\n STATE_UNSPECIFIED = 0\n ACTIVE = 1\n DELETE_REQUESTED = 2\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n parent: str = proto.Field(\n proto.STRING,\n number=2,\n )\n display_name: str = proto.Field(\n proto.STRING,\n number=3,\n )\n state: State = proto.Field(\n proto.ENUM,\n number=4,\n enum=State,\n )\n create_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=5,\n message=timestamp_pb2.Timestamp,\n )\n update_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=6,\n message=timestamp_pb2.Timestamp,\n )\n delete_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=7,\n message=timestamp_pb2.Timestamp,\n )\n etag: str = proto.Field(\n proto.STRING,\n number=8,\n )\n\n\nclass GetFolderRequest(proto.Message):\n r\"\"\"The GetFolder request message.\n\n Attributes:\n name (str):\n Required. The resource name of the folder to retrieve. Must\n be of the form ``folders/{folder_id}``.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass ListFoldersRequest(proto.Message):\n r\"\"\"The ListFolders request message.\n\n Attributes:\n parent (str):\n Required. The name of the parent resource whose folders are\n being listed. Only children of this parent resource are\n listed; descendants are not listed.\n\n If the parent is a folder, use the value\n ``folders/{folder_id}``. If the parent is an organization,\n use the value ``organizations/{org_id}``.\n\n Access to this method is controlled by checking the\n ``resourcemanager.folders.list`` permission on the\n ``parent``.\n page_size (int):\n Optional. The maximum number of folders to\n return in the response. The server can return\n fewer folders than requested. If unspecified,\n server picks an appropriate default.\n page_token (str):\n Optional. A pagination token returned from a previous call\n to ``ListFolders`` that indicates where this listing should\n continue from.\n show_deleted (bool):\n Optional. Controls whether folders in the\n [DELETE_REQUESTED][google.cloud.resourcemanager.v3.Folder.State.DELETE_REQUESTED]\n state should be returned. 
Defaults to false.\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n page_size: int = proto.Field(\n proto.INT32,\n number=2,\n )\n page_token: str = proto.Field(\n proto.STRING,\n number=3,\n )\n show_deleted: bool = proto.Field(\n proto.BOOL,\n number=4,\n )\n\n\nclass ListFoldersResponse(proto.Message):\n r\"\"\"The ListFolders response message.\n\n Attributes:\n folders (MutableSequence[google.cloud.resourcemanager_v3.types.Folder]):\n A possibly paginated list of folders that are\n direct descendants of the specified parent\n resource.\n next_page_token (str):\n A pagination token returned from a previous call to\n ``ListFolders`` that indicates from where listing should\n continue.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n folders: MutableSequence[\"Folder\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"Folder\",\n )\n next_page_token: str = proto.Field(\n proto.STRING,\n number=2,\n )\n\n\nclass SearchFoldersRequest(proto.Message):\n r\"\"\"The request message for searching folders.\n\n Attributes:\n page_size (int):\n Optional. The maximum number of folders to\n return in the response. The server can return\n fewer folders than requested. If unspecified,\n server picks an appropriate default.\n page_token (str):\n Optional. A pagination token returned from a previous call\n to ``SearchFolders`` that indicates from where search should\n continue.\n query (str):\n Optional. Search criteria used to select the folders to\n return. If no search criteria is specified then all\n accessible folders will be returned.\n\n Query expressions can be used to restrict results based upon\n displayName, state and parent, where the operators ``=``\n (``:``) ``NOT``, ``AND`` and ``OR`` can be used along with\n the suffix wildcard symbol ``*``.\n\n The ``displayName`` field in a query expression should use\n escaped quotes for values that include whitespace to prevent\n unexpected behavior.\n\n ::\n\n | Field | Description |\n |-------------------------|----------------------------------------|\n | displayName | Filters by displayName. |\n | parent | Filters by parent (for example: folders/123). |\n | state, lifecycleState | Filters by state. 
|\n\n        Some example queries are:\n\n        - Query ``displayName=Test*`` returns Folder resources\n          whose display name starts with \"Test\".\n        - Query ``state=ACTIVE`` returns Folder resources with\n          ``state`` set to ``ACTIVE``.\n        - Query ``parent=folders/123`` returns Folder resources\n          that have ``folders/123`` as a parent resource.\n        - Query ``parent=folders/123 AND state=ACTIVE`` returns\n          active Folder resources that have ``folders/123`` as a\n          parent resource.\n        - Query ``displayName=\\\\\"Test String\\\\\"`` returns Folder\n          resources with display names that include both \"Test\" and\n          \"String\".\n    \"\"\"\n\n    page_size: int = proto.Field(\n        proto.INT32,\n        number=1,\n    )\n    page_token: str = proto.Field(\n        proto.STRING,\n        number=2,\n    )\n    query: str = proto.Field(\n        proto.STRING,\n        number=3,\n    )\n\n\nclass SearchFoldersResponse(proto.Message):\n    r\"\"\"The response message for searching folders.\n\n    Attributes:\n        folders (MutableSequence[google.cloud.resourcemanager_v3.types.Folder]):\n            A possibly paginated list of folder search results.\n        next_page_token (str):\n            A pagination token returned from a previous call to\n            ``SearchFolders`` that indicates from where searching should\n            continue.\n    \"\"\"\n\n    @property\n    def raw_page(self):\n        return self\n\n    folders: MutableSequence[\"Folder\"] = proto.RepeatedField(\n        proto.MESSAGE,\n        number=1,\n        message=\"Folder\",\n    )\n    next_page_token: str = proto.Field(\n        proto.STRING,\n        number=2,\n    )\n\n\nclass CreateFolderRequest(proto.Message):\n    r\"\"\"The CreateFolder request message.\n\n    Attributes:\n        folder (google.cloud.resourcemanager_v3.types.Folder):\n            Required. The folder being created, only the\n            display name and parent will be consulted. All\n            other fields will be ignored.\n    \"\"\"\n\n    folder: \"Folder\" = proto.Field(\n        proto.MESSAGE,\n        number=2,\n        message=\"Folder\",\n    )\n\n\nclass CreateFolderMetadata(proto.Message):\n    r\"\"\"Metadata pertaining to the Folder creation process.\n\n    Attributes:\n        display_name (str):\n            The display name of the folder.\n        parent (str):\n            The resource name of the folder or\n            organization we are creating the folder under.\n    \"\"\"\n\n    display_name: str = proto.Field(\n        proto.STRING,\n        number=1,\n    )\n    parent: str = proto.Field(\n        proto.STRING,\n        number=2,\n    )\n\n\nclass UpdateFolderRequest(proto.Message):\n    r\"\"\"The request sent to the\n    [UpdateFolder][google.cloud.resourcemanager.v3.Folder.UpdateFolder]\n    method.\n\n    Only the ``display_name`` field can be changed. All other fields\n    will be ignored. Use the\n    [MoveFolder][google.cloud.resourcemanager.v3.Folders.MoveFolder]\n    method to change the ``parent`` field.\n\n    Attributes:\n        folder (google.cloud.resourcemanager_v3.types.Folder):\n            Required. The new definition of the Folder. It must include\n            the ``name`` field, which cannot be changed.\n        update_mask (google.protobuf.field_mask_pb2.FieldMask):\n            Required. Fields to be updated. Only the ``display_name``\n            can be updated.\n    \"\"\"\n\n    folder: \"Folder\" = proto.Field(\n        proto.MESSAGE,\n        number=1,\n        message=\"Folder\",\n    )\n    update_mask: field_mask_pb2.FieldMask = proto.Field(\n        proto.MESSAGE,\n        number=2,\n        message=field_mask_pb2.FieldMask,\n    )\n\n\nclass UpdateFolderMetadata(proto.Message):\n    r\"\"\"A status object which is used as the ``metadata`` field for the\n    Operation returned by UpdateFolder.\n\n    \"\"\"\n\n\nclass MoveFolderRequest(proto.Message):\n    r\"\"\"The MoveFolder request message.\n\n    Attributes:\n        name (str):\n            Required. The resource name of the Folder to move.
 Must be\n            of the form folders/{folder_id}\n        destination_parent (str):\n            Required. The resource name of the folder or organization\n            which should be the folder's new parent. Must be of the form\n            ``folders/{folder_id}`` or ``organizations/{org_id}``.\n    \"\"\"\n\n    name: str = proto.Field(\n        proto.STRING,\n        number=1,\n    )\n    destination_parent: str = proto.Field(\n        proto.STRING,\n        number=2,\n    )\n\n\nclass MoveFolderMetadata(proto.Message):\n    r\"\"\"Metadata pertaining to the folder move process.\n\n    Attributes:\n        display_name (str):\n            The display name of the folder.\n        source_parent (str):\n            The resource name of the folder's parent.\n        destination_parent (str):\n            The resource name of the folder or\n            organization to move the folder to.\n    \"\"\"\n\n    display_name: str = proto.Field(\n        proto.STRING,\n        number=1,\n    )\n    source_parent: str = proto.Field(\n        proto.STRING,\n        number=2,\n    )\n    destination_parent: str = proto.Field(\n        proto.STRING,\n        number=3,\n    )\n\n\nclass DeleteFolderRequest(proto.Message):\n    r\"\"\"The DeleteFolder request message.\n\n    Attributes:\n        name (str):\n            Required. The resource name of the folder to be deleted.\n            Must be of the form ``folders/{folder_id}``.\n    \"\"\"\n\n    name: str = proto.Field(\n        proto.STRING,\n        number=1,\n    )\n\n\nclass DeleteFolderMetadata(proto.Message):\n    r\"\"\"A status object which is used as the ``metadata`` field for the\n    ``Operation`` returned by ``DeleteFolder``.\n\n    \"\"\"\n\n\nclass UndeleteFolderRequest(proto.Message):\n    r\"\"\"The UndeleteFolder request message.\n\n    Attributes:\n        name (str):\n            Required. The resource name of the folder to undelete. Must\n            be of the form ``folders/{folder_id}``.\n    \"\"\"\n\n    name: str = proto.Field(\n        proto.STRING,\n        number=1,\n    )\n\n\nclass UndeleteFolderMetadata(proto.Message):\n    r\"\"\"A status object which is used as the ``metadata`` field for the\n    ``Operation`` returned by ``UndeleteFolder``.\n\n    \"\"\"\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","repo_name":"googleapis/google-cloud-python","sub_path":"packages/google-cloud-resource-manager/google/cloud/resourcemanager_v3/types/folders.py","file_name":"folders.py","file_ext":"py","file_size_in_byte":14945,"program_lang":"python","lang":"en","doc_type":"code","stars":4415,"dataset":"github-code","pt":"78"} +{"seq_id":"74845180731","text":"'''\r\nRHYTHMATICA is a simple rhythm game, designed by Kokosei J a.k.a Wonjun Jung.\r\nCopyright (C) 2019, Wonjun Jung\r\n\r\nThis program is free software: you can redistribute it and/or modify\r\nit under the terms of the GNU General Public License as published by\r\nthe Free Software Foundation, either version 3 of the License, or\r\n(at your option) any later version.\r\n\r\nThis program is distributed in the hope that it will be useful,\r\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\r\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r\nGNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License\r\nalong with this program.
 If not, see <https://www.gnu.org/licenses/>.\r\n'''\r\n#####Import Modules#####\r\nfrom function_old import *\r\nfrom objclass_old import *\r\nimport pygame\r\nfrom pygame.locals import *\r\nfrom random import randint\r\nimport os\r\nprint(\"ligma\")\r\nver = \"A5P\"\r\nprint(\"RHYTHMATICA\", ver)\r\n\r\n\r\n#####initialization process#####\r\npygame.mixer.pre_init(44100, -16, 2, 1024) #Little Buffer, Less Delay!\r\npygame.init() #initialize pygame.\r\n\r\n#set its size, flags and caption.\r\nscreen = pygame.display.set_mode(size = (1280, 720))#, flags = pygame.FULLSCREEN)\r\npygame.display.set_caption(\"RHYTHMATICA\")\r\n\r\n#get a new clock. is it a real Rolex? damn, that's cool.\r\nrolex = pygame.time.Clock()\r\n\r\n\r\n#####set required variables#####\r\nsongnumb = 0 #songnumb should start at 0.\r\ndesiredfps = 60 #set fps.\r\nspeed = 1.00\r\nkeylist = (K_t, K_y, K_g, K_h, K_b, K_n)\r\nloc = ((0.35, 0.2), (0.65, 0.2), (0.35, 0.5), (0.65, 0.5), (0.35, 0.8), (0.65, 0.8))\r\n\r\n#####make/load/process resources#####\r\n#make a new Surface filled with white, because black is not my favorite color xD\r\nwhitebg = pygame.Surface(screen.get_size()).convert()\r\nwhitebg.fill((255, 255, 255))\r\n\r\n#load the electron image, resize it and append 10 intro_electron instances with randomly chosen arguments.\r\nelectron = pygame.image.load(\"res/image/ingame/inside1.png\").convert_alpha()\r\nelectron = resize_height(electron, screen.get_height())\r\nelectrons = []\r\nfor x in range(10):\r\n    electrons.append(intro_electron(electron, randint(1, 10)/10, randint(1, 10)/10, randint(1, 2)/10))\r\n\r\noutside = tuple(map(lambda x: pygame.image.load(\"res/image/ingame/outside\"+str(x+1)+\".png\").convert_alpha(), range(6)))\r\n\r\noutline = pygame.image.load(\"res/image/ingame/outsidecover.png\").convert_alpha()\r\noutline = resize_height(outline, screen.get_height() * 0.25)\r\n\r\nhitimg = pygame.image.load(\"res/image/ingame/hit.png\").convert_alpha()\r\nhitimg = resize_onload(screen, hitimg, 0.4)\r\n\r\nmissimg = pygame.image.load(\"res/image/ingame/miss.png\").convert_alpha()\r\nmissimg = resize_onload(screen, missimg, 0.4)\r\n\r\n#load the logo, resize it a bit because it's T 0 0 T H I C C\r\nlogo = pygame.image.load(\"res/image/ingame/Rhythmatica.png\").convert_alpha()\r\nlogo = resize_onload(screen, logo, 0.7)\r\n\r\n#load the loading screen; no need to resize it here since the fadeout/fadein functions resize it. :accreate:\r\nloadimg = pygame.image.load(\"res/image/ingame/loading_wide.png\").convert()\r\n\r\n#load the rating images.\r\nratingimg = dict(map(lambda x: (x, resize_onload(screen, pygame.image.load(\"res/image/rating/\" + x + \".png\").convert_alpha())), ('a', 'b', 'c', 'd', 'f', 's')))\r\n\r\n#load the fonts and render the static texts.
\r\nnoto = {}\r\nnoto['black'] = pygame.font.Font(\"res/fonts/NotoSans-Black.ttf\", 100)\r\nnoto['regular'] = pygame.font.Font(\"res/fonts/NotoSans-Regular.ttf\", 100)\r\npressntostart = noto['black'].render(\"Press N to start\", 10, (0, 0, 0)).convert_alpha()\r\npressntostart = resize_onload(screen, pressntostart, 0.3)\r\nversiontxt = noto['black'].render(\"Ver: \" + ver, 10, (0, 0, 0)).convert_alpha()\r\nversiontxt = resize_onload(screen, versiontxt, 0.1)\r\n\r\n#load my cool intro uwu\r\nintrosound = pygame.mixer.Sound(\"res/audio/effect/Rhythmatica.wav\")\r\n\r\n#load the effect sounds.\r\nstartsound = pygame.mixer.Sound(\"res/audio/effect/start.wav\")\r\nchangesound = pygame.mixer.Sound(\"res/audio/effect/nextsong.wav\")\r\nclapsound = pygame.mixer.Sound(\"res/audio/effect/clap.wav\")\r\nglugsound = pygame.mixer.Sound(\"res/audio/effect/Glug.wav\")\r\ncoinsound = pygame.mixer.Sound(\"res/audio/effect/Coin.wav\")\r\n\r\n#load the result music.\r\nresultsound = pygame.mixer.Sound(\"res/audio/effect/result.wav\")\r\n\r\n#####aaand, finally we draw all of this to the screen! yay!#####\r\n\r\n#at the start, we play the song.\r\n\r\nintrosound.play()\r\n\r\n#####This is the intro code!#####\r\nwhile True: # Let's repeat this until python breaks something.\r\n    #first, blit the background.\r\n    screen.blit(whitebg, (0, 0))\r\n    #each intro_electron instance has its own blit method and will blit itself onto the given surface, so we call all of them.\r\n    for x in electrons:\r\n        x.blit(screen)\r\n    #blit the logo and the (maybe-not-so-kind) texts to the screen.\r\n    blit_center(screen, logo)\r\n    blit_center(screen, pressntostart, (0.5, 0.75))\r\n    blit_center(screen, versiontxt, (1, 1), (1, 1))\r\n    #and... flip! now you can see everything on the screen!\r\n    pygame.display.flip()\r\n    intro_beattime = pygame.time.get_ticks()\r\n    #now it's time to handle some events.\r\n    for event in pygame.event.get(): #get all of the events in the queue.\r\n        if event.type == QUIT: #if user tried to close the window?\r\n            exit() #kill the python. simple\r\n        elif event.type == KEYDOWN: #if user pressed the key?\r\n            if event.key == K_n: #if the key that user pressed is N?\r\n                print(\"n pressed\") #first, print it in the console for debugging purposes.\r\n                break #and, get outta here.\r\n    else: #if nothing broke:\r\n        #130 is the BPM of the song. BPM/60 converts BPM to beats per second, and I doubled it to run this code twice per beat.\r\n        rolex.tick(130 / 60 * 2)\r\n        continue #let's keep this loop going.\r\n    break #if something broke, it will break this loop too.\r\nintrosound.stop() #stop the music.\r\n#the game will loop from here\r\nwhile True:\r\n    startsound.play() #and start the start-effect sound.\r\n    fadeout_screen(rolex, screen, screen, loadimg) #call the fadeout thing\r\n    \r\n    \r\n    #####Selection code starts from here!#####\r\n    #flush the songpacks list.\r\n    songpacks = []\r\n    #load the note directory.\r\n    try:\r\n        songlists = os.listdir(\"note\")\r\n    except FileNotFoundError: #if there was no note folder?\r\n        songlists = [] #set it to blank.\r\n        os.mkdir(\"note\")\r\n    if not songlists: #if the folder is empty:\r\n        print(\"ur note folder is empty. gtfo\") #heh
\r\n        exit()\r\n    #get all the folders and make instances from them\r\n    for x in songlists:\r\n        #make an instance with its own fontset.\r\n        song = songpack(x, noto)\r\n        #if there was an error:\r\n        if song.errmsg:\r\n            #print the error to the console for troubleshooting.\r\n            print(song.errmsg[1])\r\n            print(str(song.errmsg[2].__class__.__name__)+\":\", song.errmsg[2])\r\n            print(\"path:\", song.path)\r\n        else:\r\n            #print info to check that it parsed\r\n            print(song.name, song.artist, song.bpm, song.difficulty)\r\n            songpacks.append(song)\r\n    print()\r\n    songnumb_max = len(songpacks) #get the number of songs available.\r\n    if songnumb >= songnumb_max: #if somehow songnumb is higher than songnumb_max:\r\n        print(\"wot m8 u deleted music while playing? impressive job\") #it doesn't usually happen, so... that really is an impressive job\r\n        songnumb = 0 #to prevent errors, we restore songnumb to 0\r\n    \r\n    #get the selection screen from the songpack instance.\r\n    tmpscreen = songpacks[songnumb].get_surf(screen.get_size())\r\n    screen.blit(songpacks[songnumb].get_surf(screen.get_size()), (0, 0))\r\n    speedstr = str(speed)\r\n    if len(speedstr) == 3:\r\n        speedstr += '0'\r\n    speedtxt = noto['regular'].render(\"Speed: x\" + speedstr, 10, (0, 0, 0), None)\r\n    speedtxt = resize_onload(tmpscreen, speedtxt, 0.15)\r\n    blit_center(tmpscreen, speedtxt, (1, 1), (1, 1))\r\n    #fadein.\r\n    fadein_screen(rolex, screen, tmpscreen, loadimg)\r\n    #play the preview song.\r\n    songpacks[songnumb].pre.play()\r\n    \r\n    #flush the event queue, to prevent trouble when players are pressing their keyboard...\r\n    pygame.event.clear()\r\n    \r\n    #Repeat this until python breaks something, again:\r\n    while True:\r\n        for event in pygame.event.get(): #get all of the events in the queue.\r\n            if event.type == QUIT: #if user tried to close the window?\r\n                exit() #kill the python. simple
\r\n            elif event.type == KEYDOWN: #if user pressed the key?\r\n                if event.key == keylist[0]:\r\n                    if speed > 0.25:\r\n                        speed -= 0.25\r\n                        screen.blit(songpacks[songnumb].get_surf(screen.get_size()), (0, 0))\r\n                        speedstr = str(speed)\r\n                        if len(speedstr) == 3:\r\n                            speedstr += '0'\r\n                        speedtxt = noto['regular'].render(\"Speed: x\" + speedstr, 10, (0, 0, 0), None)\r\n                        speedtxt = resize_onload(screen, speedtxt, 0.15)\r\n                        blit_center(screen, speedtxt, (1, 1), (1, 1))\r\n                        #flip!\r\n                        pygame.display.update()\r\n                    \r\n                elif event.key == keylist[1]:\r\n                    if speed < 5:\r\n                        speed += 0.25\r\n                        screen.blit(songpacks[songnumb].get_surf(screen.get_size()), (0, 0))\r\n                        speedstr = str(speed)\r\n                        if len(speedstr) == 3:\r\n                            speedstr += '0'\r\n                        speedtxt = noto['regular'].render(\"Speed: x\" + speedstr, 10, (0, 0, 0), None)\r\n                        speedtxt = resize_onload(screen, speedtxt, 0.15)\r\n                        blit_center(screen, speedtxt, (1, 1), (1, 1))\r\n                        #flip!\r\n                        pygame.display.update()\r\n                    \r\n                elif event.key == keylist[2]: #If the key that user pressed is G?\r\n                    songpacks[songnumb].pre.stop() #stop the preview song.\r\n                    changesound.play() #play the change effect sound.\r\n                    prevsongnumb = songnumb #back up the previous song number, to show the song-changing effect\r\n                    #Shift the songnumb.\r\n                    if songnumb == 0: \r\n                        songnumb = songnumb_max - 1\r\n                    else:\r\n                        songnumb -= 1\r\n                    #move the images.\r\n                    move_right(rolex, screen, whitebg, songpacks[prevsongnumb], songpacks[songnumb], desiredfps)\r\n                    #get an information surface from the songpack instance\r\n                    screen.blit(songpacks[songnumb].get_surf(screen.get_size()), (0, 0))\r\n                    speedstr = str(speed)\r\n                    if len(speedstr) == 3:\r\n                        speedstr += '0'\r\n                    speedtxt = noto['regular'].render(\"Speed: x\" + speedstr, 10, (0, 0, 0), None)\r\n                    speedtxt = resize_onload(screen, speedtxt, 0.15)\r\n                    blit_center(screen, speedtxt, (1, 1), (1, 1))\r\n                    #flip!\r\n                    pygame.display.update()\r\n                    pygame.time.wait(100) #wait a bit.\r\n                    songpacks[songnumb].pre.play() #play the song.\r\n                    \r\n                elif event.key == keylist[3]: #Almost the same as above.\r\n                    songpacks[songnumb].pre.stop()\r\n                    changesound.play()\r\n                    prevsongnumb = songnumb\r\n                    if songnumb == songnumb_max - 1:\r\n                        songnumb = 0\r\n                    else:\r\n                        songnumb += 1\r\n                    move_left(rolex, screen, whitebg, songpacks[prevsongnumb], songpacks[songnumb], desiredfps)\r\n                    screen.blit(songpacks[songnumb].get_surf(screen.get_size()), (0, 0))\r\n                    speedstr = str(speed)\r\n                    if len(speedstr) == 3:\r\n                        speedstr += '0'\r\n                    speedtxt = noto['regular'].render(\"Speed: x\" + speedstr, 10, (0, 0, 0), None)\r\n                    speedtxt = resize_onload(screen, speedtxt, 0.15)\r\n                    blit_center(screen, speedtxt, (1, 1), (1, 1))\r\n                    pygame.display.update()\r\n                    pygame.time.wait(100)\r\n                    songpacks[songnumb].pre.play()\r\n                    \r\n                elif event.key == keylist[5]: #if the key that user pressed is N?\r\n                    print(\"n pressed\") #first, print it in the console for debugging purposes.\r\n                    break #proceed to the next step!\r\n        else: #If nothing broke:\r\n            continue #do it until they break something\r\n        break #boom\r\n    songpacks[songnumb].pre.stop()\r\n    startsound.play() #and start the start-effect sound.\r\n    fadeout_screen(rolex, screen, screen, loadimg)\r\n    \r\n    #####Preparation Process#####\r\n    #load notes.\r\n    cursongpack = songpacks[songnumb]\r\n    notelist = get_note(cursongpack.notelist)\r\n    noteamount = sum(map(lambda x: len(x), notelist))\r\n    \r\n    #set some variables.\r\n    duration = (60 / cursongpack.bpm) / speed\r\n    notes = []\r\n    \r\n    ispressed = [0, 0, 0, 0, 0, 0]\r\n    shownote = [0, 0, 0, 0, 0, 0]\r\n    judgenote = [0, 0, 0, 0, 0, 0]\r\n    
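# Scoring state for this play-through: hit/miss counters, combo tracking and the judgement-display flags used by the game loop.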
\r\n score = 0\r\n maxcombo = 0\r\n combo = 0\r\n hit = 0\r\n miss = 0\r\n judge_count = [0, 0, True]\r\n ishit = False\r\n ismiss = False\r\n \r\n #draw circles.\r\n background = pygame.Surface(screen.get_size()).convert()\r\n musicbg = cursongpack.image\r\n scrsize = screen.get_size()\r\n imgsize = musicbg.get_size()\r\n if imgsize[0] < imgsize[1]:\r\n musicbg = resize_width(musicbg, scrsize[0])\r\n else:\r\n musicbg = resize_height(musicbg, scrsize[1])\r\n musicbg.set_alpha(100)\r\n \r\n background.blit(whitebg, (0, 0))\r\n blit_center(background, musicbg)\r\n \r\n nametxt = noto['black'].render(cursongpack.artist + \" - \" + cursongpack.name, 10, (255, 255, 255), None)\r\n nametxt = resize_onload(screen, nametxt, 0.4)\r\n nametxt_bg = pygame.Surface((screen.get_width(), nametxt.get_height()))\r\n nametxt_bg.fill((0, 0, 0))\r\n nametxt_bg.set_alpha(100)\r\n blit_center(background, nametxt_bg, (0.5, 0), (0.5, 0))\r\n blit_center(background, nametxt, (0.5, 0), (0.5, 0))\r\n \r\n for x in range(6):\r\n inside = pygame.image.load(\"res/image/ingame/inside\"+str(x+1)+\".png\").convert_alpha()\r\n inside = resize_height(inside, screen.get_height() * 0.25)\r\n blit_center(background, inside, loc[x])\r\n tmpscreen = pygame.Surface(screen.get_size()).convert()\r\n tmpscreen.blit(background, (0, 0))\r\n for x in range(6):\r\n blit_center(tmpscreen, outline, loc[x])\r\n \r\n fadein_screen(rolex, screen, tmpscreen, loadimg)\r\n \r\n starttime = pygame.time.get_ticks()\r\n cursongpack.music.play()\r\n \r\n while pygame.mixer.get_busy():\r\n curtime = get_times(starttime)\r\n curfps = rolex.get_fps()\r\n screen.blit(background, (0, 0))\r\n scoreimg = noto['regular'].render(str(int(score)), 10, (0, 0, 0), None)\r\n scoreimg = resize_height(scoreimg, screen.get_height() * 0.1)\r\n blit_center(screen, scoreimg, (0.5, 1), (0.5, 1))\r\n #####detecting keypress / exit signal, judgement when keydown, gets each key's status(pressed or not)#####\r\n for event in pygame.event.get():\r\n if event.type == KEYDOWN:\r\n if event.key == K_q:\r\n hit = 135\r\n miss = 246\r\n maxcombo = 256\r\n score = 8295\r\n songpacks[songnumb].music.stop()\r\n for key, numb in zip(keylist, range(6)):\r\n if event.key == key:\r\n ispressed[numb] = 1\r\n if not len(notelist[numb]) <= judgenote[numb]:\r\n judgeres = judge(starttime, notelist[numb][judgenote[numb]], duration)\r\n if judgeres:\r\n #print(judgeres)\r\n judgenote[numb] += 1\r\n if judgeres == 1:\r\n clapsound.play()\r\n score += 10000 / noteamount\r\n combo += 1\r\n if combo > maxcombo: maxcombo = combo\r\n hit += 1\r\n ishit = True\r\n ismiss = False\r\n judge_count = [0, 0, True]\r\n else:\r\n combo = 0\r\n miss += 1\r\n ishit = False\r\n ismiss = True\r\n judge_count = [0, 0, True]\r\n break\r\n elif event.type == KEYUP:\r\n for key, numb in zip(keylist, range(6)):\r\n if event.key == key:\r\n ispressed[numb] = 0\r\n break\r\n elif event.type == QUIT:#if user tried to close the window?\r\n exit() #kill the python. 
simple\r\n        #####Draw outlines#####\r\n        for pressed, _loc in zip(ispressed, loc):\r\n            if pressed:\r\n                outline_mod = resize(outline, 1.1)\r\n            else:\r\n                outline_mod = resize(outline, 1)\r\n            blit_center(screen, outline_mod, _loc)\r\n        #####Spawn notes#####\r\n        for x in range(6):\r\n            if not len(notelist[x]) <= shownote[x]:\r\n                if (notelist[x][shownote[x]] - duration) * 1000 <= curtime:\r\n                    noteimg = outside[x]\r\n                    noteimg = resize_height(noteimg, screen.get_height() * 0.25)\r\n                    notes.append(note(x, shownote[x], noteimg))\r\n                    shownote[x] += 1\r\n            #####Judgement when the player pressed the key too late, or did not press it at all#####\r\n            if not len(notelist[x]) <= judgenote[x]:\r\n                if curtime > (notelist[x][judgenote[x]] + 0.3) * 1000:\r\n                    judgenote[x] += 1\r\n                    combo = 0\r\n                    miss += 1\r\n                    ishit = False\r\n                    ismiss = True\r\n                    judge_count = [0, 0, True]\r\n        #####Blit notes, and delete them from the list if they shouldn't be blitted#####\r\n        minusnumb = 0\r\n        for x in range(len(notes)):\r\n            x -= minusnumb\r\n            if notes[x].blit(screen, judgenote, curfps, duration):\r\n                del(notes[x])\r\n                minusnumb += 1\r\n        #####blit judgement text#####\r\n        if ishit:\r\n            judge_count[0] += 1\r\n            if judge_count[2]:\r\n                #hitimg = multilinerender(noto['black'], \"HIT!! \"+str(combo), color = (0, 255, 240))\r\n                #hitimg = resize_height(hitimg, screen.get_height() * 0.4)\r\n                blit_center(screen, hitimg)\r\n            if judge_count[0] >= curfps * 0.05:\r\n                judge_count[0] = 0\r\n                judge_count[1] += 1\r\n                judge_count[2] = not judge_count[2]\r\n            if judge_count[1] == 10:\r\n                ishit = False\r\n        elif ismiss:\r\n            judge_count[0] += 1\r\n            if judge_count[2]:\r\n                blit_center(screen, missimg)\r\n            if judge_count[0] >= curfps * 0.05:\r\n                judge_count[0] = 0\r\n                judge_count[1] += 1\r\n                judge_count[2] = not judge_count[2]\r\n            if judge_count[1] == 10:\r\n                ismiss = False\r\n        if combo:\r\n            combotxt = noto['black'].render(str(combo), 1, (0, 0, 0), None)\r\n            combotxt = resize_height(combotxt, screen.get_height() * 0.1)\r\n            blit_center(screen, combotxt, (0.5, 0.65))\r\n        pygame.display.flip()\r\n        rolex.tick(desiredfps)\r\n    fadeout_screen(rolex, screen, tmpscreen, loadimg, desopacity = 255)\r\n    \r\n    background = pygame.Surface(screen.get_size()).convert()\r\n    musicbg = cursongpack.image\r\n    scrsize = screen.get_size()\r\n    imgsize = musicbg.get_size()\r\n    if imgsize[0] < imgsize[1]:\r\n        musicbg = resize_width(musicbg, scrsize[0])\r\n    else:\r\n        musicbg = resize_height(musicbg, scrsize[1])\r\n    musicbg.set_alpha(100)\r\n    \r\n    background.blit(whitebg, (0, 0))\r\n    blit_center(background, musicbg)\r\n    restxt = noto['black'].render(\"Result\", 10, (0, 0, 0), None)\r\n    restxt = resize_onload(screen, restxt, 0.2)\r\n    blit_center(background, restxt, (0.5, 0), (0.5, 0))\r\n    nametxt = noto['black'].render(cursongpack.artist + \" - \" + cursongpack.name, 10, (255, 255, 255), None)\r\n    nametxt = resize_onload(screen, nametxt, 0.4)\r\n    nametxt_bg = pygame.Surface((screen.get_width(), nametxt.get_height()))\r\n    nametxt_bg.fill((0, 0, 0))\r\n    nametxt_bg.set_alpha(100)\r\n    blit_center(background, nametxt_bg, (0.5, 1), (0.5, 1))\r\n    blit_center(background, nametxt, (0.5, 1), (0.5, 1))\r\n    \r\n    score = int(score)\r\n    hitcount = 0\r\n    misscount = 0\r\n    combocount = 0\r\n    scorecount = 0\r\n    \r\n    scoretxt = multilinerender(noto['regular'], \r\n'''HIT 0\r\nMISS 0\r\nMAXCOMBO 0\r\nSCORE 0''', align = 1)\r\n    scoretxt = resize_height(scoretxt, screen.get_height() * 0.4)\r\n    \r\n    tmpscreen = pygame.Surface(screen.get_size())\r\n    tmpscreen.blit(background, (0, 0))\r\n    blit_center(tmpscreen, 
scoretxt, (0, 0.5), (0, 0.5))\r\n \r\n fadein_screen(rolex, screen, tmpscreen, loadimg)\r\n \r\n screen.blit(tmpscreen, (0, 0))\r\n pygame.display.flip()\r\n \r\n resultsound.play()\r\n pygame.time.wait(1000)\r\n \r\n while not scorecount == score:\r\n coinsound.play()\r\n for event in pygame.event.get():\r\n if event.type == KEYDOWN:\r\n if event.key == K_n:\r\n hitcount = hit\r\n misscount = miss\r\n combocount = maxcombo\r\n scorecount = score\r\n elif event.type == QUIT:#if user tried to close the window?\r\n exit() #kill the python. simple\r\n if hitcount < hit:\r\n hitcount += 5\r\n if hitcount > hit:\r\n hitcount = hit\r\n elif misscount < miss:\r\n misscount += 5\r\n if misscount > miss:\r\n misscount = miss\r\n elif combocount < maxcombo:\r\n combocount += 5\r\n if combocount > maxcombo:\r\n combocount = maxcombo\r\n elif scorecount < score:\r\n scorecount += 100\r\n if scorecount > score:\r\n scorecount = score\r\n \r\n scoretxt = multilinerender(noto['regular'],\r\n \"HIT \" + str(hitcount) + \"\\n\" + \r\n \"MISS \" + str(misscount) + \"\\n\" +\r\n \"MAXCOMBO \" + str(combocount) + \"\\n\" +\r\n \"SCORE \" + str(scorecount),\r\n align = 1)\r\n scoretxt = resize_height(scoretxt, screen.get_height() * 0.4)\r\n \r\n screen.blit(background, (0, 0))\r\n blit_center(screen, scoretxt, (0, 0.5), (0, 0.5))\r\n pygame.display.flip()\r\n \r\n rolex.tick(20)\r\n \r\n background.blit(screen, (0, 0))\r\n \r\n if score == 10000:\r\n rating = \"s\"\r\n elif 8000 <= score < 10000:\r\n rating = \"a\"\r\n elif 6000 <= score < 8000:\r\n rating = \"b\"\r\n elif 4000 <= score < 6000:\r\n rating = \"c\"\r\n elif 2000 <= score < 4000:\r\n rating = \"d\"\r\n elif score < 2000:\r\n rating = \"f\"\r\n \r\n glugsound.play()\r\n rating_size = 0\r\n for x in range(int(desiredfps * 0.3)):\r\n rating_size += 1.25 / (desiredfps * 0.3)\r\n screen.blit(background, (0, 0))\r\n blit_center(screen, resize(ratingimg[rating], rating_size), (0.8, 0.5))\r\n \r\n pygame.display.flip()\r\n rolex.tick(60)\r\n \r\n for x in range(int(desiredfps * 0.2)):\r\n rating_size -= 0.25 / (desiredfps * 0.2)\r\n screen.blit(background, (0, 0))\r\n blit_center(screen, resize(ratingimg[rating], rating_size), (0.8, 0.5))\r\n \r\n pygame.display.flip()\r\n rolex.tick(60)\r\n \r\n while True:\r\n for x in pygame.event.get():\r\n if x.type == QUIT:\r\n exit()\r\n elif x.type == KEYDOWN:\r\n if x.key == K_n:\r\n break\r\n else:\r\n continue\r\n resultsound.stop()\r\n break","repo_name":"KokoseiJ/RHYTHMATICA","sub_path":"game_old.py","file_name":"game_old.py","file_ext":"py","file_size_in_byte":24602,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"1765970911","text":"alph = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\ndef shiftString(string, shift):\n A = ord('A')\n ans = ''\n for c in string:\n ans += chr((ord(c) - A + shift) % 26 + A)\n return ans\n\ndef everyNth(string, start, n):\n ans = ''\n for i in range(start, len(string)):\n if (i - start) % n == 0:\n ans += string[i]\n return ans\n\ndef splitString(string, n):\n ans = []\n for i in range(0,n):\n start = i;\n ans.append(everyNth(string, start, n))\n return ans\n","repo_name":"kenjones21/blackhatChallenge","sub_path":"tools/stringOps.py","file_name":"stringOps.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"72207870971","text":"t = int(input())\n\nwhile t > 0:\n\n n = int(input())\n arr = [int(x) for x in input().split()]\n\n 
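# For each candidate i, XOR every other element; when the result equals arr[i], that value is the answer.\n    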
for i in range(n):\n x = arr[n - 1]\n if i == n - 1:\n x = arr[n - 2]\n\n for j in range(n - 1):\n if i != j:\n x ^= arr[j]\n\n if x == arr[i]:\n print(x)\n break\n\n t -= 1","repo_name":"desai10/competitive-coding","sub_path":"codeforces/Codeforces Round #803 (Div. 2)/XORMixup.py","file_name":"XORMixup.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"22793208704","text":"from http import HTTPStatus\n\nclass AccountClosedError(Exception):\n def __init__(self) -> None:\n self.message = (\n {\"error\":{\n \"message\": \"cannot include new products in a closed/finished account\",\n }},\n HTTPStatus.BAD_REQUEST\n )\n super().__init__(self.message)\n","repo_name":"silviorneto/projeto-kaffa","sub_path":"app/custom_errors/account_closed.py","file_name":"account_closed.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39411972214","text":"from django import forms\nfrom .models import TodoList, TimeTask\n\n\nclass TodoForm(forms.ModelForm):\n class Meta:\n model = TodoList\n fields = [\n \"contents\",\n ]\n labels = {\n \"contents\": \"\",\n }\n widgets = {\n \"contents\": forms.TextInput(attrs={\"placeholder\": \"Write what to do\"}),\n }\n\n\nclass TimeTaskForm(forms.ModelForm):\n class Meta:\n model = TimeTask\n fields = [\n \"contents\",\n ]\n labels = {\n \"contents\": \"\",\n }\n widgets = {\n \"contents\": forms.TextInput(attrs={\"placeholder\": \"Write what to do\"}),\n }\n","repo_name":"hminn/P2P_Study_WebApp","sub_path":"todos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"34049077787","text":"import pymunk as p\r\nfrom pymunk.vec2d import Vec2d\r\nimport unittest\r\n\r\n####################################################################\r\n\r\nclass UnitTestGeneral(unittest.TestCase):\r\n \r\n def testGeneral(self):\r\n p.version\r\n p.inf\r\n p.chipmunk_version\r\n \r\n m = p.moment_for_box(1, 2, 3)\r\n self.assertAlmostEqual(m, 1.08333333333)\r\n \r\n m = p.moment_for_segment(1, Vec2d(-1,0), Vec2d(1,0))\r\n self.assertAlmostEqual(m, 0.33333333333)\r\n \r\n \r\n \r\n \r\nclass UnitTestBB(unittest.TestCase):\r\n def setUp(self):\r\n #print \"testing pymunk version \" + p.version\r\n pass\r\n \r\n def testCreation(self):\r\n bb_empty = p.BB()\r\n \r\n self.assertEqual(bb_empty.left, 0)\r\n self.assertEqual(bb_empty.bottom, 0)\r\n self.assertEqual(bb_empty.right, 0)\r\n self.assertEqual(bb_empty.top , 0)\r\n \r\n bb_defined = p.BB(-10,-5,15,20)\r\n \r\n self.assertEqual(bb_defined.left, -10)\r\n self.assertEqual(bb_defined.bottom, -5)\r\n self.assertEqual(bb_defined.right, 15)\r\n self.assertEqual(bb_defined.top, 20)\r\n \r\n def testMethods(self):\r\n bb1 = p.BB(0,0,10,10)\r\n bb2 = p.BB(10,10,20,20)\r\n bb3 = p.BB(4,4,5,5)\r\n v1 = Vec2d(1,1)\r\n v2 = Vec2d(100,5)\r\n self.assert_(bb1.intersects(bb2))\r\n\r\n self.assertFalse(bb1.contains(bb2))\r\n self.assert_(bb1.contains(bb3))\r\n \r\n self.assert_(bb1.contains_vect(v1))\r\n self.assertFalse(bb1.contains_vect(v2))\r\n \r\n self.assertEqual(bb1.merge(bb2), p.BB(0,0,20,20))\r\n \r\n self.assertEqual(bb1.expand(v1), bb1)\r\n self.assertEqual(bb1.expand(-v2), p.BB(-100,-5,10,10))\r\n \r\n self.assertEqual(bb1.clamp_vect(v2), Vec2d(10,5))\r\n \r\n self.assertEqual(bb1.wrap_vect((11,11)), 
(1,1))\r\n\r\n \r\nclass UnitTestBugs(unittest.TestCase):\r\n def testManyBoxCrash(self):\r\n space = p.Space()\r\n for x in [1,2]:\r\n for y in range(16):\r\n size = 10\r\n box_points = map(Vec2d, [(-size, -size), (-size, size), (size,size), (size, -size)])\r\n body = p.Body(10,20)\r\n shape = p.Poly(body, list(box_points), Vec2d(0,0))\r\n space.add(body, shape)\r\n space.step(1/50.0)\r\n \r\n \r\n def testNoStaticShape(self):\r\n space = p.Space()\r\n \r\n b1 = p.Body(1, p.inf)\r\n c1 = p.Circle(b1, 10)\r\n c1.name = \"c1\"\r\n c1.collision_type = 2\r\n \r\n b2 = p.Body(1, p.inf)\r\n c2 = p.Circle(b2, 10)\r\n c2.name = \"c2\"\r\n \r\n b3 = p.Body(1, p.inf)\r\n c3 = p.Circle(b3, 10)\r\n c3.name = \"c3\"\r\n \r\n b1.position = 0,0\r\n b2.position = 9,0\r\n b3.position = -9,0\r\n \r\n space.add(b1,c1,b2,c2,b3,c3)\r\n \r\n def remove_first(space, arbiter):\r\n first_shape = arbiter.shapes[0]\r\n #print \"REMOVE FIRST\", first_shape.name\r\n if c1 in space.shapes:\r\n space.remove(c1)\r\n #space.add_post_step_callback(space.remove, first_shape, first_shape.body)\r\n\r\n space.add_collision_handler(2, 0, separate = remove_first) \r\n \r\n space.step(1./60)\r\n b2.position = 22,0\r\n space.step(1./60)\r\n \r\n \r\n \r\n \r\n \r\n####################################################################\r\nif __name__ == \"__main__\":\r\n print (\"testing pymunk version \" + p.version)\r\n unittest.main()","repo_name":"imanolarrieta/angrybirds","sub_path":"pymunk-4.0.0/tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"78"} +{"seq_id":"25506711105","text":"# get a string from a given string where all occurrences of its first char have \n# been changed to '$', except the first char itself\n\ndef replace_char(string):\n\tfirst_char = string[0]\n\treplace_char = string.replace(first_char, \"$\")\n\n\treturn string[:1] + replace_char\n\nprint(replace_char(\"rererrrr\"))","repo_name":"kietalang/python_intern","sub_path":"Python exercises/String/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3809542454","text":"import re\n\nfrom pygments.lexer import include, bygroups, using, this, words, inherit\nfrom pygments.token import Text, Keyword, Name, String, Operator, \\\n Number, Punctuation, Literal\n\nfrom pygments.lexers.c_cpp import CLexer, CppLexer\n\n__all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer']\n\n\ndef objective(baselexer):\n \"\"\"\n Generate a subclass of baselexer that accepts the Objective-C syntax\n extensions.\n \"\"\"\n\n # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,\n # since that's quite common in ordinary C/C++ files. It's OK to match\n # JavaDoc/Doxygen keywords that only apply to Objective-C, mind.\n #\n # The upshot of this is that we CANNOT match @class or @interface\n _oc_keywords = re.compile(r'@(?:end|implementation|protocol)')\n\n # Matches [ ? identifier ( identifier ? ] | identifier? 
: )\n # (note the identifier is *optional* when there is a ':'!)\n _oc_message = re.compile(r'\\[\\s*[a-zA-Z_]\\w*\\s+'\n r'(?:[a-zA-Z_]\\w*\\s*\\]|'\n r'(?:[a-zA-Z_]\\w*)?:)')\n\n class GeneratedObjectiveCVariant(baselexer):\n \"\"\"\n Implements Objective-C syntax on top of an existing C family lexer.\n \"\"\"\n\n tokens = {\n 'statements': [\n (r'@\"', String, 'string'),\n (r'@(YES|NO)', Number),\n (r\"@'(\\\\.|\\\\[0-7]{1,3}|\\\\x[a-fA-F0-9]{1,2}|[^\\\\\\'\\n])'\", String.Char),\n (r'@(\\d+\\.\\d*|\\.\\d+|\\d+)[eE][+-]?\\d+[lL]?', Number.Float),\n (r'@(\\d+\\.\\d*|\\.\\d+|\\d+[fF])[fF]?', Number.Float),\n (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),\n (r'@0[0-7]+[Ll]?', Number.Oct),\n (r'@\\d+[Ll]?', Number.Integer),\n (r'@\\(', Literal, 'literal_number'),\n (r'@\\[', Literal, 'literal_array'),\n (r'@\\{', Literal, 'literal_dictionary'),\n (words((\n '@selector', '@private', '@protected', '@public', '@encode',\n '@synchronized', '@try', '@throw', '@catch', '@finally',\n '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer',\n '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong',\n 'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic',\n 'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in',\n 'out', 'inout', 'release', 'class', '@dynamic', '@optional',\n '@required', '@autoreleasepool'), suffix=r'\\b'),\n Keyword),\n (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL',\n 'IBOutlet', 'IBAction', 'unichar'), suffix=r'\\b'),\n Keyword.Type),\n (r'@(true|false|YES|NO)\\n', Name.Builtin),\n (r'(YES|NO|nil|self|super)\\b', Name.Builtin),\n # Carbon types\n (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\\b', Keyword.Type),\n # Carbon built-ins\n (r'(TRUE|FALSE)\\b', Name.Builtin),\n (r'(@interface|@implementation)(\\s+)', bygroups(Keyword, Text),\n ('#pop', 'oc_classname')),\n (r'(@class|@protocol)(\\s+)', bygroups(Keyword, Text),\n ('#pop', 'oc_forward_classname')),\n # @ can also prefix other expressions like @{...} or @(...)\n (r'@', Punctuation),\n inherit,\n ],\n 'oc_classname': [\n # interface definition that inherits\n ('([a-zA-Z$_][\\w$]*)(\\s*:\\s*)([a-zA-Z$_][\\w$]*)?(\\s*)({)',\n bygroups(Name.Class, Text, Name.Class, Text, Punctuation),\n ('#pop', 'oc_ivars')),\n ('([a-zA-Z$_][\\w$]*)(\\s*:\\s*)([a-zA-Z$_][\\w$]*)?',\n bygroups(Name.Class, Text, Name.Class), '#pop'),\n # interface definition for a category\n ('([a-zA-Z$_][\\w$]*)(\\s*)(\\([a-zA-Z$_][\\w$]*\\))(\\s*)({)',\n bygroups(Name.Class, Text, Name.Label, Text, Punctuation),\n ('#pop', 'oc_ivars')),\n ('([a-zA-Z$_][\\w$]*)(\\s*)(\\([a-zA-Z$_][\\w$]*\\))',\n bygroups(Name.Class, Text, Name.Label), '#pop'),\n # simple interface / implementation\n ('([a-zA-Z$_][\\w$]*)(\\s*)({)',\n bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')),\n ('([a-zA-Z$_][\\w$]*)', Name.Class, '#pop')\n ],\n 'oc_forward_classname': [\n ('([a-zA-Z$_][\\w$]*)(\\s*,\\s*)',\n bygroups(Name.Class, Text), 'oc_forward_classname'),\n ('([a-zA-Z$_][\\w$]*)(\\s*;?)',\n bygroups(Name.Class, Text), '#pop')\n ],\n 'oc_ivars': [\n include('whitespace'),\n include('statements'),\n (';', Punctuation),\n (r'\\{', Punctuation, '#push'),\n (r'\\}', Punctuation, '#pop'),\n ],\n 'root': [\n # methods\n (r'^([-+])(\\s*)' # method marker\n r'(\\(.*?\\))?(\\s*)' # return type\n r'([a-zA-Z$_][\\w$]*:?)', # begin of method name\n bygroups(Punctuation, Text, using(this),\n Text, Name.Function),\n 'method'),\n inherit,\n ],\n 'method': [\n include('whitespace'),\n # TODO unsure if ellipses are allowed 
elsewhere, see\n # discussion in Issue 789\n (r',', Punctuation),\n (r'\\.\\.\\.', Punctuation),\n (r'(\\(.*?\\))(\\s*)([a-zA-Z$_][\\w$]*)',\n bygroups(using(this), Text, Name.Variable)),\n (r'[a-zA-Z$_][\\w$]*:', Name.Function),\n (';', Punctuation, '#pop'),\n (r'\\{', Punctuation, 'function'),\n ('', Text, '#pop'),\n ],\n 'literal_number': [\n (r'\\(', Punctuation, 'literal_number_inner'),\n (r'\\)', Literal, '#pop'),\n include('statement'),\n ],\n 'literal_number_inner': [\n (r'\\(', Punctuation, '#push'),\n (r'\\)', Punctuation, '#pop'),\n include('statement'),\n ],\n 'literal_array': [\n (r'\\[', Punctuation, 'literal_array_inner'),\n (r'\\]', Literal, '#pop'),\n include('statement'),\n ],\n 'literal_array_inner': [\n (r'\\[', Punctuation, '#push'),\n (r'\\]', Punctuation, '#pop'),\n include('statement'),\n ],\n 'literal_dictionary': [\n (r'\\}', Literal, '#pop'),\n include('statement'),\n ],\n }\n\n def analyse_text(text):\n if _oc_keywords.search(text):\n return 1.0\n elif '@\"' in text: # strings\n return 0.8\n elif re.search('@[0-9]+', text):\n return 0.7\n elif _oc_message.search(text):\n return 0.8\n return 0\n\n def get_tokens_unprocessed(self, text):\n from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \\\n COCOA_PROTOCOLS, COCOA_PRIMITIVES\n\n for index, token, value in \\\n baselexer.get_tokens_unprocessed(self, text):\n if token is Name or token is Name.Class:\n if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \\\n or value in COCOA_PRIMITIVES:\n token = Name.Builtin.Pseudo\n\n yield index, token, value\n\n return GeneratedObjectiveCVariant\n\n\nclass ObjectiveCLexer(objective(CLexer)):\n \"\"\"\n For Objective-C source code with preprocessor directives.\n \"\"\"\n\n name = 'Objective-C'\n aliases = ['objective-c', 'objectivec', 'obj-c', 'objc']\n filenames = ['*.m', '*.h']\n mimetypes = ['text/x-objective-c']\n priority = 0.05 # Lower than C\n\n\nclass ObjectiveCppLexer(objective(CppLexer)):\n \"\"\"\n For Objective-C++ source code with preprocessor directives.\n \"\"\"\n\n name = 'Objective-C++'\n aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++']\n filenames = ['*.mm', '*.hh']\n mimetypes = ['text/x-objective-c++']\n priority = 0.05 # Lower than C++\n\n\nclass LogosLexer(ObjectiveCppLexer):\n \"\"\"\n For Logos + Objective-C source code with preprocessor directives.\n\n .. 
versionadded:: 1.6\n \"\"\"\n\n name = 'Logos'\n aliases = ['logos']\n filenames = ['*.x', '*.xi', '*.xm', '*.xmi']\n mimetypes = ['text/x-logos']\n priority = 0.25\n\n tokens = {\n 'statements': [\n (r'(%orig|%log)\\b', Keyword),\n (r'(%c)\\b(\\()(\\s*)([a-zA-Z$_][\\w$]*)(\\s*)(\\))',\n bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)),\n (r'(%init)\\b(\\()',\n bygroups(Keyword, Punctuation), 'logos_init_directive'),\n (r'(%init)(?=\\s*;)', bygroups(Keyword)),\n (r'(%hook|%group)(\\s+)([a-zA-Z$_][\\w$]+)',\n bygroups(Keyword, Text, Name.Class), '#pop'),\n (r'(%subclass)(\\s+)', bygroups(Keyword, Text),\n ('#pop', 'logos_classname')),\n inherit,\n ],\n 'logos_init_directive': [\n ('\\s+', Text),\n (',', Punctuation, ('logos_init_directive', '#pop')),\n ('([a-zA-Z$_][\\w$]*)(\\s*)(=)(\\s*)([^);]*)',\n bygroups(Name.Class, Text, Punctuation, Text, Text)),\n ('([a-zA-Z$_][\\w$]*)', Name.Class),\n ('\\)', Punctuation, '#pop'),\n ],\n 'logos_classname': [\n ('([a-zA-Z$_][\\w$]*)(\\s*:\\s*)([a-zA-Z$_][\\w$]*)?',\n bygroups(Name.Class, Text, Name.Class), '#pop'),\n ('([a-zA-Z$_][\\w$]*)', Name.Class, '#pop')\n ],\n 'root': [\n (r'(%subclass)(\\s+)', bygroups(Keyword, Text),\n 'logos_classname'),\n (r'(%hook|%group)(\\s+)([a-zA-Z$_][\\w$]+)',\n bygroups(Keyword, Text, Name.Class)),\n (r'(%config)(\\s*\\(\\s*)(\\w+)(\\s*=\\s*)(.*?)(\\s*\\)\\s*)',\n bygroups(Keyword, Text, Name.Variable, Text, String, Text)),\n (r'(%ctor)(\\s*)({)', bygroups(Keyword, Text, Punctuation),\n 'function'),\n (r'(%new)(\\s*)(\\()(\\s*.*?\\s*)(\\))',\n bygroups(Keyword, Text, Keyword, String, Keyword)),\n (r'(\\s*)(%end)(\\s*)', bygroups(Text, Keyword, Text)),\n inherit,\n ],\n }\n\n _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\\()')\n\n def analyse_text(text):\n if LogosLexer._logos_keywords.search(text):\n return 1.0\n return 0\n\n\nclass SwiftLexer(ObjectiveCLexer):\n \"\"\"\n For `Swift `_ source.\n\n .. 
versionadded:: 2.0\n \"\"\"\n name = 'Swift'\n filenames = ['*.swift']\n aliases = ['swift']\n mimetypes = ['text/x-swift']\n\n keywords_decl = set(('class', 'deinit', 'enum', 'extension', 'func', 'import',\n 'init', 'let', 'protocol', 'static', 'struct', 'subscript',\n 'typealias', 'var'))\n keywords_stmt = set(('break', 'case', 'continue', 'default', 'do', 'else',\n 'fallthrough', 'if', 'in', 'for', 'return', 'switch',\n 'where', 'while'))\n keywords_type = set(('as', 'dynamicType', 'is', 'new', 'super', 'self', 'Self',\n 'Type', '__COLUMN__', '__FILE__', '__FUNCTION__',\n '__LINE__'))\n keywords_resrv = set(('associativity', 'didSet', 'get', 'infix', 'inout', 'left',\n 'mutating', 'none', 'nonmutating', 'operator', 'override',\n 'postfix', 'precedence', 'prefix', 'right', 'set',\n 'unowned', 'unowned(safe)', 'unowned(unsafe)', 'weak',\n 'willSet'))\n operators = set(('->',))\n\n def get_tokens_unprocessed(self, text):\n for index, token, value in ObjectiveCLexer.get_tokens_unprocessed(self, text):\n if token is Name:\n if value in self.keywords_decl:\n token = Keyword\n elif value in self.keywords_stmt:\n token = Keyword\n elif value in self.keywords_type:\n token = Keyword.Type\n elif value in self.keywords_resrv:\n token = Keyword.Reserved\n elif value in self.operators:\n token = Operator\n yield index, token, value\n","repo_name":"Orochimarufan/PythonQt","sub_path":"examples/NicePyConsole/pygments/lexers/objective.py","file_name":"objective.py","file_ext":"py","file_size_in_byte":12851,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"78"} +{"seq_id":"9855890426","text":"# -*- coding: utf-8 -*-\n# @Author: Polly\n# @Date: 2021-08-26 10:13:37\n# @Last Modified by: Polly\n# @Last Modified time: 2021-08-26 10:16:22\nfrom typing import List\n\n\nclass Solution:\n def numRescueBoats(self, people: List[int], limit: int) -> int:\n n = len(people)\n ans = 0\n people.sort()\n light, heavy = 0, n - 1\n while light <= heavy:\n if people[light] + people[heavy] > limit:\n heavy -= 1\n else:\n light, heavy = light + 1, heavy - 1\n ans += 1\n return ans\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.numRescueBoats([1, 2, 3], 3))\n","repo_name":"Polly2014/LeetCode","sub_path":"881_Num_Secue_Boats.py","file_name":"881_Num_Secue_Boats.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26006042352","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/1/12 下午2:16\n# @Title : 389. 
Find the Difference\n# @Link : https://leetcode-cn.com/problems/find-the-difference/\n\n\nQUESTION = \"\"\"\nGiven two strings s and t which consist of only lowercase letters.\n\nString t is generated by randomly shuffling string s and then adding one more letter at a random position.\n\nFind the letter that was added in t.\n\nExample:\nInput:\ns = \"abcd\"\nt = \"abcde\"\nOutput:\ne\nExplanation:\n'e' is the letter that was added.\n\"\"\"\n\n\nTHINKING = \"\"\"\nA plain scan works (find the character that is not in s), but it is inefficient.\nThe problem says the strings contain only lowercase letters and exactly one character is added, which makes this easy: each of the 26 letters has an ASCII code.\nThe difference between the two sums of ASCII codes is exactly the added letter.\n\"\"\"\n\n\nclass Solution:\n def findTheDifference(self, s: str, t: str) -> str:\n return chr(sum(map(ord, t)) - sum(map(ord, s)))\n\n\nif __name__ == '__main__':\n sl = Solution()\n s = \"abcd\"\n t = \"abcde\"\n print(sl.findTheDifference(s, t))\n","repo_name":"reed-qu/leetcode-cn","sub_path":"FindTheDifference.py","file_name":"FindTheDifference.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"28480734751","text":"'''\nChange-making problem: suppose there are only coins of 1 fen, 2 fen, 5 fen, 1 jiao, 2 jiao, 5 jiao and 1 yuan. When giving change at a supermarket checkout,\nthe cashier wants to hand the customer as few coins as possible. Given the amount of change to return, how do we obtain the minimum number of coins?\n'''\n\ndef tanxin():\n d = [0.01,0.02,0.05,0.1,0.2,0.5,1.0]\n d_num = []\n s = 0\n temp = input('Enter the quantity of each coin denomination: ')\n d_num0 = temp.split(\" \")\n for i in range(0, len(d_num0)):\n d_num.append(int(d_num0[i]))\n s += d[i] * d_num[i]\n sum = float(input(\"Enter the amount of change to return: \"))\n if sum > s:\n print(\"Invalid data\")\n return 0\n s = s - sum\n i = 6\n while i >= 0:\n if sum >= d[i]:\n n = int(sum / d[i])\n if n >= d_num[i]:\n n = d_num[i]\n sum -= n * d[i]\n print(\"Used %d coins of %f yuan\" % (n, d[i]))\n i -= 1 # move on to the next smaller denomination; without this the loop never terminates\ntanxin()","repo_name":"gschen/where2go-python-test","sub_path":"1906101041刘仕豪/十二周练习题/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"23303985899","text":"\"\"\"\nFunctions for interacting with Flask SQLAlchemy models\n\"\"\"\n\nimport logging\n\n__author__ = 'Stephen Brown (Little Fish Solutions LTD)'\n\nlog = logging.getLogger(__name__)\n\n\ndef fast_count(db, Model): # noqa\n \"\"\"\n Do a fast but sometimes inaccurate count (postgresql only).\n\n :param db: SQLAlchemy instance\n :param Model: Model class, e.g. User, Order...\n
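\n Example (illustrative; assumes a User model defined elsewhere)::\n\n approx_rows = fast_count(db, User) # fast approximate row count\n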
 \"\"\"\n return db.session.execute(\n 'SELECT n_live_tup FROM pg_stat_all_tables WHERE relname = :tablename',\n {'tablename': Model.__tablename__}\n ).scalar()\n","repo_name":"stevelittlefish/littlefish","sub_path":"littlefish/dbutil.py","file_name":"dbutil.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"17479818421","text":"class Solution:\n # @param candidates, a list of integers\n # @param target, integer\n # @return a list of lists of integers\n def combinationSum(self, candidates, target):\n # write your code here\n result = []\n if not candidates or target is None:\n return result\n candidates = sorted(list(set(candidates)))\n self.dfs(candidates, 0, [], result, target)\n return result\n \n def dfs(self, candidates, start_num, subset, result, target):\n if sum(subset) < target:\n for i in range(start_num, len(candidates)):\n subset.append(candidates[i])\n self.dfs(candidates, i, subset, result, target)\n subset.pop()\n elif sum(subset) == target:\n result.append(subset[:])","repo_name":"cy-zheng/lintcode","sub_path":"九章算法/5 - 深度优先搜索/必做/combination-sum.py","file_name":"combination-sum.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"26666660620","text":"\"\"\" A function file with all binary detection algorithms\"\"\" \nimport cv2\nimport numpy as np\nimport scipy\nimport scipy.ndimage.measurements as measurements\nfrom skimage.feature import peak_local_max\nfrom skimage.morphology import watershed\nimport sys\nimport os\nimport getopt\nimport imutils\n\n# takes a binary image and returns the detected bounding boxes\ndef detect(im, min_box_weight, min_local_max_dist):\n\n w = min_box_weight # abbreviate\n s = [[1,1,1], # structuring element for labeling\n [1,1,1],\n [1,1,1]]\n im = np.uint8(im)\n if len(im.shape) == 3: # color image: convert to grayscale\n im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\n # taken from https://www.pyimagesearch.com/2015/11/02/watershed-opencv/\n # compute exact Euclidean distance from every binary\n # pixel to the nearest zero pixel, then find peaks in this\n # distance map\n D = scipy.ndimage.distance_transform_edt(im) # compute euclidean distance map\n localMax = peak_local_max(D, indices=False, min_distance=min_local_max_dist,\n \tlabels=im) # find peaks in the euclidean distance map\n\n # perform a connected component analysis on the local peaks,\n # using 8-connectivity, then apply the Watershed algorithm\n markers, num_features = measurements.label(im, s) # label image\n markers_m, num_features_m = measurements.label(localMax, s) #\n labels = watershed(-D, markers, mask=im)\n labels_m = watershed(-D, markers_m, mask=im)\n print(\"[INFO] {} unique segments found\".format(num_features))\n\n\n # loop over the unique labels\n # labeled, num_features = measurements.label(im, s) # label image\n # print('number of features detected: ', num_features)\n markers = measurements.find_objects(labels) # find labeled objects, output is slice.\n markers_m = measurements.find_objects(labels_m)\n\n bboxes = []\n bboxes_m = []\n for i in range(len(markers)):\n p1 = markers[i][1].start, markers[i][0].start\n p2 = markers[i][1].stop, markers[i][0].stop\n bboxes.append([p1,p2])\n\n for i in range(len(markers_m)):\n p1_m = markers_m[i][1].start, markers_m[i][0].start\n p2_m = markers_m[i][1].stop, markers_m[i][0].stop\n bboxes_m.append([p1_m,p2_m])\n\n # calculate the average area\n areas = []\n for i in 
range(len(bboxes)):\n p1 = bboxes[i][0]\n p2 = bboxes[i][1]\n area = (p2[0]-p1[0])*(p2[1]-p1[1])\n areas.append(area)\n mean_area = np.mean(areas)\n\n # delete small boxes based on avg box size.\n for i in reversed(range(len(bboxes))):\n p1 = bboxes[i][0]\n p2 = bboxes[i][1]\n area = (p2[0]-p1[0])*(p2[1]-p1[1])\n if area < min_box_weight*mean_area:\n del bboxes[i]\n\n length_boxes = len(bboxes)\n # the following algorithm asks the user if the detection is 'correct'.\n im_copy = 255*np.array(np.copy(im), dtype = np.uint8)\n for i in range(length_boxes):\n p1x = bboxes[i][0][0]\n p1y = bboxes[i][0][1]\n p2x = bboxes[i][1][0]\n p2y = bboxes[i][1][1]\n cv2.rectangle(im_copy, (p1x,p1y), (p2x,p2y), (255,255,255), 2, 1)\n cv2.putText(im_copy, str(i), (p1x,p1y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 1)\n cv2.namedWindow('window',cv2.WINDOW_NORMAL)\n cv2.resizeWindow('window', 1000, 1000)\n cv2.imshow('window', im_copy)\n\n print('Press any key on the image window to continue')\n k = cv2.waitKey(0)\n # ask user input to which boxes to delete or have multiple cells.\n close_window = False\n done_deleting = False\n rerun = []\n delete_list = []\n while close_window == False:\n while done_deleting == False:\n del_num = input(\"Enter a box number to delete. To clear the last deletion, press d. If none, press enter. If done, press q.\")\n if del_num != '' and del_num != 'q' and del_num != 'd':\n delete_list.append(int(del_num))\n if del_num == 'q':\n done_deleting = True # end the deleting loop\n close_window = True\n continue\n if del_num == 'd': # this lets the user correct their deletion.\n delete_list.pop()\n if del_num == '': # done deleting but still have to edit\n done_deleting = True\n if close_window == True: # the user is done deleting AND editing.\n continue # end the ENTIRE editing loop.\n box_num = input(\"Enter a box number that contains more than one cell. To clear last edit, press d. 
If done, press q: \")\n if box_num == 'q':\n close_window = True\n continue\n if box_num == 'd': # this lets the user undo their last edit.\n rerun.pop()\n else:\n print('You have selected box ', str(box_num))\n rerun.append(int(box_num))\n cv2.destroyWindow('window')\n\n\n\n\n # if it's multiple cells, take all points within that box add it to the list\n for i in rerun:\n p1x = bboxes[i][0][0]\n p1y = bboxes[i][0][1]\n p2x = bboxes[i][1][0]\n p2y = bboxes[i][1][1]\n for p in range(len(bboxes_m)):\n p1x_m = bboxes_m[p][0][0]\n p1y_m = bboxes_m[p][0][1]\n p2x_m = bboxes_m[p][1][0]\n p2y_m = bboxes_m[p][1][1]\n print(p1x_m, p1y_m, p2x_m, p2y_m)\n # ' within the ball park'\n if p1x_m > p1x-10 and p1y_m > p1y-10 and p2x_m < p2x+10 and p2y_m < p2y+10 and \\\n (p2x_m-p1x_m)*(p2y_m-p1y_m) > min_box_weight * mean_area: # ensure it's not a small blip\n bboxes.append(bboxes_m[p])\n # cv2.destroyWindow('window')\n # elif (k == 113): # q is pressed\n # break\n\n # the next three for loops take care of deleting and replacing bounding boxes.\n # delete original bounding boxes that were replaced\n for i in range(len(rerun)):\n bboxes[np.max(rerun)] = 0\n rerun.remove(np.max(rerun))\n\n # delete original bounding boxes that were deleted\n for i in range(len(delete_list)):\n bboxes[np.max(delete_list)] = 0\n delete_list.remove(np.max(delete_list))\n\n # delete all boxes set to 0\n for i in reversed(range(len(bboxes))):\n if bboxes[i] == 0:\n del bboxes[i]\n\n print('number of objects detected:' , len(bboxes))\n # cv2.imwrite(output_path, im)\n\n\n # export bounding boxes into x y width height form\n boxes_export = {}\n current_frame_boxes = {}\n for index, box in enumerate(bboxes):\n x = box[0][0]\n y = box[0][1]\n width = box[1][0]-box[0][0]\n height = box[1][1] -box[0][1]\n mid_x = (box[0][0]+box[1][0])/2\n mid_y = (box[0][1] + box[1][1])/2\n box_dict = {\"x\": x, \"y\": y, \"width\": width, \"height\": height, \"mid x\": mid_x, \"mid y\": mid_y}\n current_frame_boxes[\"box \" + str(index)] = box_dict\n\n # write text file containing bounding box information\n # with open(output_picture_directory + '.txt', 'w') as f:\n # f.write(\"%s\\n\" % boxes_export)\n return current_frame_boxes\n\n\n\n\n# takes a folder of images OR an array of images and returns bounding boxes for each frame.\ndef detect_frames(min_box_weight, min_local_max_dist, output_directory = None, images_array = None, input_folder = None, num_frames = None): # folder directory\n # Load images\n if input_folder is not None: # a folder path was given instead of an image array\n images_array = []\n for filename in os.listdir(input_folder):\n img = cv2.imread(os.path.join(input_folder,filename))\n if img is not None:\n images_array.append(img)\n counter = 0\n boxes_export = {}\n for im in images_array:\n # Detect each frame, index them.\n # output_path = output_directory + '_' + str(counter) + '.png'\n current_frame_boxes = detect(im, min_box_weight, min_local_max_dist)\n boxes_export[\"frame \" + str(counter)] = current_frame_boxes\n if num_frames is not None:\n if counter == num_frames-1:\n break\n counter += 1\n\n # # write bounding box frame information\n # with open(output_picture_directory + '.txt', 'w') as f:\n # f.write(\"%s\\n\" % boxes_export)\n return boxes_export\n
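\n# Illustrative usage sketch (the folder path and parameter values here are hypothetical):\n# boxes = detect_frames(0.5, 20, input_folder='masks/', num_frames=5)\n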
","repo_name":"wongrp/chemotaxis_tracker_iou","sub_path":"detector_function.py","file_name":"detector_function.py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41411849957","text":"import autodocdef as ad\nimport tkinter as tk\nfrom tkinter import Checkbutton\nfrom tkinter import messagebox\n#Check-up window\n\n\n \nroot = tk.Tk()\n\ntry:\n id_ = ad.readbin('pat_hold.bin')\nexcept:\n messagebox.showinfo('AutoDoc', 'No ID given.')\n root.destroy()\n ad.os.system('AutoDoc.py')\n quit()\n\nroot.geometry('800x600')\n\nroot.title('AutoDoc - Checkup')\n\nlabel_head = tk.Label(root, text = 'AutoDoc', font=('Courier', 25))\n\nlabel_head.pack()\n\nlabel_sub = tk.Label(root, text = 'Select Relevant Symptoms', font=('Courier', 20))\n\nlabel_sub.pack()\n\n#Symps\n\ndef get_value():\n l = []\n for c in (c1,c2,c3,c4,c5):\n a = c.get()\n if a == False:\n l.append(0)\n else:\n l.append(1)\n diag = ad.symp(l)\n if diag != 'na':\n \n messagebox.showinfo('AutoDoc', 'You have been diagnosed with ' +diag)\n id_[2] = diag\n ad.wribin('pat_hold.bin', id_)\n ad.log('Logged ' + str(id_))\n if l ==[0,0,0,0,0]:\n call = messagebox.askyesno('AutoDoc', 'Press yes to enter symptoms again, or no to end application')\n if call == False:\n quit()\n else:\n root.destroy()\n ad.os.system('ad_checkup.py')\n else:\n root.destroy()\n ad.os.system('ad_sympdisplay.py')\n else:\n ad.log('Diagnosis of combination ' + str(l) + ' failed')\n messagebox.showinfo('AutoDoc', 'Diagnosis failed; please update the owner.')\n call = messagebox.askyesno('AutoDoc', 'Press yes to enter symptoms again, or no to end application')\n if call == False:\n quit()\n else:\n root.destroy()\n ad.os.system('ad_checkup.py')\nc1=tk.BooleanVar()\nc2=tk.BooleanVar()\nc3=tk.BooleanVar()\nc4=tk.BooleanVar()\nc5=tk.BooleanVar()\n\ns1 = Checkbutton(root, text ='Fatigue',variable=c1\n ,font=('Courier', 18)).place(x = 30, y = 100)\n\ns2 = Checkbutton(root, text ='Irregular Heartbeat',variable=c2,\n font=('Courier', 18)).place(x = 30, y = 140)\n \ns3 = Checkbutton(root, text ='Difficulty in Breathing',variable=c3,\n font=('Courier', 18)).place(x = 30, y = 180)\n \ns4 = Checkbutton(root, text ='Headache',variable=c4, \n font=('Courier', 18)).place(x = 30, y = 220)\n\ns5 = Checkbutton(root, text ='Nausea',variable=c5, \n font=('Courier', 18)).place(x = 30, y = 260)\n\nl_place = tk.Label(root, text = ' ').pack(side = 'bottom')\nb_out = tk.Button(root, text = 'Continue', font=('Courier', 20), bg = 'grey', command = get_value).pack(side = 'bottom')\n\n\n\n#End of symps\n\nroot.mainloop()\n","repo_name":"ishankbhatnagar/Symptom-detector-gui","sub_path":"ad_checkup.py","file_name":"ad_checkup.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3164813951","text":"import math\nimport torch\n\ndef _fill_with_neg_inf(t):\n # fill a tensor with -inf while keeping its original dtype (used by _gen_alibi_mask)\n return t.float().fill_(float(\"-inf\")).type_as(t)\n\ndef _get_interleave(n):\n '''\n Generate the slope for each attention head\n '''\n def _get_interleave_power_of_2(n):\n start = (2 ** (-2 ** -(math.log2(n) - 3)))\n ratio = start\n return [start * ratio ** i for i in range(n)]\n\n if math.log2(n).is_integer():\n return _get_interleave_power_of_2(n)\n else:\n closest_power_of_2 = 2 ** math.floor(math.log2(n))\n return _get_interleave_power_of_2(closest_power_of_2) + \\\n _get_interleave(2 * closest_power_of_2)[0::2][:n - closest_power_of_2]\n\nprint(_get_interleave(8))\n\n# [0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625, 0.0078125, 0.00390625]\n\n# corresponding to 2^-1 through 2^-8\n\ndef _gen_alibi_mask(n_head, max_pos):\n slopes = torch.Tensor(_get_interleave(n_head)) # n_head\n alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(max_pos).unsqueeze(0).unsqueeze(0).expand(\n n_head, -1, -1) \n # slopes.unsqueeze(1).unsqueeze(1): [n_head, 1, 1]\n # 
torch.arange(max_pos).unsqueeze(0).unsqueeze(0).expand(n_head, -1, -1): [n_head, 1, max_pos]\n # element-wise product of the two; alibi: [n_head, 1, max_pos]\n alibi = alibi.view(n_head, 1, max_pos)\n alibi_mask = torch.triu(\n _fill_with_neg_inf(torch.zeros([max_pos, max_pos])), 1\n )\n # _fill_with_neg_inf(torch.zeros([max_pos, max_pos])): first create a zero matrix of shape [max_pos, max_pos], then fill every element with -inf; the result is still [max_pos, max_pos].\n # torch.triu(..., 1): keep only the part above the main diagonal (the offset 1 starts one row above it) and zero out the rest; the result is still [max_pos, max_pos].\n # In short, alibi_mask is a [max_pos, max_pos] upper-triangular matrix: -inf strictly above the diagonal, 0 everywhere else.\n # alibi_mask therefore masks out the upper triangle (future positions).\n alibi_mask = alibi_mask.unsqueeze(0) + alibi\n # alibi_mask.unsqueeze(0) + alibi: adds the two tensors. Their shapes are (1, max_pos, max_pos) and (n_head, 1, max_pos), so the addition is a broadcasting operation: dimensions are compared from the last one backwards, and a dimension of size 1 is expanded to the larger size. Here alibi_mask is expanded n_head times along the first dimension and alibi max_pos times along the second, so the result has shape (n_head, max_pos, max_pos).\n return alibi_mask\n\nn_head, max_pos = 8, 4 # demo sizes for the example prints below\nprint(torch.arange(max_pos).unsqueeze(0).unsqueeze(0).expand(\n n_head, -1, -1))\n# tensor([[[0, 1, 2, 3]],\n\n# [[0, 1, 2, 3]],\n\n# [[0, 1, 2, 3]],\n\n# [[0, 1, 2, 3]],\n\n# [[0, 1, 2, 3]],\n\n# [[0, 1, 2, 3]],\n\n# [[0, 1, 2, 3]],\n\n# [[0, 1, 2, 3]]])\n\nprint(_gen_alibi_mask(8, 4))\n# tensor([[[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.5000, -inf, -inf],\n# [0.0000, 0.5000, 1.0000, -inf],\n# [0.0000, 0.5000, 1.0000, 1.5000]],\n\n# [[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.2500, -inf, -inf],\n# [0.0000, 0.2500, 0.5000, -inf],\n# [0.0000, 0.2500, 0.5000, 0.7500]],\n\n# [[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.1250, -inf, -inf],\n# [0.0000, 0.1250, 0.2500, -inf],\n# [0.0000, 0.1250, 0.2500, 0.3750]],\n\n# [[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.0625, -inf, -inf],\n# [0.0000, 0.0625, 0.1250, -inf],\n# [0.0000, 0.0625, 0.1250, 0.1875]],\n\n# [[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.0312, -inf, -inf],\n# [0.0000, 0.0312, 0.0625, -inf],\n# [0.0000, 0.0312, 0.0625, 0.0938]],\n\n# [[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.0156, -inf, -inf],\n# [0.0000, 0.0156, 0.0312, -inf],\n# [0.0000, 0.0156, 0.0312, 0.0469]],\n\n# [[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.0078, -inf, -inf],\n# [0.0000, 0.0078, 0.0156, -inf],\n# [0.0000, 0.0078, 0.0156, 0.0234]],\n\n# [[0.0000, -inf, -inf, -inf],\n# [0.0000, 0.0039, -inf, -inf],\n# [0.0000, 0.0039, 0.0078, -inf],\n# [0.0000, 0.0039, 0.0078, 0.0117]]])
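\n\n# Illustrative application sketch: ALiBi biases are added to raw attention scores\n# of shape [n_head, max_pos, max_pos] before the softmax (scores is hypothetical here):\n# scores = scores + _gen_alibi_mask(n_head, max_pos)\n# probs = torch.softmax(scores, dim=-1)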
","repo_name":"zjchen77/ML-HPC-Kernels","sub_path":"alibi.py","file_name":"alibi.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2712433961","text":"#Embedded file name: dbus/_expat_introspect_parser.py\nfrom xml.parsers.expat import ExpatError, ParserCreate\nfrom dbus.exceptions import IntrospectionParserException\n\nclass _Parser(object):\n __slots__ = ('map', 'in_iface', 'in_method', 'sig')\n\n def __init__(self):\n self.map = {}\n self.in_iface = ''\n self.in_method = ''\n self.sig = ''\n\n def parse(self, data):\n parser = ParserCreate('UTF-8', ' ')\n parser.buffer_text = True\n parser.StartElementHandler = self.StartElementHandler\n parser.EndElementHandler = self.EndElementHandler\n parser.Parse(data)\n return self.map\n\n def StartElementHandler(self, name, attributes):\n if not self.in_iface:\n if not self.in_method and name == 'interface':\n self.in_iface = attributes['name']\n elif not self.in_method and name == 'method':\n self.in_method = attributes['name']\n elif self.in_method and name == 'arg':\n if attributes.get('direction', 'in') == 'in':\n self.sig += attributes['type']\n\n def EndElementHandler(self, name):\n if self.in_iface:\n if not self.in_method and name == 'interface':\n self.in_iface = ''\n elif self.in_method and name == 'method':\n self.map[self.in_iface + '.' + self.in_method] = self.sig\n self.in_method = ''\n self.sig = ''\n\n\ndef process_introspection_data(data):\n try:\n return _Parser().parse(data)\n except Exception as e:\n raise IntrospectionParserException('%s: %s' % (e.__class__, e))\n","repo_name":"bizonix/DropBoxLibrarySRC","sub_path":"pyc_decrypted/latest/dbus/_expat_introspect_parser.py","file_name":"_expat_introspect_parser.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"12783070281","text":"def paper(d_arr):\r\n global rlt\r\n for r in range(102):\r\n if sum(d_arr[r]) != 0:\r\n code = []\r\n for idx in range(101):\r\n if d_arr[r][idx] != d_arr[r][idx + 1]:\r\n code.append(d_arr[r][idx])\r\n rlt += sum(code) * 2\r\n\r\nN = int(input())\r\narr = [[0] * 102 for _ in range(102)]\r\nfor _ in range(N):\r\n N, M = map(int, input().split())\r\n for n in range(N, N+10):\r\n for m in range(M, M+10):\r\n arr[n][m] = 1\r\n\r\nrlt = 0\r\npaper(arr)\r\npaper(list(zip(*arr)))\r\nprint(rlt)","repo_name":"baebaemin/Solved_Algorithm","sub_path":"백준/Silver/2567. 색종이 - 2/색종이 - 2.py","file_name":"색종이 - 2.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36399987149","text":"#!/usr/bin/env python\n\nfrom IPython import embed\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom r4d_common.plot_tools import get_data, simple_plot, odom_pos\n\ndef solve_time():\n s = [\n ['elapsed_time', '~/fiducial_slam/out_multimarker_table.bag', '/solve_time', lambda m: m.data.to_sec()],\n ]\n\n df = get_data(s).iloc[:1000:50]\n\n embed()\n\n fig = {\n 'data': [go.Bar(x=range(1, 1000, 50),\n y=df.elapsed_time,\n name='Solve time',)],\n 'layout': {\n 'xaxis': {'title': 'Frame number'},\n 'yaxis': {'title': \"Solve time [s]\"}\n }\n }\n\n url = py.plot(fig, filename='r4d_2_multimarker_solve_time')\n\nif __name__ == \"__main__\":\n solve_time()","repo_name":"nicolov/robotics_for_developers","sub_path":"r4d_2_multimarker/scripts/plot_solve_time.py","file_name":"plot_solve_time.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"78"} +{"seq_id":"19646524584","text":"#!/usr/bin/python\n\nimport os\nimport sys\n\n## For each image with several people: split it into several images with one person per image.\n## The result is placed in 'output'.\n## If there is only one person, it is assumed to be a patient and sent to the \"paciente\" directory.\n## If there is more than one person in the image, they are sent to the \"incognita\" directory.\n\nsys.path.append('/home/fernando/Proyectos/PÓS-GRADUAÇÂO/TESIS-DOUTORADO-2/PESQUISA/software/WorkingWithFiles/library');\nimport WorkingWithFiles as rnfunc\n\nsys.path.append('/home/fernando/Proyectos/PÓS-GRADUAÇÂO/TESIS-DOUTORADO-2/PESQUISA/software/OpenpifpafTools/library');\nimport OpenpifpafAnnotations as opp\nimport OpenpifpafGetData as oppd\n\n\nbasedir='/mnt/boveda/DATASETs/PATIENT-IMAGES';\n\nnegative_list=[ 
os.path.join(basedir,\"dataset_800/anger\"),\n os.path.join(basedir,\"dataset_800/disgust\"),\n os.path.join(basedir,\"dataset_800/fear\"),\n os.path.join(basedir,\"dataset_800/pain\"),\n os.path.join(basedir,\"dataset_800/sad\"),\n os.path.join(basedir,\"dataset_800/surprise-disgust\")];\nneutral_list =[ os.path.join(basedir,\"dataset_800/neutro\")]\npositive_list=[ os.path.join(basedir,\"dataset_800/happy\"),\n os.path.join(basedir,\"dataset_800/surprise-happy\")];\n\nlista1=rnfunc.get_all_files_in_dir_list(negative_list);\nlista2=rnfunc.get_all_files_in_dir_list(neutral_list);\nlista3=rnfunc.get_all_files_in_dir_list(positive_list);\n\n\noutput='output'; ## \"patient_people\"\npath_paciente = os.path.join(output,'paciente');\npath_incognita = os.path.join(output,'incognita');\ntry: \n os.mkdir(output) \nexcept: \n pass\ntry: \n os.mkdir(path_paciente) \nexcept: \n pass\ntry: \n os.mkdir(path_incognita) \nexcept: \n pass\n\ntotal=lista1+lista2+lista3\n\nk=1;\nj=1;\nfilename=\"filename\";\nfor filepath in total:\n print(filepath)\n annotation,pil_im=opp.get_openpifpaf_annotation_from_imgpath(filepath);\n N=len(annotation);\n if(N>1):\n for annot in annotation:\n tupla=oppd.get_body_bounding_rectangle(annot.data, factor=1.4);\n if not((tupla[0]==0)and(tupla[1]==0)and(tupla[2]==0)and(tupla[3]==0)):\n tupla=oppd.get_valid_bounding_rectangle(tupla, (pil_im.size[0],pil_im.size[1]))\n pil_im_crop = pil_im.crop(tupla);\n pil_im_crop.save(os.path.join(path_incognita,filename+str(k)+\".png\"));\n k=k+1;\n elif (N==1):\n for annot in annotation:\n tupla=oppd.get_body_bounding_rectangle(annot.data, factor=1.4);\n if not((tupla[0]==0)and(tupla[1]==0)and(tupla[2]==0)and(tupla[3]==0)):\n tupla=oppd.get_valid_bounding_rectangle(tupla, (pil_im.size[0],pil_im.size[1]))\n pil_im_crop = pil_im.crop(tupla);\n pil_im_crop.save(os.path.join(path_paciente,filename+str(j)+\".png\"));\n j=j+1;\n \n","repo_name":"trucomanx/datagen_labeling_script","sub_path":"split_image_patient_people.py","file_name":"split_image_patient_people.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7751713114","text":"import datetime\nimport os\nimport os.path as osp\nimport shutil\nimport numpy as np\nimport pytz\nimport scipy.misc\nimport torch\nimport tqdm\nfrom PIL import Image\nfrom loss import CrossEntropyLoss, resize_labels\nfrom utils import visualize_segmentation, get_tile_image, learning_curve\nfrom metrics import runningScore, averageMeter, get_multiscale_results\n\n\nclass Trainer:\n def __init__(self, device, model, optimizer, scheduler, train_loader,\n val_loader, out, epochs, n_classes, val_epoch=10):\n self.device = device\n\n self.model = model\n self.optim = optimizer\n self.scheduler = scheduler\n self.train_loader = train_loader\n self.val_loader = val_loader\n\n self.timestamp_start = \\\n datetime.datetime.now(pytz.timezone('UTC'))\n\n self.val_epoch = val_epoch\n\n self.out = out\n if not osp.exists(self.out):\n os.makedirs(self.out)\n\n self.log_headers = [\n 'epoch',\n 'train/loss',\n 'train/acc',\n 'train/acc_cls',\n 'train/mean_iu',\n 'train/fwavacc',\n 'valid/loss',\n 'valid/acc',\n 'valid/acc_cls',\n 'valid/mean_iu',\n 'valid/fwavacc',\n 'elapsed_time',\n ]\n if not osp.exists(osp.join(self.out, 'log.csv')):\n with open(osp.join(self.out, 'log.csv'), 'w') as f:\n f.write(','.join(self.log_headers) + '\\n')\n\n self.n_classes = n_classes\n self.epoch = 1\n self.epochs = epochs\n self.best_mean_iu = 0\n\n def 
train_epoch(self):\n if self.epoch % self.val_epoch == 0 or self.epoch == 1:\n self.validate()\n\n self.model.train()\n train_metrics = runningScore(self.n_classes)\n train_loss_meter = averageMeter()\n\n self.optim.zero_grad()\n\n for rgb, ir, target in tqdm.tqdm(\n self.train_loader, total=len(self.train_loader),\n desc=f'Train epoch={self.epoch}', ncols=80, leave=False):\n\n self.iter += 1\n assert self.model.training\n\n rgb, ir, target = rgb.to(self.device), ir.to(self.device), target.to(self.device)\n score = self.model(rgb, ir)\n # score = self.model(rgb)\n\n weight = self.train_loader.dataset.class_weight\n if weight:\n weight = torch.Tensor(weight).to(self.device)\n\n loss = CrossEntropyLoss(score, target, weight=weight, ignore_index=-1, reduction='mean')\n\n loss_data = loss.data.item()\n train_loss_meter.update(loss_data)\n\n if np.isnan(loss_data):\n raise ValueError('loss is nan while training')\n\n # loss.backward(retain_graph=True)\n loss.backward()\n\n self.optim.step()\n self.optim.zero_grad()\n\n if isinstance(score, (tuple, list)):\n lbl_pred = score[0].data.max(1)[1].cpu().numpy()\n else:\n lbl_pred = score.data.max(1)[1].cpu().numpy()\n lbl_true = target.data.cpu().numpy()\n train_metrics.update(lbl_true, lbl_pred)\n\n acc, acc_cls, mean_iou, fwavacc, _ = train_metrics.get_scores()\n metrics = [acc, acc_cls, mean_iou, fwavacc]\n\n with open(osp.join(self.out, 'log.csv'), 'a') as f:\n elapsed_time = (\n datetime.datetime.now(pytz.timezone('UTC')) -\n self.timestamp_start).total_seconds()\n log = [self.epoch] + [train_loss_meter.avg] + \\\n metrics + [''] * 5 + [elapsed_time]\n log = map(str, log)\n f.write(','.join(log) + '\\n')\n\n if self.scheduler:\n self.scheduler.step()\n if self.epoch % self.val_epoch == 0 or self.epoch == 1:\n lr = self.optim.param_groups[0]['lr']\n print(f'\\nCurrent base learning rate of epoch {self.epoch}: {lr:.7f}')\n\n train_loss_meter.reset()\n train_metrics.reset()\n\n def validate(self):\n\n visualizations = []\n val_metrics = runningScore(self.n_classes)\n val_loss_meter = averageMeter()\n\n with torch.no_grad():\n self.model.eval()\n for rgb, ir, target in tqdm.tqdm(\n self.val_loader, total=len(self.val_loader),\n desc=f'Valid epoch={self.epoch}', ncols=80, leave=False):\n\n rgb, ir, target = rgb.to(self.device), ir.to(self.device), target.to(self.device)\n\n score = self.model(rgb, ir)\n # score = self.model(rgb)\n\n weight = self.val_loader.dataset.class_weight\n if weight:\n weight = torch.Tensor(weight).to(self.device)\n\n loss = CrossEntropyLoss(score, target, weight=weight, reduction='mean', ignore_index=-1)\n loss_data = loss.data.item()\n if np.isnan(loss_data):\n raise ValueError('loss is nan while validating')\n\n val_loss_meter.update(loss_data)\n\n rgbs = rgb.data.cpu()\n irs = ir.data.cpu()\n\n if isinstance(score, (tuple, list)):\n lbl_pred = score[0].data.max(1)[1].cpu().numpy()\n else:\n lbl_pred = score.data.max(1)[1].cpu().numpy()\n lbl_true = target.data.cpu()\n\n for rgb, ir, lt, lp in zip(rgbs, irs, lbl_true, lbl_pred):\n rgb, ir, lt = self.val_loader.dataset.untransform(rgb, ir, lt)\n val_metrics.update(lt, lp)\n if len(visualizations) < 9:\n viz = visualize_segmentation(\n lbl_pred=lp, lbl_true=lt, img=rgb, ir=ir,\n n_classes=self.n_classes, dataloader=self.train_loader)\n visualizations.append(viz)\n\n acc, acc_cls, mean_iou, fwavacc, cls_iu = val_metrics.get_scores()\n metrics = [acc, acc_cls, mean_iou, fwavacc]\n\n print(f'\\nEpoch: {self.epoch}', f'loss: {val_loss_meter.avg}, mIoU: {mean_iou}')\n\n out = 
osp.join(self.out, 'visualization_viz')\n if not osp.exists(out):\n os.makedirs(out)\n out_file = osp.join(out, 'epoch{:0>5d}.jpg'.format(self.epoch))\n scipy.misc.imsave(out_file, get_tile_image(visualizations))\n\n with open(osp.join(self.out, 'log.csv'), 'a') as f:\n elapsed_time = (\n datetime.datetime.now(pytz.timezone('UTC')) -\n self.timestamp_start).total_seconds()\n log = [self.epoch] + [''] * 5 + \\\n [val_loss_meter.avg] + metrics + [elapsed_time]\n log = map(str, log)\n f.write(','.join(log) + '\\n')\n\n mean_iu = metrics[2]\n is_best = mean_iu > self.best_mean_iu\n if is_best:\n self.best_mean_iu = mean_iu\n torch.save({\n 'epoch': self.epoch,\n 'arch': self.model.__class__.__name__,\n 'optim_state_dict': self.optim.state_dict(),\n 'model_state_dict': self.model.state_dict(),\n 'best_mean_iu': self.best_mean_iu,\n }, osp.join(self.out, 'checkpoint.pth.tar'))\n if is_best:\n shutil.copy(osp.join(self.out, 'checkpoint.pth.tar'),\n osp.join(self.out, 'model_best.pth.tar'))\n\n val_loss_meter.reset()\n val_metrics.reset()\n\n class_name = self.val_loader.dataset.class_names\n if class_name is not None:\n for index, value in enumerate(cls_iu.values()):\n offset = 20 - len(class_name[index])\n print(class_name[index] + ' ' * offset + f'{value * 100:>.2f}')\n else:\n print(\"\\nclass_names not specified, using class indices instead\")\n for key, value in cls_iu.items():\n print(key, f'{value * 100:>.2f}')\n\n def train(self):\n self.iter = 0\n for epoch in tqdm.trange(self.epoch, self.epochs + 1,\n desc='Train', ncols=80):\n self.epoch = epoch\n self.train_epoch()\n\n # learning_curve(osp.join(self.out, 'log.csv'))\n","repo_name":"GitLanx/MultiModalSeg","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"42038603878","text":"from typing import Any, Dict, List, Type\n\nimport networkx as nx\nimport pandas as pd\n\nfrom visions.application.summaries.frame.dataframe_series_summary import (\n dataframe_series_summary,\n)\nfrom visions.application.summaries.frame.dataframe_summary import dataframe_summary\nfrom visions.application.summaries.frame.dataframe_type_summary import (\n dataframe_type_summary,\n)\nfrom visions.types import VisionsBaseType\nfrom visions.utils.graph import output_graph\n\n\nclass Summary(object):\n def __init__(self, summary_ops, typeset):\n \"\"\"\n\n Args:\n summary_ops: mapping from a visions type to the summary functions applied to series of that type\n typeset: the typeset whose type graph is used to resolve summaries\n \"\"\"\n self.typeset = typeset\n if summary_ops is None:\n summary_ops = {}\n\n if not all(\n issubclass(base_type, VisionsBaseType) for base_type in summary_ops.keys()\n ):\n raise TypeError(\"Summaries must be mapped on a type!\")\n\n self.summary_ops = summary_ops\n\n def summarize_frame(\n self, df: pd.DataFrame, series_summary: dict, series_types: dict\n ):\n \"\"\"Summarize a DataFrame based on the DataFrame object and the summaries of individual series\n\n Args:\n df: the DataFrame object\n series_summary: mapping from column name to the individual summaries\n series_types: mapping from column name to the series' type\n\n Returns:\n A summary of the DataFrame\n
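\n Example (illustrative; the column name and its visions type are hypothetical)::\n\n summary.summarize_frame(df, {'age': age_summary}, {'age': Integer})\n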
 \"\"\"\n return {\n **dataframe_summary(df),\n **dataframe_type_summary(series_types),\n **dataframe_series_summary(series_summary),\n }\n\n def summarize_series(\n self, series: pd.Series, summary_type: Type[VisionsBaseType]\n ) -> Dict[str, Any]:\n \"\"\"\n\n Args:\n series: the series to summarize\n summary_type: the visions type detected for the series\n\n Returns:\n A dictionary of summary values for the series\n\n \"\"\"\n summary: Dict[str, Any] = {}\n\n G = self.typeset.base_graph.copy()\n\n done: List[Any] = []\n for base_type, summary_ops in self.summary_ops.items():\n if base_type not in done and nx.has_path(G, base_type, summary_type):\n for op in summary_ops:\n summary.update(op(series))\n done.append(base_type)\n\n return summary\n\n def summarize(self, df: pd.DataFrame, types: dict) -> dict:\n \"\"\"\n\n Args:\n df: the DataFrame to summarize\n types: mapping from column name to the detected visions type\n\n Returns:\n A dictionary with the types, the per-series summaries and the frame summary\n\n \"\"\"\n series_summary = {\n col: self.summarize_series(df[col], types[col]) for col in df.columns\n }\n frame_summary = self.summarize_frame(df, series_summary, types)\n return {\"types\": types, \"series\": series_summary, \"frame\": frame_summary}\n\n def plot(self, file_name, type_specific=None):\n \"\"\"\n\n Args:\n file_name: path of the output file for the rendered graph\n type_specific: if given, restrict the plot to this type and its ancestors\n\n Returns:\n\n \"\"\"\n G = self.typeset.base_graph.copy()\n G.graph[\"node\"] = {\"shape\": \"box\", \"color\": \"red\"}\n\n included_nodes = G.nodes\n if type_specific is not None:\n included_nodes = nx.ancestors(G, type_specific)\n included_nodes.add(type_specific)\n G.remove_nodes_from(G.nodes - included_nodes)\n\n G.add_node(\"summary\", shape=\"note\")\n for base_type, summary_ops in self.summary_ops.items():\n if len(summary_ops) > 0 and base_type in included_nodes:\n G.add_edge(\n str(base_type),\n \"summary\",\n label=\"\\n\".join([str(op.__name__) for op in summary_ops]),\n )\n\n output_graph(G, file_name)\n","repo_name":"sid-the-coder/QuickDA","sub_path":"environment/lib/python3.7/site-packages/visions/application/summaries/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"78"} +{"seq_id":"6719490301","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DirectModel(nn.Module):\n def __init__(self):\n super(DirectModel,self).__init__()\n\n # define encoder\n res50 = torchvision.models.__dict__['resnet50']()\n # res50.load_state_dict(state_dict, strict=False)\n res50_C4_layers = []\n for name, module in res50.named_children():\n if name in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:\n res50_C4_layers.append(module)\n self.encoder = nn.Sequential(*res50_C4_layers)\n\n # define transformation\n self.fc_transform = nn.Sequential(\n nn.Linear(25 * 25, 25 * 25),\n nn.ReLU(),\n nn.Linear(25 * 25, 100 * 100),\n nn.ReLU()\n )\n\n pool_scales = [1, 10, 30, 50, 70]\n ppm = []\n for scale in pool_scales:\n ppm.append(nn.Sequential(\n nn.AdaptiveAvgPool2d(scale),\n nn.Conv2d(1024, 64, kernel_size=1, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True)\n ))\n self.ppm = nn.ModuleList(ppm)\n\n self.upsample = nn.Upsample((100, 100), mode='bilinear', align_corners=False)\n\n self.conv_last = nn.Sequential(\n nn.Conv2d(320, 50, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(50),\n nn.ReLU(inplace=True),\n nn.Dropout2d(0.1),\n nn.Conv2d(50, 2, kernel_size=1)\n )\n\n def forward(self, y):\n l = len(y[:, 0, 0, 0])\n x = y.view(l, 3, 400, 400)\n\n x = self.encoder(x)\n # print('2 shape', x.shape)\n x = x.view(x.size(0), x.size(1), x.size(2) * x.size(3))\n # print('3 shape', x.shape)\n view_comb = self.fc_transform(x)\n # print('4 shape', view_comb.shape)\n view_comb = view_comb.view(x.size(0), x.size(1), 100, 100)\n # view_comb = self.upsample(view_comb)\n\n ppm_out = []\n for pool_scale in self.ppm:\n out = self.upsample(pool_scale(view_comb))\n ppm_out.append(out)\n # # print('5 shape', x.shape)\n ppm_out = torch.cat(ppm_out, 1)\n\n x = 
self.conv_last(ppm_out)\n # 10 * 2 * 100 * 100\n # print('6 shape', x.shape)\n\n x = x.permute(0, 2, 3, 1)\n x = F.softmax(x, dim=3)\n # print('7 shape', x.shape)\n\n return x\n","repo_name":"nonococoleo/image-consistency","sub_path":"direct_model.py","file_name":"direct_model.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"16702013054","text":"import builtins\nfrom typing import Any, List, Tuple\n\nfrom .calls import Call\nfrom .interfaces import Observer\nfrom .wrapper import AttributeWrapper\n\n\ndef fake_builtins():\n real_isinstance = builtins.isinstance\n\n def fake_isinstance(initial: Any, class_or_tuple: Any) -> bool:\n if real_isinstance(initial, Spy):\n return initial.class_ == class_or_tuple\n return real_isinstance(initial, class_or_tuple)\n\n builtins.isinstance = fake_isinstance\n\n\nclass Spy(Observer):\n \"\"\"\n The test-double (spy), which replaces the desired object. Its attributes return None and its methods do\n nothing unless configured otherwise, but all of the calls are recorded. This class is used to verify that\n the expected functions were called with the expected arguments.\n \"\"\"\n first_instance = False\n\n def __init__(self, obj: Any = None):\n super().__init__()\n if not Spy.first_instance:\n Spy.first_instance = True\n fake_builtins()\n self.chain: List[Call] = []\n self._returns = None\n self._raises = None\n if obj is not None:\n for name in dir(obj):\n if name == '__dict__':\n continue\n if callable(getattr(obj, name)):\n if name != '__class__':\n setattr(self, name, AttributeWrapper(name, self))\n else:\n setattr(self, name, self.__class__)\n else:\n setattr(self, name, None)\n self.basic = obj\n self.class_ = obj.__class__\n\n def notify(self, _call: Call):\n self.chain.append(_call)\n\n def __call__(self, *args, **kwargs):\n if self._raises is not None:\n raise self._raises\n self.chain.append(Call('', *args, **kwargs))\n return self._returns\n\n def __str__(self):\n if self.basic is None:\n return f'Empty Test Spy'\n return f'Test spy of the \"{self.basic}\" {type(self.basic)}'\n\n def all_calls(self) -> List[Call]:\n \"\"\"\n Returns list of Call objects (all method calls of the spied object)\n :return: List[Call]\n \"\"\"\n return self.chain\n\n def was_called(self) -> bool:\n \"\"\"\n Returns True if the spy object itself was called\n :return: bool\n \"\"\"\n return self.was_function_called('')\n\n def was_called_with_argument(self, arg: Any) -> bool:\n \"\"\"\n Returns True if the spy object itself was called with the exact argument\n :param arg: Any argument to look for\n :return: bool\n \"\"\"\n return self.was_function_with_argument_called('', arg)\n\n def returns(self, result: Any):\n \"\"\"\n Set the result returned when the spy object itself is called\n :param result: value to return when called\n :return: None\n \"\"\"\n self._returns = result\n\n def raises(self, exception_object: Exception):\n \"\"\"\n Raise an exception when the object itself is called (not its methods!)\n :param exception_object: exception object to raise\n :return: None\n \"\"\"\n self._raises = exception_object\n\n def was_function_called(self, name: str) -> bool:\n \"\"\"\n Returns True if exact function/method was called on spied object\n :param name: name of the function/method\n :return: bool\n \"\"\"\n return any([e for e in self.chain if e.name == name])\n\n def was_function_with_argument_called(self, name: str, arg: Any) -> bool:\n \"\"\"\n Returns True if exact function/method was called with exact argument on spied object\n :param name: name of the function/method\n :param arg: any argument\n :return: bool\n
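\n Example (illustrative; assumes the spy wraps an object with a do_work method)::\n\n spy.do_work(42)\n assert spy.was_function_with_argument_called('do_work', 42)\n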
 \"\"\"\n if not self.was_function_called(name):\n return False\n return any([e for e in self.chain if e.name == name and arg in e.args])\n\n def was_exact_function_called(self, name, *args, **kwargs):\n call = Call(name, *args, **kwargs)\n return any([e for e in self.chain if e == call])\n\n def all_calls_args(self) -> List[Tuple]:\n \"\"\"\n Returns all called function/method arguments\n :return: List[Tuple]\n \"\"\"\n return [e.args for e in self.chain]\n\n def all_calls_args_flatten(self) -> List[Any]:\n \"\"\"\n Returns flat list of all arguments of all called functions\n :return: List[Any]\n \"\"\"\n return [arg for call in self.chain for arg in call.args]\n\n\nclass TestDouble(Spy):\n \"\"\"\n The full test-double (a twin of the object). The main difference from Spy is behaviour: behaviour stays the\n same as that of the original object, but all calls are recorded and you can change the return value of the\n methods. This class is used to verify that the expected functions were called with the expected arguments.\n \"\"\"\n\n def __init__(self, obj: Any = None):\n super().__init__()\n if obj is not None:\n for name in dir(obj):\n attr = getattr(obj, name)\n if callable(attr):\n if name == '__dict__':\n continue\n if name != '__class__':\n wrapper = AttributeWrapper(name, self)\n wrapper.use_function(attr)\n setattr(self, name, wrapper)\n else:\n setattr(self, name, self.__class__)\n else:\n setattr(self, name, attr)\n wrapper = AttributeWrapper('len', self)\n setattr(self, 'len', wrapper)\n wrapper = AttributeWrapper('bool', self)\n setattr(self, 'bool', wrapper)\n wrapper = AttributeWrapper('iter', self)\n setattr(self, 'iter', wrapper)\n self.basic = obj\n self.class_ = obj.__class__\n\n def __str__(self):\n return f'Test Double of the \"{self.basic}\" {type(self.basic)}'\n\n def __len__(self):\n if self.len._return is not None or self.len._function:\n return self.len()\n self.len()\n return len(self.basic)\n\n def __bool__(self):\n if self.bool._return is not None or self.bool._function:\n return self.bool()\n self.bool()\n return bool(self.basic)\n\n def __iter__(self):\n if self.iter._return is not None or self.iter._function:\n return self.iter()\n self.iter()\n return iter(self.basic)\n","repo_name":"kotolex/checking","sub_path":"checking/classes/mocking/doubles.py","file_name":"doubles.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"}