.*?)(?<=\\\\n)(?P=fence)[ ]*$', re.MULTILINE | re.DOTALL)\nCODE_WRAP = '<pre><code%s>%s</code></pre>'
\nLANG_TAG = ' class=\"%s\"'\n\nclass FencedCodeExtension(markdown.Extension):\n\n def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n md.preprocessors.add('fenced_code_block', FencedBlockPreprocessor(md), '_begin')\n\n\nclass FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):\n\n def __init__(self, md):\n markdown.preprocessors.Preprocessor.__init__(self, md)\n self.checked_for_codehilite = False\n self.codehilite_conf = {}\n\n def run(self, lines):\n if not self.checked_for_codehilite:\n for ext in self.markdown.registeredExtensions:\n if isinstance(ext, CodeHiliteExtension):\n self.codehilite_conf = ext.config\n break\n\n self.checked_for_codehilite = True\n text = '\\n'.join(lines)\n while 1:\n m = FENCED_BLOCK_RE.search(text)\n if m:\n lang = ''\n if m.group('lang'):\n lang = LANG_TAG % m.group('lang')\n if self.codehilite_conf:\n highliter = CodeHilite(m.group('code'), linenos=self.codehilite_conf['force_linenos'][0], guess_lang=self.codehilite_conf['guess_lang'][0], css_class=self.codehilite_conf['css_class'][0], style=self.codehilite_conf['pygments_style'][0], lang=m.group('lang') or None, noclasses=self.codehilite_conf['noclasses'][0])\n code = highliter.hilite()\n else:\n code = CODE_WRAP % (lang, self._escape(m.group('code')))\n placeholder = self.markdown.htmlStash.store(code, safe=True)\n text = '%s\\n%s\\n%s' % (text[:m.start()], placeholder, text[m.end():])\n else:\n break\n\n return text.split('\\n')\n\n def _escape(self, txt):\n txt = txt.replace('&', '&amp;')\n txt = txt.replace('<', '&lt;')\n txt = txt.replace('>', '&gt;')\n txt = txt.replace('\"', '&quot;')\n return txt\n\n\ndef makeExtension(configs=None):\n return FencedCodeExtension(configs=configs)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()","sub_path":"client/markdown/extensions/fenced_code.py","file_name":"fenced_code.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
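The record above extracts fenced code blocks via FENCED_BLOCK_RE, whose opening half is truncated in this dump. As a rough illustration of how such a pattern captures the fence, an optional language tag, and the body, here is a self-contained sketch; the character classes and fence marker are assumptions, not the upstream regex:

```python
import re

# Simplified stand-in for the (truncated) FENCED_BLOCK_RE above.
FENCE_RE = re.compile(
    r'(?P<fence>^~{3,})[ ]*(?P<lang>[a-zA-Z0-9_+-]*)[ ]*\n'
    r'(?P<code>.*?)(?<=\n)(?P=fence)[ ]*$',
    re.MULTILINE | re.DOTALL)

text = "~~~python\nprint('hi')\n~~~"
m = FENCE_RE.search(text)
print(m.group('lang'), repr(m.group('code')))  # python "print('hi')\n"
```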
+{"seq_id":"611421655","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 5 11:43:41 2017\n\n@author: neha\nchanged augumentataion for next learning\n\"\"\"\n\nfrom keras.models import Model\nfrom keras.layers import Dense, GlobalAveragePooling2D, Dropout, Flatten, AveragePooling2D, Conv2D\nfrom keras.layers import Input\nfrom keras.optimizers import SGD\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, LearningRateScheduler, Callback, ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam, Adadelta\nfrom keras.layers.core import Flatten, Dense, Dropout, Activation\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras import regularizers\nfrom keras.initializers import RandomNormal\nfrom keras import backend as K\nfrom keras import layers\n\n#for large image size\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport numpy as np\n\n#np.random.seed(2017)\n\n# param setup\nimg_width = 200 #299 - Xception InceptionV3\nimg_height = 200 #224 - VGG19 VGG16 ResNet50\nimg_channel = 3\nnbr_train_samples = 1184\nnbr_validation_samples = 297\nnbr_epochs = 200\nbatch_size = 32\ntrain_step = 37\nval_step = 10\nn_classes = 3\n\n# traing the model for 80% 20% split\ntrain_data_dir = '../../data/train_split_clahe_256px_crop_auto_40_lcolor'\nval_data_dir = '../../data/val_split_clahe_256px_crop_auto_40_lcolor'\nbest_model_file = \"./ex1_model_clahe_lcolor_40wh_5.h5\"\n\nCtypes = ['Type_1', 'Type_2', 'Type_3']\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block):\n \"\"\"The identity block is the block that has no conv layer at shortcut.\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the filterss of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n # Returns\n Output tensor for the block.\n \"\"\"\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size,\n padding='same', name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n x = layers.add([x, input_tensor])\n x = Activation('relu')(x)\n return x\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the filterss of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n # Returns\n 
Output tensor for the block.\n Note that from stage 3, the first conv layer at main path is with strides=(2,2)\n And the shortcut should have strides=(2,2) as well\n \"\"\"\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), strides=strides,\n name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size, padding='same',\n name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n shortcut = Conv2D(filters3, (1, 1), strides=strides,\n name=conv_name_base + '1')(input_tensor)\n shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)\n\n x = layers.add([x, shortcut])\n x = Activation('relu')(x)\n return x\n\n#create the model\nimg_input = Input(shape=(img_width,img_height,3))\n\nif K.image_data_format() == 'channels_last':\n bn_axis = 3\nelse:\n bn_axis = 1\n\nx = ZeroPadding2D((3, 3))(img_input)\nx = Conv2D(8, (7, 7), strides=(2, 2), name='conv1')(x)\nx = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\nx = Activation('relu')(x)\nx = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\nx = conv_block(x, 3, [8, 8, 8], stage=2, block='a', strides=(1, 1))\nx = identity_block(x, 3, [8, 8, 8], stage=2, block='b')\nx = identity_block(x, 3, [8, 8, 8], stage=2, block='c')\n\nx = conv_block(x, 3, [8, 8, 8], stage=3, block='a')\nx = identity_block(x, 3, [8, 8, 8], stage=3, block='b')\nx = identity_block(x, 3, [8, 8, 8], stage=3, block='c')\nx = identity_block(x, 3, [8, 8, 8], stage=3, block='d')\n\nx = conv_block(x, 3, [8, 8, 8], stage=4, block='a')\nx = identity_block(x, 3, [8, 8, 8], stage=4, block='b')\nx = identity_block(x, 3, [8, 8, 8], stage=4, block='c')\nx = identity_block(x, 3, [8, 8, 8], stage=4, block='d')\nx = identity_block(x, 3, [8, 8, 8], stage=4, block='e')\nx = identity_block(x, 3, [8, 8, 8], stage=4, block='f')\n\nx = conv_block(x, 3, [12, 12, 12], stage=5, block='a')\nx = identity_block(x, 3, [12, 12, 12], stage=5, block='b')\nx = identity_block(x, 3, [12, 12, 12], stage=5, block='c')\n\nx = AveragePooling2D((7, 7), name='avg_pool')(x)\n#stage 6\n# dense layer\nx = Flatten()(x)\n#x = Dropout(0.1)(x)\n#, kernel_regularizer=regularizers.l2(0.0003)\nx = Dense(n_classes, activation='softmax')(x)\n\ninputs = img_input\n# Create model.\nmodel = Model(inputs, x, name='Cervix_pts_resnet_50')\n \n#get the model \n\n#optimizer = Adam(lr=0.00008, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\noptimizer = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics = ['accuracy', 'categorical_crossentropy'])\nmodel.summary()\n\n# autosave best Model\nbest_model = ModelCheckpoint(best_model_file, monitor='val_loss', verbose = 1, save_best_only = True)\nstop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=4, verbose=0, mode='auto')\nlrreduce = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=35, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)\n\n#preprocess image \ndef preprocess_input(x):\n x /= 255.\n x -= 0.5\n x *= 2.\n 
return x\n\n# train the model on the new data for a few epochs\n# this is the augmentation configuration we will use for training\ntrain_datagen = ImageDataGenerator(\n shear_range=0.02,\n rotation_range=360.,\n zoom_range=0.05,\n width_shift_range=0.08,\n height_shift_range=0.08,\n horizontal_flip=True,\n vertical_flip=True,\n preprocessing_function=preprocess_input)\n \n# this is the augmentation configuration we will use for validation: only rescaling\nval_datagen = ImageDataGenerator(\n shear_range=0.02,\n rotation_range=360.,\n zoom_range=0.05,\n width_shift_range=0.08,\n height_shift_range=0.08,\n horizontal_flip=True,\n vertical_flip=True,\n preprocessing_function=preprocess_input)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n color_mode = 'rgb',\n target_size = (img_width, img_height),\n batch_size = batch_size,\n shuffle = True,\n classes = Ctypes,\n class_mode = 'categorical'\n )\n\nvalidation_generator = val_datagen.flow_from_directory(\n val_data_dir,\n color_mode = 'rgb',\n target_size=(img_width, img_height),\n batch_size=batch_size,\n shuffle = True,\n classes = Ctypes,\n class_mode = 'categorical')\n\n#class_weight = {0 : 2., 1: 1., 2: 1.5}\n#class_weight = {0 : 3., 1: 1., 2: 1.9}\nclass_weight = {0 : 2.8, 1: 1., 2: 1.6} # applied smoothing .1\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch = train_step, \n epochs = nbr_epochs,\n validation_data = validation_generator,\n validation_steps = val_step,\n callbacks = [best_model, lrreduce],\n class_weight = class_weight,\n verbose = 1)\n \n# save the training history\nnp.savetxt(\"grey_200.csv\", history, delimiter=\",\")\n\n# list all data in history\nprint(history.history.keys())\n## summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'][0:])\nplt.plot(history.history['val_loss'][0:])\nplt.plot(history.history['categorical_crossentropy'][0:])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n\nval_loss = history.history['val_loss']\ntrain_loss = history.history['categorical_crossentropy']\n\nval_loss.extend(history.history['val_loss'])\ntrain_loss.extend(history.history['categorical_crossentropy'])\n\nplt.plot(train_loss[0:])\nplt.plot(val_loss[0:])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()","sub_path":"cevicalC/Models/10_final_version/ctype_res50_5.py","file_name":"ctype_res50_5.py","file_ext":"py","file_size_in_byte":10071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
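One small check for the training record above: its preprocess_input maps 8-bit pixel values into roughly [-1, 1] (Inception-style scaling). A standalone sketch of just that scaling, separate from the training script:

```python
import numpy as np

def preprocess_input(x):
    # same scaling as in the record above: [0, 255] -> [-1, 1]
    x = x.astype(np.float32)
    x /= 255.
    x -= 0.5
    x *= 2.
    return x

pixels = np.array([0, 127.5, 255])
print(preprocess_input(pixels))  # -> [-1.  0.  1.]
```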
+{"seq_id":"391840032","text":"#!/usr/bin/env python2.6\n\"\"\"\ntsvcat [files]\n\nConcatenates TSV-with-header files, aligning columns with same name. \nCan rename columns and match columns across files with different names.\n\nModified by Lee Lichtenstein on June 20, 2012 to enable additional functionality and enable integration into Firehose\n\"\"\"\n\nimport sys, itertools\nimport tsvutil\ntsvutil.fix_stdio()\nimport csv\nfrom OrderedSet import OrderedSet\n#import codecs; sys.stdout = codecs.open('/dev/stdout','w',encoding='utf8',buffering=0)\nimport codecs\n\ndef flatten(iter):\n return list(itertools.chain(*iter))\n \ndef stable_uniq(x):\n s = set(); y = []\n for i in x:\n if i in s: continue\n s.add(i)\n y.append(i)\n return y\n \ndef tsv_reader(f):\n return csv.DictReader(f, dialect=None, delimiter=\"\\t\", quoting=csv.QUOTE_NONE)\n\n\ndef tsvcat(items, outFilePointer):\n alias_specs = [s for s in items if '=' in s]\n drop_specs = [s for s in items if s.startswith('-')]\n filenames = [s for s in items if s not in alias_specs and s not in drop_specs]\n #files = [open(f, 'r') for f in filenames]\n # codecs.open(filename, mode[, encoding\n\n files = [codecs.open(f, 'r', encoding=\"iso8859-1\") for f in filenames]\n if not filenames:\n files = [sys.stdin]\n # LTL: Did not handle prepended comment lines.\n # file_cols = [f.readline()[:-1].split(\"\\t\") for f in files]\n # Unique comment lines are prepended onto the final result.\n file_cols = []\n commentLines = OrderedSet()\n for f in files:\n isHeaderFound = False\n line = f.readline()\n while not isHeaderFound:\n if not line.startswith('#'):\n isHeaderFound = True\n file_cols.append(line[:-1].split(\"\\t\"))\n else:\n commentLines.add(line.rstrip())\n line = f.readline()\n all_cols = stable_uniq(flatten(file_cols))\n \n aliases = {}\n for alias_spec in alias_specs:\n left, right = alias_spec.split('=')\n assert left != right\n assert left and right\n assert left in all_cols\n aliases[right] = left\n if right not in all_cols:\n all_cols[ all_cols.index(left) ] = right\n else:\n all_cols.remove(left)\n \n for drop_spec in drop_specs:\n col = drop_spec[1:]\n #print col\n #print all_cols\n assert col in all_cols\n all_cols.remove(col)\n \n if len(commentLines) > 0:\n outFilePointer.write(\"\\n\".join(commentLines)+\"\\n\")\n\n outFilePointer.write(\"\\t\".join(all_cols)+\"\\n\")\n \n for i, f in enumerate(files):\n cols = file_cols[i]\n for line in f:\n #line = unicode(line,'utf8')\n parts = line[:-1].split(\"\\t\")\n hash = {}\n for j in range(len(cols)):\n hash[cols[j]] = parts[j]\n out = []\n for col in all_cols:\n if col in hash: \n out.append(hash[col])\n elif col in aliases:\n out.append(hash[aliases[col]])\n else:\n out.append('')\n outFilePointer.write(u\"\\t\".join(out) + '\\n')\n\n\nif __name__ == '__main__':\n items = sys.argv[1:]\n tsvcat(items,sys.stdout)\n\n","sub_path":"tasks/mutation_validator/src/fh_tsvCatFiles/tsvcat.py","file_name":"tsvcat.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
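A hedged usage sketch for the tsvcat record above: file names, rename specs of the form 'existingcol=newcol', and drop specs of the form '-col' all go into one flat list. The file names below are hypothetical, and the sketch assumes tsvcat and its helpers (tsvutil, OrderedSet) are importable:

```python
import sys

# a.tsv has columns [id, name]; b.tsv has columns [id, label] (hypothetical files).
# 'label=name' folds b.tsv's "label" column into the output "name" column;
# adding '-id' to the list would drop the id column instead.
items = ["a.tsv", "b.tsv", "label=name"]
tsvcat(items, sys.stdout)
```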
+{"seq_id":"295929408","text":"from anytree import NodeMixin, RenderTree\r\n\r\nclass MyClass(NodeMixin):\r\n def __init__(self, name, start, end, parent = None):\r\n super(MyClass, self).__init__()\r\n self.name = name\r\n self.start = start\r\n self.end = end\r\n self.parent = parent\r\n\r\n def __repr__(self):\r\n return f\"({self.end-self.start:.2f}) - {self.name}\"\r\ndef main():\r\n a = MyClass(start=1, end=2, name='one')\r\n b = MyClass(start = 3, end = 4, name='two', parent=a)\r\n print(RenderTree(a))\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"tests/test_anytree.py","file_name":"test_anytree.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"236368365","text":"import pygame\nfrom pygame.locals import *\n\nimport levels\n\npygame.init()\nWIDTH = 1000\nHEIGHT = 1000\n\nclass GameDisplay:\n\n def __init__(self):\n self.surface = pygame.display.set_mode((WIDTH, HEIGHT))\n\n @property\n def size(self):\n return self.surface.get_width()\n\n def absolute(self, *cos):\n if len(cos) == 1:\n return int(cos[0]*self.size)\n return tuple(int(c*self.size) for c in cos)\n\n def draw_object(self, obj):\n color = pygame.Color(255, 255, 0)\n rect = self.absolute(*obj.top_left, *obj.box)\n pygame.draw.rect(self.surface, color, rect)\n\n def draw_game(self, game):\n self.surface.fill(pygame.Color(0, 0, 0))\n for player in game.players:\n self.draw_object(player)\n\n\nimport math, time\ndef game_loop():\n i = 0\n game = levels.level_1()\n display = GameDisplay()\n while True:\n pygame.display.update()\n i += 0.01\n time.sleep(0.01)\n game.tick()\n display.draw_game(game)\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_a:\n game.turn_cw()\n if event.key == pygame.K_s:\n game.turn_ccw()\n if event.type == QUIT:\n pygame.quit()\n break\n\ngame_loop()\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"557794776","text":"# coding: utf-8\n# 例外メッセージを指定しよう\nimport sys\nenemies = [\"スライム\", \"ドラゴン\", \"魔王\"]\n\ntry:\n number = 0\n print(\"勇者は敵に遭遇した\")\n print(\"勇者は\" + enemies[2 / number] + \"と戦った\")\nexcept ZeroDivisionError as e:\n sys.stderr.write(\"その敵は表示できません\")\nfinally:\n print(\"勇者は勝利した\")\n","sub_path":"paiza/paiza_10/paiza_10_003_001.py","file_name":"paiza_10_003_001.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"373922649","text":"import os\n\nimport pytest\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.utils import timezone\nfrom minio import Minio\nfrom simple_history.models import registered_models\n\nfrom caluma.core.management.commands import cleanup_history\n\nfrom ..models import Form\n\n\ndef test_create_bucket_command(mocker):\n mocker.patch.object(Minio, \"make_bucket\")\n call_command(\"create_bucket\", stdout=open(os.devnull, \"w\"))\n Minio.make_bucket.assert_called_once_with(settings.MINIO_STORAGE_MEDIA_BUCKET_NAME)\n\n\n@pytest.mark.parametrize(\"force\", [True, False])\n@pytest.mark.parametrize(\"keep,kept\", [(\"1 year\", 2), (\"1 day\", 1), (None, 2)])\ndef test_cleanup_history_command(db, force, keep, kept):\n # we need to override the registered models dict in order to get rid of the\n # fake models created in core tests\n cleanup_history.registered_models = {\n k: v for k, v in registered_models.items() if not k.startswith(\"core_\")\n }\n kwargs = {}\n if force:\n kwargs[\"force\"] = force\n else:\n kept = 3\n if keep:\n kwargs[\"keep\"] = keep\n\n Form.objects.create(slug=\"form 1\")\n\n f2 = Form.objects.create(slug=\"form 2\")\n f2_hist = f2.history.first()\n f2_hist.history_date = f2_hist.history_date - timezone.timedelta(days=2)\n f2_hist.save()\n\n f3 = Form.objects.create(slug=\"form 3\")\n f3_hist = f3.history.first()\n f3_hist.history_date = f3_hist.history_date - timezone.timedelta(days=730)\n f3_hist.save()\n\n call_command(\"cleanup_history\", **kwargs, stdout=open(os.devnull, \"w\"))\n\n assert Form.history.count() == kept\n","sub_path":"caluma/form/tests/test_command.py","file_name":"test_command.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"356686397","text":"'''\nCreated on Jul 10, 2018\n\n@author: daniel\n'''\n\n#from multiprocessing import Process, Manager\n#from keras.utils import np_utils\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom Utils.TimerModule import TimerModule\nfrom Exploratory_Stuff.BlockDataHandler import BlockDataHandler\n#from keras.callbacks import CSVLogger,ReduceLROnPlateau\nfrom keras.layers import Dense, BatchNormalization, Conv1D, Dropout\nfrom keras.models import Sequential\nfrom keras.callbacks import CSVLogger\n#from keras.optimizers import SGD\n#import os\nfrom Utils.EmailHandler import EmailHandler\nfrom Utils.HardwareHandler import HardwareHandler\nfrom datetime import datetime\n#from keras.utils.training_utils import multi_gpu_model\n#import keras.backend as K\n#import tensorflow as tf\nimport nibabel as nib\nimport numpy as np\nfrom NMFComputer.BasicNMFComputer import BasicNMFComputer\nimport matplotlib.pyplot as plt\nfrom keras.layers.advanced_activations import PReLU\nfrom Exploratory_Stuff.ExtendedBlockDataHandler import ExtendedBlockDataHandler\n \nfrom NMFComputer.SKNMFComputer import SKNMFComputer\nimport sys\nimport os\nimport cv2\nDATA_DIR = os.path.abspath(\"../\")\nsys.path.append(DATA_DIR)\nfrom numpy import genfromtxt\n\ndef main():\n hardwareHandler = HardwareHandler()\n emailHandler = EmailHandler()\n timer = TimerModule()\n now = datetime.now()\n date_string = now.strftime('%Y-%m-%d_%H_%M')\n \n print('Loading the data! This could take some time...')\n num_training_patients = 12;\n num_validation_patients = 1;\n nmfComp = BasicNMFComputer(block_dim=8, num_components=8)\n dataHandler = ExtendedBlockDataHandler(\"Data/BRATS_2018/HGG\", nmfComp, num_patients = num_training_patients, modes = [\"flair\", \"t1ce_bf_corrected\", \"t1_bf_corrected\", \"t2_bf_corrected\"])\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_train = dataHandler.X\n labels = dataHandler.labels\n dataHandler.clear()\n \n dataHandler.setLoadingMode(\"validation\")\n\n dataHandler.setDataDirectory(\"Data/BRATS_2018/HGG_Validation\")\n dataHandler.setNumPatients(num_validation_patients)\n dataHandler.loadData()\n dataHandler.preprocessForNetwork()\n x_val = dataHandler.X\n val_labels = dataHandler.labels\n dataHandler.clear()\n \n print('Building the model now!')\n model = Sequential()\n model.add(Dense(2048, input_dim=len(dataHandler.modes)*dataHandler.nmfComp.num_components))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dropout(0.5))\n \n model.add(Dense(1024))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dropout(0.5))\n \n model.add(Dense(512))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(256))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(128))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(64))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dropout(0.5))\n \n model.add(Dense(32))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(16))\n model.add(BatchNormalization())\n model.add(PReLU())\n model.add(Dense(labels.shape[1], activation='softmax'))\n \n \n# Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n \n \n model_directory = \"/home/daniel/eclipse-workspace/MRIMath/Models/extended_blocknet_\" + 
date_string\n if not os.path.exists(model_directory):\n os.makedirs(model_directory)\n log_info_filename = 'model_loss_log.csv'\n csv_logger = CSVLogger(model_directory + '/' + log_info_filename, append=True, separator=',')\n \n model_info_filename = 'model_info.txt'\n model_info_file = open(model_directory + '/' + model_info_filename, \"w\") \n model_info_file.write('Number of Patients (training): ' + str(num_training_patients) + '\\n')\n model_info_file.write('Number of Patients (validation): ' + str(num_validation_patients) + '\\n')\n\n model_info_file.write('Block Dimensions: ' + str(dataHandler.nmfComp.block_dim) + '\\n')\n model_info_file.write('Number of Components (k): ' + str(dataHandler.nmfComp.num_components) + '\\n')\n model_info_file.write('\\n\\n')\n model.summary(print_fn=lambda x: model_info_file.write(x + '\\n'))\n model_info_file.close();\n\n print('Training network!')\n model.fit(x_train,\n labels,\n epochs=500,\n validation_data=(x_val, val_labels),\n callbacks = [csv_logger],\n batch_size=x_train.shape[0])\n \n \n model.save(model_directory + '/model.h5')\n test_data_dir = \"Data/BRATS_2018/HGG\"\n image = None\n seg_image = None\n m = nmfComp.block_dim\n \n for subdir in os.listdir(test_data_dir):\n seg_est = np.zeros(shape=(dataHandler.W, dataHandler.H))\n data_dirs = os.listdir(test_data_dir + \"/\" + subdir)\n seg_image = nib.load(test_data_dir + \"/\" + subdir + \"/\" + [s for s in data_dirs if \"seg\" in s][0]).get_data()\n inds = [i for i in list(range(155)) if np.count_nonzero(seg_image[:,:,i]) > 0]\n for k in inds:\n X_test = []\n\n foo = []\n for path in data_dirs:\n for mode in dataHandler.modes:\n if mode in path:\n image = nib.load(test_data_dir + \"/\" + subdir + \"/\" + path).get_data()\n foo.extend(dataHandler.processData2(image[:,:,k]))\n \n chunks = [foo[x:x+int(len(foo)/len(dataHandler.modes))] for x in range(0, len(foo), int(len(foo)/len(dataHandler.modes)))]\n for i in range((int(len(foo)/len(dataHandler.modes)))):\n X_test.append(np.concatenate((chunks[0][i], chunks[1][i], chunks[2][i], chunks[3][i]), axis=None))\n\n print(len(X_test))\n est_labels = [model.predict(x.reshape((1, -1))) for x in X_test]\n #labels = model.predict(H.T)\n ind = 0\n for i in range(0, dataHandler.W, m):\n for j in range(0, dataHandler.H, m):\n seg_est[i:i+m, j:j+m] = np.full((m, m), np.argmax(est_labels[ind]))\n ind = ind+1\n fig = plt.figure()\n plt.gray();\n \n a=fig.add_subplot(1,2,1)\n plt.imshow(seg_image[:,:,k])\n plt.axis('off')\n plt.title('GT Segment')\n \n a=fig.add_subplot(1,2,2)\n plt.imshow(seg_est)\n plt.axis('off')\n plt.title('Estimate Segment')\n plt.show()\n\n \n# evaluate the model\n\nif __name__ == \"__main__\":\n main() \n","sub_path":"TestingExtendedBlockArchitecture.py","file_name":"TestingExtendedBlockArchitecture.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"468662641","text":"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Gaussian process layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensor2tensor.layers import gaussian_process\nfrom tensor2tensor.utils import test_utils\n\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\n\nclass GaussianProcessTest(tf.test.TestCase):\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGaussianProcessPosterior(self):\n train_batch_size = 3\n test_batch_size = 2\n input_dim = 4\n output_dim = 5\n features = tf.to_float(np.random.rand(train_batch_size, input_dim))\n labels = tf.to_float(np.random.rand(train_batch_size, output_dim))\n layer = gaussian_process.GaussianProcess(output_dim,\n conditional_inputs=features,\n conditional_outputs=labels)\n test_features = tf.to_float(np.random.rand(test_batch_size, input_dim))\n test_labels = tf.to_float(np.random.rand(test_batch_size, output_dim))\n test_outputs = layer(test_features)\n test_nats = -test_outputs.distribution.log_prob(test_labels)\n self.evaluate(tf.global_variables_initializer())\n test_nats_val, outputs_val = self.evaluate([test_nats, test_outputs])\n self.assertEqual(test_nats_val.shape, ())\n self.assertGreaterEqual(test_nats_val, 0.)\n self.assertEqual(outputs_val.shape, (test_batch_size, output_dim))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGaussianProcessPrior(self):\n batch_size = 3\n input_dim = 4\n output_dim = 5\n features = tf.to_float(np.random.rand(batch_size, input_dim))\n labels = tf.to_float(np.random.rand(batch_size, output_dim))\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(2, activation=None),\n gaussian_process.GaussianProcess(output_dim),\n ])\n outputs = model(features)\n log_prob = outputs.distribution.log_prob(labels)\n self.evaluate(tf.global_variables_initializer())\n log_prob_val, outputs_val = self.evaluate([log_prob, outputs])\n self.assertEqual(log_prob_val.shape, ())\n self.assertLessEqual(log_prob_val, 0.)\n self.assertEqual(outputs_val.shape, (batch_size, output_dim))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSparseGaussianProcess(self):\n dataset_size = 10\n batch_size = 3\n input_dim = 4\n output_dim = 5\n features = tf.to_float(np.random.rand(batch_size, input_dim))\n labels = tf.to_float(np.random.rand(batch_size, output_dim))\n model = gaussian_process.SparseGaussianProcess(output_dim, num_inducing=2)\n with tf.GradientTape() as tape:\n predictions = model(features)\n nll = -tf.reduce_mean(predictions.distribution.log_prob(labels))\n kl = sum(model.losses) / dataset_size\n loss = nll + kl\n\n self.evaluate(tf.global_variables_initializer())\n grads = tape.gradient(nll, model.variables)\n for grad in grads:\n self.assertIsNotNone(grad)\n\n loss_val, predictions_val = self.evaluate([loss, predictions])\n self.assertEqual(loss_val.shape, ())\n 
self.assertGreaterEqual(loss_val, 0.)\n self.assertEqual(predictions_val.shape, (batch_size, output_dim))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testBayesianLinearModel(self):\n \"\"\"Tests that model makes reasonable predictions.\"\"\"\n np.random.seed(42)\n train_batch_size = 5\n test_batch_size = 2\n num_features = 3\n noise_variance = 0.01\n coeffs = tf.range(num_features, dtype=tf.float32)\n features = tf.to_float(np.random.randn(train_batch_size, num_features))\n labels = (tf.tensordot(features, coeffs, [[-1], [0]])\n + noise_variance * tf.to_float(np.random.randn(train_batch_size)))\n\n model = gaussian_process.BayesianLinearModel(noise_variance=noise_variance)\n model.fit(features, labels)\n\n test_features = tf.to_float(np.random.randn(test_batch_size, num_features))\n test_labels = tf.tensordot(test_features, coeffs, [[-1], [0]])\n outputs = model(test_features)\n test_predictions = outputs.distribution.mean()\n test_predictions_variance = outputs.distribution.variance()\n\n [\n test_labels_val, test_predictions_val, test_predictions_variance_val,\n ] = self.evaluate(\n [test_labels, test_predictions, test_predictions_variance])\n self.assertEqual(test_predictions_val.shape, (test_batch_size,))\n self.assertEqual(test_predictions_variance_val.shape, (test_batch_size,))\n self.assertAllClose(test_predictions_val, test_labels_val, atol=0.1)\n self.assertAllLessEqual(test_predictions_variance_val, noise_variance)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","sub_path":"tensor2tensor/layers/gaussian_process_test.py","file_name":"gaussian_process_test.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
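The BayesianLinearModel exercised in the last test has a closed-form conjugate posterior. A standalone NumPy sketch of that math, assuming a unit-variance Gaussian prior on the coefficients (the prior the layer actually uses may differ):

```python
import numpy as np

def bayes_linear_posterior(X, y, noise_variance, prior_variance=1.0):
    """Posterior over w for y = Xw + eps, eps ~ N(0, noise_variance)."""
    d = X.shape[1]
    precision = X.T @ X / noise_variance + np.eye(d) / prior_variance
    covariance = np.linalg.inv(precision)
    mean = covariance @ X.T @ y / noise_variance
    return mean, covariance

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
w_true = np.array([0., 1., 2.])
y = X @ w_true + 0.1 * rng.normal(size=50)
mean, cov = bayes_linear_posterior(X, y, noise_variance=0.01)
print(np.round(mean, 2))  # close to [0. 1. 2.]
```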
+{"seq_id":"37094500","text":"import numpy as np\nimport pandas as pd\n\nfrom os.path import join\nfrom time import time\nfrom sklearn.model_selection import train_test_split, KFold\nfrom sklearn.metrics import roc_auc_score\n\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, Dropout, Conv1D, Embedding, SpatialDropout1D, concatenate\nfrom keras.layers import CuDNNGRU, CuDNNLSTM, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D\nfrom keras.callbacks import Callback, EarlyStopping, ModelCheckpoint\nfrom keras import optimizers\nfrom keras.layers import Lambda\n\nclass RocAucEvaluation( Callback ):\n def __init__( self, validation_data = (), interval = 1 ):\n super( Callback, self ).__init__()\n\n self.interval = interval\n self.X_val, self.y_val = validation_data\n self.max_score = 0\n self.not_better_count = 0\n\n def on_epoch_end( self, epoch, logs = {} ):\n if epoch % self.interval == 0:\n y_pred = self.model.predict( self.X_val, verbose = 1 )\n score = roc_auc_score( self.y_val, y_pred )\n print( \"\\n ROC-AUC - epoch: %d - score: %.6f \\n\" % ( epoch + 1, score ) )\n if ( score > self.max_score ):\n print( \"*** New High Score (previous: %.6f) \\n\" % self.max_score )\n self.model.save_weights(\"best_weights.h5\")\n self.max_score = score\n self.not_better_count = 0\n else:\n self.not_better_count += 1\n if self.not_better_count > 3: # patience\n print( \"Epoch %05d: early stopping, high score = %.6f\" % ( epoch, self.max_score ) )\n self.model.stop_training = True\n\ndef get_model( feature_size, maxlen, embedding_matrix ):\n\n features_input = Input( shape = ( feature_size, ) )\n tokens_input = Input( shape = ( maxlen, ) )\n\n x = Embedding( embedding_matrix.shape[0], embedding_matrix.shape[1],\n weights = [embedding_matrix], trainable = False )( tokens_input )\n x = SpatialDropout1D( 0.5 )( x )\n x = Bidirectional( CuDNNLSTM( 40, return_sequences = True ) )( x )\n x, x_h, x_c = Bidirectional( CuDNNGRU( 40, return_sequences = True, return_state = True ) )( x )\n avg_pool = GlobalAveragePooling1D()( x )\n max_pool = GlobalMaxPooling1D()( x )\n x = concatenate( [avg_pool, x_h, max_pool, features_input] )\n \n output = Dense( 6, activation = \"sigmoid\" )( x )\n\n model = Model( inputs = [tokens_input, features_input], outputs = output )\n adam = optimizers.adam( clipvalue = 1.0 )\n model.compile( loss='binary_crossentropy',\n optimizer = adam,\n metrics=['accuracy'] )\n return model\n\ndef get_score( pred_file, true_file ):\n pred = pd.read_csv( pred_file ).values[:,1:].astype( np.float64 )\n true = pd.read_csv( true_file ).values[:,1:].astype( np.int8 )\n scored_index = true[:,0] != -1\n pred_scored = pred[scored_index,:]\n true_scored = true[scored_index,:]\n score = roc_auc_score( true_scored, pred_scored )\n return score\n\nif __name__ == \"__main__\":\n exp_dir = \"/home/lizx/nlp_data/toxic/exp\"\n \n begin = time()\n X_train = np.load( join( exp_dir, \"X_train_300.npy\" ) )\n y_train = np.load( join( exp_dir, \"y_train.npy\" ) )\n X_test = np.load( join( exp_dir, \"X_test_300.npy\" ) )\n features = np.load( join( exp_dir, \"features.npy\" ) )\n test_features = np.load( join( exp_dir, \"test_features.npy\" ) )\n embedding_matrix = np.load( join( exp_dir, \"embedding_matrix.npy\" ) )\n consume = ( time() - begin ) / 60\n print( \"Loading data consumes %.2f min\" % consume )\n \n feature_size = features.shape[1]\n maxlen = 300\n\n predict = np.zeros( [X_test.shape[0], 6] )\n num_folds = 10\n kf = KFold( n_splits = num_folds, shuffle = True, random_state 
= 239 )\n for i, ( train_index, valid_index ) in enumerate( kf.split( X_train ) ):\n begin = time()\n kf_y_train, kf_y_valid = y_train[train_index], y_train[valid_index]\n kf_X_train, kf_X_valid = X_train[train_index], X_train[valid_index]\n kf_X_train_features, kf_X_valid_features = features[train_index], features[valid_index]\n\n model = get_model( feature_size, maxlen, embedding_matrix )\n ra_val = RocAucEvaluation( validation_data = ( [kf_X_valid, kf_X_valid_features], kf_y_valid ), interval = 1 )\n model.fit( [kf_X_train, kf_X_train_features], kf_y_train, \n batch_size = 32, epochs = 10, verbose = 1, callbacks = [ra_val] )\n \n model.load_weights( \"best_weights.h5\" )\n predict += model.predict( [X_test, test_features], batch_size = 32, verbose = 1 )\n consume = ( time() - begin ) / 60\n print( \"One fold consumes %.2f min\" % consume )\n\n sample_submission = pd.read_csv( join( exp_dir, \"sample_submission.csv\" ) )\n class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n sample_submission[class_names] = predict / num_folds\n pred_file = join( exp_dir, \"submission.csv\" )\n sample_submission.to_csv( pred_file, index = False )\n\n true_file = join( exp_dir, \"test_labels.csv\" )\n score = get_score( pred_file, true_file )\n print( \"AUC: %f\" % score )\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"train_and_predict.py","file_name":"train_and_predict.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"13116020","text":"import pandas as pd\nimport numpy as np\nimport csv\nimport multiprocessing as mp\n\nclass_mean = pd.read_csv(\"class_mean.csv\").values\nclass_mean[:, 1] = list(map(lambda x: np.fromstring(x[1:-1], dtype='float', sep=' '), class_mean[:, 1]))\n\nwith open(\"test_set_features.csv\", \"r\") as f:\n test_features = f.readlines()\n\ncores = 6\np = int(len(test_features)/cores)\n\nclf = [[\"id\",\"landmarks\"]]\n\ndef find(fr):\n\n counter = fr\n\n for line in test_features[fr:fr+p]:\n\n if counter % 1000 == 0:\n\n print(\"Processed elements: \", counter)\n\n try:\n test_id, test_feature = line[:16], np.fromstring(line[17:-2], dtype='float', sep=\", \")\n\n class_dist = list(map(lambda x: np.linalg.norm(x - test_feature), class_mean[:,1]))\n\n clf_class = class_mean[np.argmin(class_dist), 0]\n\n clf.append([test_id, str(clf_class)])\n\n counter += 1\n\n except:\n continue\n\n\npool = mp.Pool(processes=cores)\nresults = pool.map(find, range(0, len(test_features), p))\npool.close()\npool.join()\n\nwith open(\"test_predictions.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(clf)\n","sub_path":"mp_megaclassifier.py","file_name":"mp_megaclassifier.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
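One caveat on the record above: `find` appends to the module-level `clf` list, but each `multiprocessing.Pool` worker runs in its own process, so those appends never reach the parent and (at least with the default fork start method) the written CSV contains only the header row. A minimal sketch of the usual fix — return rows from the worker and collect them from `pool.map`; the data here is made up:

```python
import multiprocessing as mp

def classify_chunk(lines):
    # build and return this worker's rows; mutating a global list here
    # would be lost, because each Pool worker is a separate process
    return [[line.split()[0], "0"] for line in lines]  # placeholder "class 0"

if __name__ == "__main__":
    lines = ["id_%04d feat" % i for i in range(10)]  # stand-in for test_set_features.csv
    chunk_size = 4
    chunks = [lines[i:i + chunk_size] for i in range(0, len(lines), chunk_size)]
    with mp.Pool(processes=2) as pool:
        per_chunk = pool.map(classify_chunk, chunks)
    table = [["id", "landmarks"]] + [row for rows in per_chunk for row in rows]
    print(len(table))  # 11: header plus one row per input line
```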
+{"seq_id":"542069304","text":"# Time Complexity : O(n)\n# Space Complexity : O(1)\n\nfrom typing import List\n\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n if len(nums) == 0:\n return 0\n\n # maintain two running values:\n # one for the best total if the current house is chosen, one if it is not chosen\n not_chosen = 0\n chosen = nums[0]\n\n # iterate over the remaining houses and update the best amount when each house is chosen and when it is not\n for n in range(1, len(nums)):\n temp = not_chosen\n # if this house is not chosen, the best amount is the max of choosing or skipping the previous house\n not_chosen = max(chosen, not_chosen)\n # if this house is chosen, the best amount is the total up to the house before the previous one + the current amount\n chosen = temp + nums[n]\n return max(chosen, not_chosen)","sub_path":"Problem 2.py","file_name":"Problem 2.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
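A quick worked check of the rolling two-variable DP above (assumes the Solution class from this record is in scope): for [2, 7, 9, 3, 1] the best plan robs the houses worth 2, 9 and 1.

```python
print(Solution().rob([2, 7, 9, 3, 1]))  # 12 = 2 + 9 + 1, never robbing adjacent houses
```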
+{"seq_id":"343388172","text":"# change raw data to arrays\n\ndef inOut(data,name):\n\tarrayData = []\n\tarrayDataMem = {}\n\tarrayDataMemList = [] \n\tarrayDataDic = {}\n\tfor i , val in enumerate(data[str(name)+'name']):\n\t\tarrayDataDic = {}\n\t\tarrayDataMem = {}\n\t\tarrayDataMemList = []\n\t\tarrayDataDic['name'] = val\n\t\tarrayDataDic['range'] = data[str(name)+'range'][i]\n\t\tmemnumber = data[str(name)+'mems'][i]\n\t\tfor x in range(1,int(memnumber)+1):\n\t\t\tarrayDataMem['name'] = data['mem'+str(name)+'name[' + str(val) + '][' + str(x) + ']'][0] \n\t\t\tarrayDataMem['type'] = data['mem'+str(name)+'type[' + str(val) + '][' + str(x) + ']'][0]\n\t\t\tarrayDataMem['amount'] = data['mem'+str(name)+'amount[' + str(val) + '][' + str(x) + ']'][0]\n\t\t\tarrayDataMemList.append(arrayDataMem)\n\t\t\tarrayDataMem = {} \n\t\tarrayDataDic['memship'] = arrayDataMemList\n\t\tarrayData.append(arrayDataDic)\n\treturn arrayData\n\ndef rule(data):\n\trulenumsArray = []\n\trulenums = int(data['rulenums'][0])\n\tfor i in range(1,rulenums+1):\n\t\trulenumsArray.append(data['rule'+str(i)][0])\n\treturn rulenumsArray\n","sub_path":"web/fuzzylib/fuzzyXml/arrayior.py","file_name":"arrayior.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"566331361","text":"from vizdoom import *\nfrom gym.core import Env\nimport math\nimport time\nfrom gym.spaces.discrete import Discrete\nfrom gym.spaces.box import Box\nfrom util import *\nimport sys\nimport cv2\nimport tensorflow as tf\nfrom baselines.common import set_global_seeds\n\ndef get_observation(s, real_frame = False, get_rid_of_value=[], resolution = 84):\n #returns: 84 x 84 x 1\n if not real_frame:\n if s is None:\n return np.zeros([resolution, resolution, 3], dtype=np.uint8)\n else:\n texture = depth2texture(s, get_rid_of_value)\n return np.expand_dims(cv2.resize(texture, (resolution, resolution)) * norm_factor, -1)\n else:\n if s is None:\n return np.zeros([resolution, resolution, 3], dtype=np.uint8)\n else:\n screen = s.screen_buffer\n screen = screen.transpose((1,2,0))\n return cv2.resize(screen, (resolution, resolution))\n\nclass NavigationEnv(Env):\n def __init__(self):\n self.game = DoomGame()\n self.game.add_available_game_variable(GameVariable.POSITION_X)\n self.game.add_available_game_variable(GameVariable.POSITION_Y)\n self.game.add_available_game_variable(GameVariable.POSITION_Z)\n self.game.set_depth_buffer_enabled(True)\n self.observation_space = Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)\n self.action_space = Discrete(3)\n self.available_actions = np.eye(4).tolist()\n\n #train configuration\n\n self.seed_min = 0\n self.seed_max = 12\n self.wait_time = 0\n self.num_envs = 16\n\n #test configuration\n '''\n self.seed_min = 10009\n self.seed_max = 10010\n self.wait_time = 0.15\n '''\n\n def seed(self, seed=None):\n return [seed]\n\n def reset(self):\n self.game.close()\n self.seed = np.random.randint(self.seed_min, self.seed_max)\n self.game.load_config('/home/robot/Desktop/PVZ/finddoor.cfg')\n self.game.set_doom_scenario_path('/home/robot/Desktop/PVZ/navigation/navigation/navigation_{}.wad'.format(self.seed))\n # print(\"initing\")\n # print(self.seed)\n # set_global_seeds(self.seed)\n self.game.init()\n # print(\"inited\")\n # sys.stdout.flush()\n\n if self.seed_min < 100:\n #avoid door\n for _ in range(4):\n self.game.make_action([0,0,1,0], 4)\n self.game.make_action([0,0,0,1])\n for _ in range(8):\n self.game.make_action([0,0,1,0])\n\n self.start_x = self.game.get_game_variable(GameVariable.POSITION_X)\n self.start_y = self.game.get_game_variable(GameVariable.POSITION_Y)\n # print(\"reset path history\")\n self._reset_path_history()\n # print(\"reseted path history\")\n return get_observation(self.game.get_state(), real_frame=True)\n\n def step(self, action):\n if self.wait_time > 0:\n time.sleep(self.wait_time)\n\n old_x = self.game.get_game_variable(GameVariable.POSITION_X)\n old_y = self.game.get_game_variable(GameVariable.POSITION_Y)\n old_z = self.game.get_game_variable(GameVariable.POSITION_Z)\n self.game.make_action(self.available_actions[action], 4)\n new_x = self.game.get_game_variable(GameVariable.POSITION_X)\n new_y = self.game.get_game_variable(GameVariable.POSITION_Y)\n new_z = self.game.get_game_variable(GameVariable.POSITION_Z)\n\n if self.game.is_player_dead() or self.game.is_episode_finished():\n return get_observation(None), 0, True, None\n else:\n\n depth_buffer = self.game.get_state().depth_buffer.astype(np.int32)\n print(np.array(depth_buffer).shape)\n if np.sum(np.max(depth_buffer, axis=0)) < 1000:\n depth_rew = -1\n else:\n depth_rew = 0\n\n explore_rew = self._register_visit()\n height_rew = new_z - old_z\n speed_rew = math.sqrt((new_x - old_x)**2 + (new_y - old_y)**2) / 33\n #original:0.05,0,1,0.05\n 
#print(explore_rew, speed_rew, depth_rew)\n rew = 0.25 * explore_rew + 1.0 * speed_rew + 0.05 * height_rew + 0.05*depth_rew\n rew = rew * 1000\n return get_observation(self.game.get_state(), real_frame=True), rew, False, None\n\n def _reset_path_history(self):\n self.path_history = np.ones([200,200], dtype=np.float32)\n\n def _register_visit(self):\n self.path_history = np.maximum(self.path_history * 0.98,1)\n pos_x = self.game.get_game_variable(GameVariable.POSITION_X)\n pos_y = self.game.get_game_variable(GameVariable.POSITION_Y)\n\n x_block = min(int(math.floor(pos_x)) // 50, 199)\n y_block = min(int(math.floor(pos_y)) // 50, 199)\n if self.path_history[x_block, y_block] < 10:\n self.path_history[x_block, y_block] += 1\n return 1 / float(self.path_history[x_block, y_block])\n\n","sub_path":"navigation_env.py","file_name":"navigation_env.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"369632785","text":"\"\"\"\n Given an N-ary tree, return the level order traversal of its nodes' values (i.e., traverse from left to right, level by level).\n\n Return its level order traversal:\n [\n [1],\n [3,2,4],\n [5,6]\n ]\n \n Notes:\n The depth of the tree will not exceed 1000.\n The total number of nodes will not exceed 5000.\n\"\"\"\n\nfrom typing import List\n\n\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\n\nclass Solution:\n def levelOrder(self, root: Node) -> List[List[int]]:\n return self.use_queue(root)\n\n @classmethod\n def recursive(cls, root: Node) -> List[List[int]]:\n \"\"\"\n Recursion.\n Time complexity O(n)\n Space complexity O(n)\n \"\"\"\n\n def hand(node: Node, level) -> None:\n if len(res) == level:\n res.append([])\n res[level].append(node.val)\n\n for child in node.children:\n hand(child, level + 1)\n\n res = []\n if root:\n hand(root, 0)\n\n return res\n\n @classmethod\n def use_queue(cls, root: Node) -> List[List[int]]:\n \"\"\"\n Use a queue to hold the nodes of the current level.\n Time complexity is O(nk), where k is the largest number of nodes on one level, since each extend costs O(k).\n Space complexity is O(n)\n \"\"\"\n res = []\n if root:\n queue = [root]\n while queue:\n tmp_queue = []\n tmp_res = []\n\n for node in queue: # each pass over the queue handles one level\n if node:\n tmp_res.append(node.val)\n tmp_queue.extend(node.children)\n queue = tmp_queue\n res.append(tmp_res)\n return res\n","sub_path":"Week_02/G20200343030545/LeetCode_429_545.py","file_name":"LeetCode_429_545.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
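A small usage check for the level-order record above, building the sample tree from its docstring (assumes the Node and Solution classes above are in scope):

```python
root = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
print(Solution().levelOrder(root))  # [[1], [3, 2, 4], [5, 6]]
```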
+{"seq_id":"193318759","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# 视频信息封装\n\nimport json\nfrom NetworkAttributes import ENCRYPT_ONE, ENCRYPT_TWO\nfrom User import User\nimport os\nimport requests\nfrom Common import print_info\n\n\nclass Video(User):\n\n def __init__(self, jsonValue, type):\n super().__init__(jsonValue, type)\n self.gold = 0\n self.retime = 3\n if type == ENCRYPT_ONE:\n self.mvId = str(jsonValue.get(\"mv_id\"))\n self.title = str(jsonValue.get(\"mv_title\")).replace(\" \", \"\").replace('\\n', '')\n self.imgURL = str(jsonValue.get(\"mv_img_url\"))\n self.playURL = str(jsonValue.get(\"mv_play_url\"))\n self.downloadURL = str(jsonValue.get(\"mv_play_url\"))\n self.playWidth = str(jsonValue.get(\"mv_play_width\"))\n self.playHeight = str(jsonValue.get(\"mv_play_height\"))\n self.like = str(jsonValue.get(\"mv_like\"))\n self.isCatAds = bool(jsonValue.get(\"is_cat_ads\"))\n self.isCollect = str(jsonValue.get(\"is_collect\"))\n elif type == ENCRYPT_TWO:\n self.mvId = str(jsonValue.get(\"id\"))\n self.title = str(jsonValue.get(\"title\")).replace(\" \", \"\").replace('\\n', '')\n self.imgURL = str(jsonValue.get(\"cover\"))\n self.playURL = str(jsonValue.get(\"normal_url\"))\n self.downloadURL = str(jsonValue.get(\"normal_url\"))\n self.playWidth = str(jsonValue.get(\"width\"))\n self.playHeight = str(jsonValue.get(\"height\"))\n self.like = str(jsonValue.get(\"praise_num\"))\n self.isCatAds = False\n self.isCollect = str(jsonValue.get(\"is_collect\"))\n if 'type' in jsonValue.keys():\n videoType = int(jsonValue.get('type'))\n self.isCatAds = videoType == 2\n else:\n self.isCatAds = False\n\n if \"gold\" in jsonValue.keys():\n self.gold = int(jsonValue.get(\"gold\"))\n else:\n pass\n self.createSavePath()\n\n def createSavePath(self):\n \"\"\" 创建下载目录 \"\"\"\n if self.type == ENCRYPT_ONE:\n child = \"One\"\n elif self.type == ENCRYPT_TWO:\n child = \"Two\"\n else:\n child = \"Other\"\n fold = os.path.abspath('.')\n self.savePath = os.path.join(fold, \"FastVideo\", child)\n if not os.path.exists(self.savePath):\n os.makedirs(self.savePath)\n\n def download(self):\n \"\"\" 下载视频,返回文件名 \"\"\"\n url = self.downloadURL\n if url is None:\n print_info('{}_{} 下载链接为空'.format(self.type, self.mvId))\n return None\n name = '{}-{}_u:{}v:{}{}'.format(self.type, self.title, self.uId, self.mvId, os.path.splitext(url)[-1])\n\n path = os.path.join(self.savePath, name)\n if os.path.exists(path):\n print_info('{}_{} 文件存在,不用下载'.format(self.type, self.mvId))\n return name\n\n res = requests.get(url)\n # 获取文件大小\n # file_size_str = res.headers['Content-Length']\n # file_size = float(file_size_str)/1024/1024\n # print_info('开始下载:{}_{}'.format(self.type, self.mvId))\n with open(path, 'wb') as f:\n try:\n f.write(res.content)\n # 文件小于1M不上传\n if self.get_FileSize(path) < 1:\n return None\n # print_info('{}_{} 下载成功'.format(self.type, self.mvId))\n return name\n finally:\n f.close\n\n def get_FileSize(self, filePath):\n fsize = os.path.getsize(filePath)\n fsize = fsize/float(1024 * 1024)\n return round(fsize, 2)\n","sub_path":"Video.py","file_name":"Video.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"350156382","text":"import numpy as np\nimport cv2\n\n\ndef fluorescence(mask_in, gray_img, segmented_img):\n \"\"\"\n Reports the CTCF (Corrected Total Cell Fluorescence) and the mean gray value for a given mask; these are two fluorescence measures for the detected particles.\n Inputs:\n mask: NxM image marking the area belonging to a particle with value 1 and 0 elsewhere (attribute of the particle class)\n gray_img: original NxM image in gray values\n segmented_img: segmented NxM image (output of the segmentation function)\n Output:\n CTCF and mean_gray_value - fluorescence measures for the given mask\n \"\"\"\n # mask = cv2.erode(mask_in, kernel=(3, 3), iterations=10)\n mask = mask_in\n # build the \"fluorescent mask\" image\n fluorescent_mask = gray_img * mask\n integrated_density = np.sum(fluorescent_mask)\n area_in_pixels = np.sum(mask)\n mean_gray_value = integrated_density / area_in_pixels\n\n # build the \"background\" image\n segmented_img_inv = (segmented_img == 0).astype(np.uint8)\n background_img = gray_img * segmented_img_inv\n background_mean = np.sum(background_img) / (gray_img.shape[0] * gray_img.shape[1])\n CTCF = integrated_density - (area_in_pixels * background_mean)\n return CTCF, mean_gray_value\n","sub_path":"tool_containerized/docker_project/tool/src/fluorescence/fluorescence.py","file_name":"fluorescence.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
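A tiny synthetic check of the fluorescence() record above, with numbers chosen only to exercise the CTCF formula (integrated density minus area times background mean); it assumes the function above is importable and that numpy/cv2 are installed:

```python
import numpy as np

gray = np.arange(16, dtype=np.float64).reshape(4, 4)   # pixel values 0..15, total 120
mask = np.zeros((4, 4), dtype=np.uint8)
mask[:2, :2] = 1                                       # one 2x2 "particle"
segmented = mask.copy()                                # segmentation == the particle

ctcf, mean_gray = fluorescence(mask, gray, segmented)
# integrated density = 0+1+4+5 = 10, area = 4, mean gray value = 2.5
# background mean = (120 - 10) / 16 = 6.875, so CTCF = 10 - 4 * 6.875 = -17.5
# (negative only because this synthetic "particle" is dimmer than its background)
print(ctcf, mean_gray)   # -17.5 2.5
```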
+{"seq_id":"373352554","text":"from django.conf.urls import url\n\nfrom . import views\nfrom app.attendance.views import attendance\n\napp_name = 'student'\nurlpatterns = [\n url(r'^$', views.index, name='index'), # 学生登录进系统后的“首页”,还没确定放什么内容\n url(r'^member_evaluation$', views.member_evaluation, name='member_evaluation'), # 团队负责人的团队成员评价页面,可设置贡献度等\n url(r'^resources$', views.view_resources, name='resources'), # 资源列表页面\n url(r'^submits$', views.view_submitted_work, name='submitted_work'), # 查看作业提交情况\n url(r'^unsubmits$', views.view_unsubmitted_work, name='unsubmitted_work'), # 查看未提交情况\n url(r'^submit_team$', views.submit_team), # 提交组队请求\n url(r'^student_team_build$', views.student_team_build ,name='student_team_build'),\n url(r'^s$', views.download, name='download'), # 资源下载链接\n url(r'^workpage$', views.workRoot, name='workpage'), # 查看作业详情\n url(r'^work', views.workView, name='work'), # 查看作业详情\n url(r'^teampage$',views.teamRoot, name='teampage') ,# 团队主页,其下有多个功能\n url(r'^mycourse$',views.my_course,name='my_course'),\n url(r'^attendance', attendance), # 签到\n url(r'^apply_for_team', views.apply_for_team), # 申请加入团队\n url(r'^process_apply', views.process_apply), # 处理申请请求\n url(r'^finish_team_bulid', views.finish_team_bulid), # 结束组队\n url(r'^dismiss_team', views.dismiss_team), # 解散团队\n url(r'^view_score$',views.view_score,name='view_score'),\n url(r'^finish_team_bulid', views.finish_team_bulid), # 处理申请请求\n url(r'^preview_source_online/', views.preview_source_online, name='preview_source_online'),\n url(r'^view_notice$',views.viewNotice,name='view_notice'),\n\n]","sub_path":"app/student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"141337986","text":"'''Jeu de Sudoku en simulation humaine. Joue au Sudoku de la même manière\nqu'un joueur humain en utilisant des capacités cognitives simulées avec leurs\nimperfections et leurs limites. Tente également d'apprendre à mieux jouer.\n'''\n\nimport sudoio\nfrom sudoio import sudoPause, display, displayError\nimport sudorules as rules\n\nimport sudogrid\nfrom sudogrid import SudoGrid\nfrom sudogame import SudoGame\nfrom sudoplayer import SudoPlayer\nfrom sudomemprofile import SudoMemProfile\nfrom sudothinking import SudoThinkProfile\n\nimport sudotest\nfrom sudotest import sudoTest as TEST #objet global\n\n\nclass Sudoku():\n '''Encapsule le jeu dans son ensemble. Une instance de Sudoku gère des\n grilles, des joueurs créés avec des niveaux de compétences, et fait se\n jouer des parties par les joueurs avec les grilles.\n '''\n\n def __init__(self):\n '''Instanciation'''\n self._initOk = False\n self.init()\n\n def init(self):\n '''initialisatino ou réinitialisation du jeu.'''\n display(\"Jeu de Sudoku - Simulation de joueur humain\")\n TEST.display(\"sudoku\", 3, \"Sudoku - dans init()\")\n self._player = None\n self._playerOk = False\n self._playerReady = False\n self._playerMemProfile = None\n self._playerThinkProfile = None\n self._playerProfilesOk = False\n self._gridOk = False\n self._gameOk = False\n\n self._initOk = True\n TEST.display(\"sudoku\", 1, \"Jeu de Sudoku initialisé.\")\n return\n\n def createPlayer(self, name=None):\n '''Crée un joueur en lui attribuant un nom. Le joueur n'a pas\n initialement de mémoire ni de pensée, cela est créé ensuite.\n '''\n assert self._initOk\n try:\n self._player = SudoPlayer(name)\n self._player.init()\n except:\n displayError(\"Erreur\",\"Impossible de créer le joueur\")\n self._player = None\n self._playerOk = False\n raise Sudoku_Error(\"Sudoku.createPlayer(): erreur de création\")\n self._playerOk = True\n TEST.display(\"sudoku\", 2, \"Sudoku - Joueur {0} créé\" \\\n .format(self._player.name()))\n return\n\n def playerProfile(self, memProfile=None, thinkProfile=None):\n '''Donne au joueur des capacités mémoire et pensée selon des profiles'''\n TEST.display(\"sudoku\", 3, \"Sudoku - dans playerProfile()\")\n assert self._initOk\n try:\n if memProfile is not None:\n assert isinstance(memProfile, SudoMemProfile)\n self._player.memProfile(memProfile)\n self._playerMemProfile = memProfile\n if thinkProfile is not None:\n assert isinstance(thinkProfile, SudoThinkProfile)\n self._player.thinkProfile(thinkProfile)\n self._playerThinkProfile = thinkProfile\n self._playerProfilesOk = True\n except:\n DisplayError(\"Erreur\", \"Impossible de créer le profil du joueur\")\n self._PlayerProfilesOk = False\n raise Sudoku_Error(\"Impossible de définir le profile du joueur\")\n return\n\n def playerPlay(self, player, grid):\n assert self._initOk\n assert player is not None and isinstance(player, SudoPlayer)\n assert grid is not None and isinstance(grid, SudoGrid)\n gameResult = player.play(grid)\n display(gameResult)\n return gameResult\n\n## def createGame(self, player, grid):\n## '''Crée une partie avec un joueur et une grille de Sudoku'''\n## assert self._initOk\n## assert isinstance(player, SudoPlayer) and self._playerOk\n## assert isinstance(grid, SudoGrid)\n## try:\n## self._game = SudoGame()\n## self._game.init(player, grid)\n## except:\n## DisplayError(\"Erreur\", \"Impossible de créer la partie\")\n## self._gameOk = False\n## raise Sudoku_Error(\"Impossible de créer la partie\")\n## self._grid = grid\n## self._gridOk = 
True\n## self._gameOk = True\n## TEST.display(\"sudoku\", 1, \"La partie est prête à être jouée\")\n## return self._game\n##\n## def play():\n## '''Lance le jeu.'''\n## result = self._game.play()\n## #resultDetails = self._game.details()\n## return\n \n \n\n#TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST\nif __name__ == \"__main__\":\n\n import sudotestall\n testAlllevel = 1\n TEST.levelAll(testAlllevel)\n display(\"Tous les niveaux de test sont à {0}\".format(testAlllevel))\n\n s = Sudoku()\n s.createPlayer(\"David\")\n \n","sub_path":"sudosimu/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"505257126","text":"import numpy as np\nimport datetime as dt\nfrom copy import deepcopy\nimport xarray\nfrom sklearn.cluster import DBSCAN\nimport h5py\nimport scipy\nimport os\n\narray_list = ['lon', 'lat', 'ws', 'rcg', 'ws_yslf_nbrcs',\n 'ws_yslf_les', 'datetimes', 'sod']\nscalar_list = ['antenna', 'prn', 'sat']\n\n\ndef read_cygnss_l2(fname):\n return xarray.open_dataset(fname)\n\n\ndef read_imerg(fname):\n return Imerg(fname)\n\n\ndef split_tracks_in_time(track, gap=60):\n \"\"\"\n This function will split a CygnssTrack object into two separate tracks\n if there is a significant gap in time. Currently can split a track up to\n three times.\n\n Parameters\n ----------\n track : CygnssTrack object\n CYGNSS track that needs to be checked for breaks in time\n\n Other Parameters\n ----------------\n gap : int\n Number of seconds in a gap before a split is forced\n\n Returns\n -------\n track_list : list\n List of CygnssTrack objects broken up from original track\n \"\"\"\n indices = np.where(np.diff(track.sod) > gap)[0]\n if len(indices) > 0:\n if len(indices) == 1:\n track1 = subset_track(deepcopy(track), 0, indices[0]+1)\n track2 = subset_track(deepcopy(track), indices[0]+1,\n len(track.sod))\n return [track1, track2]\n if len(indices) == 2:\n track1 = subset_track(deepcopy(track), 0, indices[0]+1)\n track2 = subset_track(deepcopy(track), indices[0]+1,\n indices[1]+1)\n track3 = subset_track(deepcopy(track), indices[1]+1,\n len(track.sod))\n return [track1, track2, track3]\n if len(indices) == 3:\n track1 = subset_track(deepcopy(track), 0, indices[0]+1)\n track2 = subset_track(deepcopy(track), indices[0]+1, indices[1]+1)\n track3 = subset_track(deepcopy(track), indices[1]+1, indices[2]+1)\n track4 = subset_track(deepcopy(track), indices[2]+1,\n len(track.sod))\n return [track1, track2, track3, track4]\n else:\n print('Found more than four tracks!')\n return 0\n\n\ndef subset_track(track, index1, index2):\n \"\"\"\n This function subsets a CYGNSS track to only include data from a range\n defined by two indexes.\n\n Parameters\n ----------\n track : CygnssTrack object\n CygnssTrack to be subsetted\n index1 : int\n Starting index\n index2 : int\n Ending index\n\n Returns\n -------\n track : CygnssTrack object\n Subsetted CygnssTrack object\n \"\"\"\n for arr in array_list:\n setattr(track, arr, getattr(track, arr)[index1:index2])\n for scalar in scalar_list:\n setattr(track, scalar, getattr(track, scalar))\n return track\n\n\ndef get_tracks(data, sat, min_samples=10, verbose=False,\n filter=False, window=5, eps=1, gap=60):\n \"\"\"\n Returns a list of isolated CygnssTrack objects from a CYGNSS data object.\n\n Parameters\n ----------\n data : xarray.core.dataset.Dataset object\n CYGNSS data object as read by xarray.open_dataset\n sat : int\n CYGNSS satellite to be analyzed.\n\n Other Parameters\n ----------------\n min_samples : int\n Minimum allowable track size (number of specular points)\n verbose : bool\n True - Provide text updates while running\n\n False - Don't do this\n\n filter : bool\n True - Each track will receive a filter\n\n False - Don't do this\n\n window : int\n Window length of filter, in number of specular points. Must be odd.\n eps : scalar\n This is the eps keyword to be passed to DBSCAN. 
It is the max distance\n (in degrees lat/lon) between two tracks for them to be considered as\n part of the same track.\n gap : int\n Number of seconds in a track gap before a split is forced\n\n Returns\n -------\n trl : list\n List of isolated CygnssTrack objects\n \"\"\"\n trl = []\n dts = get_datetime(data)\n # Currently only works for one satellite at a time due to resource issues\n if type(sat) is not int or sat < 1 or sat > 8:\n raise ValueError('sat must be integer between 1 and 8')\n else:\n csat = sat\n if verbose:\n print('CYGNSS satellite', csat)\n print('GPS code (max =', str(int(np.max(data.prn_code.data)))+'):',\n end=' ')\n for gsat in range(np.int16(np.max(data.prn_code.data)+1)):\n if verbose:\n print(gsat, end=' ')\n # This will isolate most tracks, improving later cluster analysis\n for ant in range(np.int16(np.max(data.antenna.data)+1)):\n ds = CygnssTrack(data, datetimes=dts, gpsid=gsat,\n sat=csat, antenna=ant)\n if np.size(ds.lon) > 0:\n # Cluster analysis separates out additional grouped tracks\n # Only simplistic analysis of lat/lon gaps in degrees needed\n X = list(zip(ds.lon, ds.lat))\n db = DBSCAN(min_samples=min_samples, eps=eps).fit(X)\n labels = db.labels_\n uniq = np.unique(labels)\n for element in uniq[uniq >= 0]:\n # A bit clunky, but make a copy of the CygnssTrack object\n # to help separate out remaining tracks in the scene\n dsc = deepcopy(ds)\n for key in array_list:\n setattr(dsc, key, getattr(ds, key)[labels == element])\n dsc.lon[dsc.lon > 180] -= 360.0\n for key in scalar_list:\n setattr(dsc, key, np.array(getattr(ds, key))[0])\n # Final separation by splitting about major time gaps\n test = split_tracks_in_time(dsc, gap=gap)\n if test is None: # No time gap, append the original track\n trl.append(dsc)\n # Failsafe - Ignore difficult-to-split combined tracks\n elif test == 0:\n pass\n else: # Loop thru split-up tracks and append separately\n for t in test:\n trl.append(t)\n del dsc\n del db, labels, uniq, X\n del ds # This function is a resource hog, forcing some cleanup\n if filter:\n for tr in trl:\n tr.filter_track(window=window)\n return trl\n\n\ndef get_datetime(cyg):\n epoch_start = np.datetime64('1970-01-01T00:00:00Z')\n tdelta = np.timedelta64(1, 's')\n return np.array([dt.datetime.utcfromtimestamp((st - epoch_start) / tdelta)\n for st in cyg.sample_time.data])\n\n\nclass CygnssSubsection(object):\n\n \"\"\"\n Class to handle subsectioning CYGNSS data. Subsectioning by\n satellite (via CygnssSingleSat input), time indices, GPS satellite ID,\n range-corrected gain, etc. 
is supported.\n\n Main Attributes\n ---------------\n ws = Wind speed array\n lon = Longitude array\n lat = Latitude array\n rcg = RangeCorrectedGain array\n gps = GpsID array\n \"\"\"\n\n def __init__(self, data, gpsid=None, gain=None, sat=None, antenna=None):\n \"\"\"\n data = CygnssSingleSat, CygnssMultiSat, or CygnssL2WindDisplay object\n gpsid = Integer ID number for GPS satellite to examine\n gain = Threshold by range-corrected gain, values below will be masked\n bad = Value to compare against lat/lon to mask out missing data\n sat = CYGNSS satellite number (1-8)\n \"\"\"\n # Set basic attributes based on input data object\n self.ws = data.wind_speed.data\n self.ws_yslf_nbrcs = data.yslf_nbrcs_wind_speed.data\n self.ws_yslf_les = data.yslf_les_wind_speed.data\n self.lon = data.lon.data\n self.lat = data.lat.data\n self.gps = np.int16(data.prn_code.data)\n self.antenna = np.int16(data.antenna.data)\n self.rcg = data.range_corr_gain.data\n self.cygnum = np.int16(data.spacecraft_num.data)\n\n # Set keyword-based attributes\n self.gpsid = gpsid\n self.gain = gain\n self.sat = sat\n self.ant_num = antenna\n\n # Now subsection the data\n self.get_good_data_mask()\n\n def get_good_data_mask(self):\n \"\"\"\n Sets a mask used to limit the data plotted. Filtered out are data\n masked out by the GoodData mask (based on RangeCorrectedGain), missing\n lat/lon values, and bad data (ws < 0)\n \"\"\"\n good1 = self.ws >= 0\n good2 = np.logical_and(np.isfinite(self.lon), np.isfinite(self.lat))\n if self.gpsid is not None and type(self.gpsid) is int:\n good2 = np.logical_and(good2, self.gps == self.gpsid)\n if self.gain is not None:\n if np.size(self.gain) == 2:\n cond = np.logical_and(self.rcg >= self.gain[0],\n self.rcg < self.gain[1])\n good2 = np.logical_and(good2, cond)\n else:\n good2 = np.logical_and(good2, self.rcg >= self.gain)\n if self.sat is not None and type(self.sat) is int:\n good2 = np.logical_and(good2, self.cygnum == self.sat)\n if self.ant_num is not None and type(self.sat) is int:\n good2 = np.logical_and(good2, self.antenna == self.ant_num)\n self.good = np.logical_and(good1, good2)\n\n\nclass CygnssTrack(object):\n\n \"\"\"\n Class to facilitate extraction of a single track of specular points\n from a CygnssSingleSat, CygnssMultiSat, or CygnssL2WindDisplay object.\n\n Attributes\n ----------\n input = CygnssSubsection object\n ws = CYGNSS wind speeds\n lon = Longitudes of specular points\n lat = Latitudes of specular points\n rcg = Range-corrected gains of specular points\n datetimes = Datetime objects for specular points\n\n The following attributes are created by filter_track method:\n fws = Filtered wind speeds\n flon = Filtered longitudes\n flat = Filtered latitudes\n These attributes are shorter than the main attributes by the window length\n \"\"\"\n\n def __init__(self, data, datetimes=None, **kwargs):\n \"\"\"\n data = CygnssSingleSat, CygnssMultiSat, or CygnssL2WindDisplay object\n datetimes = List of datetime objects from get_datetime function.\n If None, this function is called.\n \"\"\"\n self.input = CygnssSubsection(data, **kwargs)\n self.ws = self.input.ws[self.input.good]\n self.ws_yslf_nbrcs = self.input.ws_yslf_nbrcs[self.input.good]\n self.ws_yslf_les = self.input.ws_yslf_les[self.input.good]\n self.lon = self.input.lon[self.input.good]\n self.lat = self.input.lat[self.input.good]\n self.rcg = self.input.rcg[self.input.good]\n self.antenna = self.input.antenna[self.input.good]\n self.prn = self.input.gps[self.input.good]\n self.sat = 
self.input.cygnum[self.input.good]\n if datetimes is None:\n dts = get_datetime(data)\n else:\n dts = datetimes\n self.datetimes = dts[self.input.good]\n sod = []\n for dt1 in self.datetimes:\n sod.append((dt1 - dt.datetime(\n self.datetimes[0].year, self.datetimes[0].month,\n self.datetimes[0].day)).total_seconds())\n self.sod = np.array(sod)\n\n def filter_track(self, window=5):\n \"\"\"\n Applies a running-mean filter to the track.\n\n window = Number of specular points in the running mean window.\n Must be odd.\n \"\"\"\n if window % 2 == 0:\n raise ValueError('Window must be odd length, not even.')\n hl = int((window - 1) / 2)\n self.fws = np.convolve(\n self.ws, np.ones((window,))/window, mode='valid')\n self.flon = self.lon[hl:-1*hl]\n self.flat = self.lat[hl:-1*hl]\n\n\nclass Imerg(object):\n\n def __init__(self, filen):\n self.read_imerg(filen)\n\n def read_imerg(self, filen):\n imerg = h5py.File(filen, 'r')\n self.datetime = dt.datetime.strptime(os.path.basename(filen)[23:39],\n '%Y%m%d-S%H%M%S')\n self.precip = np.ma.masked_where(\n np.transpose(imerg['Grid']['precipitationCal']) <= 0,\n np.transpose(imerg['Grid']['precipitationCal']))\n self.lon = np.array(imerg['Grid']['lon'])\n self.lat = np.array(imerg['Grid']['lat'])\n self.filename = os.path.basename(filen)\n imerg.close()\n\n def downsample(self):\n filled_precip = self.precip.filled(fill_value=0.0)\n dummy = scipy.ndimage.interpolation.zoom(filled_precip, 0.5)\n self.coarse_precip = np.ma.masked_where(dummy <= 0, dummy)\n self.coarse_lon = self.lon[::2]\n self.coarse_lat = self.lat[::2]\n\n\ndef add_imerg(trl, ifiles, dt_imerg):\n for ii in range(len(trl)):\n check_dt = trl[ii].datetimes[len(trl[ii].sod)//2]\n # diff = np.abs(check_dt - dt_imerg)\n index = np.where(dt_imerg <= check_dt)[0][-1] # np.argmin(diff)\n imerg = Imerg(ifiles[index])\n if ii % 50 == 0:\n print(ii, end=' ')\n precip = []\n for j in range(len(trl[ii].lon)):\n ilon = int(np.round((trl[ii].lon[j] - imerg.lon[0]) / 0.10))\n ilat = int(np.round((trl[ii].lat[j] - imerg.lat[0]) / 0.10))\n precip.append(imerg.precip[ilat, ilon])\n precip = np.array(precip)\n precip[~np.isfinite(precip)] = 0.0\n setattr(trl[ii], 'precip', precip)\n setattr(trl[ii], 'imerg', os.path.basename(ifiles[index]))\n print()\n return trl\n\n\ndef write_netcdfs(trl, path):\n for i, track in enumerate(trl):\n fname = 'track_' + str(track.sat).zfill(2) + '_' + \\\n str(track.prn).zfill(2) + \\\n '_' + str(track.antenna).zfill(2) + '_' + str(i).zfill(4) + \\\n track.datetimes[0].strftime('_%Y%m%d_s%H%M%S_') + \\\n track.datetimes[-1].strftime('e%H%M%S.nc')\n ds = xarray.Dataset(\n {'ws': (['nt'], track.ws),\n 'ws_yslf_nbrcs': (['nt'], track.ws_yslf_nbrcs),\n 'ws_yslf_les': (['nt'], track.ws_yslf_les),\n 'lat': (['nt'], track.lat),\n 'lon': (['nt'], track.lon),\n 'datetimes': (['nt'], track.datetimes),\n 'rcg': (['nt'], track.rcg),\n 'precip': (['nt'], track.precip),\n 'sod': (['nt'], track.sod)},\n coords={'nt': (['nt'], np.arange(len(track.ws)))},\n attrs={'imerg': track.imerg})\n ds.to_netcdf(path + fname, format='NETCDF3_CLASSIC')\n ds.close()\n del(ds)\n","sub_path":"pygnss/orbit.py","file_name":"orbit.py","file_ext":"py","file_size_in_byte":14691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"393065436","text":"import sopel.module\n\n@sopel.module.commands('sucker','suckers')\ndef rules(bot, trigger):\n if not trigger.group(2):\n myline='suckers'\n else:\n myline = trigger.group(2).strip()\n \n if myline.endswith('s'):\n bot.say(myline + ' are for suckers!!')\n else:\n bot.say(myline + ' is for suckers!!')\n \n","sub_path":"suckers.py","file_name":"suckers.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"205727072","text":"class _movie_database:\n\n\tdef __init__(self):\n\t\tself.movies_titles = {} #uses movie ID as the key\n\t\tself.movies_genres = {} #uses movie ID as the key\n\t\tself.movies_images = {} #uses movie ID as the key\n\t\tself.users_gender = {} #uses user ID as the key\n\t\tself.users_age = {} #uses user ID as the key\n\t\tself.users_occupation = {} #uses user ID as the key\n\t\tself.users_zipcode = {} #uses user ID as the key\n\t\tself.ratings = {} #uses movie ID as the key\n\t\tself.mid = 1\n\n\tdef load_movies(self, movie_file):\n\t\t# If movies were already loaded, clear records\n\t\tif self.movies_titles:\n\t\t self.movies_titles.clear()\n\t\tif self.movies_genres:\n\t\t self.movies_genres.clear()\n\n\t# If/once records are clear, populate records\n\t\tfor line in open(movie_file, 'r', encoding = 'ISO-8859-1'):\n\t\t parts = line.split(\"::\")\n\t\t self.movies_titles[int(parts[0])] = parts[1]\n\t\t self.movies_genres[int(parts[0])] = parts[2].rstrip()\n\n\tdef modified_load_movies(self, mid):\n\t\tfor line in open(movie_file, 'r', encoding = 'ISO-8859-1'):\n\t\t parts = line.split(\"::\")\n\t\t if int(line) is int(mid):\n\t\t\t self.movies_titles[int(parts[0])] = parts[1]\n\t\t\t self.movies_genres[int(parts[0])] = parts[2].rstrip()\n\n\n\tdef load_images(self, image_file):\n\t\t# If movies were already loaded, clear records\n\t\tif self.movies_images:\n\t\t self.movies_images.clear()\n\t\t# If/once records are clear, populate records\n\t\tfor line in open(image_file, 'r', encoding = 'ISO-8859-1'):\n\t\t parts = line.split(\"::\")\n\t\t self.movies_images[int(parts[0])] = parts[2]\n\n\tdef get_image(self, mid):\n\t\tif int(mid) in self.movies_images:\n\t\t\treturn self.movies_images[int(mid)]\n\t\telse:\n\t\t\treturn None\n\n\tdef print_sorted_movies(self):\n\t\tfor key, value in sorted(self.movies_titles.items()):\n\t\t print (key, value)\n\n\tdef print_movie(self, mid):\n\t\tprint (self.movies_titles[int(mid)])\n\n\tdef get_movie(self, mid):\n\t\tif int(mid) in self.movies_titles:\n\t\t return list([self.movies_titles[int(mid)], self.movies_genres[int(mid)]])\n\t\telse:\n\t\t return None\n\n\tdef get_movies(self):\n\t\tmovieIDlist = []\n\t\tfor key in sorted(self.movies_titles.items()):\n\t\t movieIDlist.append(int(key))\n\t\treturn movieIDlist\n\n\tdef set_movie(self, mid, titleandgenre):\n\t\tself.movies_titles[int(mid)]=titleandgenre[0]\n\t\tself.movies_genres[int(mid)]=titleandgenre[1]\n\n\tdef add_movie(self, genres, title):\n\t\tthinglist = sorted(self.movies_titles.keys())\n\t\tself.mid = thinglist[-1] + 1 \n\t\tself.movies_titles[int(self.mid)]=title\n\t\tself.movies_genres[int(self.mid)]=genres\n\n\tdef delete_movie(self, mid):\n\t\tif int(mid) in self.movies_titles:\n\t\t\tdel self.movies_titles[int(mid)]\n\t\t\tdel self.movies_genres[int(mid)]\n\n\tdef load_users(self, users_file):\n\t\t# If users were already loaded, clear records\n\t\tif self.users_gender:\n\t\t\tself.users_gender.clear()\n\t\tif self.users_age:\n\t\t\tself.users_age.clear()\n\t\tif self.users_occupation:\n\t\t\tself.users_occupation.clear()\n\t\tif self.users_zipcode:\n\t\t\tself.users_zipcode.clear()\n\n\t# If/once records are clear, populate records\n\t\tfor line in open(users_file, 'r', encoding = 'ISO-8859-1'):\n\t\t\tparts = line.split(\"::\")\n\t\t\tself.users_gender[int(parts[0])] = parts[1]\n\t\t\tself.users_age[int(parts[0])] = parts[2]\n\t\t\tself.users_occupation[int(parts[0])] = parts[3]\n\t\t\tself.users_zipcode[int(parts[0])] = parts[4].rstrip()\n\n\tdef 
get_user(self, uid):\n\t\tif int(uid) in self.users_gender:\n\t\t\treturn list([self.users_gender[int(uid)], int(self.users_age[int(uid)]), int(self.users_occupation[int(uid)]), self.users_zipcode[int(uid)]])\n\t\telse:\n\t\t\treturn None\n\n\tdef get_users(self):\n\t\tuserIDlist = []\n\t\tfor key in sorted(self.users_gender.items()):\n\t\t\tuserIDlist.append(int(key))\n\t\treturn userIDlist\n\n\tdef add_user(self, userstuff):\n\t\tthinglist = sorted(self.users_gender.keys())\n\t\tself.uid = thinglist[-1] + 1 \n\t\tself.users_gender[int(self.uid)]=userstuff[0]\n\t\tself.users_age[int(self.uid)]=int(userstuff[1])\n\t\tself.users_occupation[int(self.uid)]=int(userstuff[2])\n\t\tself.users_zipcode[int(self.uid)]=userstuff[3]\n\n\tdef set_user(self, uid, userstuff):\n\t\tself.users_gender[int(uid)]=userstuff[0]\n\t\tself.users_age[int(uid)]=int(userstuff[1])\n\t\tself.users_occupation[int(uid)]=int(userstuff[2])\n\t\tself.users_zipcode[int(uid)]=userstuff[3]\n\n\tdef delete_user(self, uid):\n\t\tif int(uid) in self.users_gender:\n\t\t\tdel self.users_gender[int(uid)]\n\t\t\tdel self.users_age[int(uid)]\n\t\t\tdel self.users_occupation[int(uid)]\n\t\t\tdel self.users_zipcode[int(uid)]\n\n\tdef load_ratings(self, ratings_file):\n\t\t# If ratings were already loaded, clear records\n\t\tif self.ratings:\n\t\t\tself.ratings.clear()\n\n\t\t# If/once records are clear, populate records\n\t\tfor line in open(ratings_file, 'r', encoding = 'ISO-8859-1'):\n\t\t\tparts = line.split(\"::\")\n\t\t\ttry:\n\t\t\t\tself.ratings[int(parts[1])][int(parts[0])] = int(parts[2])\n\t\t\texcept KeyError:\n\t\t\t\tself.ratings[int(parts[1])] = {int(parts[0]) : int(parts[2])}\n\n\tdef get_rating(self, mid):\n\t\tsum = float(0)\n\t\tcount = float(0)\n\t\taverage = float(0)\n\t\tif mid in self.ratings:\n\t\t\tfor key, value in self.ratings[mid].items():\n\t\t\t\tcount += float(1)\n\t\t\t\tsum += value\n\t\t\taverage = float(sum)/float(count)\n\t\t\treturn float(average)\n\t\telse:\n\t\t\treturn float(0)\n\n\tdef get_highest_rated_movie(self):\n\t\tmaximum = float(0)\n\t\tthisdict = {}\n\t\tif len(self.ratings) != 0:\n\t\t\tfor key,value in self.ratings.items():\n\t\t\t\tthisdict[key] = self.get_rating(int(key))\n\t\t\tfor key, value in thisdict.items():\n\t\t\t\tif value > maximum:\n\t\t\t\t\tmaxmid = key\n\t\t\t\t\tmaximum = value\n\t\t\treturn maxmid\n\t\telse:\n\t\t\treturn None\n\n\tdef set_user_movie_rating(self, uid, mid, rating):\n\t\tif int(mid) in self.ratings:\n\t\t\tself.ratings[int(mid)][int(uid)] = int(rating)\n\t\telse:\n\t\t\tself.ratings[int(mid)] = {int(uid) : int(rating)}\n\n\tdef get_user_movie_rating(self, uid, mid):\n\t\tif int(mid) in self.ratings:\n\t\t\tif int(uid) in self.ratings[int(mid)]:\n\t\t\t\treturn self.ratings[int(mid)][int(uid)]\n\t\telse:\n\t\t\treturn None\n\n\tdef make_recommendation(self, uid):\n\t\tmaximum = float(0)\n\t\tthisdict = {}\n\t\tif len(self.ratings) != 0:\n\t\t\tfor key,value in self.ratings.items():\n\t\t\t\tthisdict[key] = self.get_rating(int(key))\n\t\t\tfor key, value in thisdict.items():\n\t\t\t\tif value > maximum:\n\t\t\t\t\tif self.get_user_movie_rating(uid, key) is None:\n\t\t\t\t\t\tmaxmid = key\n\t\t\t\t\t\tmaximum = value\n\t\t\treturn maxmid\n\t\telse:\n\t\t\treturn None\t\t\n\n\tdef delete_all_ratings(self):\n\t\tif self.ratings:\n\t\t\tself.ratings.clear()\n\nif __name__ == \"__main__\":\n mdb = _movie_database()\n\n #### MOVIES ########\n mdb.load_movies('ml-1m/movies.dat')\n mdb.load_ratings('ml-1m/ratings.dat')\n mdb.load_users('ml-1m/users.dat')\n 
mdb.load_images('ml-1m/images.dat')\n# print (mdb.get_rating(557))\n#\n","sub_path":"CherryPy/_movie_database.py","file_name":"_movie_database.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"532284888","text":"from tkinter import *\nfrom screens.showMarks import *\n\nclass Stud_marks():\n def __init__(self):\n self.root = Toplevel()\n self.root.geometry(\"2000x1024\")\n self.root.title(\"Marks\")\n self.c = Canvas(self.root,bg = \"gray\",height=2000,width=2024)\n \n photo = PhotoImage(file = \"images/v1.png\")\n \n # Setting the background\n self.c.create_image((0,0), image=photo, anchor=\"nw\")\n\n self.c.create_text((700, 400), text=\"Enter the USN: \", fill=\"white\", anchor=\"nw\"\n ,font=('Times',50,'italic bold'))\n \n self.button1 = Entry(self.c,font=('Times',20,'bold'))\n self.button1.configure(width = 40,relief = FLAT) \n button1_window = self.c.create_window(700, 550, anchor=NW, window=self.button1)\n\n\n self.back = Button(self.c,text='Back',bg='red',fg='white',activebackground='black',activeforeground='white',width=10,height=2, font=(\"Times\",20,'bold'),command=lambda:back())\n self.back.place(x=1400,y=900,width=100,height=40)\n\n\n self.back = Button(self.c,text='Submit',bg='red',fg='white',activebackground='black',activeforeground='white',width=10,height=2, font=(\"Times\",20,'bold'),command=lambda:submit())\n self.back.place(x=1250,y=900,width=100,height=40)\n\n\n def back():\n self.root.destroy()\n\n\n def submit():\n usn = self.button1.get()\n \n usn = usn.upper()\n \n s = ShowMarks(usn)\n\n\n self.c.pack()\n\n self.root.mainloop()","sub_path":"screens/stud_marks.py","file_name":"stud_marks.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"487911152","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndata = ([], [], [])\n\nf = open('bc-n-iter.txt')\nlines = f.readlines()\nlines.sort(key=lambda x: int(x.strip().split(' ')[0]))\nprint(lines)\nfor line in lines:\n if line.find(' ') != -1:\n n, a, b = line.strip().split(' ')\n data[0].append(n)\n data[1].append(1.0 - float(a))\n data[2].append(1.0 - float(b))\n\nfig, ax = plt.subplots()\nax.plot(data[0], data[1], color='b', label='Train')\nax.plot(data[0], data[2], color='r', label='Test')\nax.set_ylim(0, ax.get_ylim()[1] * 1.1)\nax.set_title('Neural Network (Breast Cancer)')\nax.set_ylabel('% Error')\nax.set_xlabel('# of Epochs')\nax.legend()\nplt.show()\n","sub_path":"src/python/plot/old/plot_bc_n_iter.py","file_name":"plot_bc_n_iter.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"98891250","text":"import requests\n\nurl = \"https://my-json-server.typicode.com/quyentx/my-typicode-json-server/employee\"\n\n\ndef get_request():\n # get request without parameter\n x = requests.get(url + \"/1\")\n print(f\"Content type is {x.headers['content-type']}\")\n print(f\"Body text is: {x.text}\")\n print(f\"Body json is: {x.json()}\")\n print(f\"Encoding method is: {x.encoding}\")\n print(f\"Status code is: {x.status_code}\")\n\n\ndef get_request_with_params():\n # get request with parameters in URL\n pl = {\"id\": 2, \"name\": [\"John Delaware\", \"Jack Sparrow\"]}\n y = requests.get(url, params=pl)\n print(y.url)\n print(y.json())\n\n\ndef get_with_custom_headers():\n # get request with custom header\n headers = {\"content-type\": \"plain/text\"}\n z = requests.get(url, headers=headers)\n print(z.request.headers[\"content-type\"])\n\n\ndef post_request():\n post_json = {\n \"id\": 3,\n \"name\": \"John Human\",\n \"role\": \"CEO\",\n \"age\": 34,\n \"species\": \"human\"\n }\n r = requests.post(url, data=post_json)\n print(\"==========================================\")\n print(f\"Content type is {r.headers['content-type']}\")\n print(f\"Body text is: {r.text}\")\n print(f\"Body json is: {r.json()}\")\n print(f\"Encoding method is: {r.encoding}\")\n print(f\"Status code is: {r.status_code} {r.reason}\")\n print(f\"Request is: {r.request}\")\n print(f\"Response URL is: {r.url}\")\n print(f\"Time elapsed is: {r.elapsed}\")\n assert r.json()[\"id\"] == \"3\"\n assert r.json()[\"name\"] == \"John Human\"\n assert r.json()[\"role\"] == \"CEO\"\n assert r.json()[\"age\"] == \"34\"\n assert r.json()[\"species\"] == \"human\"\n assert r.status_code == 201\n r.close()\n\n\ndef post_with_encoded_form():\n pl = {\"name\": \"Mike Powder\", \"age\": 30}\n r = requests.post(\"https://httpbin.org/post\", data=pl)\n print(r.json())\n print(r.status_code)\n\n\ndef post_with_file_content():\n files = {'file': open('rps.py', 'rb')}\n # files = {'file': open('report.xls', 'rb')}\n r = requests.post(\"https://httpbin.org/post\", files=files)\n print(r.json())\n\n\ndef put_request():\n put_json = {\n \"id\": 2,\n \"name\": \"John Button\",\n \"role\": \"Director\",\n \"age\": 33,\n \"species\": \"human\"\n }\n r = requests.put(\"https://httpbin.org/put\", json=put_json)\n print(f\"Content type is {r.headers['content-type']}\")\n print(f\"Body text is: {r.text}\")\n print(f\"Body json is: {r.json()}\")\n print(f\"Encoding method is: {r.encoding}\")\n print(f\"Status code is: {r.status_code} {r.reason}\")\n print(f\"Response URL is: {r.url}\")\n print(f\"Time elapsed is: {r.elapsed}\")\n assert r.status_code == 200\n r.close()\n\n\ndef delete_request():\n # get request without parameter\n r = requests.delete(url + \"/1\")\n print(f\"Content type is {r.headers['content-type']}\")\n print(f\"Body text is: {r.text}\")\n print(f\"Body json is: {r.json()}\")\n print(f\"Encoding method is: {r.encoding}\")\n print(f\"Status code is: {r.status_code}\")\n assert r.status_code == 200\n assert r.json() == {}\n\n\nget_request()\nget_request_with_params()\nget_with_custom_headers()\n\npost_request()\npost_with_encoded_form()\npost_with_file_content()\n\nput_request()\n\ndelete_request()\n","sub_path":"python_requests.py","file_name":"python_requests.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"88515101","text":"# coding: utf-8\n\nimport RPi.GPIO as GPIO\nimport time\n\nL_VREF = 16 # 左車輪\nL_IN1 = 20\nL_IN2 = 21\n\nR_VREF = 13 # 右車輪\nR_IN1 = 19\nR_IN2 = 26\n\nmotor_ports = [[L_IN1, L_IN2, L_VREF], [R_IN1, R_IN2, R_VREF]]\n\nGPIO.setmode(GPIO.BCM)\nfor ports in motor_ports: # 出力先が多いので,リストにまとめた後,一斉に設定\n GPIO.setup(ports, GPIO.OUT)\n\n\n# モーターを制御する関数の定義\ndef set_motor(pno,job):\n ta_switch = [ # モータドライバに対応する仕事の指示リストのリスト\n [0, 0], # 停止\n [1, 0], # 時計回り\n [0, 1]] # 反時計回り\n # GPIO.HIGH(LOW)の代わりに,1(0)が使える\n \n ports = motor_ports[pno] # motor_portsのpno番目のリスト.車輪の指定.\n sw = ta_switch[job] # ta_switchのjob番目のリスト.仕事の指示. \n GPIO.output(ports[0], sw[0]) # 左右車輪ともにIN1は,指示リストの0番目と対応.ex:[1, 0]の1.\n GPIO.output(ports[1], sw[1]) # 左右車輪ともにIN2は,指示リストの1番目と対応.ex:[1, 0]の0.\n\n\n# 両方のモーターを同時に制御する.左右同じ指示を受ける\ndef set_motor2(job):\n set_motor(0, job) # 左車輪\n set_motor(1, job) # 右車輪\n\n\npwm_l = GPIO.PWM(L_VREF, 50)\npwm_r = GPIO.PWM(R_VREF, 50)\npwm_l.start(100)\npwm_r.start(100)\n\nif __name__ == \"__main__\":\n try:\n while True:\n set_motor2(1) # 前へ(時計回り)\n time.sleep(1.5)\n set_motor2(2) # 後ろへ(反時計回り)\n time.sleep(1.5)\n set_motor2(0) # 停止\n time.sleep(1.5)\n\n except KeyboardInterrupt:\n pass\n GPIO.cleanup()\n","sub_path":"y_motor.py","file_name":"y_motor.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"599070509","text":"# Importo la librería random para utilizarlo en el cálculo del tiempo hecho por el piloto\nfrom math import pi\nimport random\n\n# Variable global\nundefined = \"Unknown\"\n\n\n\nbono = [\"chasis\",\"motor\",\"aerodinamica\"]\npilotos = []\npistas = []\ndirectores = [\"J. Montoya\", \"T, Aprilla\"]\nmecanicos = []\n# mecanicos = [\"Chavez\", \"Villanueva\", \"Ramirez\", \"Duque\", \"Henao\", \"Quintero\", \"Avellaneda\", \"Ortiz\"]\nsueldos = []\nnombre_Equipos = []\n\n\nclass Pista:\n # Definición de atributos\n nombre = undefined\n tipo_exigencia_1 = undefined\n tipo_exigencia_2 = undefined\n\n # Definición del método constructor\n def __init__(self, nombre, tipo_exigencia_1, tipo_exigencia_2):\n self.nombre = nombre\n self.tipo_exigencia_1 = tipo_exigencia_1\n self.tipo_exigencia_2 = tipo_exigencia_2\n\nclass Equipo:\n # Definición de atributos\n nombre_Equipo = undefined\n director = undefined\n mecanico1 = undefined\n mecanico2 = undefined\n mecanico3 = undefined\n mecanico4 = undefined\n piloto1 = undefined\n piloto2 = undefined\n sueldo_Director = undefined\n sueldo_Mecanico = undefined\n sueldo_Piloto = undefined\n\n # Definición del método constructor\n def __init__(self, nombre_Equipo, director, mecanico1, mecanico2, mecanico3, mecanico4, piloto1, piloto2,sueldo_Director,sueldo_Mecanico,sueldo_Piloto):\n self.nombre_Equipo = nombre_Equipo\n self.director = director\n self.mecanico1 = mecanico1\n self.mecanico2 = mecanico2\n self.mecanico3 = mecanico3\n self.mecanico4 = mecanico4\n self.piloto1 = piloto1\n self.piloto2 = piloto2\n self.sueldo_Director = sueldo_Director\n self.sueldo_Mecanico = sueldo_Mecanico\n self.sueldo_Piloto = sueldo_Piloto\n\nclass Piloto:\n nombre = undefined\n tiempo_piloto = undefined\n bono_piloto = undefined\n\n # Definición del método constructor\n def __init__(self, nombre, bono_piloto):\n self.nombre = nombre\n self.bono_piloto = bono_piloto\n\nclass Mecanico:\n nombre = undefined\n tiempo_mecanico = undefined\n\n # Definición del método constructor\n def __init__(self, nombre):\n self.nombre = nombre\n\n\nclass Ejecutora:\n pista = []\n pilotos = []\n mecanicos = []\n sueldos = []\n nombre_Equipos = []\n \n \n # Crear pilotos y agregarlos al arreglo vacío\n piloto1 = Piloto(\"C. Muñoz\", bono[0])\n piloto2 = Piloto(\"Kobayashi\", bono[1])\n piloto3 = Piloto(\"G. Chavez\", bono[2])\n piloto4 = Piloto(\"P. 
Wherlein\", bono[1])\n \n pilotos.append(piloto1)\n pilotos.append(piloto2)\n pilotos.append(piloto3)\n pilotos.append(piloto4)\n \n\n\n # Crear mecánicos y agregarlos al arreglo vacío\n mecanico1 = Mecanico(\"Chavez\")\n mecanico2 = Mecanico(\"Villanueva\")\n mecanico3 = Mecanico(\"Ramirez\")\n mecanico4 = Mecanico(\"Duque\")\n mecanico5 = Mecanico(\"Henao\")\n mecanico6 = Mecanico(\"Quintero\")\n mecanico7 = Mecanico(\"Avellaneda\")\n mecanico8 = Mecanico(\"Ortiz\")\n\n mecanicos.append(mecanico1)\n mecanicos.append(mecanico2)\n mecanicos.append(mecanico3)\n mecanicos.append(mecanico4)\n mecanicos.append(mecanico5)\n mecanicos.append(mecanico6)\n mecanicos.append(mecanico7)\n mecanicos.append(mecanico8)\n\n\n #Crear pistas con sus respectivas exigencias \n pista1 = Pista(\"Long Beach\",bono[0],bono[2])\n pista2 = Pista(\"Interlagos\",bono[1],bono[0])\n pista3 = Pista(\"Suzuka\",bono[2],bono[0])\n pista4 = Pista(\"Silverstone\",bono[1],bono[2])\n\n \n \n #Crear equipos\n equipo1 = Equipo(\"Equipo 1\",directores[0],mecanicos[0],mecanicos[1],mecanicos[2],mecanicos[3],pilotos[0],pilotos[1],4000,3000,2000)\n equipo2 = Equipo(\"Equipo 2\",directores[1],mecanicos[4],mecanicos[5],mecanicos[6],mecanicos[7],pilotos[2],pilotos[3],4000,3000,2000)\n \n print(\"Seleccione el número de la pista que desea correr\")\n print(\"1 = Long Beach\")\n print(\"2 = Interlagos\")\n print(\"3 = Suzuka\")\n print(\"4 = Silverstone\")\n ingreso_pista = int(input(\"¿Que pista correrán los pilotos?: \"))\n print(ingreso_pista)\n\n pista_seleccionada = undefined\n\n if ingreso_pista == 1:\n #print(\"ENTRE\")\n pista_seleccionada = pista1\n elif ingreso_pista == 2:\n pista_seleccionada = pista2\n elif ingreso_pista == 3:\n pista_seleccionada = pista3\n elif ingreso_pista == 4:\n pista_seleccionada = pista4\n \n\n\n #Retorna el tiempo del piloto\n def calcular_pole_position(self):\n # Recorrer el listado de pilotos\n for i in self.pilotos:\n # Calcular el tiempo por medio de este random\n tiempo_piloto = random.randint(1,70)\n # A cada piloto le guardo el tiempo que obtuvo\n i.tiempo_piloto = tiempo_piloto\n # print(\"El tiempo del piloto \", i.nombre, \" es: \", i.tiempo_piloto)\n \n #for j in self.pilotos:\n # print(\"Actualizacion \", j.nombre, \" es: \", j.tiempo_piloto)\n \n # Ordenar el tiempo de los pilotos de menor a mayor\n n = len(self.pilotos)\n l = len(self.pilotos)\n\n for i in range(1,len(self.pilotos)):\n for j in range(0,len(self.pilotos)-i):\n if(self.pilotos[j+1].tiempo_piloto < self.pilotos[j].tiempo_piloto):\n temp = self.pilotos[j]\n self.pilotos[j] = self.pilotos[j+1]\n self.pilotos[j+1] = temp \n \n return self.pilotos\n\n # Calcular tiempo en pits de los mecánicos\n def calcular_tiempo_pits(self):\n tiempo_mecanico = random.randint(1,3)\n return tiempo_mecanico\n \n\n # calcular nomina piloto primer puesto\n def calcular_nomina_primer_lugar(self):\n # Lista ordenada\n pole_position = ejecutar.calcular_pole_position()\n piloto_ganador = pole_position[0]\n\n if self.equipo1.piloto1.nombre == piloto_ganador.nombre or self.equipo1.piloto2.nombre == piloto_ganador.nombre:\n print(\"GANADOR!: \", piloto_ganador.nombre, \" : \", piloto_ganador.tiempo_piloto, \" sg y pertenece al equipo equipo 1 \")\n beneficio_primer_puesto = (2000 + 0.10)\n print(\"El sueldo + beneficio para el piloto \", piloto_ganador.nombre, \" del equipo 1 es de: \", beneficio_primer_puesto)\n elif self.equipo2.piloto1.nombre == piloto_ganador.nombre or self.equipo2.piloto2.nombre == piloto_ganador.nombre:\n print(\"GANADOR!: \", 
piloto_ganador.nombre, \" : \", piloto_ganador.tiempo_piloto, \" sg pertenece al equipo equipo 2 \")\n beneficio_primer_puesto = (2000 + 0.10)\n print(\"El sueldo + beneficio para el director \", piloto_ganador.nombre, \" del equipo 2 es de: \", beneficio_primer_puesto)\n\n \n def calcular_nomina_segundo_lugar(self):\n # Lista ordenada\n pole_position = ejecutar.calcular_pole_position()\n piloto_segunda_posicion = pole_position[1]\n\n # Piloto: 10% extra si clasifica 2do o mejor\n # 20% si lo hace en una pista donde no tiene bono\n if self.equipo1.piloto1.nombre == piloto_segunda_posicion.nombre or self.equipo1.piloto2.nombre == piloto_segunda_posicion.nombre:\n if self.pista_seleccionada.tipo_exigencia_1 != piloto_segunda_posicion.bono_piloto and self.pista_seleccionada.tipo_exigencia_2 != piloto_segunda_posicion.bono_piloto:\n print(\"Segunda posición: \", piloto_segunda_posicion.nombre, \" : \", piloto_segunda_posicion.tiempo_piloto, \" sg y pertenece al equipo 1\")\n beneficio_segunda_posición = (2000 + 0.10)\n print(\"El sueldo + beneficio por segunda posición para el piloto \", piloto_segunda_posicion.nombre, \" del equipo 1 es de: \", beneficio_segunda_posición )\n print(\"El sueldo por segunda posición para el piloto \", piloto_segunda_posicion.nombre, \" del equipo 1 es de: \", self.equipo1.sueldo_Piloto)\n elif self.equipo2.piloto1.nombre == piloto_segunda_posicion.nombre or self.equipo2.piloto2.nombre == piloto_segunda_posicion.nombre:\n if self.pista_seleccionada.tipo_exigencia_1 != piloto_segunda_posicion.bono_piloto and self.pista_seleccionada.tipo_exigencia_2 != piloto_segunda_posicion.bono_piloto:\n print(\"Segunda posición: \", piloto_segunda_posicion.nombre, \" : \", piloto_segunda_posicion.tiempo_piloto, \" sg y pertenece al equipo 2\")\n beneficio_segunda_posición = (2000 + 0.10)\n print(\"El sueldo + beneficio por segunda posición para el piloto \", piloto_segunda_posicion.nombre, \" del equipo 2 es de: \", beneficio_segunda_posición )\n print(\"El sueldo por segunda posición para el piloto \", piloto_segunda_posicion.nombre, \" del equipo 2 es de: \", self.equipo2.sueldo_Piloto)\n \n \n \n \n # Calcular la nómina para los directores\n def calcular_nomina_directores(self):\n # Director equipo: 10% bono por pole position\n\n # Lista ordenada\n pole_position = ejecutar.calcular_pole_position()\n piloto_ganador = pole_position[0]\n \n\n if self.equipo1.piloto1.nombre == piloto_ganador.nombre or self.equipo1.piloto2.nombre == piloto_ganador.nombre:\n #print(\"GANADOR!: \", piloto_ganador.nombre, \" : \", piloto_ganador.tiempo_piloto, \" sg y pertenece al equipo equipo 1 \")\n beneficio_director = (4000 + 0.10)\n print(\"El sueldo + beneficio para el director \", directores[0], \" del equipo 1 es de: \", beneficio_director)\n elif self.equipo2.piloto1.nombre == piloto_ganador.nombre or self.equipo2.piloto2.nombre == piloto_ganador.nombre:\n #print(\"GANADOR!: \", piloto_ganador.nombre, \" : \", piloto_ganador.tiempo_piloto, \" sg pertenece al equipo equipo 2 \")\n beneficio_director = (4000 + 0.10)\n print(\"El sueldo + beneficio para el director \", directores[1], \" del equipo 2 es de: \", beneficio_director)\n \n\n # Calcular la nómina para los mecánicos\n # Mecánico: 5% extra si logra una parada en pits menor 2.2 segundos\n def calcular_nomina_mecanicos(self):\n print(\"metodo mecanicos\")\n \n for i in (self.mecanicos):\n tiempo_total_mecanico = ejecutar.calcular_tiempo_pits()\n i.tiempo_mecanico = tiempo_total_mecanico\n \n if i.tiempo_mecanico <= 2.2:\n 
beneficio_mecanico = (3000 + 0.5)\n print(\"El mecánico \", i.nombre, \" hizo un tiempo de: \", i.tiempo_mecanico, \"El salario + beneficio es de: \", beneficio_mecanico)\n else:\n print(\"El mecánico \", i.nombre, \" hizo un tiempo de: \", i.tiempo_mecanico, \"El salario es de: \", 3000)\n \n\nejecutar = Ejecutora()\n# Lista ordenada\n#pole_position = ejecutar.calcular_pole_position()\n# Ver la pole position \n#for j in pole_position:\n # print(\"Pole position: \", j.nombre, \" es: \", j.tiempo_piloto)\n\nnomina_primer_lugar = ejecutar.calcular_nomina_primer_lugar()\nprint(\"#########################################################################\")\nnomina_segundo_lugar = ejecutar.calcular_nomina_segundo_lugar()\nprint(\"#########################################################################\")\nnomina_director = ejecutar.calcular_nomina_directores()\nprint(\"#########################################################################\")\nnomina_mecanico = ejecutar.calcular_nomina_mecanicos()\n\n","sub_path":"Taller_Final_2/TallerFinal/prueba5.py","file_name":"prueba5.py","file_ext":"py","file_size_in_byte":11356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"383158322","text":"dict = {\"P\" : \"Pikachu\", \n \"M\" : \"Mickey Mouse\", \n \"H\" : \"Hello kitty\"}\n\nwhile True :\n x = str(input())\n if x == \"-1\" :\n break\n elif x in dict :\n print(dict[x])\n else :\n print(x, \"does not exist. Enter a new one:\")\n dict[x] = str(input())","sub_path":"3068-dict字典練��II (C++ STL map).py","file_name":"3068-dict字典練習II (C++ STL map).py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"194527964","text":"t = int(input())\nwhile(t):\n t-=1\n n = int(input())\n arr = list(map(int, input().split()))\n if(n == 1):\n print(\"YES\")\n break\n\n preArr = [arr[0]]\n for i in range(1, n):\n preArr.append(preArr[i-1]+arr[i])\n \n flag = \"NO\"\n for i in range(1, n):\n if(preArr[i-1] == preArr[-1]-preArr[i]):\n # print(i)\n flag = \"YES\"\n break\n \n print(flag)\n\n # print(preArr)","sub_path":"Assignment/128_sherlockAndArray.py","file_name":"128_sherlockAndArray.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"494890934","text":"with open('file',encoding='utf-8') as f:\n\tl = f.readlines()\npage_num = int(input('请输入页码:'))\n#1 1-5\n#2 6-10\n#3 11-15\n#4 16-20\n#(n-1)*5+1\npages,mod = divmod(len(l),5)\n#print(mod)\nif mod:\n\tpages += 1 #一个多少页\nif page_num >pages or page_num <= 0:\n\tprint('输入有误!')\nelif page_num == pages and mod !=0:\n\tfor i in range(mod):\n\t\tprint(l[(page_num-1)*5 +i].strip())\nelse:\n\tfor i in range(5):\n\t\tprint(l[(page_num-1)*5 +i].strip())\n","sub_path":"fenye.py","file_name":"fenye.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"58297390","text":"import os\nimport glob\nimport exifread\n\nNAME_LENGTH = 10\n\njpg_files = glob.glob('*.jpg')\n\nfor a_file in jpg_files:\n try:\n tags = exifread.process_file(open(a_file, 'rb'))\n except Exception as e:\n print(e)\n print(\"Couldn't open the file, skipping: {}\".format(a_file))\n continue\n uid = str(tags.get('EXIF ImageUniqueID', ''))\n if not len(uid):\n print(\"{} --> UniqueID not present in EXIF\".format(a_file))\n elif len(uid) < 30:\n print(\"{} --> Possibly corrupted EXIF; {}\".format(a_file, uid))\n else:\n new_name = uid[:NAME_LENGTH] + '.jpg'\n print(\"{} --> new name: {}\".format(a_file, new_name))","sub_path":"dockerized-gists/1e3263c4b26c4c1674b8b29274d528d8/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"312647230","text":"\"\"\"Add additional indexing\n\nRevision ID: 06ce82a384b0\nRevises: 3a298d774d3f\nCreate Date: 2017-06-13 14:24:17.794833\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '06ce82a384b0'\ndown_revision = '3a298d774d3f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_index('ix_item_item', 'item', [sa.text('item jsonb_path_ops')], postgresql_using='gin')\n op.create_index('ix_item_item_end-date', 'item', [sa.text(\"(item ->> 'end-date')\")], postgresql_using='btree')\n\n\ndef downgrade():\n op.drop_index('ix_item_item')\n op.drop_index('ix_item_item_end-date')\n","sub_path":"migrations/versions/06ce82a384b0_add_additional_indexing.py","file_name":"06ce82a384b0_add_additional_indexing.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"458965389","text":"# produce allRuns.txt with: RecoLuminosity/LumiDB/scripts/lumiCalc2.py overview -i RecoLuminosity/LumiDB/Cert_190456-198485_8TeV_PromptReco_Collisions12_JSON.txt -o MyAnalysis/ZLumiStudy/test/macros/allRuns.txt\n\nnumbers = []\n\nread_file = open(\"allRuns.txt\", \"r\")\nfor line in read_file:\n\tlineparts = line.split(\",\")\n\trunnumber, fill = lineparts[0].split(\":\")\n\tif runnumber != \"Run\":\n\t\tnumbers.append(runnumber)\n\t\n\nread_file.close()\n\nwrite_file = open(\"runnumberSorted_Number.txt\", \"w\")\nfor n in numbers:\n\twrite_file.write(n + '\\n')\n\nwrite_file.close()\n\n","sub_path":"MyAnalysis/ZLumiStudy/test/macros/save_allRuns.py","file_name":"save_allRuns.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"372064131","text":"import logging\n\n# GAE Patch main():\nfrom common.appenginepatch.main import *\n\n# Our own patches follow:\n\n# Local imports\nimport db_log\nimport db_cache\n\n# Verify that we're running django 1.0 or later\nlogging.info('django.__file__ = %r, django.VERSION = %r',\n django.__file__, django.VERSION)\nassert django.VERSION[0] >= 1, \"This Django version is too old\"\n\ndef log_exception(*args, **kwds):\n \"\"\"Django signal handler to log an exception.\"\"\"\n cls, err = sys.exc_info()[:2]\n logging.exception('Exception in request: %s: %s', cls.__name__, err)\n\n# Log all exceptions detected by Django.\ndjango.core.signals.got_request_exception.connect(log_exception)\n\ndef our_profile_main():\n db_log.clear_callmap()\n db_log.clear_requestmap()\n\n # From appenginepatch\n profile_main()\n\n db_log.log_callmap()\n\n# Turn on logging of profiling results\n#main = our_profile_main\n# No profiling. real_main is from appenginepatch.\nmain = real_main\n\n# Turn on per-request cache of datastore calls\ndb_cache.patch_db_get()\n# assert there was no across-request caching\nassert 0 == db_cache.get_cache_size()\n\n# Turn on logging of datastore calls\n#db_log.patch_appengine()\n\n# We have to be logging debug level to see messages from db_cache or db_log\n#logging.getLogger().setLevel(logging.DEBUG)\n#logging.debug('Set logging level to DEBUG')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main_old.py","file_name":"main_old.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"222631430","text":"import argparse\n\nimport matplotlib\nimport numpy as np\n\nfrom posenet.core.image_reader import ImageReader\nfrom posenet.core.localiser import Localiser\nfrom posenet.utils import progress_bar\nfrom posenet.utils import to_spherical\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--agg', action='store_true')\nparser.add_argument('--model', action='store', required=True)\nparser.add_argument('--dataset', action='store', required=True)\nparser.add_argument('--limits', action='store', nargs=2, type=int, \n required=False, default=[-90,90])\nparser.add_argument('--spacing', action='store', type=int, required=True)\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--save', action='store', required=False)\nargs = parser.parse_args()\n\nif args.agg:\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\ninput_size = 224\ntest_reader = ImageReader(args.dataset, batch_size=1, \n image_size=[input_size, input_size],\n randomise=False)\nn_images = test_reader.total_images()\n\nazimuthal = []\nwith Localiser(args.model) as localiser:\n for i in range(n_images):\n images_feed, labels_feed = test_reader.next_batch()\n gt = {'x': labels_feed[0][0:3], 'q': labels_feed[0][3:7]}\n\n # Make prediction\n predicted = localiser.localise(images_feed)\n x, y, z = predicted['x']\n azimuthal.append(to_spherical(x, y, z)[1]*180/np.pi)\n\n if args.verbose:\n print('-------------{}-------------'.format(i))\n print(predicted)\n else:\n progress_bar(1.0*(i+1)/n_images, 30, text='Localising')\n print('')\n\n\nfig = plt.figure()\nfig.patch.set_facecolor('white')\nx = np.linspace(args.limits[0], args.limits[1], n_images)\nplt.plot(x, azimuthal, color='black')\nplt.plot(x[0::args.spacing], azimuthal[0::args.spacing], 'ro', ms=4)\nplt.xlim([args.limits[0], args.limits[1]])\nplt.ylabel(\"Predicted azimuthal angle\")\nplt.xlabel(\"True azimuthal angle\")\n\nif args.save:\n plt.savefig(args.save, bbox_inches='tight')\nelse:\n plt.show()","sub_path":"plot_interpolation.py","file_name":"plot_interpolation.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"352597054","text":"from pyDatalog import pyDatalog\n# pyDatalog.create_terms(\"\"\"lin_alg,math_logic,depends,\n# logic_prog,comp,prog, S, D, X, Y,\n# depends,requires,need_to_study\n# \"\"\")\n#\n# +(depends(lin_alg,math_logic))\n# +(depends(logic_prog,math_logic))\n# +(depends(comp,lin_alg))\n#\n# +(requires(prog,comp))\n#\n# need_to_study(S,D) <= requires(S,D)\n# need_to_study(S,D) <= need_to_study(S,X) & depends(X,D)\n#\n# print(need_to_study(X, math_logic))\n\npyDatalog.create_terms('X,Y,Z,pow')\nN = 4\nprint(X.in_(range(0,10)) &\n Y.in_(range(0,10)) &\n Z.in_(range(0,20)) &\n (X < Y) &\n (N*Z==pow(X,N)+Y))\n\npyDatalog.create_terms('X,Y,Z,gt')\n\ngt(X, Y) <= (X == Y + 1)\ngt(X, Y) <= gt(X,Z) & (Z == Y + 1)\n\nprint(gt(5,8))\n","sub_path":"python/logic_programming/tasks/log1.py","file_name":"log1.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"408492496","text":"import os\nimport re\nfrom typing import Any, Dict\n\n# Override much of pygments' CMakeLexer.\n# We need to parse CMake syntax definitions, not CMake code.\n\n# For hard test cases that use much of the syntax below, see\n# - module/FindPkgConfig.html (with \"glib-2.0>=2.10 gtk+-2.0\" and similar)\n# - module/ExternalProject.html (with http:// https:// git@; also has command options -E --build)\n# - manual/cmake-buildsystem.7.html (with nested $<..>; relative and absolute paths, \"::\")\n\nfrom pygments.lexers import CMakeLexer\nfrom pygments.token import Name, Operator, Punctuation, String, Text, Comment,\\\n Generic, Whitespace, Number, Keyword, Literal\nfrom pygments.lexer import bygroups, include, RegexLexer\nfrom sphinx.highlighting import lexers\n\n# Notes on regular expressions below:\n# - [\\.\\+-] are needed for string constants like gtk+-2.0\n# - Unix paths are recognized by '/'; support for Windows paths may be added if needed\n# - (\\\\.) allows for \\-escapes (used in manual/cmake-language.7)\n# - $<..$<..$>..> nested occurrence in cmake-buildsystem\n# - Nested variable evaluations are only supported in a limited capacity. Only\n# one level of nesting is supported and at most one nested variable can be present.\n\nCMakeLexer.tokens[\"root\"] = [\n (r'\\b(\\w+)([ \\t]*)(\\()', bygroups(Name.Function, Text, Name.Function), '#push'), # fctn(\n (r'\\(', Name.Function, '#push'),\n (r'\\)', Name.Function, '#pop'),\n (r'\\[', Punctuation, '#push'),\n (r'\\]', Punctuation, '#pop'),\n (r'[|;,.=*\\-]', Punctuation),\n (r'\\\\\\\\', Punctuation), # used in commands/source_group\n (r'[:]', Operator),\n (r'[<>]=', Punctuation), # used in FindPkgConfig.cmake\n (r'\\$<', Operator, '#push'), # $<...>\n (r'<[^<|]+?>(\\w*\\.\\.\\.)?', Name.Variable), # \n (r'(\\$\\w*\\{)([^\\}\\$]*)?(?:(\\$\\w*\\{)([^\\}]+?)(\\}))?([^\\}]*?)(\\})', # ${..} $ENV{..}, possibly nested\n bygroups(Operator, Name.Tag, Operator, Name.Tag, Operator, Name.Tag, Operator)),\n (r'([A-Z]+\\{)(.+?)(\\})', bygroups(Operator, Name.Tag, Operator)), # DATA{ ...}\n (r'[a-z]+(@|(://))((\\\\.)|[\\w.+-:/\\\\])+', Name.Attribute), # URL, git@, ...\n (r'/\\w[\\w\\.\\+-/\\\\]*', Name.Attribute), # absolute path\n (r'/', Name.Attribute),\n (r'\\w[\\w\\.\\+-]*/[\\w.+-/\\\\]*', Name.Attribute), # relative path\n (r'[A-Z]((\\\\.)|[\\w.+-])*[a-z]((\\\\.)|[\\w.+-])*', Name.Builtin), # initial A-Z, contains a-z\n (r'@?[A-Z][A-Z0-9_]*', Name.Constant),\n (r'[a-z_]((\\\\;)|(\\\\ )|[\\w.+-])*', Name.Builtin),\n (r'[0-9][0-9\\.]*', Number),\n (r'(?s)\"(\\\\\"|[^\"])*\"', String), # \"string\"\n (r'\\.\\.\\.', Name.Variable),\n (r'<', Operator, '#push'), # <..|..> is different from \n (r'>', Operator, '#pop'),\n (r'\\n', Whitespace),\n (r'[ \\t]+', Whitespace),\n (r'#.*\\n', Comment),\n # (r'[^<>\\])\\}\\|$\"# \\t\\n]+', Name.Exception), # fallback, for debugging only\n]\n\nclass CMakeCodeLexer(RegexLexer):\n name = 'CMakeCode'\n aliases = ['cmake_code']\n\n tokens = {\n 'root': [\n include('bracket_comment'),\n include('line_comment'),\n include('command_invocation'),\n include('new_line'),\n include('space')\n ],\n 'bracket_comment': [\n (r'(?s)(#\\[(=*)\\[.*?\\]\\2\\])', bygroups(Comment.Multiline, None))\n ],\n 'line_comment': [\n (r'#.*\\n', Comment.Single)\n ],\n 'space': [\n (r'[ \\t]+', Whitespace)\n ],\n 'new_line': [\n (r'\\n', Whitespace)\n ],\n 'escape_sequence': [\n (r'\\\\([^A-Za-z0-9]|[trn])', String.Escape)\n ],\n 'command_invocation' :[\n (r\"\"\"(?ix)([ \\t]*)(\n 
break|cmake_host_system_information|cmake_minimum_required\n |cmake_parse_arguments|cmake_policy|configure_file|continue\n |else|elseif|endforeach|endfunction|endif|endmacro|endwhile\n |execute_process|file|find_file|find_library|find_package\n |find_path|find_programforeach|function|get_cmake_property\n |get_directory_property|get_filename_component|get_property\n |if|include|include_guard|list|macro|mark_as_advanced\n |math|message|option|return|separate_arguments|set\n |set_directory_properties|set_property|site_name\n |string|unset|variable_watch|while|add_compile_definitions\n |add_compile_options|add_custom_command|add_custom_target\n |add_definitions|add_dependencies|add_executable|add_library\n |add_link_options|add_subdirectory|add_test|aux_source_directory\n |build_command|create_test_sourcelist|define_property\n |enable_language|enable_testing|export|fltk_wrap_ui\n |get_source_file_property|get_target_property|get_test_property\n |include_directories|include_external_msproject\n |include_regular_expression|install|link_directories\n |link_libraries|load_cache|project|remove_definitions\n |set_source_files_properties|set_target_properties\n |set_tests_properties|source_group|target_compile_definitions\n |target_compile_features|target_compile_options\n |target_include_directories|target_link_directories\n |target_link_libraries|target_link_options\n |target_precompile_headers|target_sources|try_compile|try_run\n |build_name|exec_program|export_library_dependencies\n |install_files|install_programs|install_targets|load_command\n |make_directory|output_required_files|qt_wrap_cpp|qt_wrap_ui\n |remove|subdir_depends|subdirs|use_mangled_mesa|utility_source\n |variable_requires|write_file)([ \\t]*)(\\()\"\"\", bygroups(Whitespace,\n Name.Function.Magic, Whitespace, Punctuation), 'argument_list'),\n (r'([ \\t]*)([A-Za-z_][A-Za-z0-9_]*)([ \\t]*)(\\()', bygroups(Whitespace,\n Name.Function, Whitespace, Punctuation), 'argument_list')\n ],\n 'argument_list' :[\n include('bracket_comment'),\n include('line_comment'),\n include('bracket_argument'),\n include('generator_convenience'),\n include('quoted_argument'),\n include('unquoted_argument'),\n include('new_line'),\n include('space'),\n (r'\\)', Punctuation, '#pop'),\n ],\n 'bracket_argument': [\n (r'(?s)(\\[(=*)\\[)(.*?)(\\](\\2)\\])',\n bygroups(Punctuation, None, String.Backtick, Punctuation, None)),\n ],\n 'generator_convenience': [\n (r'\\$<', Operator, 'ge_operator'),\n ],\n 'ge_operator': [\n (r'\\$<', Operator, '#push'),\n (r'[^:,> ]+', Operator.Word),\n (r'\\:', Operator, 'ge_argument'),\n (r'>', Operator, '#pop')\n ],\n 'ge_argument': [\n include('generator_convenience'),\n include('quoted_argument'),\n include('unquoted_primitives'),\n (r'[^,>]', Text),\n (r',', Operator),\n (r'>', Operator, '#pop:2')\n ],\n 'variable_convenience': [\n (r'\\b(WIN32|UNIX)\\b', Name.Variable),\n (r'(\\$)((?:ENV|CACHE)?)(\\{)', bygroups(\n Punctuation, Name.Class, Punctuation), 'variable_name'),\n ],\n 'variable_name': [\n include('variable_convenience'),\n (r'[a-zA-Z0-9/_.+-]', Name.Variable),\n (r'\\}', Punctuation, '#pop')\n ],\n 'quoted_argument': [\n (r'\"', String.Double, 'quoted_element')\n ],\n 'quoted_element': [\n include('unquoted_primitives'),\n (r'(?s)(\\\\\\\"|[^\\\"])', String.Double),\n (r'\\\"', String.Double, '#pop'),\n ],\n 'paths': [\n (r'(?:(~?\\/)([\\w.+-/\\\\]*))|(?:((?:\\\\\\\\\\?\\\\)?[a-zA-Z]+\\:)(?:[\\\\\\/]([\\w.+-/\\\\]*)))', Name.Attribute),\n (r'\\w[\\w\\.\\+-]*/[\\w.+-/\\\\]*', Name.Attribute),\n (r'\\s\\.\\.?\\s', 
Name.Attribute),\n (r'(?<=[\\\\\\/])\\*', Name.Attribute),\n (r\"\"\"(?x)(\\b\\w[\\w\\-\\+\\.]*|(?<=[>}\\*])|\\*)\\.(cpp|hpp|tar|txt|in|sh\n |png|gz|h)\\b\"\"\", Name.Attribute),\n ],\n 'numbers': [\n (r'\\b[0-9][0-9\\.]*\\b', Number),\n ],\n 'options': [\n (r'(? Dict[str, Any]:\n cmake_code_lexer = CMakeCodeLexer()\n lexers['CMakeCode'] = cmake_code_lexer\n lexers['cmake_code'] = cmake_code_lexer\n return {\n 'version': '1.0',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n","sub_path":"docs/ext/CMakePygmentsLexer.py","file_name":"CMakePygmentsLexer.py","file_ext":"py","file_size_in_byte":10887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"25165203","text":"# 82: Faça a mesma coisa do 81, mais adicione uma condição elif para uma pessoa idosa que vai ser um desconto,\n# se idade < 65: preço = 10 mude o preço das outras condições,\n# idade < 18: preço = 5\n# senão preço = 5 nesse caso qualquer idade abaixo de 65, e de 18 acima,\n# a pessoa pagará 10 R$ caso contrario serão executados as outras condições, de acordo com a idade explique.\n\nidade = 12\n\nif idade < 4:\n preco = 0\nelif idade < 18:\n preco = 5\nelif idade < 65:\n preco = 10\nelse:\n preco = 5\n\nprint(\"Seu custo de admissão é R$ \" + str(preco) + \", para entrar.\")\n\n'''\nO if é sequencial, se um valor do if é falso, ele checa o primeiro elif, se for falso ele vai para o proximo,\nate chegar no else, caso tudo seja falso. Mas se um deles for True, ele executa o codigo e nao olha os proximos\nelif ou else.\n\nNesse caso se é menor que 4 anos, entra no preco = 0 e imprime a mensagem\nSe menor de 18 entra no primeiro elif, preco = 5 e imprime a mensagem\nSe maior de 18 e menor de 65 entra no segundo elif, preco = 10 e imprime a mensagem\nSe maior ou igual a 65 entra no else, preco = 0 e imprime a mensagem\n'''","sub_path":"python_crash_course/chapter_05_if/desafio_0082.py","file_name":"desafio_0082.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"374599622","text":"#Tkinter\r\n\r\nfrom tkinter import *\r\n\r\nroot=Tk()\r\n\r\nlblFirst = Label(root, text=\"First\")\r\nlblSecond = Label(root, text=\"Second\")\r\nlblThird = Label(root, text=\"Third\")\r\nlblFourth = Label(root, text=\"Fourth\")\r\n\r\n#lblFirst.pack()\r\n#lblSecond.pack()\r\n#lblThird.pack()\r\n#lblFourth.pack()\r\n\r\n#lblFirst.grid(row=0,column=0)\r\n#lblSecond.grid(row=0,column=1)\r\n#lblThird.grid(row=1,column=0)\r\n#lblFourth.grid(row=1,column=1)\r\n\r\nlblFirst.place(in_=root, x=100,y=100)\r\nlblSecond.place(in_=root, x=100,y=150)\r\nlblThird.place(in_=root, x=100,y=200)\r\nlblFourth .place(in_=root, x=100,y=250)\r\n","sub_path":"20-Tkinter/tkinter-1.py","file_name":"tkinter-1.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"44026341","text":"# Source: https://github.com/Tarpelite/UniNLP/blob/176c2a0f88c8054bf69e1f92693d353737367c34/transformers/modeling_bert.py#L2555\n# class BertForParsingV2\n\nimport logging\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nfrom transformers import BertPreTrainedModel, BertModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass BertForParsing(BertPreTrainedModel):\n \"\"\" Implements Dozat&Maning DEEP BIAFFINE ATTENTION FOR NEURAL DEPENDENCY PARSING\n https://arxiv.org/pdf/1611.01734.pdf\n \"\"\"\n\n def __init__(self, config, mlp_dim, num_labels):\n super(BertForParsing, self).__init__(config)\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.mlp_dim = mlp_dim\n self.num_labels = num_labels\n\n self.mlp_arc_head = nn.Linear(config.hidden_size, mlp_dim) # Applies a linear transformation to the incoming data: :math:`y = xA^T + b`\n self.mlp_arc_dep = nn.Linear(config.hidden_size, mlp_dim)\n self.biaffine_classifier_arcs = BiAffine(mlp_dim, 1)\n\n self.mlp_label_head = nn.Linear(config.hidden_size, mlp_dim)\n self.mlp_label_dep = nn.Linear(config.hidden_size, mlp_dim)\n self.biaffine_classifier_labels = BiAffine(mlp_dim, num_labels)\n\n self.init_weights()\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n heads=None,\n labels=None):\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds\n )\n\n sequence_output = outputs[0]\n sequence_output = self.dropout(sequence_output)\n\n # Dozat&Manning: \"rather than using the top recurrent states of the\n # LSTM in the biaffine transformations, we first put them\n # through MLP operations that reduce their dimensionality.\"\n s_arc_head = self.mlp_arc_head(sequence_output)\n s_arc_dep = self.mlp_arc_dep(sequence_output)\n logits_arc = self.biaffine_classifier_arcs(s_arc_head, s_arc_dep) # [batch_size, seq_len, seq_len]\n logits_arc = logits_arc.transpose(-1, -2)\n\n s_label_head = self.mlp_label_head(sequence_output)\n s_label_dep = self.mlp_label_dep(sequence_output) # TODO lpmayos: I change this to mlp_label_dep (it was mlp_label_head)\n logits_label = self.biaffine_classifier_labels(s_label_head, s_label_dep) # [batch_size, num_labels, seq_len, seq_len]\n logits_label = logits_label.transpose(-1, -3) # [batch_size, seq_len, seq_len, num_labels]\n\n preds = torch.argmax(logits_arc, dim=1).unsqueeze(-1) # [batch_size, seq_len, 1]\n indices = preds.unsqueeze(-1).expand(preds.shape + (self.num_labels,)) # [batch_size, seq_len, 1 , num_labels]\n logits_label = torch.gather(logits_label, -2, indices).squeeze(-2) # [batch_size, seq_len,num_labels]\n\n outputs = (logits_arc, logits_label) + outputs[2:]\n\n if heads is not None and labels is not None:\n loss_fct = CrossEntropyLoss()\n\n logits_arc = logits_arc.contiguous().view(-1, logits_arc.size(-1))\n heads = heads.view(-1)\n loss = loss_fct(logits_arc, heads)\n\n logits_label = logits_label.contiguous().view(-1, self.num_labels)\n labels = labels.view(-1)\n loss_labels = loss_fct(logits_label, labels)\n\n loss = loss + loss_labels\n outputs = (loss,) + outputs\n\n return outputs\n\n\nclass BiAffine(nn.Module):\n \"\"\"Biaffine attention layer.\"\"\"\n\n def __init__(self, input_dim, output_dim):\n super(BiAffine, self).__init__()\n self.input_dim = input_dim\n self.output_dim = 
output_dim\n self.U = nn.Parameter(torch.FloatTensor(output_dim, input_dim, input_dim))\n nn.init.xavier_uniform_(self.U)\n\n def forward(self, Rh, Rd):\n Rh = Rh.unsqueeze(1)\n Rd = Rd.unsqueeze(1)\n S = Rh @ self.U @ Rd.transpose(-1, -2)\n return S.squeeze(1)\n\n\nclass BertForSRL(BertPreTrainedModel):\n\n def __init__(self, config):\n super(BertForSRL, self).__init__(config)\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n self.num_labels = config.num_labels\n self.num_BIO_labels = 3\n self.num_CRO_labels = 3\n self.num_SRL_labels = 22\n\n self.BIO_classifier = nn.Bilinear(config.hidden_size, 1, self.num_BIO_labels)\n self.CRO_classifier = nn.Bilinear(config.hidden_size, self.num_BIO_labels, self.num_CRO_labels)\n self.SRL_classifier = nn.Bilinear(config.hidden_size, self.num_CRO_labels, self.num_SRL_labels)\n self.label_classifier = nn.Bilinear(config.hidden_size, self.num_SRL_labels, self.num_labels)\n\n self.init_weights()\n\n def forward(self,\n input_ids=None,\n verb_seq_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n label_BIO_ids=None,\n label_CRO_ids=None,\n label_SRL_ids=None):\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds\n )\n\n sequence_output = outputs[0]\n sequence_output = self.dropout(sequence_output)\n\n BIO_logits = self.BIO_classifier(sequence_output, verb_seq_ids.unsqueeze(-1).float())\n CRO_logits = self.CRO_classifier(sequence_output, BIO_logits)\n SRL_logits = self.SRL_classifier(sequence_output, CRO_logits)\n\n logits = self.label_classifier(sequence_output, SRL_logits)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n\n loss_BIO = loss_fct(BIO_logits.view(-1, self.num_BIO_labels), label_BIO_ids.view(-1))\n loss_CRO = loss_fct(CRO_logits.view(-1, self.num_CRO_labels), label_CRO_ids.view(-1))\n loss_SRL = loss_fct(SRL_logits.view(-1, self.num_SRL_labels), label_SRL_ids.view(-1))\n\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n loss = loss + loss_BIO + loss_CRO + loss_SRL\n outputs = (loss,) + outputs\n\n return outputs\n","sub_path":"examples/modeling_bert.py","file_name":"modeling_bert.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"223457263","text":"import unittest\nimport algo_ds.heap as heap\nfrom algo_ds.heap import MaxHeap, MinHeap\n\n\nclass TestHeap(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_max_heapify(self):\n heaps = [16, 4, 10, 14, 7, 9, 3, 2, 8, 1]\n heap.max_heapify(heaps, 1)\n heaps1 = [16, 14, 10, 8, 7, 9, 3, 2, 4, 1]\n self.assertEqual(heaps1, heaps)\n\n def test_build_max_heap(self):\n expected_heaps = [16, 14, 10, 8, 7, 9, 3, 2, 4, 1]\n heaps = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7]\n heap.build_max_heap(heaps)\n self.assertEqual(expected_heaps, heaps)\n\n def test_max_heap(self):\n arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n h = MaxHeap()\n for i in arr:\n h.insert(i, i)\n\n expected_heaps = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n for i in expected_heaps:\n self.assertEqual(i, h.extract_max(), f\"For i:{i}, Got unexpected maximum value\")\n\n def test_increase_key(self):\n arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n h = MaxHeap()\n for i in arr:\n h.insert(i, i)\n\n # Increase key 1 to 20\n h.increase_key(1, 20)\n\n expected_heaps = [1, 10, 9, 8, 7, 6, 5, 4, 3, 2]\n for i in expected_heaps:\n self.assertEqual(i, h.extract_max(), f\"Got unexpected maximum value\")\n\n def test_max_heap_for_custom_object(self):\n\n jobs = [Job(i, \"Job\" + str(i)) for i in range(1, 11)]\n\n h = MaxHeap()\n for job in jobs:\n h.insert(job, job._id)\n\n expected_jobs = [jobs[i] for i in range(9, -1, -1)]\n for job in expected_jobs:\n self.assertEqual(job, h.extract_max(), f\"Got unexpected maximum value\")\n\n def test_min_heap(self):\n arr = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n\n h = MinHeap()\n for i in arr:\n h.insert(i, i)\n\n expected_heaps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for i in expected_heaps:\n self.assertEqual(i, h.extract_min(), f\"For i:{i}, Got unexpected minimum value\")\n\n def test_get_for_min_heap(self):\n arr = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n\n h = MinHeap()\n for i in arr:\n h.insert(i, i)\n\n self.assertEqual(10, h.get(10), \"Got an unexpected value\")\n self.assertEqual(5, h.get(5), \"Got an unexpected value\")\n\n def test_increase_key_with_min_heap(self):\n arr = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n\n h = MinHeap()\n for i in arr:\n h.insert(i, i)\n\n # Increase key 1 to 20m now top item will be 2, as 1 has been moved\n # to last position\n h.increase_key(1, 20)\n\n expected_heaps = [2, 3, 4, 5, 6, 7, 8, 9, 10, 1]\n for i in expected_heaps:\n self.assertEqual(i, h.extract_min(), f\"Got unexpected minimum value\")\n\n def test_min_heap_for_custom_object(self):\n\n jobs = [\n Job(4, \"Job4\"),\n Job(3, \"Job3\"),\n Job(2, \"Job2\"),\n Job(1, \"Job1\")\n ]\n\n h = MinHeap()\n for job in jobs:\n h.insert(job, job._id)\n\n expected_jobs = [\n Job(1, \"Job1\"),\n Job(2, \"Job2\"),\n Job(3, \"Job3\"),\n Job(4, \"Job4\")\n ]\n\n for job in expected_jobs:\n self.assertEqual(job, h.extract_min(), f\"Got unexpected minimum value\")\n\n\nclass Job:\n def __init__(self, id, name):\n self._id = id\n self._name = name\n\n def priority(self):\n return self._id\n\n def __eq__(self, other):\n return (self._id, self._name) == (other._id, other._name)\n\n def __str__(self):\n return f\"Job[id={self._id}, name={self._name}\"\n\n def __repr__(self):\n return str(self)\n","sub_path":"algo-ds-py/tests/test_heap.py","file_name":"test_heap.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"331085778","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass FastText(nn.Module):\n def __init__(self, vocab_size, class_num, dropout=0.5, embed_dim=300, hidden_size=256, ngram_size=200000):\n super(FastText, self).__init__()\n\n self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_dim, padding_idx=0)\n self.embedding_bigram = nn.Embedding(num_embeddings=ngram_size, embedding_dim=embed_dim)\n self.embedding_trigram = nn.Embedding(num_embeddings=ngram_size, embedding_dim=embed_dim)\n self.dropout = nn.Dropout(dropout)\n self.fc1 = nn.Linear(embed_dim * 3, hidden_size)\n self.fc2 = nn.Linear(hidden_size, class_num)\n\n def forward(self, x):\n word_feature = self.embedding(x[0])\n bigram_feature = self.embedding_bigram(x[1])\n trigram_feature = self.embedding_trigram(x[2])\n x = torch.cat((word_feature, bigram_feature, trigram_feature), -1)\n x = x.mean(dim=1)\n x = self.dropout(x)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n return x\n","sub_path":"samples_pipelines/fasttext-sentence-classification/deployment/common/FastText.py","file_name":"FastText.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"221722107","text":"from django.conf.urls.defaults import *\nfrom .views import UserDownloadsListView, download_proxy_view\nfrom .views.paywall import PaywallView\n\n\nurlpatterns = patterns(\n '',\n url(r'^my-downloads$', UserDownloadsListView.as_view(), name=\"lfsd_library\"),\n url(r'^my-downloads/(?P\\d+)$', download_proxy_view, name=\"lfsd_download_proxy\"),\n url(r'^product/(?P[-\\w]*)/digital_order$', PaywallView.as_view(), name=\"lfsd_product_paywall\"),\n url(r'^product/(?P[-\\w]*)/thanks$', \"thanks\", name=\"lfsd_thanks\"),\n url(r'^product/(?P[-\\w]*)/notnow$', \"notnow\", name=\"lfsd_no_thanks\"),\n)\n","sub_path":"lfs_downloads/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"596668623","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 2 11:17:32 2021\r\nAuthor: Adam Griffin, UKCEH\r\nProject: AQUACAT\r\nScript to summarise EC events.\r\n\"\"\"\r\nimport os\r\nimport netCDF4 as nc\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport sys\r\nimport yaml\r\nimport time\r\nimport gc\r\n\r\n# def season(x):\r\n # return [\"DJF\",\"MAM\",\"JJA\",\"SON\"][((x // 90) % 4)]\r\n\r\n#=rcm = \"10\"\r\n#period = \"198012_201011\"\r\n\r\nrcm = sys.argv[1]\r\n\r\nif sys.argv[2] == \"present\":\r\n period = \"198012_201011\"\r\nelse:\r\n period = \"205012_208011\"\r\n\r\nprint(f\"Running RCM {rcm} for {period}.\")\r\n\r\nRCMS = [\"01\",\"04\",\"05\",\"06\",\"07\",\"08\",\"09\",\"10\",\"11\",\"12\",\"13\",\"15\"]\r\nperiods = [\"198012_201011\",\"205012_208011\"]\r\n\r\n# CHANGE THIS TO THE TOP LEVEL OF THE FOLDER THE CSVs ARE IN\r\ntoplevel = r\"/prj/aquacat/Data\"\r\n\r\n# CHANGE THIS TO THE TOP LEVEL OF THE FOLDER THE NETCDFs ARE IN\r\noutlevel = toplevel #r'S:/Data' #\r\n\r\n# CHANGE THIS TO WHERE THE hasData files are, they should exist in the toplevel folder.\r\nrn = pd.read_csv(f\"{toplevel}/hasData_primary.csv\")\r\nrnreg = pd.read_csv(f\"{toplevel}/hasData_Regions.csv\")\r\n\r\nmethod=\"OBS\"\r\n\r\nregional = False\r\n\r\nif regional:\r\n subfold='/NW'\r\n fileinfix = 'NW_POT2_pc01'\r\n rn = rn[rnreg.REGION==\"NW\"]\r\n NH = 1437\r\nelse:\r\n subfold=''\r\n fileinfix = 'POT2_pc01'\r\n NH = 19914\r\n\r\nif len(sys.argv) > 3:\r\n if sys.argv[3] == \"FF\":\r\n subfold = '_FF'\r\n\r\nncpath = (f\"{outlevel}/RCM{rcm}_{period}{subfold}/event{method}_\"\r\n f\"POT2_pc01_RCM{rcm}_{period}.nc\")\r\nncfile = nc.Dataset(ncpath, mode='r')\r\n\r\nparam_path = (f\"{outlevel}/RCM{rcm}_{period}{subfold}/paramtableG\"\r\n f\"_POT2_RCM{rcm}_{period}.csv\")\r\n\r\nparam_table = pd.read_csv(param_path)\r\n\r\nthresh_path = f\"{outlevel}/RCM{rcm}_{period}{subfold}/threshMat_RCM{rcm}_{period}.csv\"\r\n\r\nthreshvec = pd.read_csv(thresh_path).iloc[:,1]\r\nif regional:\r\n threshvec = threshvec[rnreg.REGION == \"NW\"]\r\n\r\ninit_path = (f\"{outlevel}/RCM{rcm}_{period}{subfold}/initialSummary_RCM{rcm}_{period}.csv\")\r\ninit_table = pd.read_csv(init_path)\r\n\r\nsumm_path = (f\"{outlevel}/RCM{rcm}_{period}{subfold}/eventSumm\"\r\n f\"_OBS_POT2_pc01_RCM{rcm}_{period}.csv\")\r\n#summtable = pd.read_csv(summ_path) # Done in R afterwards\r\n\r\n\r\nsummtable_out = pd.DataFrame(columns=[\"eventNumber\", \"eventDay\", \"eventLength\",\r\n \"area\",\"peakA\", \"peakA_mid\", \"peakD\", \"season\",\r\n \"nclusters\",\"peakyness\"])\r\n\r\neventNo = list(ncfile.variables[\"eventNo\"][:])\r\nNE = np.sum([i > 0 for i in eventNo])\r\n\r\navec_all = ncfile.variables['ape'][:,:]\r\navec_mid = ncfile.variables['ape_mid'][:,:]\r\n\r\nprint(\"Setup complete\")\r\n\r\nstart_time = time.time()\r\nfor i in range(NE):\r\n if (i < 10) or (i % 1000) == 0:\r\n print(i)\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n start_time = time.time()\r\n ni = eventNo[i]\r\n vvec = 0\r\n avec = min(avec_all[i,:])\r\n amid = min(avec_mid[i,:])\r\n dvec = 0\r\n D = init_table.iloc[ni-1,:] # Done in R afterwards\r\n #seas = init_table.iloc[ni-1, 3]) # Done in R afterwards\r\n summtable_out.loc[i] = [ni, D[0], D[1],\r\n vvec, avec, amid, dvec,\r\n D[3], 0, 0]\r\n\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\nprint(\"avec amid done\")\r\n\r\ndel avec_all\r\ndel avec_mid\r\ngc.collect()\r\n\r\nvvec_all = ncfile.variables['flow'][:,:]\r\ndvec_all = 
ncfile.variables['dpe'][:,:]\r\n\r\nfor i in range(NE):\r\n if (i < 10) or (i % 1000) == 0:\r\n print(i)\r\n print(\">>> %s seconds >>>\" % (time.time() - start_time))\r\n start_time = time.time()\r\n ni = eventNo[i]\r\n summtable_out.iloc[i,3] = sum(vvec_all[i,:] > threshvec)\r\n summtable_out.iloc[i,6] = min(dvec_all[i,:])\r\n\r\nprint(\">>> %s seconds >>>\" % (time.time() - start_time))\r\nprint(\"vvec dvec done\")\r\n\r\n\r\nncfile.close()\r\n\r\nyaml_path = f\"{outlevel}/RCM{rcm}_{period}{subfold}/settings.yaml\"\r\n\r\nsummpath_out = (f\"{outlevel}/RCM{rcm}_{period}{subfold}/eventSumm_\"\r\n f\"{method}_POT2_pc01_RCM{rcm}_{period}.csv\")\r\n\r\nsummtable_out.to_csv(summpath_out, index=False) \r\n\r\nwith open(yaml_path) as ym:\r\n list_doc = yaml.safe_load(ym)\r\n\r\nlist_doc['OBSsumm'] = True\r\nlist_doc['propsumm'] = \"113bN.py\"\r\n\r\nwith open(yaml_path, 'w') as ym:\r\n yaml.dump(list_doc, ym, sort_keys=False)\r\n \r\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\r\nprint(\"Files saved and YAML updated. End.\")\r\n","sub_path":"113bN_proper_event_summary.py","file_name":"113bN_proper_event_summary.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"571181943","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport codecs\nimport csv\nimport importlib\nimport os\nimport re\nimport sys\n\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\nfrom django.core.management.base import BaseCommand\nfrom django.template.defaultfilters import slugify\nfrom opencivicdata.models import Membership\nfrom six import StringIO\nfrom six.moves.urllib.parse import urlsplit\n\nfrom reports.models import Report\nfrom reports.utils import get_offices, get_personal_url, module_name_to_metadata, remove_suffix_re\n\n\nclass Command(BaseCommand):\n help = 'Generates and uploads CSV files to S3'\n\n def handle(self, *args, **options):\n def save(key, body):\n k = Key(bucket)\n k.key = key\n k.set_contents_from_string(body)\n k.set_acl('public-read')\n\n sys.path.append(os.path.abspath('scrapers'))\n\n bucket = S3Connection().get_bucket('represent.opennorth.ca')\n\n names = {\n 'Parliament of Canada': 'house-of-commons',\n 'Legislative Assembly of Alberta': 'alberta-legislature',\n 'Legislative Assembly of British Columbia': 'bc-legislature',\n 'Legislative Assembly of Manitoba': 'manitoba-legislature',\n 'Legislative Assembly of New Brunswick': 'new-brunswick-legislature',\n 'Newfoundland and Labrador House of Assembly': 'newfoundland-labrador-legislature',\n 'Nova Scotia House of Assembly': 'nova-scotia-legislature',\n 'Legislative Assembly of Ontario': 'ontario-legislature',\n 'Legislative Assembly of Prince Edward Island': 'pei-legislature',\n 'Assemblée nationale du Québec': 'quebec-assemblee-nationale',\n 'Legislative Assembly of Saskatchewan': 'saskatchewan-legislature',\n }\n\n default_headers = [\n 'District name',\n 'Primary role',\n 'Name', # not in CSV schema\n 'First name',\n 'Last name',\n 'Gender',\n 'Party name',\n 'Email',\n 'Photo URL',\n 'Source URL',\n 'Website',\n 'Facebook',\n 'Instagram',\n 'Twitter',\n 'LinkedIn',\n 'YouTube',\n ]\n office_headers = [\n 'Office type', # not in CSV schema\n 'Address', # not in CSV schema\n 'Phone',\n 'Fax',\n ]\n\n all_rows = []\n max_offices_count = 0\n\n reports = Report.objects.filter(exception='').exclude(module__endswith='_candidates').exclude(module__endswith='_municipalities').order_by('module')\n for report in reports:\n try:\n metadata = module_name_to_metadata(report.module)\n\n rows = []\n offices_count = 0\n\n # Exclude party memberships.\n queryset = Membership.objects.filter(organization__jurisdiction_id=metadata['jurisdiction_id']).exclude(role__in=('member', 'candidate'))\n for membership in queryset.prefetch_related('contact_details', 'person', 'person__links', 'person__sources'):\n person = membership.person\n\n try:\n party_name = Membership.objects.get(organization__classification='party', role='member', person=person).organization.name\n except Membership.DoesNotExist:\n party_name = None\n\n facebook = None\n instagram = None\n linkedin = None\n twitter = None\n youtube = None\n for link in person.links.all():\n domain = '.'.join(urlsplit(link.url).netloc.split('.')[-2:])\n if domain in ('facebook.com', 'fb.com'):\n facebook = link.url\n elif domain == 'instagram.com':\n instagram = link.url\n elif domain == 'linkedin.com':\n linkedin = link.url\n elif domain == 'twitter.com':\n twitter = link.url\n elif domain == 'youtube.com':\n youtube = link.url\n\n if person.gender == 'male':\n gender = 'M'\n elif person.gender == 'female':\n gender = 'F'\n else:\n gender = None\n\n if ' ' in person.name:\n first_name, last_name = 
person.name.rsplit(' ', 1)\n else:\n first_name, last_name = None, person.name\n\n # @see https://represent.opennorth.ca/api/#fields\n sources = list(person.sources.all())\n row = [\n remove_suffix_re.sub('', membership.post.label), # District name\n membership.role, # Elected office\n person.name, # Name\n first_name, # First name\n last_name, # Last name\n gender, # Gender\n party_name, # Party name\n next((contact_detail.value for contact_detail in membership.contact_details.all() if contact_detail.type == 'email'), None), # Email\n person.image, # Photo URL\n sources[-1].url if len(sources) > 1 else None, # Source URL\n get_personal_url(person), # Website\n facebook, # Facebook\n instagram, # Instagram\n twitter, # Twitter\n linkedin, # LinkedIn\n youtube, # YouTube\n ]\n\n offices = get_offices(membership)\n if len(offices) > offices_count:\n offices_count = len(offices)\n\n for office in offices:\n for key in ('type', 'postal', 'tel', 'fax'):\n row.append(office.get(key))\n\n # If the person is associated to multiple boundaries.\n if re.search(r'\\AWards\\b', membership.post.label):\n for district_id in re.findall(r'\\d+', membership.post.label):\n row = row[:]\n row[0] = 'Ward %s' % district_id\n rows.append(row)\n else:\n rows.append(row)\n\n rows.sort()\n\n headers = default_headers[:]\n for _ in range(offices_count):\n headers += office_headers\n\n name = metadata['name']\n if name in names:\n slug = names[name]\n else:\n slug = slugify(name)\n\n io = StringIO()\n body = csv.writer(io)\n body.writerow(headers)\n body.writerows(rows)\n save('csv/%s.csv' % slug, codecs.encode(io.getvalue(), 'windows-1252'))\n\n if offices_count > max_offices_count:\n max_offices_count = offices_count\n\n for row in rows:\n row.insert(0, name)\n all_rows.append(row)\n except ImportError:\n report.delete() # delete reports for old modules\n\n headers = ['Organization'] + default_headers\n for _ in range(max_offices_count):\n headers += office_headers\n\n io = StringIO()\n body = csv.writer(io)\n body.writerow(headers)\n body.writerows(all_rows)\n save('csv/complete.csv', codecs.encode(io.getvalue(), 'windows-1252'))\n","sub_path":"reports/management/commands/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"140370966","text":"import selenium\nimport seleniumwire\nfrom seleniumwire import webdriver # Import from seleniumwire\nfrom ast import literal_eval\nimport json\n#from selenium.webdriver.chrome.options import Options\ndriver=''\nreq=[]\nUTILS=open(\"utils.js\").read()\nATOMS=open(\"atoms.js\").read()\nKAR=open(\"kar.js\").read()\nJQUERY=open(\"jquery-3.2.1.min.js\").read()\nSELENIUM_BROWSER_BOT=open(\"selenium-browserbot.js\").read()\nSELENIUM_API=open(\"selenium-api.js\").read()\nSELENIUM_COMMAND=open(\"selenium-commandhandlers.js\").read()\nUSER_EXTENSION=open(\"user_extension.js\").read()\nBOWSER=open(\"bowser.js\").read()\nDOM_COLLECTOR=open(\"dom_collector.js\").read()\nKU_RECORDER=open(\"ku-recorder.js\").read()\nKU_LOCATOR_BUILDER=open(\"ku-locatorBuilders.js\").read()\nNEIGHBOUR_GEN=open(\"neighbor-xpaths-generator.js\").read()\nNEIGHBOUR_MIN=open(\"neighbor-xpaths-generator.min.js\").read()\nrecordJs2=open(\"recordJS.js\").read()\nsmart_xpath=open(\"smart_xpath.js\").read()\nrecordJs=JQUERY+NEIGHBOUR_MIN+NEIGHBOUR_GEN+UTILS+SELENIUM_COMMAND+SELENIUM_BROWSER_BOT+ATOMS+KU_LOCATOR_BUILDER+smart_xpath+open(\"recordJS2.js\").read()\n\next_tuple=('.mp3', '.avi','.js','.css','.less','.scss','.png','.ico','.txt','.ini','.jpg','.mp4','xls','.doc','xlsx','.ppt','.pptx','.docx','.json','.java','.as','.mx','.asp','.ts','.jsp','.svg','.php','.xml','.xaml',\n '.yml' ,'.woff2','.jpeg')\ndef check_valid_api(request):\n global req\n \n if not any(word in request.path for word in ext_tuple):\n return True\n else:\n #print(str(request.path))\n return False\ndef locate(xpath):\n\n global driver\n try:\n element=driver.find_element_by_xpath(xpath)\n driver.execute_script(\"arguments[0].style.border = '0.2em solid #0d274c';\",element)\n return \"PASS\"\n except:\n return \"FAIL\"\n\ndef initiate_driver(url):\n global driver\n #options = webdriver.ChromeOptions()\n #options.add_experimental_option('debuggerAddress', 'localhost:9014')\n driver = webdriver.Chrome(executable_path =\"chromedriver.exe\",seleniumwire_options={'port': 9014})\n driver.get(url)\ndef Merge(dict1, dict2): \n return(dict2.update(dict1)) \ndef Q_recorder():\n\n global driver\n global recordJs\n Xpath=None\n data={}\n try:\n Xpath=driver.execute_script(recordJs)\n except selenium.common.exceptions.NoSuchWindowException:\n driver.switch_to_window(driver.window_handles[-1])\n if Xpath==\"HIDDEN\":\n driver.switch_to_window(driver.window_handles[-1])\n return Xpath\n\n\n \n \n\n \n \n \n \ndef Q_recorder_api():\n global req\n\n global driver\n global recordJs2\n \n data={}\n \n \n try:\n\n for request in driver.requests: \n \n \n if check_valid_api(request):\n if (request.body is None):\n BODY=\"{}\"\n \n else:\n JSON=request.body.decode('utf-8',errors='ignore').replace(\"'\", '\"')\n try:\n \n BODY=json.dumps(json.loads(JSON),indent=4)\n except:\n \n BODY=JSON\n\n data[\"METHOD\"]=request.method\n data[\"PATH\"]=request.path\n \n data[\"REQUEST\"]=\"Headers: \"+json.dumps(dict(request.headers),indent=4)+\"
Body: \"+BODY\n if (request.response is None):\n data[\"RESPONSE\"]='{\"status_code\":\"\",\"reason\":\"\",\"body\":\"\"}'\n \n else:\n if (request.response.body is None):\n RES_BODY=\"{}\"\n else:\n try:\n RES_BODY=request.response.body.decode('utf-8',errors='ignore')\n except:\n RES_BODY=request.response.body\n data[\"RESPONSE\"]=json.dumps({\"status_code\":request.response.status_code,\"reason\":request.response.reason,\"body\":RES_BODY},indent=4)\n else:\n pass\n \n except seleniumwire.proxy.client.ProxyException:\n driver.switch_to_window(driver.window_handles[-1])\n \n print(data)\n return data\ndef getCurrentUrl():\n global driver\n return driver.current_url\ndef stop():\n global driver\n JS='''\n\n\n\ndocument.body.addEventListener('mouseover', MouseInListenerFunction,true);\n \n\n\n function MouseInListenerFunction(event){\n event.target.style.border = '';\n \n }\n \n\n\n '''\n driver.execute_script(JS)\n \n return \"STOPPED\"\ndef stop_api():\n global req\n req=[]\n global driver\n JS='''\n\n\n \n\nlet oldXHROpen = '';\n\n \n\n\n '''\n driver.execute_script(JS)\n return \"STOPPED\"\ndef main():\n global driver\n \n \n #driver.switch_to.window()\n JS=open('get_ALL2.js').read()\n event_attributes=open('event_attributes.txt').read().split(\", \")\n \n windows=driver.window_handles\n \n \n for handle in windows:\n driver.switch_to.window(handle)\n \n if(str(driver.title).strip()==\"\"):\n windows.remove(handle)\n \n print(windows)\n if(len(windows)==0):\n return \"NOWINDOW\"\n driver.switch_to.window(windows[-1])\n A=driver.execute_script(JS,event_attributes)\n \n #JS11=open('smart_xpath.js').read()\n #driver.execute_script(JS11)\n #print(A)\n return A\ndef pageLocatorCreation(name,xpath):\n \n i = 0 \n L = \"import org.openqa.selenium.WebElement;\\n\" \n L+=\"import org.openqa.selenium.support.FindBy;\\n\"\n L+=\"import org.openqa.selenium.support.PageFactory;\\n\\n\"\n L+=\"public class PageLocators {\\n\"\n length = len(name)\n while i < length:\n variableName=name[i].replace(' ','_')\n L+=\"\\t@FindBy(xpath=\\\"\"+ xpath[i] + \"\\\")\\n\"\n L+=\"\\tpublic WebElement \" + variableName + \";\\n\\n\"\n i = i + 1\n L+=\"\\tpublic PageLocators()\\n\\t{\\n\"\n L+=\"\\tPageFactory.initElements(/*Please specify driver*/,this);\\n\\t}\\n}\"\n print(L)\n return L\ndef pageActionCreation(tag,name,xpath):\n objName=\"obj_PageLocators\"\n L=\"import PageLocators.PageLocators;\\n\\n\\n\"\n L+=\"public class PageActions {\\n\\n\"\n L+=\"\\tPageLocators\"+\" \"+objName+\" =new PageLocators();\\n\\n\"\n print(tag)\n for t in range(0,len(tag)):\n print(t,tag[t])\n \n if(tag[t]==\"INPUT\"or tag[t]==\"TEXTAREA\"):\n L+=\"\\tpublic void method_\"+name[t]+\"(String data) throws InterruptedException(){\\n\"\n L+=\"\\t\\t\"+objName+\".\"+name[t]+\".sendKeys(data);\\n\"\n L+=\"\\t}\\n\\n\"\n elif(tag[t]==\"SELECT\"):\n L+=\"\\tpublic void method_\"+name[t]+\"(value) throws InterruptedException(){\\n\"\n L+=\"\\t\\tSelect dropdown= new Select(\"+objName+\".\"+name[t]+\");\\n\"\n L+=\"\\t\\tdropdown.selectByVisibleText(value);\\n\"\n L+=\"\\t}\\n\\n\"\n \n \n elif(tag[t]==\"BUTTON\" or tag[t]==\"RADIO\" or tag[t]==\"CHECKBOX\" or tag[t]==\"A\" or tag[t]==\"LABEL\"):\n\n L+=\"\\tpublic void method_\"+name[t]+\"() throws InterruptedException(){\\n\"\n L+=\"\\t\\t\"+objName+\".\"+name[t]+\".click();\\n\"\n \n L+=\"\\t}\\n\\n\"\n else:\n L+=\"\\tpublic void method_\"+name[t]+\"() throws InterruptedException(){\\n\"\n L+=\"\\t\\t\"+objName+\".\"+name[t]+\".getText();\\n\"\n L+=\"\\t}\\n\\n\"\n \n L+=\"}\"\n 
return L\n\ndef quit():\n global driver\n driver.quit()\n","sub_path":"PL_PA.py","file_name":"PL_PA.py","file_ext":"py","file_size_in_byte":7415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"142574255","text":"#!/usr/bin/python3\n\"\"\"\nscript that distributes an archive to a server\n\"\"\"\n\nfrom fabric.api import put, run, env, local\nfrom os.path import exists\nfrom datetime import datetime\nimport os\nenv.hosts = ['35.185.121.162', '54.167.76.210']\n\n\ndef do_pack():\n \"\"\"Archive the content of web_static folder\"\"\"\n if not os.path.exists('versions'):\n os.mkdir('versions')\n try:\n time = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n path = \"versions/web_static_{}.tgz\".format(time)\n tar = \"tar -cvzf {} web_static/*\".format(path)\n local(\"mkdir -p versions\")\n local(tar)\n return path\n except Exception as e:\n return None\n\n\ndef do_deploy(archive_path):\n \"\"\"Distribute an Archive to servers\"\"\"\n if not os.path.exists(archive_path):\n return false\n try:\n file_name = archive_path.split(\"/\")[-1].split(\".\")[0]\n put(archive_path, '/tmp/')\n run('sudo mkdir -p /data/web_static/releases/{}'.format(file_name))\n run('sudo tar -xzf /tmp/{}.tgz -C /data/web_static/releases/{}'.\n format(file_name, file_name))\n run('sudo mv /data/web_static/releases/{}/web_static/* \\\n /data/web_static/releases/{}/'.\n format(file_name, file_name))\n run('sudo rm -rf /data/web_static/releases/{}/web_static'\n .format(file_name))\n run('sudo rm -rf /tmp/{}.tgz'.format(file_name))\n run('sudo rm -rf /data/web_static/current')\n run('sudo ln -sf /data/web_static/releases/{}/ \\\n /data/web_static/current'.format(file_name))\n return True\n except:\n return False\n\n\ndef deploy():\n \"\"\" creates and deploys static to the web server \"\"\"\n\n path = \"\"\n path = do_pack()\n if path == \"\":\n return False\n return(do_deploy(path))\n","sub_path":"3-deploy_web_static.py","file_name":"3-deploy_web_static.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"572480573","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generic import ListView, DetailView \nfrom django.views.generic.dates import YearArchiveView\nfrom django.contrib.auth.decorators import login_required\nimport requests\nfrom ast import literal_eval as make_tuple\nimport json\nfrom django.shortcuts import render,redirect,get_object_or_404\nfrom django.db.models import Q\nfrom .models import Music, Rating, Review, Listenlater, History, Channel\nfrom . import models\nfrom django.db.models import Case, When\nfrom client.models import Music, Listenlater, History, Channel\nfrom django.contrib import messages\n\n\n\n\n# Create your views here.\ndef client_home(request):\n return render(request, 'client-home-page.html')\n\n\nclass HomeView(ListView):\n model = Music\n template_name = 'client-home-page.html'\n def get_context_data(self, **kwargs):\n context = super(HomeView, self).get_context_data(**kwargs)\n context['top_rated'] = Music.objects.filter(status='TR')\n context['most_watched'] = Music.objects.filter(status='MW')\n context['recently_added'] = Music.objects.filter(status='RA')\n print(context)\n return context \n\n\nclass MusicList(ListView):\n model = Music \n paginate_by = 10\n template_name = \"music_list.html\" \n \nclass MusicDetail(DetailView):\n model = Music\n template_name = \"music_detail.html\" \n def get_object(self):\n object = super(MusicDetail, self).get_object()\n object.views_count += 1\n object.save()\n return object \n\n def get_context_data(self, **kwargs):\n context = super(MusicDetail, self).get_context_data(**kwargs)\n context['related_musics'] = Music.objects.filter(category=self.get_object().category)\n print(context)\n return context\n\n \nclass MusicCategory(ListView):\n model = Music\n template_name = \"music_list.html\" \n paginate_by = 5\n \n def get_queryset(self):\n self.category = self.kwargs['category']\n # musics = Music.objects.filter(category=self.category)\n return Music.objects.filter(category=self.category)\n\n def get_context_data(self, **kwargs):\n context = super(MusicCategory, self).get_context_data(**kwargs)\n context['music_category'] = self.category\n return context\n\n\nclass MusicLanguage(ListView):\n model = Music\n template_name = \"music_list.html\" \n paginate_by = 10 \n \n def get_queryset(self):\n self.language = self.kwargs['lang']\n # musics = Music.objects.filter(category=self.category)\n return Music.objects.filter(language=self.language)\n\n def get_context_data(self, **kwargs):\n context = super(MusicLanguage, self).get_context_data(**kwargs)\n context['music_language'] = self.language\n return context\n\n\nclass MusicSearch(ListView):\n model = Music\n paginate_by = 100\n \n def get_queryset(self):\n query = self.request.GET.get('query')\n if query:\n object_list = self.model.objects.filter(title__icontains=query)\n print(query)\n print(object_list)\n else:\n object_list = self.model.objects.none()\n return object_list \n\n\nclass MusicYear(YearArchiveView):\n queryset = Music.objects.all()\n date_field ='year_of_production'\n make_object_list = True\n allow_future = True\n print(queryset)\n\n\n@login_required \ndef history(request):\n if request.method == \"POST\":\n user = request.user\n print(\"User: \", user)\n music_id = request.POST['music_id']\n print(\"Music ID: \", music_id)\n print(music_id)\n history = History(user=user, music_id=music_id)\n history.save()\n print(history)\n return redirect(\"channel.html\")\n history = 
History.objects.filter(user=request.user)\n ids = []\n for i in history:\n ids.append(i.music_id)\n \n preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(ids)])\n music = Music.objects.filter(music_id__in=ids).order_by(preserved)\n\n return render(request, 'history.html', {\"history\": music})\n\n\n@login_required\ndef Listenlater(request):\n if request.method == \"POST\":\n user = request.user\n print(\"User Name: \", user)\n video_id = request.POST['video_id']\n print(\"ID: \", video_id)\n\n listen = models.Listenlater.objects.filter(user=user)\n\n for i in listen:\n if video_id == i.video_id:\n message = \"Your Music is Already Added\"\n messages.success(request, \"Your Music is Already Added\")\n break\n else:\n listenlater = models.Listenlater(user=user, video_id=video_id)\n listenlater.save()\n message = \"Your Music is Succesfully Added\"\n messages.success(request, \"Your Music is Succesfully Added\")\n\n music = Music.objects.filter(music_id=video_id).first()\n print(music.title)\n return render(request, f\"yeah.html\", {'music': music, \"message\": message})\n\n\n ll = models.Listenlater.objects.filter(user=request.user)\n ids = []\n for i in ll:\n ids.append(i.video_id)\n \n preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(ids)])\n music = Music.objects.filter(music_id__in=ids).order_by(preserved)\n return render(request, \"listenlater.html\", {'music': music})\n\n\n\ndef music(request):\n music = Music.objects.all()\n return render(request, 'music.html', {'music': music})\n\n\ndef musicpost(request, id):\n music = Music.objects.filter(music_id=id).first()\n return render(request, 'musicpost.htm', {'music': music})\n\n\n@login_required\ndef channel(request, channel):\n chan = Channel.objects.filter(name=channel).first()\n print(\"Channel: \", chan)\n music_ids = str(chan.music).split(\" \")[1:]\n\n preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(music_ids)])\n music = Music.objects.filter(music_id__in=music_ids).order_by(preserved) \n print(\"Music List: \", music)\n\n return render(request, \"channel.html\", {\"channel\": chan, \"music\": music}) \n\n\n@login_required\ndef upload(request):\n if request.method == \"POST\":\n title = request.POST['title']\n singer = request.POST['singer']\n band = request.POST['band']\n tag = request.POST['tag']\n year = request.POST['year_production']\n image = request.FILES['picture']\n description = request.POST['description']\n music = request.FILES['file']\n \n music_model = Music(title=title, singer=singer, band=band, cast=tag, year_of_production = year, image=image, music=music, description=description)\n music_model.save()\n messages.success(request, f'Successfully Added!!')\n music_id = music_model.music_id\n user = request.user \n print(user)\n channel_find = models.Channel.objects.filter(name=str(request.user))\n print(channel_find)\n\n for i in channel_find:\n i.music += f\" {music_id}\"\n i.save() \n return render(request, \"upload.html\")\n\n\n@login_required\ndef search_function(request):\n if request.method =='POST':\n finds = request.GET['music']\n if finds:\n match = Music.objects.filter(Q(title__icontains=finds)) \n if match: \n return render(request,'music_list.html', {'context':match})\n else:\n messages.error(request, \"The word, You type did not Exist\")\n else:\n return HttpResponseRedict('music_list.html') \n return render(request, 'music_list.html') \n\n\n\ndef ratingReview(request, mus_id):\n if not request.user.is_authenticated:\n return redirect(\"login\")\n if not 
request.user.is_active:\n raise Http404\n musics = get_object_or_404(Music, music_id=mus_id)\n # for rating\n if request.method == \"POST\":\n\n rate = request.POST['rating'] \n review = request.POST['review']\n\n ratingObject = Rating()\n reviewObject = Review()\n\n ratingObject.user = request.user\n reviewObject.user = request.user\n\n ratingObject.song = musics\n reviewObject.song = musics\n\n ratingObject.rating = rate\n reviewObject.review = review\n\n ratingObject.save()\n reviewObject.save()\n\n messages.success(request, \"Succssfully, Sended Your Rating and Review\")\n return redirect(\"client-home\")\n return render(request, 'music_detail.html', {'music': musics})\n\n\n\n\n\n\n\n\n\n# @login_required(login_url='login')\n# def recommend(request):\n# user_id = request.user.id\n# user_name = request.user.username\n# print(\"User ID: \", user_id)\n# print(\"User Name: \", user_name)\n# url = \"http://127.0.0.1:8000/recommend/\"\n# payload = {'user_id':user_id}\n# headers = {\n# 'content-type': \"multipart/form-data\",\n# 'cache-control': \"no-cache\",\n# }\n# responses = requests.request(\"POST\",url,data=payload)\n# import pdb; pdb.set_trace()\n# response = json.load()\n# print(\"Response\", response)\n# respnses_tuple = make_tuple(\"Tuple_Response\", response)\n# print( respnses_tuple)\n# context = list()\n# print(\"Context\", context)\n\n# for user_id in respnses_tuple:\n# try:\n# recommended = Music.objects.get(id=user_id)\n# context.append(recommended)\n# except:\n# pass\n# return render(request,\"recommend.html\",{'context': context})\n\n\n\n\n\n\n@login_required(login_url='login')\ndef recommend(request):\n user_id = request.user.id\n user_name = request.user.username\n print(\"User ID: \", user_id)\n print(\"User Name: \", user_name)\n responses = requests.request(\"POST\",url,data=payload)\n response = json.load()\n print(\"Response\", response)\n respnses_tuple = make_tuple(\"Tuple_Response\", response)\n print( respnses_tuple)\n context = list()\n print(\"Context\", context)\n\n for user_id in respnses_tuple:\n try:\n recommended = Music.objects.get(id=user_id)\n context.append(recommended)\n except:\n pass\n return render(request,\"recommend.html\",{'context': context})\n\n","sub_path":"client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"313976883","text":"import cv2\n#Load the image into the memory\nimg = cv2.imread('sample_image.jpg',cv2.IMREAD_COLOR);\n\n#show the image in a window\n#create a window named image_display\ncv2.namedWindow('image_display',cv2.WINDOW_AUTOSIZE);\n#draw the image into the image_display window\ncv2.imshow('image_display',img);\n#wait for the user to press any key\ncv2.waitKey(0);\n#once user inputs something destroy all the windows\ncv2.destroyAllWindows();\n","sub_path":"Code_Files/Chapter_3/HelloWorld/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"234257847","text":"from import_modules import *\n\n\nclass OkButton(QPushButton):\n def __init__(self, window):\n super().__init__(\"Ok\", window)\n self.width = 70\n self.height = 25\n self.top = window.height - self.height - 5\n self.left = window.width / 2 - self.width / 2 - 5 / 2\n self.setGeometry(QtCore.QRect(0, 0, self.width, self.height))\n self.move(self.left, self.top)\n self.clicked.connect(window.close)\n\n\nclass VictoryLabel(QLabel):\n def __init__(self, window):\n super().__init__(window)\n self.width = window.width - 10\n self.height = window.height - 40\n self.top = 5\n self.left = 5\n self.setGeometry(QtCore.QRect(0, 0, self.width, self.height))\n self.move(self.left, self.top)\n self.setPixmap(QPixmap(\"img/victory.png\"))\n self.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)\n\n\nclass Frame(QLabel):\n def __init__(self, window):\n super().__init__(window)\n self.width = window.width - 10\n self.height = window.height - 40\n self.top = 5\n self.left = 5\n self.setGeometry(QtCore.QRect(0, 0, self.width, self.height))\n self.move(self.left, self.top)\n self.setFrameStyle(QLabel.Box)\n\n\nclass VictoryWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.top = 0\n self.left = 0\n self.width = 250\n self.height = 150\n self.title = \"Victory!\"\n self.setWindowTitle(self.title)\n self.setFixedSize(self.width,\n self.height)\n self.move(self.left, self.top)\n\n # buttons\n self.ok_button = OkButton(self)\n\n # labels\n self.help_label = VictoryLabel(self)\n\n # frame\n self.frame = Frame(self)\n\n self.show()\n","sub_path":"victory_window.py","file_name":"victory_window.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"426318583","text":"import pandas as pd\nimport numpy as np\nfrom PIL import Image\nimport os\nimport importdataset\nfrom keras import applications, Input\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\nfrom keras.layers import GlobalAveragePooling2D, AveragePooling2D, Flatten, Conv2DTranspose\nfrom keras.models import Sequential, Model, load_model\nfrom keras.optimizers import SGD, Adam\nfrom tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy\nfrom keras import metrics\nfrom keras import losses\nfrom keras.models import Sequential\nimport keras.backend as K\nfrom PIL import Image, ImageDraw, ImageFont\nfrom PIL import ImageTk, ImageWin\nimport tkinter\nimport keras\nfrom bpmll import bp_mll_loss\nimport utils\nimport h5py\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cv2\nimport random\n\n\ndef make_gradcam_heatmap(\n img_array, model, last_conv_layer_name, classifier_layer_names, class_index=-1\n):\n with tf.GradientTape() as tape:\n last_conv_layer = model.get_layer(\"resnet50\").get_layer(last_conv_layer_name)\n last_conv_layer_model = keras.Model(model.get_layer(\"resnet50\").input, last_conv_layer.output)\n\n classifier_input = keras.Input(shape=last_conv_layer.output.shape[1:])\n x = classifier_input\n x = model.get_layer(\"spatial_dropout2d\")(x)\n x = model.get_layer(\"batch_normalization\")(x)\n x = model.get_layer(\"global_average_pooling2d\")(x)\n x = model.get_layer(\"dense\")(x)\n classifier_model = keras.Model(classifier_input, x)\n\n # Compute activations of the last conv layer and make the tape watch it\n last_conv_layer_output = last_conv_layer_model(img_array, training=False)\n tape.watch(last_conv_layer_output)\n # Compute class predictions\n preds = classifier_model(last_conv_layer_output, training=False)\n if class_index == -1:\n pred_index = tf.argmax(preds[0])\n else:\n pred_index = class_index\n class_channel = preds[:, pred_index]\n print(\"*\"*50)\n print(\"Taping gradients \\nPredictions are\")\n print(preds)\n print(\"I am looking at\")\n print(class_channel)\n print(\"*\"*50)\n\n # This is the gradient of the class score with regard to\n # the output feature map of the last conv layer\n grads = tape.gradient(class_channel, last_conv_layer_output)\n print(\"Shape of the gradient of the class score wrt last conv layer activations\")\n print(grads.shape)\n print(\"*\"*50)\n\n # This is a vector where each entry is the mean intensity of the gradient\n # over a specific feature map channel\n pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))\n print(\"Shape of the pooled gradients (a_k^c)\")\n print(pooled_grads.shape)\n print(\"*\" * 50)\n\n # We multiply each channel in the feature map array\n # by \"how important this channel is\" with regard to the top predicted class\n last_conv_layer_output = last_conv_layer_output.numpy()[0]\n pooled_grads = pooled_grads.numpy()\n for k in range(pooled_grads.shape[-1]):\n # Computes a_k^c * A^k\n last_conv_layer_output[:, :, k] *= pooled_grads[k]\n\n heatmap = tf.reduce_sum(last_conv_layer_output, axis=2)\n heatmap = tf.nn.relu(heatmap).numpy()\n\n # The channel-wise mean of the resulting feature map\n # is our heatmap of class activation\n # heatmap = np.mean(last_conv_layer_output, axis=-1)\n\n # For visualization purpose, we will also normalize the heatmap between 0 & 1\n heatmap = np.maximum(heatmap, 0) / (np.max(heatmap) if np.max(heatmap) > 0.0 else 1.0)\n\n # We load the original image\n mean = 
np.mean(img_array.squeeze(), axis=2)\n img = np.zeros([224, 224, 3])\n img[:, :, 0] = mean\n img[:, :, 1] = mean\n img[:, :, 2] = mean\n\n # We rescale heatmap to a range 0-255\n heatmap = np.uint8(255 * heatmap)\n\n # We use jet colormap to colorize heatmap\n jet = cm.get_cmap(\"jet\")\n\n # We use RGB values of the colormap\n jet_colors = jet(np.arange(256))[:, :3]\n jet_heatmap = jet_colors[heatmap]\n\n # We create an image with RGB colorized heatmap\n jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)\n jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))\n jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)\n\n # Superimpose the heatmap on original image\n superimposed_img = jet_heatmap * 0.4 + img\n # save_img = keras.preprocessing.image.array_to_img(superimposed_img)\n\n # Save the superimposed image\n # save_path = \"N:/PycharmProjects/PerceptionVisualization/graphviz/visualizations/cam_4.jpg\"\n # save_img.save(save_path)\n # exit()\n\n superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)\n # superimposed_img = keras.preprocessing.image.img_to_array(superimposed_img)\n var = np.zeros([7, 7, 3])\n var[:, :, 0] = heatmap\n var[:, :, 1] = heatmap\n var[:, :, 2] = heatmap\n heatmap = var\n heatmap = keras.preprocessing.image.array_to_img(heatmap)\n heatmap = heatmap.resize((img.shape[1], img.shape[0]))\n heatmap = keras.preprocessing.image.img_to_array(heatmap)\n heatmap = heatmap.astype(np.float32) / 255.0\n\n return superimposed_img, heatmap\n\n\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\nbasepath = os.getcwd()\ndecoder_path = os.path.join(basepath, \"../models/decoder_dsim_250_0.4dsim_0.2rec_0.4ssim\")\ndecoder_ssim_path = os.path.join(basepath, \"../models/decoder_no_dsim\") # decoder_ssim\n# decoder_ssim_path = os.path.join(basepath, \"../models/decoder_gan_Experiment3(good)\")\nclassifier_path = os.path.join(basepath, \"../models/classifier\")\nmain_dataset_path = os.path.join(basepath, \"../datasets/dataset.h5\")\nencoder_dataset_path = os.path.join(basepath, \"../datasets/dataset_encoder.h5\")\n\ndecoder = keras.models.load_model(decoder_path, custom_objects={\"bp_mll_loss\": bp_mll_loss,\n \"euclidean_distance_loss\": utils.euclidean_distance_loss,\n \"rgb_ssim_loss\": utils.rgb_ssim_loss})\n\ndecoder_ssim = keras.models.load_model(decoder_ssim_path, custom_objects={\"bp_mll_loss\": bp_mll_loss,\n \"euclidean_distance_loss\": utils.euclidean_distance_loss,\n \"rgb_ssim_loss\": utils.rgb_ssim_loss})\n\nclassifier = keras.models.load_model(classifier_path, custom_objects={\"bp_mll_loss\": bp_mll_loss, \"euclidean_distance_loss\": utils.euclidean_distance_loss, \"rgb_ssim_loss\": utils.rgb_ssim_loss})\nencoder = keras.Model(classifier.input, classifier.get_layer(\"global_average_pooling2d\").input)\n\ndecoder.summary()\nclassifier.summary()\n\n# Load targets (The targets for the decoder are the original inputs, X in main dataset)\nhf_main = h5py.File(main_dataset_path, 'r')\n# Y_train = hf.get('X_Train').value\nX_test = hf_main['X_Test']\nY_test = hf_main['Y_Test']\n\nhf_enc = h5py.File(encoder_dataset_path, 'r')\n# X_train = hf.get('E_train').value\nE_test = hf_enc['E_test']\n\nroot = tkinter.Tk()\nroot.geometry('900x800')\ncanvas = tkinter.Canvas(root, width=896, height=800)\ncanvas.pack()\n\n\ndef image_distance(im1, im2):\n im1 = im1.astype(float)/255.0\n im2 = im2.astype(float)/255.0\n\n # MSE\n return 
tf.keras.metrics.mean_squared_error(im1, im2)\n\n # SSIM\n # return utils.rgb_ssim_loss(im1.reshape([1,224,224,3]), im2.reshape([1,224,224,3]))\n\n # DSIM MSE\n # emb1 = encoder(im1.reshape([1,224,224,3]))\n # emb2 = encoder(im2.reshape([1,224,224,3]))\n # return tf.keras.metrics.mean_squared_error(emb1, emb2)\n\n\ndef viz_and_save(idx):\n # infofile = open(os.path.join(basepath, \"../images/info.txt\"), 'w')\n labelsfile = open(os.path.join(basepath, \"../images/labels.txt\"), 'w')\n predictionsfile = open(os.path.join(basepath, \"../images/predictions.txt\"), 'w')\n explainedfile = open(os.path.join(basepath, \"../images/explained.txt\"), 'w')\n similarityfile = open(os.path.join(basepath, \"../images/similarity.txt\"), 'w')\n\n similarity_correct = []\n similarity_incorrect = []\n similarity_disjoint = []\n similarity_parcorrect = []\n similarity_empty = []\n\n for i in idx:\n reconstructed = ((decoder.predict(E_test[i:i+1, :, :, :]))*255).squeeze().astype(np.uint8)\n reconstructed_ssim = ((decoder_ssim.predict(E_test[i:i+1, :, :, :]))*255).squeeze().astype(np.uint8)\n original = (X_test[i:i+1, :, :, :]*255).squeeze().astype(np.uint8)\n heatmap, raw_heatmap = make_gradcam_heatmap(\n X_test[i:i + 1, :, :, :], classifier, \"conv5_block3_out\", [], -1\n )\n\n heatmap = np.array(heatmap)\n\n res = np.concatenate((original, reconstructed, reconstructed_ssim, heatmap), axis=1)\n t = (heatmap.astype(np.float32)/255.0)*0.3\n multi_map = np.concatenate((t, t, t, t), axis=1)\n multi_map = multi_map + 0.7*(res.astype(np.float32)/255.0)\n\n # Use colour gamma\n # raw_heatmap = raw_heatmap**(2/3)\n\n # Threshold\n # threshold = 0.3\n # raw_heatmap[raw_heatmap > threshold] = 1.0\n # raw_heatmap[raw_heatmap <= threshold] = 0.0\n\n mask = np.concatenate((raw_heatmap, raw_heatmap, raw_heatmap, raw_heatmap), axis=1) # This is the heatmap scaled 0...1\n masked_map = (res.astype(np.float32)/255.0) # This is a copy of the full row\n white = np.ones([224, 224*4, 3]) # This is all ones, so a white image\n\n masked_map = np.multiply(mask, masked_map) + np.multiply((white-mask), white)\n\n multi_map_int = (multi_map*255.0).astype(np.uint8)\n masked_map = (masked_map*255.0).astype(np.uint8)\n\n res = np.concatenate((res, multi_map_int, masked_map), axis=0)\n\n print(\"\\n\\n\")\n target = Y_test[i, :]\n\n print(\"Targets\")\n labelline = \"\"\n predline = \"\"\n expline = \"\"\n\n correct = []\n count = 0\n for elem in importdataset.CLASS_NAMES:\n if target[count] > 0.1:\n correct.append((elem, target[count]))\n labelline += str(elem) + \", \"\n count += 1\n print(correct)\n\n print(\"Model prediction\")\n classes = classifier.predict(X_test[i:i+1, :, :, :]).squeeze()\n # print(classes)\n accepted = []\n count = 0\n for elem in importdataset.CLASS_NAMES:\n if classes[count] > 0.5:\n accepted.append((elem, classes[count]))\n predline += str(elem) + \", \"\n count += 1\n argmax = classes.argmax()\n # accepted.append((\"TOP: \"+importdataset.CLASS_NAMES[argmax], classes[argmax]))\n expline = importdataset.CLASS_NAMES[argmax]\n image = Image.fromarray(res)\n\n dovis = False\n\n if dovis:\n photoimage = ImageTk.PhotoImage(image)\n imagesprite = canvas.create_image(0, 0, image=photoimage, anchor=\"nw\")\n root.update()\n\n print(accepted)\n\n accepted_classes = [elem[0] for elem in accepted]\n target_classes = [elem[0] for elem in correct]\n\n # Set correct prediction\n # 1 means that predictions and targets coincide\n # -1 means that none of the targets are in the predictions (the sets are disjoint)\n # 0 means that 
some of the targets are in the predictions\n # -2 means empty prediction\n # -3 means that one of the predictions is NOT in the target set\n correct_prediction = None\n\n tinp = 0\n pint = 0\n\n for elem in target_classes:\n if elem in accepted_classes:\n tinp += 1\n\n for elem in accepted_classes:\n if elem in target_classes:\n pint += 1\n\n if set(accepted_classes) == set(target_classes):\n correct_prediction = 1\n elif len(accepted_classes) == 0:\n correct_prediction = -2\n elif pint == tinp == 0:\n correct_prediction = -1\n elif pint == len(accepted_classes) < len(target_classes):\n correct_prediction = 0\n elif pint > 0 and tinp > 0:\n correct_prediction = -3\n\n distance = image_distance(reconstructed, reconstructed_ssim)\n\n if correct_prediction == 1:\n similarity_correct.append(distance)\n elif correct_prediction == -1:\n similarity_disjoint.append(distance)\n elif correct_prediction == -2:\n similarity_empty.append(distance)\n elif correct_prediction == 0:\n similarity_parcorrect.append(distance)\n elif correct_prediction == -3:\n similarity_incorrect.append(distance)\n\n similarityfile.writelines([str(correct_prediction) + \",\" + str(np.mean(distance))+\"\\n\"])\n\n print(\"Prediction was \" + str(correct_prediction))\n\n # sv = input(\"Any key to continue\")\n sv = \"ehwjrejrejtejt\"\n\n predline = predline[:-2]+\"\\n\"\n labelline = labelline[:-2] + \"\\n\"\n expline = expline + \"\\n\"\n\n predictionsfile.writelines(predline)\n labelsfile.writelines(labelline)\n explainedfile.writelines(expline)\n\n original = res[0:224, 0:224, :]\n cammask = res[448:672, 0:224, :]\n mask_1 = res[448:672, 224:448, :]\n mask_2 = res[448:672, 448:672, :]\n\n opt1 = Image.fromarray(np.concatenate((original, cammask, mask_1), axis=1))\n opt2 = Image.fromarray(np.concatenate((original, cammask, mask_2), axis=1))\n\n if sv == \"c\":\n Image.fromarray(np.concatenate((original, mask_1, mask_2), axis=1)).save(os.path.join(basepath, \"../images/\" + str(i) + \".jpg\"))\n\n if sv == \"1\":\n opt1.save(os.path.join(basepath, \"../images/\" + str(i) + \".jpg\"))\n\n if sv == \"2\":\n opt2.save(os.path.join(basepath, \"../images/\" + str(i) + \".jpg\"))\n\n #original = res[0:224, 0:224, :]\n #masked_original = res[448:, 0:224, :]\n #masked_recon = res[448:, 448:672, :]\n\n #original_cam = Image.fromarray(np.concatenate((original, masked_original), axis=1))\n #original_viz = Image.fromarray(np.concatenate((original, masked_recon), axis=1))\n #original = Image.fromarray(original)\n #masked_original = Image.fromarray(masked_original)\n #masked_recon = Image.fromarray(masked_recon)\n\n #original.save(os.path.join(basepath, \"../images/original/\"+str(i)+\".jpg\"))\n #masked_original.save(os.path.join(basepath, \"../images/cam/\"+str(i)+\".jpg\"))\n #masked_recon.save(os.path.join(basepath, \"../images/viz/\"+str(i)+\".jpg\"))\n #original_cam.save(os.path.join(basepath, \"../images/original+cam/\"+str(i)+\".jpg\"))\n #original_viz.save(os.path.join(basepath, \"../images/original+viz/\"+str(i)+\".jpg\"))\n # infofile.writelines([str(i)+\"; \"+str(correct)+\"; \"+str(accepted)+\"\\n\"])\n if sv == \"q\":\n # infofile.close()\n exit()\n # infofile.close()\n predictionsfile.close()\n explainedfile.close()\n labelsfile.close()\n\n print(\"Similarities\")\n print(\"Correct\")\n print(\"Count: \" + str(len(similarity_correct)) + \"Mean: \" + str(np.mean(similarity_correct)))\n print(\"Disjoint\")\n print(\"Count: \" + str(len(similarity_disjoint)) + \"Mean: \" + str(np.mean(similarity_disjoint)))\n print(\"Partially 
correct\")\n print(\"Count: \" + str(len(similarity_parcorrect)) + \"Mean: \" + str(np.mean(similarity_parcorrect)))\n print(\"Empty prediction\")\n print(\"Count: \" + str(len(similarity_empty)) + \"Mean: \" + str(np.mean(similarity_empty)))\n print(\"Incorrect\")\n print(\"Count: \" + str(len(similarity_incorrect)) + \"Mean: \" + str(np.mean(similarity_incorrect)))\n\ndef info_perm():\n infofile = open(os.path.join(basepath, \"../images/sorted/info.txt\"), 'w')\n rand_perm = np.array(\n [38, 72, 12, 42, 65, 15, 0, 10, 45, 95, 58, 62, 3, 61, 90, 35, 18, 36, 107, 101, 13, 53, 21, 26, 9, 59, 41, 60,\n 93, 33])\n\n for i in rand_perm:\n output_set = set()\n\n # Add the model's top prediction to the set\n classes = classifier.predict(X_test[i:i + 1, :, :, :]).squeeze()\n argmax = classes.argmax()\n output_set.add(importdataset.CLASS_NAMES[argmax])\n\n # Merge with actual labels until we fill the 4 cases\n count = 0\n target = Y_test[i, :]\n for elem in importdataset.CLASS_NAMES:\n if target[count] > 0.1 and len(output_set) < 4:\n output_set.add(elem)\n count += 1\n\n # Add random options if we haven't reached the 4 mark yet\n while len(output_set) < 4:\n ran = random.randrange(0, len(importdataset.CLASS_NAMES))\n output_set.add(importdataset.CLASS_NAMES[ran])\n\n infofile.writelines(str(output_set)+\"\\n\")\n\n infofile.close()\n\n\ndef create_additional_viz(idxs):\n participants_cam = 58\n participants_pv = 40\n\n data_cam = [57, 0, 3, 10, 0, 58, 47, 58, 13, 1, 57, 58, 58, 3, 12, 0, 58, 57, 3, 17, 58, 58, 58, 58, 58, 1, 2, 54, 1, 4]\n data_pv = [33, 10, 36, 30, 3, 32, 34, 29, 13, 13, 29, 27, 25, 17, 31, 1, 38, 32, 17, 31, 19, 38, 28, 41, 28, 4, 15, 30, 5, 7]\n\n optionsfile = open(os.path.join(basepath, \"../images/options.txt\"), 'r')\n labelsfile = open(os.path.join(basepath, \"../images/labels.txt\"), 'r')\n predictionsfile = open(os.path.join(basepath, \"../images/predictions.txt\"), 'r')\n explainedfile = open(os.path.join(basepath, \"../images/explained.txt\"), 'r')\n\n optionlines = optionsfile.readlines()\n labelslines = labelsfile.readlines()\n predictionlines = predictionsfile.readlines()\n explainedlines = explainedfile.readlines()\n\n font = ImageFont.truetype(os.path.join(basepath, \"../Gidole-Regular.ttf\"), size=15)\n\n count = 0\n for idx in idxs:\n labels = labelslines[count]\n predictions = predictionlines[count]\n explained = explainedlines[count].replace('\\n', '')\n\n options = optionlines[count].replace('}', '').replace('{', '').replace('\\n', '').replace(\"'\", '')\n options = \"Possible answers: \" + options + \", 'I just can't tell' (Correct answer: \"+ explained + \")\"\n\n resultimage = Image.new('RGB', (672+75, 224+100), (255, 255, 255, 255))\n vizimage = Image.open(os.path.join(basepath, \"../images/\"+str(idx)+\".jpg\"))\n predimage = Image.new('RGB', (224, 75), (255, 255, 255, 255))\n d = ImageDraw.Draw(predimage)\n d.text((10, 10), \"Target:\\nPredicted:\\nExplained:\", fill=(0, 0, 0), font=font)\n d.text((100, 10), labels + predictions + explained, fill=(0, 0, 0), font=font)\n predimage = predimage.rotate(270, expand=True)\n optionsimage = Image.new('RGB', (672, 50), (255, 255, 255, 255))\n d = ImageDraw.Draw(optionsimage)\n d.text((10, 10), options, fill=(0, 0, 0), font=font)\n perfimage = Image.new('RGB', (672, 60), (255, 255, 255, 255))\n d = ImageDraw.Draw(perfimage)\n d.text((10, 10), \"Grad-CAM users that correctly guessed: \", fill=(0, 0, 0), font=font)\n d.text((10, 35), \"PV users that correctly guessed: \", fill=(0, 0, 0), font=font)\n d.text((260, 
10), str(data_cam[count])+\"/\"+str(participants_cam) + \" (\"+str(round(float(data_cam[count])/float(participants_cam)*float(100), 2))+\"%)\", fill=(0, 0, 0), font=font)\n d.text((260, 35), str(data_pv[count])+\"/\"+str(participants_pv) + \" (\"+str(round(float(data_pv[count])/float(participants_pv)*float(100), 2))+\"%)\", fill=(0, 0, 0), font=font)\n\n resultimage.paste(vizimage, (0, 0))\n resultimage.paste(predimage, (672, 0))\n resultimage.paste(optionsimage, (0, 224))\n resultimage.paste(perfimage, (0, 259))\n\n resultimage.save(os.path.join(basepath, \"../additionalmat/\"+str(count)+\".jpg\"))\n count += 1\n\n\nif __name__ == \"__main__\":\n rand_perm = np.array(\n [38, 72, 12, 42, 65, 15, 0, 10, 45, 95, 58, 62, 3, 61, 90, 35, 18, 36, 107, 101, 13, 53, 21, 26, 9, 59, 41, 60,\n 93, 33])\n\n # viz_and_save(rand_perm)\n viz_and_save(range(801, 1802))\n\n # create_additional_viz(rand_perm)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/test_decoder.py","file_name":"test_decoder.py","file_ext":"py","file_size_in_byte":20190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
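The image-saving logic in test_decoder.py above tiles three 224x224 crops side by side before writing a JPEG. A minimal standalone sketch of that tiling step, with synthetic arrays standing in for the crops sliced out of `res`:

import numpy as np
from PIL import Image

# Synthetic stand-ins for the 224x224 RGB crops taken from `res` in the script above.
original = np.zeros((224, 224, 3), dtype=np.uint8)
mask_1 = np.full((224, 224, 3), 128, dtype=np.uint8)
mask_2 = np.full((224, 224, 3), 255, dtype=np.uint8)

panel = Image.fromarray(np.concatenate((original, mask_1, mask_2), axis=1))
print(panel.size)  # (672, 224): three 224-wide tiles placed side by side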
+{"seq_id":"49273460","text":"from flask import Flask, render_template, request, jsonify\nimport datetime\nimport os\nfrom opcheck import openpose_check, openpose_select\nimport re\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template('betterone.html')\n\n@app.route(\"/process\", methods=['POST'])\ndef process():\n\n cameraFile = request.files['data']\n timenow = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n cameraFile.save('video/webm/{0}.webm'.format(timenow))\n os.popen('ffmpeg -i video/webm/{0}.webm -strict -2 -y -r 30 video/mp4/{1}.mp4'.format(timenow,timenow)).readlines()\n # # os.popen('cd ~/openpose').readlines()\n # print('################ ffmpeg end #####################')\n # os.chdir('/home/fan/openpose')\n # os.popen('/home/fan/openpose/build/examples/openpose/openpose.bin --video /home/fan/Documents/video/{0}.mp4 --write_json /home/fan/Documents/openpose/output/{1}/ --display 0 --render_pose 0 --model_pose COCO'.format(timenow, timenow)).readlines()\n # print('################ openpose end #####################')\n # result = openpose_select('/home/fan/Documents/openpose/output/{0}/'.format(timenow))\n # print('select check: '+result)\n # if re.split(r'[\\s]+',result)[0] == 'error':\n # # if result == \"error 0\" or result == \"error 2\":\n # return result\n # result = openpose_check('/home/fan/Documents/openpose/output/{0}/'.format(timenow))\n # print('check check: '+result)\n # if re.split(r'[\\s]+',result)[0] == 'error':\n # # if result == \"error 0\" or result == \"error 2\":\n # return result\n # os.popen('cd ~/3d-pose-baseline-master/').readlines()\n # os.chdir('/home/fan/3d-pose-baseline-master/')\n # os.popen('python /home/fan/3d-pose-baseline-master/src/totxt.py --camera_frame --residual --batch_norm --dropout 0.5 --max_norm --evaluateActionWise --use_sh --epochs 200 --load 4874200 --openpose /home/fan/Documents/openpose/output/{0} --interpolation --multiplier 1'.format(timenow)).readlines()\n # # os.popen('cd /home/fan/Documents/3dpose/output').readlines()\n # print('################ totxt end #####################')\n # os.chdir('/home/fan/Documents/3dpose/output')\n # os.popen('python /home/fan/3d-pose-baseline-master/src/tovmd.py -t /home/fan/Documents/3dpose/output/ -b /home/fan/Documents/model.csv -x 22 -n {0}'.format(timenow)).readlines()\n # return timenow\n\nif __name__ == \"__main__\":\n app.jinja_env.auto_reload = True\n app.run(host='0.0.0.0', debug=True, port=5000, ssl_context='adhoc')\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"341570321","text":"\nimport os\nimport json\n\nimport torch\nfrom PIL import Image\nfrom openpifpaf import show\n\nfrom .visuals.printer import Printer\nfrom .network import PifPaf, ImageList, MonoLoco\nfrom .network.process import factory_for_gt, preprocess_pifpaf\n\n\ndef predict(args):\n\n cnt = 0\n\n # load pifpaf and monoloco models\n pifpaf = PifPaf(args)\n monoloco = MonoLoco(model=args.model, device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)\n\n # data\n data = ImageList(args.images, scale=args.scale)\n data_loader = torch.utils.data.DataLoader(\n data, batch_size=1, shuffle=False,\n pin_memory=args.pin_memory, num_workers=args.loader_workers)\n\n for idx, (image_paths, image_tensors, processed_images_cpu) in enumerate(data_loader):\n images = image_tensors.permute(0, 2, 3, 1)\n\n processed_images = processed_images_cpu.to(args.device, non_blocking=True)\n fields_batch = pifpaf.fields(processed_images)\n\n # unbatch\n for image_path, image, processed_image_cpu, fields in zip(\n image_paths, images, processed_images_cpu, fields_batch):\n\n if args.output_directory is None:\n output_path = image_path\n else:\n file_name = os.path.basename(image_path)\n output_path = os.path.join(args.output_directory, file_name)\n print('image', idx, image_path, output_path)\n\n keypoint_sets, scores, pifpaf_out = pifpaf.forward(image, processed_image_cpu, fields)\n pifpaf_outputs = [keypoint_sets, scores, pifpaf_out] # keypoints_sets and scores for pifpaf printing\n images_outputs = [image] # List of 1 or 2 elements with pifpaf tensor (resized) and monoloco original image\n\n if 'monoloco' in args.networks:\n im_size = (float(image.size()[1] / args.scale),\n float(image.size()[0] / args.scale)) # Width, Height (original)\n\n # Extract calibration matrix and ground truth file if present\n with open(image_path, 'rb') as f:\n pil_image = Image.open(f).convert('RGB')\n images_outputs.append(pil_image)\n\n im_name = os.path.basename(image_path)\n\n kk, dic_gt = factory_for_gt(im_size, name=im_name, path_gt=args.path_gt)\n\n # Preprocess pifpaf outputs and run monoloco\n boxes, keypoints = preprocess_pifpaf(pifpaf_out, im_size)\n outputs, varss = monoloco.forward(keypoints, kk)\n dic_out = monoloco.post_process(outputs, varss, boxes, keypoints, kk, dic_gt)\n # print (dic_out)\n world_loc = dic_out['xyz_pred']\n image_loc = dic_out['uv_centers']\n\n else:\n dic_out = None\n kk = None\n\n factory_outputs(args, images_outputs, output_path, pifpaf_outputs, dic_out=dic_out, kk=kk)\n print('Image {}\\n'.format(cnt) + '-' * 120)\n cnt += 1\n\n\ndef factory_outputs(args, images_outputs, output_path, pifpaf_outputs, dic_out=None, kk=None):\n \"\"\"Output json files or images according to the choice\"\"\"\n\n # Save json file\n if 'pifpaf' in args.networks:\n keypoint_sets, scores, pifpaf_out = pifpaf_outputs[:]\n\n # Visualizer\n keypoint_painter = show.KeypointPainter(show_box=False)\n skeleton_painter = show.KeypointPainter(show_box=False, color_connections=True,\n markersize=1, linewidth=4)\n\n if 'json' in args.output_types and keypoint_sets.size > 0:\n with open(output_path + '.pifpaf.json', 'w') as f:\n json.dump(pifpaf_out, f)\n\n if 'keypoints' in args.output_types:\n with show.image_canvas(images_outputs[0],\n output_path + '.keypoints.png',\n show=args.show,\n fig_width=args.figure_width,\n dpi_factor=args.dpi_factor) as ax:\n keypoint_painter.keypoints(ax, keypoint_sets)\n\n if 'skeleton' in args.output_types:\n with show.image_canvas(images_outputs[0],\n output_path + 
'.skeleton.png',\n show=args.show,\n fig_width=args.figure_width,\n dpi_factor=args.dpi_factor) as ax:\n skeleton_painter.keypoints(ax, keypoint_sets, scores=scores)\n\n if 'monoloco' in args.networks:\n if any((xx in args.output_types for xx in ['front', 'bird', 'combined'])):\n epistemic = False\n if args.n_dropout > 0:\n epistemic = True\n\n if dic_out['boxes']: # Only print in case of detections\n printer = Printer(images_outputs[1], output_path, kk, output_types=args.output_types\n , z_max=args.z_max, epistemic=epistemic)\n figures, axes = printer.factory_axes()\n printer.draw(figures, axes, dic_out, images_outputs[1], draw_box=args.draw_box,\n save=True, show=args.show)\n\n if 'json' in args.output_types:\n with open(os.path.join(output_path + '.monoloco.json'), 'w') as ff:\n json.dump(dic_out, ff)\n","sub_path":"src/video_processing/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"320707112","text":"import panel as pn\r\n\r\n\r\ndef get_content(title=\"App Title\"):\r\n return pn.Column(\r\n \"# \" + title,\r\n pn.widgets.Button(name=\"Click me!\", sizing_mode=\"stretch_width\"),\r\n sizing_mode=\"stretch_width\",\r\n background=\"lightgray\",\r\n )\r\n\r\n\r\ndef app_body_margin_css():\r\n css = \"\"\"\r\n body {\r\n margin-top: 0px;\r\n margin-bottom: 0px;\r\n margin-left: 20%;\r\n margin-right: 20%;\r\n }\r\n \"\"\"\r\n pn.config.raw_css.append(css)\r\n content = get_content(\"App Body Margin CSS\")\r\n return content\r\n\r\n\r\ndef app_margin():\r\n content = get_content(\"App Margin\")\r\n content.margin = (0, 100, 0, 100)\r\n return content\r\n\r\n\r\ndef app_gridspec():\r\n gspec = pn.GridSpec(sizing_mode=\"stretch_width\")\r\n gspec[:, 0] = pn.Spacer()\r\n gspec[:, 1:4] = get_content(\"App GridSpace\")\r\n gspec[:, 4] = pn.Spacer()\r\n return gspec\r\n\r\n\r\napp_body_margin_css().show()\r\n","sub_path":"scripts/issue_layout.py","file_name":"issue_layout.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"279339497","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom accounts.forms import RegistrationForm\nimport os\n\ndef home(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect(\"/slides\")\n else:\n return HttpResponseRedirect(\"/accounts/login\")\ndef create_user(request):\n if not request.user.is_authenticated():\n form = RegistrationForm(request.POST or None)\n\n if request.method == \"POST\" and form.is_valid():\n form.save()\n\n user = authenticate(username=form.cleaned_data[\"username\"],\n password=form.cleaned_data[\"password1\"])\n login(request, user)\n\n comando = \"cd templates/assets/img; mkdir %s\" % user.username\n os.system(comando)\n comando = \"cd media/photos ;zip %s.zip readme.txr\" % user.username\n os.system(comando)\n return HttpResponseRedirect(\"/slides/\")\n else:\n return HttpResponseRedirect(\"/slides/\")\n\n return render(request, \"registration/create_user.html\", {\n \"form\": form,\n })","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"96891584","text":"from django.conf.urls import url\r\nfrom pythonista import views\r\n\r\nurlpatterns = [\r\n\turl(r'^$', views.index, name = 'index'),\r\n\turl(r'users/', views.list_users, name = 'list_users'),\r\n\turl(r'feedback/', views.feedback_view, name = 'feedback'),\r\n\turl(r'register/', views.user_registration, name = 'user_registration'),\r\n\turl(r'book/', views.book, name = \"book\"),\r\n\turl(r'brogrammerz/', views.we_the_brogrammerz, name = \"brogrammerz\"),\r\n]","sub_path":"pythonista/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"625806213","text":"from api.mt.mt import Mt\nimport os\nimport datetime\nfrom random import randint\n\n\nclass Delete_malls_like(Mt):\n method = 'delete'\n api = '/v1/favorite/malls/like/$mall_id'\n data = {}\n\n error_resp = {\n 'code': 400000,\n 'message': '没有可以购买的商品'\n }\n\n expected_schema = {\n \"$schema\": \"http://json-schema.org/draft-06/schema#\",\n \"title\": \"expected_data\",\n \"type\": \"object\",\n \"required\": [\"code\", \"time\"],\n \"properties\": {\n \"code\": {\"type\": \"number\"},\n \"time\": {\"type\": \"number\"}\n }\n }\n","sub_path":"banshee-master/api/mt/favorite/delete_malls_like.py","file_name":"delete_malls_like.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
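The expected_schema in delete_malls_like.py above is a draft-06 JSON Schema; the record does not show how the surrounding framework applies it, but a hedged sketch of one possible validation step using the jsonschema package (an assumption, with a made-up payload) could look like:

from jsonschema import ValidationError, validate

# Schema copied from the class above; the sample payload below is invented for illustration.
expected_schema = {
    "$schema": "http://json-schema.org/draft-06/schema#",
    "title": "expected_data",
    "type": "object",
    "required": ["code", "time"],
    "properties": {
        "code": {"type": "number"},
        "time": {"type": "number"},
    },
}
sample_response = {"code": 200000, "time": 1589871234}  # hypothetical API response

try:
    validate(instance=sample_response, schema=expected_schema)
    print("response matches expected_schema")
except ValidationError as exc:
    print("schema mismatch:", exc.message)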
+{"seq_id":"621836485","text":"from payment_ui import config\nfrom payment_ui.custom_extensions.cachebust_static_assets.main import \\\n CachebustStaticAssets\nfrom payment_ui.custom_extensions.csrf.main import CSRF\nfrom payment_ui.custom_extensions.enhanced_logging.main import EnhancedLogging\nfrom payment_ui.custom_extensions.gzip_static_assets.main import \\\n GzipStaticAssets\nfrom payment_ui.custom_extensions.jinja_markdown_filter.main import \\\n JinjaMarkdownFilter\nfrom payment_ui.custom_extensions.security_headers.main import SecurityHeaders\n\n# from payment_ui.custom_extensions.content_security_policy.main import ContentSecurityPolicy\n\n\n# Create empty extension objects here\ncachebust_static_assets = CachebustStaticAssets()\nenhanced_logging = EnhancedLogging()\ngzip_static_assets = GzipStaticAssets()\nsecurity_headers = SecurityHeaders()\njinja_markdown_filter = JinjaMarkdownFilter()\ncsrf = CSRF()\n# content_security_policy = ContentSecurityPolicy()\n\n\ndef register_extensions(app):\n \"\"\"Adds any previously created extension objects into the app, and does any further setup they need.\"\"\"\n enhanced_logging.init_app(app)\n security_headers.init_app(app)\n jinja_markdown_filter.init_app(app)\n csrf.init_app(app)\n # content_security_policy.init_app(app)\n\n if config.STATIC_ASSETS_MODE == 'production':\n cachebust_static_assets.init_app(app)\n gzip_static_assets.init_app(app)\n\n # All done!\n app.logger.info(\"Extensions registered\")\n","sub_path":"payment_ui/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"593997704","text":"import os\nimport socket\nimport collections\n\n\ndef initPathSMAP(dirDB, dirOut, dirResult):\n pathSMAP = collections.OrderedDict(\n DB_L3_Global=os.path.join(dirDB, 'Daily_L3'),\n DB_L3_NA=os.path.join(dirDB, 'Daily_L3_NA'),\n Out_L3_Global=os.path.join(dirOut, 'L3_Global'),\n Out_L3_NA=os.path.join(dirOut, 'L3_NA'),\n outTest=os.path.join(dirOut, 'Test'),\n dirDB=dirDB,\n dirOut=dirOut,\n dirResult=dirResult)\n return pathSMAP\n\n\nhostName = socket.gethostname()\n\nif hostName == 'AW-m17':\n dirDB = os.path.join(os.path.sep, 'D:', 'rnnSMAP', 'Database_SMAPgrid')\n dirOut = os.path.join(os.path.sep, 'D:', 'rnnSMAP', 'Model_SMAPgrid')\n dirResult = os.path.join(os.path.sep, 'D:', 'rnnSMAP',\n 'Result_SMAPgrid')\n pathSMAP = initPathSMAP(dirDB, dirOut, dirResult)\n os.environ[\n 'PROJ_LIB'] = r'C:\\Users\\geofk\\Anaconda3\\pkgs\\proj4-5.2.0-ha925a31_1\\Library\\share'\n dirData = r'C:\\Users\\geofk\\work\\database'\n dirWQ = r'C:\\Users\\geofk\\work\\waterQuality'\n dirCode=r'C:\\Users\\geofk\\work\\GitHUB\\geolearn'\nelif hostName[:4] == 'icme':\n host='icme'\n dirData = r'/home/kuaifang/Data/'\n dirWQ = r'/home/kuaifang/waterQuality/'\n dirJob=r'/home/kuaifang/jobs/' \nelif hostName[:2] == 'sh':\n host='sherlock'\n dirData = r'/scratch/users/kuaifang/Data/'\n dirWQ = r'/scratch/users/kuaifang/waterQuality/'\n dirJob=r'/scratch/users/kuaifang/jobs/'\n dirCode=r'/home/users/kuaifang/GitHUB/geolearn'\n","sub_path":"hydroDL/kPath.py","file_name":"kPath.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"412827401","text":"import sys\r\n\r\nSCORESEQFILENAME = sys.argv[1]\r\nOUTPUTFILENAME = sys.argv[2]\r\n\r\ndef getNumSeqOccurrences():\r\n\t# Get the number of occurrences of each sequence in an output file from scoreSeq.py\r\n\tseqDict = {}\r\n\tscoreSeqFile = open(SCORESEQFILENAME)\r\n\tscoreSeqFile.readline() # Remove the header\r\n\tfor line in scoreSeqFile:\r\n\t\t# Iterate through the lines of the file from score seq, and increment the counts for each sequence\r\n\t\tlineElements = line.split(\"\\t\")\r\n\t\tcurrentSeq = lineElements[2]\r\n\t\tif currentSeq in seqDict.keys():\r\n\t\t\t# The current sequence is in the dictionary, so add 1 to its count\r\n\t\t\tseqDict[currentSeq] = seqDict[currentSeq] + 1\r\n\t\telse:\r\n\t\t\t# Add the current sequence to the dictionary\r\n\t\t\tseqDict[currentSeq] = 1\r\n\tscoreSeqFile.close()\r\n\treturn seqDict\r\n\r\ndef outputSeqDict(seqDict):\r\n\t# Output the sequence dictionary\r\n\toutputFile = open(OUTPUTFILENAME, 'w+')\r\n\tfor seq in seqDict.keys():\r\n\t\t# Iterate through the sequences and record each and its count\r\n\t\toutputFile.write(seq + \"\\t\" + str(seqDict[seq]) + \"\\n\")\r\n\toutputFile.close()\r\n\r\nif __name__==\"__main__\":\r\n\tseqDict = getNumSeqOccurrences()\r\n\toutputSeqDict(seqDict)\r\n\t","sub_path":"pwm/getNumSeqOccurrences.py","file_name":"getNumSeqOccurrences.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
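As a design note on getNumSeqOccurrences.py above, the per-sequence counting can be written more compactly with collections.Counter. A sketch under the same assumptions (tab-separated lines, sequence in column index 2, one header line to skip); the file name in the commented call is only a placeholder:

from collections import Counter

def count_sequences(score_seq_filename):
    # Count how often each sequence (column index 2) appears in a scoreSeq output file.
    counts = Counter()
    with open(score_seq_filename) as score_seq_file:
        next(score_seq_file)  # skip the header line
        for line in score_seq_file:
            counts[line.split("\t")[2]] += 1
    return counts

# counts = count_sequences("scoreSeqOutput.txt")  # hypothetical input path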
+{"seq_id":"572725550","text":"#!/usr/bin/env python3\n\nimport sys\nimport argparse\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Process some integers')\n#parser.add_argument('--plot',\nparser.add_argument('input', metavar='file', type=argparse.FileType('rb'), nargs='+', help='Input files with raw integers')\nargs = parser.parse_args()\n\ndata = []\n\nfig, ax = plt.subplots(2, 1)\n\npcts = [50, 75, 90, 99]\n\nfor f in args.input:\n name = f.name\n a = np.fromfile(f, dtype=np.uint32)\n\n percentiles = np.percentile(a, pcts)\n hist_label = name + ''\n for i, pct in enumerate(pcts):\n hist_label += f'\\n p{(100-pct):02}={int(percentiles[i])}us'\n\n ax[0].plot(a, label=name)\n ax[1].hist(a, label=hist_label, bins='auto')\n\nax[0].set(ylabel='time (us)', title='Rendering time')\nax[1].set(ylabel='time (us)', title='Rendering time')\n\nax[0].legend(title='measurements')\nax[1].legend(title='measurements')\n\nplt.tight_layout()\nfig.savefig(\"test.png\")\n#plt.title(\"histogram\")\nplt.show()\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
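analyze.py above reports np.percentile(a, 99) under the label "p01", which appears to denote the threshold that the slowest 1% of samples exceed. A self-contained illustration of that labelling convention on synthetic timing data:

import numpy as np

rng = np.random.default_rng(0)
a = rng.integers(100, 5000, size=1000)  # fake per-frame times in microseconds

for pct in (50, 75, 90, 99):
    value = np.percentile(a, pct)
    print(f"p{100 - pct:02d} tail threshold: {int(value)}us")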
+{"seq_id":"518147197","text":"#! /usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport math\nimport unittest\nimport warnings\n\nimport torch\nfrom botorch.acquisition import qExpectedImprovement\nfrom botorch.exceptions.warnings import OptimizationWarning\nfrom botorch.fit import fit_gpytorch_model\nfrom botorch.gen import gen_candidates_scipy, gen_candidates_torch\nfrom botorch.models import SingleTaskGP\nfrom gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\n\nfrom .test_fit import NOISE\n\n\nEPS = 1e-8\n\n\nclass TestBaseCandidateGeneration(unittest.TestCase):\n def _setUp(self, double=False, cuda=False, expand=False):\n device = torch.device(\"cuda\") if cuda else torch.device(\"cpu\")\n dtype = torch.double if double else torch.float\n train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)\n train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)\n noise = torch.tensor(NOISE, device=device, dtype=dtype)\n self.train_x = train_x\n self.train_y = train_y + noise\n if expand:\n self.train_x = self.train_x.expand(-1, 2)\n ics = torch.tensor([[0.5, 1.0]], device=device, dtype=dtype)\n else:\n ics = torch.tensor([[0.5]], device=device, dtype=dtype)\n self.initial_conditions = ics\n self.f_best = self.train_y.max().item()\n model = SingleTaskGP(self.train_x, self.train_y)\n self.model = model.to(device=device, dtype=dtype)\n self.mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=OptimizationWarning)\n self.mll = fit_gpytorch_model(\n self.mll, options={\"maxiter\": 1}, max_retries=1\n )\n\n\nclass TestGenCandidates(TestBaseCandidateGeneration):\n def test_gen_candidates(self, cuda=False, gen_candidates=gen_candidates_scipy):\n for double in (True, False):\n self._setUp(double=double, cuda=cuda)\n qEI = qExpectedImprovement(self.model, best_f=self.f_best)\n candidates, _ = gen_candidates(\n initial_conditions=self.initial_conditions,\n acquisition_function=qEI,\n lower_bounds=0,\n upper_bounds=1,\n )\n self.assertTrue(-EPS <= candidates <= 1 + EPS)\n\n def test_gen_candidates_scipy_cuda(self):\n if torch.cuda.is_available():\n self.test_gen_candidates(cuda=True)\n\n def test_gen_candidates_torch(self, cuda=False):\n self.test_gen_candidates(cuda=cuda, gen_candidates=gen_candidates_torch)\n\n def test_gen_candidates_torch_cuda(self):\n if torch.cuda.is_available():\n self.test_gen_candidates_torch(cuda=True)\n\n def test_gen_candidates_with_none_fixed_features(\n self, cuda=False, gen_candidates=gen_candidates_scipy\n ):\n for double in (True, False):\n self._setUp(double=double, cuda=cuda, expand=True)\n qEI = qExpectedImprovement(self.model, best_f=self.f_best)\n candidates, _ = gen_candidates(\n initial_conditions=self.initial_conditions,\n acquisition_function=qEI,\n lower_bounds=0,\n upper_bounds=1,\n fixed_features={1: None},\n )\n candidates = candidates.squeeze(0)\n self.assertTrue(-EPS <= candidates[0] <= 1 + EPS)\n self.assertTrue(candidates[1].item() == 1.0)\n\n def test_gen_candidates_scipy_with_none_fixed_features_cuda(self):\n if torch.cuda.is_available():\n self.test_gen_candidates_with_none_fixed_features(cuda=True)\n\n def test_gen_candidates_torch_with_none_fixed_features(self, cuda=False):\n self.test_gen_candidates_with_none_fixed_features(\n cuda=cuda, gen_candidates=gen_candidates_torch\n )\n\n def test_gen_candidates_torch_with_none_fixed_features_cuda(self):\n if 
torch.cuda.is_available():\n self.test_gen_candidates_torch_with_none_fixed_features(cuda=True)\n\n def test_gen_candidates_with_fixed_features(\n self, cuda=False, gen_candidates=gen_candidates_scipy\n ):\n for double in (True, False):\n self._setUp(double=double, cuda=cuda, expand=True)\n qEI = qExpectedImprovement(self.model, best_f=self.f_best)\n candidates, _ = gen_candidates(\n initial_conditions=self.initial_conditions,\n acquisition_function=qEI,\n lower_bounds=0,\n upper_bounds=1,\n fixed_features={1: 0.25},\n )\n candidates = candidates.squeeze(0)\n self.assertTrue(-EPS <= candidates[0] <= 1 + EPS)\n self.assertTrue(candidates[1].item() == 0.25)\n\n def test_gen_candidates_scipy_with_fixed_features_cuda(self, cuda=False):\n if torch.cuda.is_available():\n self.test_gen_candidates_with_fixed_features(cuda=True)\n\n def test_gen_candidates_torch_with_fixed_features(self, cuda=False):\n self.test_gen_candidates_with_fixed_features(\n cuda=cuda, gen_candidates=gen_candidates_torch\n )\n\n def test_gen_candidates_torch_with_fixed_features_cuda(self, cuda=False):\n if torch.cuda.is_available():\n self.test_gen_candidates_torch_with_fixed_features(cuda=True)\n","sub_path":"test/test_gen.py","file_name":"test_gen.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"604077949","text":"# -*- coding:utf-8 -*-\n\"\"\"对所有的元素定位yaml文件进行审查,尽量是每一项元素不出错\"\"\"\nimport os\nimport yaml\nfrom config.config import rc\nfrom utils.times import running_time\n\n\n@running_time\ndef inspect_element():\n    \"\"\"检查所有的元素是否正确\n    只能做一个简单的检查\n    \"\"\"\n    for files in os.listdir(rc.ELEMENT_PATH):\n        _path = os.path.join(rc.ELEMENT_PATH, files)\n        with open(_path, encoding='utf-8') as f:\n            data = yaml.safe_load(f)\n        for k in data.values():\n            try:\n                pattern, value = k.split('==')\n            except ValueError:\n                raise Exception(\"元素表达式中没有`==`\")\n            if pattern not in rc.LOCATE_MODE:\n                raise Exception('%s中元素【%s】没有指定类型' % (_path, k))\n            elif pattern == 'xpath':\n                assert '//' in value, \\\n                    '%s中元素【%s】xpath类型与值不配' % (_path, k)\n            elif pattern == 'css':\n                assert '//' not in value, \\\n                    '%s中元素【%s]css类型与值不配' % (_path, k)\n            else:\n                assert value, '%s中元素【%s】类型与值不匹配' % (_path, k)\n\n\nif __name__ == '__main__':\n    inspect_element()\n","sub_path":"script/inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"619014248","text":"# -*- coding: euc-kr -*-\r\n\r\nROT_13 = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')\r\n\r\ndef rot13(source):\r\n    result = \"\"\r\n    for l in source:\r\n        if l in ROT_13:\r\n            try:\r\n                result += ROT_13[ROT_13.index(l) + 13]\r\n            except:\r\n                result += ROT_13[ROT_13.index(l) + 13 - len(ROT_13)]\r\n        else:\r\n            result += l\r\n    return result\r\n    # .upper()\r\n\r\nprint(rot13('P:/Hfref/이희웅/NccQngn/Ybpny/Zvpebfbsg/Jvaqbjf/VArgPnpur/VR/NLHA3BO4/PuebzrFrghc.rkr'))","sub_path":"mornitor_3.py","file_name":"mornitor_3.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"529092914","text":"from atmPy.general import timeseries as _timeseries\nfrom atmPy.data_archives.arm import _netCDF\n\n\nclass ArmDatasetSub(_netCDF.ArmDataset):\n def __init__(self,*args, **kwargs):\n self._data_period = 60.\n self._time_offset = (- self._data_period, 's')\n super(ArmDatasetSub,self).__init__(*args, **kwargs)\n ## Define what is good, patchy or bad data\n\n # self._parse_netCDF()\n\n def _parse_netCDF(self):\n super(ArmDatasetSub,self)._parse_netCDF()\n # self._data_quality_control()\n self.relative_humidity = self._read_variable2timeseries(['rh_25m', 'rh_60m'], column_name='Relative Humidity (%)')\n self.temperature = self._read_variable2timeseries(['temp_25m', 'temp_60m'], column_name='Temperature ($^{\\circ}$C)')\n self.vapor_pressure = self._read_variable2timeseries(['vap_pres_25m', 'vap_pres_60m'], column_name='Vapor pressure (kPa)')\n\n def _data_quality_control(self):\n if self.data_quality_flag_max == None:\n if self.data_quality == 'good':\n self.data_quality_flag_max = 0\n elif self.data_quality == 'patchy':\n self.data_quality_flag_max = 0\n elif self.data_quality == 'bad':\n self.data_quality_flag_max = 100000\n else:\n txt = '%s is not an excepted values for data_quality (\"good\", \"patchy\", \"bad\")'%(self.data_quality)\n raise ValueError(txt)\n\n def plot_all(self):\n self.relative_humidity.plot()\n self.temperature.plot()\n self.vapor_pressure.plot()\n\n\ndef _concat_rules(arm_data_objs):\n # create class\n out = ArmDatasetSub(False)\n\n # populate class with concatinated data\n out.relative_humidity = _timeseries.concat([i.relative_humidity for i in arm_data_objs])\n out.relative_humidity._data_period = out._data_period\n out.temperature = _timeseries.concat([i.temperature for i in arm_data_objs])\n out.temperature._data_period = out._data_period\n out.vapor_pressure = _timeseries.concat([i.vapor_pressure for i in arm_data_objs])\n out.vapor_pressure._data_period = out._data_period\n\n # use time stamps from one of the variables\n out.time_stamps = out.relative_humidity.data.index\n return out\n","sub_path":"atmPy/data_archives/arm/file_io/products/_1twr10xC1.py","file_name":"_1twr10xC1.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"76860292","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np \nimport satelliteParam as P\n\n\nclass satelliteAnimation:\n '''\n Create satellite animation\n '''\n def __init__(self):\n self.flagInit = True # Used to indicate initialization\n self.fig, self.ax = plt.subplots() # Initializes a figure and axes object\n self.handle = [] # Initializes a list object that will\n # be used to contain handles to the\n # patches and line objects.\n plt.axis([-2.0*P.length, 2.0*P.length, -2.0*P.length, 2.0*P.length])\n plt.plot([-2.0*P.length, 2.0*P.length], [0, 0], 'b--')\n self.length = P.length\n self.width = P.width\n\n def drawSatellite(self, u):\n # Process inputs to function\n theta = u[0] # Angle of base, rad\n phi = u[1] # angle of panel, rad\n\n self.drawBase(theta)\n self.drawPanel(phi)\n# self.ax.axis('equal') # This will cause the image to not distort\n\n # After each function has been called, initialization is over.\n if self.flagInit == True:\n self.flagInit = False\n\n def drawBase(self, theta):\n # points that define the base\n pts =np.matrix([\n [self.width/2.0, -self.width/2.0],\n [self.width/2.0, -self.width/6.0],\n [self.width/2.0 + self.width/6.0, -self.width/6.0],\n [self.width/2.0 + self.width/6.0, self.width/6.0],\n [self.width/2.0, self.width/6.0],\n [self.width/2.0, self.width/2.0],\n [-self.width/2.0, self.width/2.0],\n [-self.width/2.0, self.width/6.0],\n [-self.width/2.0 - self.width/6.0, self.width/6.0],\n [- self.width/2.0 - self.width/6.0, -self.width/6.0],\n [- self.width/2.0, -self.width/6.0],\n [-self.width/2.0, -self.width/2.0]]).T\n R = np.matrix([[np.cos(theta), np.sin(theta)],\n [-np.sin(theta), np.cos(theta)]])\n pts = R*pts\n xy = np.array(pts.T)\n\n # When the class is initialized, a polygon patch object will be\n # created and added to the axes. After initialization, the polygon\n # patch object will only be updated.\n if self.flagInit == True:\n # Create the Rectangle patch and append its handle\n # to the handle list\n self.handle.append(mpatches.Polygon(xy, facecolor='blue', edgecolor='black'))\n self.ax.add_patch(self.handle[0]) # Add the patch to the axes\n else:\n self.handle[0].set_xy(xy) # Update polygon\n\n def drawPanel(self, phi):\n # points that define the base\n pts = np.matrix([\n [-self.length, -self.width/6.0],\n [self.length, -self.width/6.0],\n [self.length, self.width/6.0],\n [-self.length, self.width/6.0]]).T\n R = np.matrix([[np.cos(phi), np.sin(phi)],\n [-np.sin(phi), np.cos(phi)]])\n pts = R * pts\n xy = np.array(pts.T)\n\n # When the class is initialized, a polygon patch object will be\n # created and added to the axes. 
After initialization, the polygon\n # patch object will only be updated.\n if self.flagInit == True:\n # Create the Rectangle patch and append its handle\n # to the handle list\n self.handle.append(mpatches.Polygon(xy, facecolor='green', edgecolor='black'))\n self.ax.add_patch(self.handle[1]) # Add the patch to the axes\n else:\n self.handle[1].set_xy(xy) # Update polygon\n\n\n# Used see the animation from the command line\nif __name__ == \"__main__\":\n\n simAnimation = satelliteAnimation() # Create Animate object\n theta = 0.0*np.pi/180 # Angle of base rad\n phi = 0.0*np.pi/180 # angle of panel, rad\n simAnimation.drawSatellite([theta, phi, 0, 0])\n #plt.show()\n # Keeps the program from closing until the user presses a button.\n print('Press key to close')\n plt.waitforbuttonpress()\n plt.close()","sub_path":"control_book_public_solutions-master/_C_satellite/python_old/satelliteAnimation.py","file_name":"satelliteAnimation.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"651060336","text":"\n# import ffmpeg\nimport re\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\nfrom tqdm import tqdm\nfrom timer import timer\nimport concurrent.futures \nfrom ast import literal_eval\nfrom multiprocessing import set_start_method\nfrom os.path import isfile, join\n\n\ndef pull_main(video_id = None, container_client = 'athenaliveprod', lang = 'hindi' ):\n basepath = '/app'\n if isfile(f'{basepath}/{video_id}.mp4'):\n print(f'file already exists')\n pass\n else: \n\n\n # set_start_method(\"spawn\", force=True)\n connect_str = \"DefaultEndpointsProtocol=https;AccountName=videobank;AccountKey=+7+BZaxs5zBHwyDAMJHnMEJS1mhzIN4AC6PS7wIbVgE1hd35eHEB9IAbc+E2PfV4GNP7dkFrWiLAVMZ8HgnFEw==;EndpointSuffix=core.windows.net\"\n blob_service_client = BlobServiceClient.from_connection_string(connect_str)\n\n container = blob_service_client.get_container_client(container_client)\n\n if container_client == 'athenaliveprod':\n blobs = container.list_blobs(name_starts_with=f'athenaliveprod/{video_id}')\n else:\n blobs = container.list_blobs(name_starts_with=f'{video_id}')\n\n pat_format = re.compile('.*\\.mp4')\n pat_lang = re.compile(f'.*{lang}', re.IGNORECASE)\n\n for b in blobs:\n name_blob = b.name\n if re.search(pat_format,name_blob) and re.search(pattern=pat_lang, string=name_blob):\n print('<<<<< BLOB MATCH FOUND s >>>>')\n print(f\"Downloading {video_id}.mp4\")\n downloader = container.download_blob(b)\n # file_name = name_blob.split('/')[-1]\n with open(f\"{basepath}/{video_id}.mp4\", 'wb') as f:\n downloader.readinto(f)\n break\n\n\n# @timer(1,1)\n# def main():\n# with concurrent.futures.ProcessPoolExecutor() as executor:\n# executor.map(pull_main, vid_list)\n# executor.shutdown(wait=True)\n","sub_path":"source_code/pull_blob.py","file_name":"pull_blob.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"228252926","text":"from tests.base import BaseUnitTest\nfrom smalleducator_account.views.login import LoginView\nfrom smalleducator_account.models.user import User\n\nfrom uuid import uuid4\nfrom unittest import mock\nfrom unittest.mock import MagicMock, patch\nfrom sqlalchemy.orm.exc import NoResultFound\n\n\nclass TestAccessTokens(BaseUnitTest):\n def setUp(self):\n super().setUp()\n with mock.patch.object(LoginView, '__init__', return_value=None):\n self.view = LoginView(**{})\n \n self.view.service = MagicMock()\n self.view.request = MagicMock()\n self.view.logger = MagicMock()\n\n self.mock_user = User(\n id=uuid4(),\n first_name=\"Jack\",\n last_name=\"Nickelback\",\n ldap_username=\"NickelJack420\",\n access_token=\"t0cken4cce55\"\n )\n\n def test_token_generation(self):\n \"\"\"Test a succesful login and token generation\"\"\"\n self.view.request.json_body = {\n \"user_name\": \"NickelJack420\",\n \"user_pass\": \"upass\"\n }\n self.view.request.create_jwt_token = MagicMock(\n return_value=self.mock_user.access_token\n )\n result = self.view.login()\n assert result['result'] == \"OK\"\n assert result['token'] == \"t0cken4cce55\"\n \n def test_invalid_login(self):\n \"\"\"Test username/password combination not found\"\"\"\n self.view.request.json_body = {\n \"user_name\": \"NickelJack420\",\n \"user_pass\": \"upass\"\n }\n self.view.service.authenticate = MagicMock(\n side_effect=NoResultFound\n )\n result = self.view.login()\n assert result['result'] == \"Error\"\n \n def test_get_session(self):\n \"\"\"Test succesfully retrieving the users session\"\"\"\n self.view.service.get_session = MagicMock(\n return_value=self.mock_user\n )\n result = self.view.get_session()\n assert result['session'] == self.mock_user\n \n def test_no_session_found(self):\n \"\"\"Test the user not having a session\"\"\"\n self.view.service.get_session = MagicMock(\n return_value=None\n )\n result = self.view.get_session()\n assert 'error' in result\n","sub_path":"tests/test_unit_access_tokens.py","file_name":"test_unit_access_tokens.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"215898633","text":"# -*- Coding: utf-8 -*-\r\nimport re\r\nimport utility.print_method_result as util\r\nimport utility.extract as ext\r\n\r\n\r\n# 23. セクション構造\r\n# 記事中に含まれるセクション名と\r\n# そのレベル(例えば\"== セクション名 ==\"なら1)を表示せよ.\r\n\r\n\r\n@util.print_result\r\ndef extract_section(target_json='', target_content=''):\r\n repatter = re.compile(r'''\r\n ^ # 行頭\r\n (={2,}) # キャプチャ対象、2個以上の'='\r\n \\s* # 余分な0個以上の空白('哲学'や'婚姻'の前後に余分な空白があるので除去)\r\n (.+?) # キャプチャ対象、任意の文字が1文字以上、非貪欲(以降の条件の巻き込み防止)\r\n \\s* # 余分な0個以上の空白\r\n \\1 # 後方参照、1番目のキャプチャ対象と同じ内容\r\n .* # 任意の文字が0文字以上\r\n $ # 行末\r\n ''', re.MULTILINE + re.VERBOSE)\r\n json_contents = ext.extract_content(target_json, target_content)\r\n extracted = repatter.findall(json_contents[0])\r\n\r\n levels = []\r\n for line in extracted:\r\n level = len(line[0]) - 1\r\n levels.append(str(level) + ':' + line[1])\r\n return levels\r\n\r\n\r\nif __name__ == '__main__':\r\n extract_section('./data/jawiki-country.json.gz', 'イギリス')\r\n","sub_path":"work23.py","file_name":"work23.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
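The section pattern in work23.py above can be exercised without the Wikipedia dump. The sketch below uses the same expression written compactly (without the VERBOSE layout) on invented MediaWiki-style text:

import re

sample = "==History==\n=== Early period ===\ntext\n====Details====\n"
repatter = re.compile(r'^(={2,})\s*(.+?)\s*\1.*$', re.MULTILINE)

for marks, name in repatter.findall(sample):
    print(f"{len(marks) - 1}:{name}")
# prints: 1:History, 2:Early period, 3:Details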
+{"seq_id":"613956841","text":"import openpyxl\nfrom django.http import HttpResponse\nimport time\nfrom datetime import date\nDOC_TYPE_LIST = ('+','-')\nDOC_TYPE_CELL = 'B1'\nSIZE_OF_HEAD = 3\n\ndef get_info_from_excel(file):\n work_book = openpyxl.load_workbook(file)\n sheet_list = work_book.sheetnames\n work_sheet = work_book.active #get active sheet \n \n doc_type = work_sheet[DOC_TYPE_CELL].value\n rows = work_sheet.rows\n good_table = tuple(rows)[SIZE_OF_HEAD:]\n return {\n 'doc_type':doc_type,\n 'good_table':good_table\n }\n\n\ndef is_valid_or_list_error(doc):\n def analyzer_doc_type(value):\n return value in DOC_TYPE_LIST\n pull_erorrs = {\n f'doc_type_error (cell {DOC_TYPE_CELL}):':analyzer_doc_type(doc['doc_type']),\n 'empty_file:':(doc is not None)\n }\n if False in pull_erorrs.values():\n return pull_erorrs\n else:\n return True\n\ndef print_table(table):\n for row in table:\n for cell in row:\n print (cell,end='')\n print ()\n\ndef build_book(data_range,table):\n wb = openpyxl.Workbook()\n ws = wb.active\n #ws.merge_cells('A1:A3')\n ws.column_dimensions['A'].width = 25\n ws.column_dimensions['C'].width = 15\n start,end = None, None\n if data_range[0]:\n start = data_range[0].date().strftime(\"%d/%m/%Y\")\n if data_range[1]:\n end = data_range[1].date().strftime(\"%d/%m/%Y\")\n if start and end:\n ws['A1'] = f\"Срез c {start} по {end}\"\n elif not start and end:\n ws['A1'] = f\"Срез до {end}\"\n else:\n today = date.today()\n current_data = today.strftime(\"%m/%d/%y\")\n ws['A1'] = f\"Срез до {current_data}\"\n ws['A2'] = \"Название\"\n ws['B2'] = \"Партия\"\n ws['C2'] = \"Количество\"\n min_row=3\n max_col=1000\n max_row=len(table)+min_row\n for row,row_table in zip(ws.iter_rows(min_row=min_row, max_col=max_col, max_row=max_row),table):\n for cell,cell_table in zip(row,row_table):\n cell.value = cell_table\n print(cell.value)\n stream = openpyxl.writer.excel.save_virtual_workbook(wb)\n response = HttpResponse(stream, content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"foo.xlsx\"'\n return response\n\n\n\nif __name__ == \"__main__\": \n document = get_info_from_excel('C:\\\\Users\\\\Admin\\\\Desktop\\\\put_file.xlsx')\n analyzer_doc(document)\n\n\n \n","sub_path":"store/med_store/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"437053939","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGroup evaluation for Linear Regression with NN-Encoded QPESUMS.\n- Read in encoded QPESUMS data\n- Read in precipitation data of 45 stations\n- Loop through 45 stations\n - train with 2013~2015 and evaluate on 2016\n\"\"\"\nimport sys, os, csv, logging, argparse, h5py\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model, svm\n\n__author__ = \"Ting-Shuo Yo\"\n__copyright__ = \"Copyright 2019~2020, DataQualia Lab Co. Ltd.\"\n__credits__ = [\"Ting-Shuo Yo\"]\n__license__ = \"Apache License 2.0\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Ting-Shuo Yo\"\n__email__ = \"tingyo@dataqualia.com\"\n__status__ = \"development\"\n__date__ = '2019-12-20'\n\n# Parameters\nstdids = ['466880', '466910', '466920', '466930', '466940', \n 'C0A520', 'C0A530', 'C0A540', 'C0A550', 'C0A560', \n 'C0A570', 'C0A580', 'C0A640', 'C0A650', 'C0A660', \n 'C0A710', 'C0A860', 'C0A870', 'C0A880', 'C0A890', \n 'C0A920', 'C0A940', 'C0A950', 'C0A970', 'C0A980', \n 'C0A9A0', 'C0A9B0', 'C0A9C0', 'C0A9E0', 'C0A9F0', \n 'C0A9G0', 'C0A9I1', 'C0AC40', 'C0AC60', 'C0AC70', \n 'C0AC80', 'C0ACA0', 'C0AD00', 'C0AD10', 'C0AD20', \n 'C0AD30', 'C0AD40', 'C0AD50', 'C0AG90', 'C0AH00']\n#-----------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------\n# Load input/output data for model\ndef loadIOTab(srcx, srcy, dropna=False, yshift=0):\n import pandas as pd\n import os\n # Read raw input and output\n #logging.info(\"Reading input X from: \"+ srcx)\n logging.info(\"Reading input X from: \"+ srcx)\n xfiles = []\n for root, dirs, files in os.walk(srcx): \n for fn in files: \n if fn.endswith('.npy'): \n xfiles.append({'date':fn.replace('.enc.npy',''), 'xuri':os.path.join(root, fn)})\n xfiles = pd.DataFrame(xfiles)\n logging.info(\"... read input size: \"+str(xfiles.shape))\n #logging.info(\"Reading output Y from: \"+ srcy)\n logging.info(\"Reading output Y from: \"+ srcy)\n yraw = pd.read_csv(srcy, encoding='utf-8')\n yraw['date'] = yraw['date'].apply(str)\n logging.info(\"... read output size: \"+str(yraw.shape))\n # Shift y if specified\n if yshift > 0:\n logging.info(\"... shifting output for forecasting by: \"+str(yshift))\n for c in yraw.columns:\n if not c=='date':\n yraw[c] = yraw[c].shift(yshift)\n # Create complete IO-data\n logging.info(\"Pairing X-Y and splitting training/testing data.\")\n iotab = pd.merge(yraw, xfiles, on='date', sort=True)\n logging.info(\"... data size after merging: \"+str(iotab.shape))\n # Dro NA if specified\n if dropna:\n logging.info('Dropping records with NA')\n iotab = iotab.dropna()\n logging.info(\"... 
data size after dropping-NAs: \"+str(iotab.shape))\n # Done\n return(iotab)\n\n# Function to give report for binary classifications\ndef evaluate_binary(yt, yp, stid=None, ythresh=30.):\n from sklearn.metrics import confusion_matrix\n ytb = (yt>=ythresh)*1\n ypb = (yp>=ythresh)*1\n # Derive metrics\n output = {'id':stid}\n TN, FP, FN, TP = confusion_matrix(ytb, ypb).ravel()\n output['true_positive'] = np.round(TP,2)\n output['false_positive'] = np.round(FP,2)\n output['false_negative'] = np.round(FN,2)\n output['true_negative'] = np.round(TN,2)\n output['sensitivity'] = np.round(TP/(TP+FN),2)\n output['specificity'] = np.round(TN/(FP+TN),2)\n output['prevalence'] = np.round((TP+FN)/(FN+TP+FP+TN),8)\n output['ppv'] = np.round(TP/(TP+FP),4)\n output['npv'] = np.round(TN/(TN+FN),4)\n output['fpr'] = np.round(FP/(FP+TN),4)\n output['fnr'] = np.round(FN/(FN+TP),4)\n output['fdr'] = np.round(FP/(FP+TP),4)\n output['FOR'] = np.round(FN/(TN+FN),4)\n output['accuracy'] = np.round((TP+TN)/(FN+TP+FP+TN),4)\n output['F1'] = np.round(2*TP/(2*TP+FP+FN),4)\n output['MCC'] = np.round((TP*TN-FP*FN)/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)),4)\n output['informedness'] = np.round(output['sensitivity'] + output['specificity'] - 1,4)\n output['markedness'] = np.round(output['ppv'] + output['npv'] -1,4)\n return(output)\n\n# Function to give report for regression\ndef evaluate_regression(y_true, y_pred, stid=None):\n import sklearn.metrics as metrics\n # Calculate measures\n results = {'id':stid}\n results['y_true_mean'] = y_true.mean()\n results['y_true_var'] = y_true.var()\n results['y_pred_mean'] = y_pred.mean()\n results['y_pred_var'] = y_pred.var()\n results['rmse'] = np.sqrt(metrics.mean_squared_error(y_true,y_pred))\n if y_pred.var()<=10e-8:\n results['corr'] = 0\n else:\n results['corr'] = np.corrcoef(y_true,y_pred)[0,1]\n # Return results\n return(results)\n\ndef y_to_log(y):\n ''' Convert the y to log(y+1). '''\n ylog = np.log(y+1).astype(np.float32)\n return(ylog)\n\ndef log_to_y(y):\n ''' Convert the predicted y in log-scale back to original scale. '''\n yori = (np.exp(y.flatten())-1.0).astype(np.float32)\n yori[yori<0.5] = 0. 
# Set the minimal values to 0.\n return(yori)\n\n#-----------------------------------------------------------------------\ndef main():\n # Configure Argument Parser\n parser = argparse.ArgumentParser(description='Retrieve DBZ data for further processing.')\n parser.add_argument('--xpath', '-x', help='the directory containing ebcoded QPESUMS data.')\n parser.add_argument('--ypath', '-y', help='the file containing the precipitation data.')\n parser.add_argument('--output', '-o', help='the prefix of output files.')\n parser.add_argument('--yth', '-t', default=-1, type=float, help='Threshold of Y for training.')\n parser.add_argument('--logy', '-g', default=0, type=int, choices=range(0, 2), help='Use Y in log-space.')\n parser.add_argument('--forecast', '-f', default=0, type=int, help='Number of hours to forecast.')\n parser.add_argument('--logfile', '-l', default=None, help='the log file.')\n args = parser.parse_args()\n # Set up logging\n if not args.logfile is None:\n logging.basicConfig(level=logging.DEBUG, filename=args.logfile, filemode='w')\n else:\n logging.basicConfig(level=logging.DEBUG)\n logging.debug(args)\n # IO data generation\n iotab = loadIOTab(args.xpath, args.ypath, dropna=False, yshift=args.forecast)\n logging.info(' number of total records listed: '+str(iotab.shape[0]))\n # Load Input\n x_full=[]\n for i in range(iotab.shape[0]):\n x_full.append(np.load(iotab['xuri'].iloc[i]).flatten())\n x_full = pd.DataFrame(np.array(x_full))\n x_full.index = list(iotab['date'])\n logging.info(' number of total records read: '+str(x_full.shape[0]))\n # Loop through stations\n report_train = []\n report_test = []\n for sid in stdids:\n # Create iotable for the station\n logging.info('Station id: '+sid)\n stdio = iotab.loc[:,['date', sid]].merge(x_full, left_on='date', right_index=True).dropna().reset_index(drop=True)\n logging.info(' number of valid records: '+str(stdio.shape[0]))\n y = stdio[sid]\n x = stdio.iloc[:, 2:]\n # Split training and testing data\n idx2016 = np.floor(stdio['date'].astype(float)/1000000.) 
== 2016\n size_of_2016 = sum(idx2016)\n size_before_2016 = sum(stdio['date'].astype(int)<2016010100)\n logging.info(' Data index of 2016: '+str(size_of_2016))\n #y_train = y.iloc[:size_before_2016]\n #x_train = x.iloc[:size_before_2016,:]\n y_train = y.loc[~idx2016]\n x_train = x.loc[~idx2016,:]\n # Apply filter on training data\n idx_filtered = y_train>args.yth\n logging.info(' Filter y: '+str(args.yth))\n y_train = y_train.loc[idx_filtered]\n x_train = x_train.loc[idx_filtered,:]\n # Reporting training/testing size\n logging.info(' Data dimension of training data: '+str(x_train.shape[0]) + ', ' +str(x_train.shape[1]))\n y_test = y.loc[idx2016].reset_index(drop=True)\n x_test = x.loc[idx2016,:].reset_index(drop=True)\n logging.info(' Data dimension of testing data: '+str(x_test.shape[0]) + ', ' +str(x_test.shape[1]))\n # Train model and test\n reg = linear_model.SGDRegressor(loss='squared_loss', penalty='elasticnet', alpha=0.0001, l1_ratio=0.25)\n #reg = svm.SVR(kernel='poly', degree=2, gamma='scale', coef0=0.0, tol=0.0001, C=0.05, epsilon=0.25)\n #reg = linear_model.LinearRegression(fit_intercept=True, normalize=True, copy_X=True, n_jobs=4)\n #reg = linear_model.BayesianRidge(normalize=True)\n reg.fit(x_train, y_to_log(y_train))\n yp_train = reg.predict(x_train)\n yp_test = reg.predict(x_test)\n # Evaluate\n evtrain = evaluate_regression(y_train, log_to_y(yp_train), stid=sid)\n report_train.append(evtrain)\n logging.info(evtrain)\n evtest = evaluate_regression(y_test, log_to_y(yp_test), stid=sid)\n report_test.append(evtest)\n logging.info(evtest)\n # Output results\n pd.DataFrame(report_train).to_csv(args.output+'_train.csv', index=False)\n logging.info(pd.DataFrame(report_test).describe())\n pd.DataFrame(report_test).to_csv(args.output+'_test.csv', index=False)\n # done\n return(0)\n \n#==========\n# Script\n#==========\nif __name__==\"__main__\":\n main()\n","sub_path":"utils/qpesums_enc_lr_qpf.py","file_name":"qpesums_enc_lr_qpf.py","file_ext":"py","file_size_in_byte":9386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
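For evaluate_regression in qpesums_enc_lr_qpf.py above, the two headline numbers are the RMSE and the Pearson correlation between observed and predicted rainfall. A tiny synthetic check of those two formulas (values invented; the station id is reused only as a label):

import numpy as np
import sklearn.metrics as metrics

y_true = np.array([0.0, 1.5, 12.0, 30.5, 2.0])   # made-up hourly precipitation
y_pred = np.array([0.2, 1.0, 10.0, 25.0, 3.5])   # made-up model output
rmse = np.sqrt(metrics.mean_squared_error(y_true, y_pred))
corr = np.corrcoef(y_true, y_pred)[0, 1]
print({"id": "466880", "rmse": round(rmse, 3), "corr": round(corr, 3)})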
+{"seq_id":"366309382","text":"# -*- coding: utf-8 -*-\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndf_ratings = pd.read_csv('ratings.csv')\r\ndf_movies = pd.read_csv('movies.csv')\r\n\r\nratings_pt = df_ratings.pivot(index = 'userId', columns = 'movieId', values = 'rating').fillna(0)\r\n\r\ndf_dummy = df_ratings.copy()\r\ndf_dummy['rating'] = df_dummy['rating'].apply(lambda x: 0 if x > 0 else 1)\r\n\r\ndf_dummy = df_dummy.pivot(index = 'userId', columns = 'movieId', values = 'rating').fillna(1)\r\n\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\n# User Similarity Matrix using Cosine similarity as a similarity measure between Users\r\nuser_similarity = cosine_similarity(ratings_pt)\r\nuser_similarity[np.isnan(user_similarity)] = 0\r\n\r\nuser_pred_ratings = np.dot(user_similarity, ratings_pt)\r\n\r\nuser_final_ratings = np.multiply(user_pred_ratings, df_dummy)\r\n\r\n\r\ndef get_movie_recommendations_user_based(user_id):\r\n movies_to_recommend = 10\r\n if user_id in user_final_ratings.index:\r\n recommendations = user_final_ratings.iloc[user_id - 1].sort_values(ascending = False)[0 : movies_to_recommend]\r\n recommend_frame = []\r\n for idx in recommendations.index:\r\n recommend_frame.append({'Title' : df_movies.iloc[idx, 1], 'Values' : recommendations[idx]})\r\n df = pd.DataFrame(recommend_frame, index = range(1, movies_to_recommend + 1))\r\n return df\r\n \r\n else:\r\n return \"No user found!\"","sub_path":"Movie Recommender System/userRecommender.py","file_name":"userRecommender.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
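userRecommender.py above scores unseen movies as similarity-weighted sums of other users' ratings, masked by the dummy matrix so already-rated titles drop out. A toy illustration of that pipeline on a 3x3 synthetic rating matrix:

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Synthetic users x movies ratings (0 = unrated), not the MovieLens CSVs loaded above.
ratings = np.array([[5.0, 0.0, 3.0],
                    [4.0, 0.0, 0.0],
                    [0.0, 2.0, 4.0]])
dummy = (ratings == 0).astype(float)      # 1 where the user has NOT rated a movie
similarity = cosine_similarity(ratings)   # user-user cosine similarity
predicted = similarity @ ratings          # similarity-weighted rating sums
recommendable = predicted * dummy         # keep scores only for unseen movies
print(np.round(recommendable, 2))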
+{"seq_id":"620054153","text":"import inspect\nimport numpy as np\n\n# Import the EnsembleGeneratorClass\nfrom .ensemble_generator_class import EnsembleGeneratorClass\n\n# Import Epidemic ODE library from pyda utilities\nfrom ..utilities import epiODElib as epi\n\nclass SEIRplusEnsemble(EnsembleGeneratorClass):\n def __init__(self):\n # RETURNS:\n # EnsArray = (2*Ntimesteps)x(EnSize) numpy array. Column is \n # (S(t0), E(t0), I(t0), S(t1), E(t1), I(t1), ..., S(tN), E(tN), I(tN))^T\n # EnsTime = Ntimsteps numpy vector of time\n self.Name = 'Deterministic rk4 SEIRplus'\n\n def fwd_propagate(self, Param, start_time, stop_time, Ntimestep):\n # Param = (EnSize)x10 Numpy array of ensemble member parameters\n # \\t\\t\\t\\t\\t\\t\\t\\t\\t\n # Ntimestep = number of time steps\n EnSize = Param.shape[0]\n\n # Define time \n EnsTime = np.linspace(start_time, stop_time, Ntimestep)\n\n # Define empty array to append ensemble members to\n EnsArray = np.zeros((3*Ntimestep, EnSize))\n\n # Generate each of the ensemble members\n for i in range(EnSize):\n # Map search parameters to SEIR variables.\n S0 = Param[i,0]\n E0 = Param[i,1]\n I0 = Param[i,2]\n beta = 1.0/(24.0*Param[i,3])\n mu = 1.0/(24.0*Param[i,4])\n gamma = 1.0/(24.0*Param[i,5])\n nu = Param[i,6]\n alpha= Param[i,7] \n c = (7.0*24.0)*Param[i,8]\n w = (7.0*24.0)*Param[i,9]\n\n # SEIR\n y0 = np.array([[S0], [E0], [I0]])\n\t\t\n # SEIR-plus\n Xsim = epi.SEIRplusode(y0, EnsTime, beta, mu, gamma, nu, alpha, c, w)\n Xsim = Xsim.transpose()\n\n # Reshape and write to EnsArray.\n EnsArray[:,i] = Xsim.reshape(3*Ntimestep)\n\n return [EnsArray, EnsTime]\n\n","sub_path":"pyda/ensemble_generator/SEIRplusEnsemble.py","file_name":"SEIRplusEnsemble.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"526683303","text":"import warnings\nfrom abc import ABC, abstractmethod\nfrom typing import Any, List, Optional, Sequence\n\nimport numpy as np\nfrom openfermion import IsingOperator, QubitOperator, SymbolicOperator\nfrom pyquil.wavefunction import Wavefunction\n\nfrom ..bitstring_distribution import (\n BitstringDistribution,\n create_bitstring_distribution_from_probability_distribution,\n)\nfrom ..circuits import Circuit\nfrom ..circuits.layouts import CircuitConnectivity\nfrom ..measurement import ExpectationValues, Measurements, expectation_values_to_real\nfrom ..openfermion import change_operator_type, get_expectation_value\n\n\nclass QuantumBackend(ABC):\n \"\"\"\n Interface for implementing different quantum backends.\n\n Args:\n n_samples (int): number of times a circuit should be sampled.\n\n \"\"\"\n\n supports_batching = False\n batch_size = None\n\n def __init__(self, n_samples: Optional[int] = None):\n if n_samples is not None:\n warnings.warn(\n \"\"\"The n_samples attribute is deprecated. In future releases,\n n_samples will need to be passed as an argument to\n run_circuit_and_measure or run_circuitset_and_measure.\"\"\".replace(\n \"\\n\", \"\"\n ),\n DeprecationWarning,\n )\n self.n_samples = n_samples\n self.number_of_circuits_run = 0\n self.number_of_jobs_run = 0\n\n if self.supports_batching:\n assert isinstance(self.batch_size, int)\n assert self.batch_size > 0\n\n @abstractmethod\n def run_circuit_and_measure(\n self, circuit: Circuit, n_samples: Optional[int] = None, **kwargs\n ) -> Measurements:\n \"\"\"\n Method for executing the circuit and measuring the outcome.\n Args:\n circuit: quantum circuit to be executed.\n n_samples: The number of samples to collect. If None, the\n number of samples is determined by the n_samples attribute.\n\n Returns:\n core.measurement.Measurements: Object representing the measurements\n resulting from the circuit.\n \"\"\"\n self.number_of_circuits_run += 1\n self.number_of_jobs_run += 1\n\n # This value is only returned so that mypy doesn't complain.\n # You can remove this workaround when we reimplement counter increments in\n # a more type-elegant way.\n return Measurements()\n\n def run_circuitset_and_measure(\n self,\n circuits: Sequence[Circuit],\n n_samples: Optional[List[int]] = None,\n **kwargs\n ) -> List[Measurements]:\n \"\"\"Run a set of circuits and measure a certain number of bitstrings.\n\n It may be useful to override this method for backends that support\n batching. Note that self.n_samples shots are used for each circuit.\n\n Args:\n circuits: The circuits to execute.\n n_samples: The number of samples to collect for each circuit. 
If\n None, the number of samples for each circuit is given by the\n n_samples attribute.\n\n Returns:\n Measurements for each circuit.\n \"\"\"\n measurement_set: List[Measurements]\n\n if not self.supports_batching:\n measurement_set = []\n if n_samples is not None:\n for circuit, n_samples_for_circuit in zip(circuits, n_samples):\n measurement_set.append(\n self.run_circuit_and_measure(\n circuit, n_samples=n_samples_for_circuit, **kwargs\n )\n )\n else:\n for circuit in circuits:\n measurement_set.append(\n self.run_circuit_and_measure(circuit, **kwargs)\n )\n\n return measurement_set\n else:\n self.number_of_circuits_run += len(circuits)\n if isinstance(self.batch_size, int):\n self.number_of_jobs_run += int(np.ceil(len(circuits) / self.batch_size))\n\n # This value is only returned so that mypy doesn't complain.\n # You can remove this workaround when we reimplement counter increments in\n # a more type-elegant way.\n measurement_set = []\n return measurement_set\n\n def get_bitstring_distribution(\n self, circuit: Circuit, **kwargs\n ) -> BitstringDistribution:\n \"\"\"Calculates a bitstring distribution.\n\n Args:\n circuit: quantum circuit to be executed.\n\n Returns:\n Probability distribution of getting specific bistrings.\n\n \"\"\"\n # Get the expectation values\n measurements = self.run_circuit_and_measure(circuit, **kwargs)\n return measurements.get_distribution()\n\n\nclass QuantumSimulator(QuantumBackend):\n @abstractmethod\n def __init__(\n self,\n n_samples: Optional[int] = None,\n noise_model: Optional[Any] = None,\n device_connectivity: Optional[CircuitConnectivity] = None,\n ):\n super().__init__(n_samples)\n\n @abstractmethod\n def get_wavefunction(self, circuit: Circuit, **kwargs) -> Wavefunction:\n \"\"\"Returns a wavefunction representing quantum state produced by a circuit\n\n Args:\n circuit: quantum circuit to be executed.\n \"\"\"\n self.number_of_circuits_run += 1\n self.number_of_jobs_run += 1\n\n def get_exact_expectation_values(\n self, circuit: Circuit, operator: SymbolicOperator, **kwargs\n ) -> ExpectationValues:\n \"\"\"Calculates the expectation values for given operator, based on the exact\n quantum state produced by circuit.\n\n Args:\n circuit: quantum circuit to be executed.\n operator: Operator for which we calculate the expectation value.\n\n Returns:\n Expectation values for given operator.\n \"\"\"\n wavefunction = self.get_wavefunction(circuit)\n if isinstance(operator, IsingOperator):\n operator = change_operator_type(operator, QubitOperator)\n expectation_values = ExpectationValues(\n np.array([get_expectation_value(term, wavefunction) for term in operator])\n )\n expectation_values = expectation_values_to_real(expectation_values)\n return expectation_values\n\n def get_bitstring_distribution(\n self, circuit: Circuit, **kwargs\n ) -> BitstringDistribution:\n \"\"\"Calculates a bitstring distribution.\n\n Args:\n circuit: quantum circuit to be executed.\n\n Returns:\n Probability distribution of getting specific bistrings.\n \"\"\"\n if self.n_samples is None:\n wavefunction = self.get_wavefunction(circuit, **kwargs)\n return create_bitstring_distribution_from_probability_distribution(\n wavefunction.probabilities()\n )\n else:\n # Get the expectation values\n measurements = self.run_circuit_and_measure(circuit, **kwargs)\n return measurements.get_distribution()\n\n\ndef _flip_bits(n, num_bits):\n return int(bin(n)[2:].zfill(num_bits)[::-1], 2)\n\n\ndef flip_wavefunction(wavefunction: Wavefunction):\n number_of_states = 
len(wavefunction.amplitudes)\n ordering = [\n _flip_bits(n, number_of_states.bit_length() - 1)\n for n in range(number_of_states)\n ]\n flipped_amplitudes = [wavefunction.amplitudes[i] for i in ordering]\n return Wavefunction(np.array(flipped_amplitudes))\n","sub_path":"src/python/zquantum/core/interfaces/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":7534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"474151335","text":"from prime import Prime\nans = 0\ncurr = 0\nfor b in Prime.prime():\n if b > 1000:\n break\n for a in range(-999, 1000):\n n = 0\n for i in map(lambda x: b + a * x + x ** 2, range(2000)):\n if not Prime.isPrime(i):\n break\n n += 1\n if n > curr:\n curr = n\n ans = a * b\n\nprint(ans)\n\n\n\n\n","sub_path":"page01/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"181495462","text":"##############################################################################\n# DISPATCHES was produced under the DOE Design Integration and Synthesis\n# Platform to Advance Tightly Coupled Hybrid Energy Systems program (DISPATCHES),\n# and is copyright (c) 2021 by the software owners: The Regents of the University\n# of California, through Lawrence Berkeley National Laboratory, National\n# Technology & Engineering Solutions of Sandia, LLC, Alliance for Sustainable\n# Energy, LLC, Battelle Energy Alliance, LLC, University of Notre Dame du Lac, et\n# al. All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. Both files are also available online at the URL:\n# \"https://github.com/gmlc-dispatches/dispatches\".\n#\n##############################################################################\nimport pytest\n# Import objects from pyomo package\nfrom pyomo.environ import ConcreteModel, SolverFactory, Var\nfrom pyomo.util.check_units import assert_units_consistent\n\n# Import the main FlowsheetBlock from IDAES. The flowsheet block will contain the unit model\nfrom idaes.core import FlowsheetBlock\n\nfrom dispatches.models.renewables_case.wind_power import Wind_Power\n\n\ndef test_windpower():\n # Create the ConcreteModel and the FlowsheetBlock, and attach the flowsheet block to it.\n m = ConcreteModel()\n m.fs = FlowsheetBlock(default={\"dynamic\": False}) # dynamic or ss flowsheet needs to be specified here\n\n # ((wind m/s, wind degrees from north clockwise, probability), )\n resource_timeseries = dict()\n for time in list(m.fs.config.time.data()):\n resource_timeseries[time] = ((10, 180, 0.5),\n (24, 180, 0.5))\n\n wind_config = {'resource_probability_density': resource_timeseries}\n\n m.fs.unit = Wind_Power(default=wind_config)\n\n assert hasattr(m.fs.unit, \"capacity_factor\")\n assert hasattr(m.fs.unit, \"electricity_out\")\n assert isinstance(m.fs.unit.system_capacity, Var)\n assert isinstance(m.fs.unit.electricity, Var)\n\n m.fs.unit.system_capacity.fix(50000) # kW\n\n assert_units_consistent(m)\n\n solver = SolverFactory('ipopt')\n solver.solve(m.fs)\n\n assert m.fs.unit.capacity_factor[0].value == pytest.approx(0.0001905, rel=1e-2)\n assert m.fs.unit.electricity_out.electricity[0].value == pytest.approx(9.525, rel=1e-2)\n\n","sub_path":"dispatches/models/renewables_case/test_wind_power.py","file_name":"test_wind_power.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"508560661","text":"# 영어 끝말잇기 (Summer/Winter Coding ~2018)\n\n# solution2 리팩토링\ndef solution(n, words):\n for i in range(1, len(words)):\n # 이미 했던 단어일 경우 or 앞 단어 끝 글자와 현재 단어 첫 글자가 다를 경우 탈락\n if words[i] in words[:i] or words[i-1][-1]!=words[i][0]:\n return [i%n+1, i//n+1] # 사람 번호, 몇 번째 차례인지\n return [0, 0]\n\ndef solution2(n, words):\n chk_list = [words[0]]\n cur_last_char = words[0][-1]\n for i in range(1, len(words)):\n # 이미 했던 단어일 경우 or 앞 단어 끝 글자와 현재 단어 첫 글자가 다를 경우 탈락\n if words[i] in chk_list or cur_last_char!=words[i][0]:\n return [i%n+1, i//n+1] # 사람 번호, 몇 번째 차례인지\n chk_list.append(words[i])\n cur_last_char = words[i][-1] # 끝 글자 갱신\n return [0, 0]\n\nprint(solution(3, [\"tank\", \"kick\", \"know\", \"wheel\", \"land\", \"dream\", \"mother\", \"robot\", \"tank\"]))\nprint(solution(5, [\"hello\", \"observe\", \"effect\", \"take\", \"either\", \"recognize\", \"encourage\", \"ensure\", \"establish\", \"hang\", \"gather\", \"refer\", \"reference\", \"estimate\", \"executive\"]))\nprint(solution(2, [\"hello\", \"one\", \"even\", \"never\", \"now\", \"world\", \"draw\"]))","sub_path":"Programmers/[코테연습]영어끝말잇기.py","file_name":"[코테연습]영어끝말잇기.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"80694341","text":"from django.utils import timezone\nfrom django.conf import settings\n\nimport subprocess\nimport os\nimport asyncio\nimport json\nfrom .models import Project\nimport docker\n\npath = settings.SPLASH_CODE_GENERATOR_PATH\nclient = docker.from_env()\ndocker_api = client.api\ndocker_image = client.images\ndocker_container = client.containers\nasync def save_code(user, title, prev_schema, schema):\n try:\n if os.path.isdir(\"usr_src/{}/src/{}\".format(user.username, title)) :\n merge_code(user.username, title, prev_schema, schema)\n else:\n generate_code(user, title, schema)\n \n except Exception as e:\n print(\"error occurs while save code: \", str(e))\n raise e\n finally:\n if os.path.isfile(\"usr_src/{}/temp.json\".format(user.username)):\n os.remove(\"usr_src/{}/temp.json\".format(user.username))\n\ndef generate_code(user, title, schema):\n if not os.path.isdir(\"usr_src/{}/src\".format(user.username)):\n os.makedirs(\"usr_src/{}/src\".format(user.username))\n with open(\"usr_src/{}/temp.json\".format(user.username), \"w\") as f:\n f.write(schema)\n command = \"python {}/main.py --name {} --file temp.json --path {}/usr_src/{} --username {} --email {}\".format(path, title, settings.BASE_DIR, user.username, user.username, user.email)\n # print(command)\n process = subprocess.Popen(command.split(), cwd=\"usr_src/{}\".format(user.username), stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n out, err = process.communicate()\n # print(out)\n print(err)\n if err:\n delete_code(user, title)\n project = Project.objects.get(title=title)\n project.delete()\ndef merge_code(user, title, prev_schema, schema):\n prev_schema = json.loads(prev_schema)\n schema = json.loads(schema)\n added_nodes, removed_nodes, modified_nodes = compare_schema_node(prev_schema, schema)\n added_links, removed_links, modified_links = compare_schema_link(prev_schema, schema)\n base_dict = {'class': 'GraphLinksModel', 'linkKeyProperty': 'key', 'nodeDataArray': [], 'linkDataArray': []}\n added_dict = removed_dict = modified_dict = base_dict\n \n added_dict[\"nodeDataArray\"] = added_nodes\n removed_dict[\"nodeDataArray\"] = removed_nodes\n \n print(\"Added nodes: \", added_nodes)\n print(\"Removed nodes: \", removed_nodes)\n print(\"Modified nodes: \", modified_nodes)\n print(\"Added links: \", added_links)\n print(\"Removed links: \", removed_links)\n print(\"Modified links: \", modified_links)\n\ndef compare_schema_node(prev, new):\n prev_length = len(prev[\"nodeDataArray\"])\n new_length = len(new[\"nodeDataArray\"])\n prev_uuid = [0]*prev_length\n new_uuid = [0]*new_length\n\n for i in range(prev_length):\n prev_uuid[i]= prev[\"nodeDataArray\"][i][\"UUID\"]\n\n for i in range (new_length):\n new_uuid[i] = new[\"nodeDataArray\"][i][\"UUID\"]\n\n set_prevuuid = set(prev_uuid)\n set_newuuid = set(new_uuid)\n shared = set_prevuuid.intersection(set_newuuid)\n\n removed = set_prevuuid-set_newuuid\n if removed ==set():\n removed = {\"none\"}\n added = set_newuuid-set_prevuuid\n if added == set():\n added = {\"none\"}\n\n same = set(a for a in shared if prev[\"nodeDataArray\"][prev_uuid.index(a)] == new[\"nodeDataArray\"][new_uuid.index(a)])\n modified = set(a for a in shared if prev[\"nodeDataArray\"][prev_uuid.index(a)] != new[\"nodeDataArray\"][new_uuid.index(a)])\n\n if modified ==set():\n modified = {\"none\"}\n\n added_list = list(added)\n removed_list = list(removed)\n modified_list = list(modified)\n\n added_list2 = [0]*len(added_list)\n removed_list2 = 
[0]*len(removed_list)\n modified_list2 = [0]*len(modified_list)\n modified_list3 = [0]*len(modified_list)\n\n if added_list == [\"none\"]:\n added_list2 = []\n else:\n for i in range(len(added_list)):\n added_list2[i] = new[\"nodeDataArray\"][new_uuid.index(added_list[i])]\n\n if removed_list == [\"none\"]:\n removed_list2 = []\n else:\n for i in range(len(removed_list)):\n removed_list2[i] = prev[\"nodeDataArray\"][prev_uuid.index(removed_list[i])]\n\n modified_list2 = []\n\n if modified_list == [\"none\"]:\n modified_list2 = []\n\n else:\n for i in range(len(modified_list)):\n set1 = set(prev[\"nodeDataArray\"][prev_uuid.index(modified_list[i])].items())\n set2 = set(new[\"nodeDataArray\"][new_uuid.index(modified_list[i])].items())\n diction1 = {\"UUID: \": prev[\"nodeDataArray\"][i][\"UUID\"]}\n diction1[\"from\"] = set1-set2\n diction1[\"to\"] = set2-set1\n modified_list2.append(diction1)\n\n return added_list2, removed_list2, modified_list2\n\ndef compare_schema_link(prev, new):\n prev_length = len(prev[\"linkDataArray\"])\n new_length = len(new[\"linkDataArray\"])\n prev_key = [0]*prev_length\n new_key = [0]*new_length\n\n for i in range(prev_length):\n prev_key[i]= prev[\"linkDataArray\"][i][\"key\"]\n\n for i in range (new_length):\n new_key[i] = new[\"linkDataArray\"][i][\"key\"]\n\n print(\"prev_key\", prev_key)\n print(new_key)\n set_prevkey = set(prev_key)\n set_newkey = set(new_key)\n shared = set_prevkey.intersection(set_newkey)\n\n\n removed = set_prevkey-set_newkey\n if removed ==set():\n removed = {\"none\"}\n\n added = set_newkey-set_prevkey\n if added == set():\n added = {\"none\"}\n\n modified = set(a for a in shared if prev[\"linkDataArray\"][prev_key.index(a)] != new[\"linkDataArray\"][new_key.index(a)])\n\n if modified ==set():\n modified = {\"none\"}\n\n added_list = list(added)\n removed_list = list(removed)\n modified_list = list(modified)\n print(added_list, removed_list, modified_list)\n\n\n added_list2 = [0]*len(added_list)\n removed_list2 = [0]*len(removed_list)\n modified_list2 = [0]*len(modified_list)\n modified_list3 = [0]*len(modified_list)\n\n if added_list == [\"none\"]:\n added_list2 = []\n else:\n for i in range(len(added_list)):\n added_list2[i] = new[\"linkDataArray\"][new_key.index(added_list[i])]\n\n if removed_list == [\"none\"]:\n removed_list2 = []\n else:\n for i in range(len(removed_list)):\n removed_list2[i] = prev[\"linkDataArray\"][prev_key.index(removed_list[i])]\n\n modified_list2 = []\n\n if modified_list == [\"none\"]:\n modified_list2 = []\n\n else:\n for i in range(len(modified_list)):\n set1 = set(prev[\"linkDataArray\"][prev_key.index(modified_list[i])].items())\n set2 = set(new[\"linkDataArray\"][new_key.index(modified_list[i])].items())\n diction1 = {\"key: \": prev[\"linkDataArray\"][i][\"key\"]}\n diction1[\"from\"] = set1-set2\n diction1[\"to\"] = set2-set1\n modified_list2.append(diction1)\n\n return added_list2, removed_list2, modified_list2\n\ndef delete_code(user, title):\n os.rmdir(\"usr_src/{}/src/{}\".format(user.username, title))\n\nasync def build_docker_image(user, title):\n path = f'usr_src/{user.username}/src/Dockerfile'\n username = user.username.lower()\n with open(path, 'w') as f:\n f.write('FROM splash_environment:0.0\\n')\n # f.write(f'COPY {title} /root/dev_ws/src/{title}\\n')\n f.write('WORKDIR /root/dev_ws\\n')\n f.write('SHELL [\"/bin/bash\", \"-c\"]\\n')\n f.write('RUN echo \\\"source /opt/ros/dashing/setup.bash\\\" >> /root/.bashrc\\n')\n # f.write('RUN colcon build\\n')\n # f.write('RUN echo \\\"source 
/root/dev_ws/install/setup.bash\\\" >> /root/.bashrc\\n')\n f.write('RUN apt update\\n')\n f.write('RUN apt install -y openssh-server\\n')\n f.write('RUN mkdir /var/run/sshd\\n')\n f.write('RUN echo \\'root:root\\' | chpasswd\\n')\n f.write('RUN sed -ri \\'s/^#?PermitRootLogin\\s+.*/PermitRootLogin yes/\\' /etc/ssh/sshd_config\\n')\n f.write('RUN sed -ri \\'s/UsePAM yes/#UsePAM yes/g\\' /etc/ssh/sshd_config\\n')\n f.write('RUN mkdir /root/.ssh\\n')\n f.write('RUN echo \\\"cd /root/dev_ws\\\" >> /root/.bashrc\\n')\n f.write('RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\\n')\n f.write('EXPOSE 22\\n')\n f.write('CMD [\\\"/usr/sbin/sshd\\\", \\\"-D\\\"]\\n')\n\n image = docker_image.build(path=f'{settings.BASE_DIR}/usr_src/{user.username}/src', tag=f'{username}_splash_{title}:0.0')\n docker_container.run(image=f'{username}_splash_{title}:0.0', init=True, tty=True, detach=True, name=f'{username}_splash_{title}', ports={'22/tcp': 11111}, volumes={f'{settings.BASE_DIR}/usr_src/{user.username}/src/{title}': {'bind': f'/root/dev_ws/src/{title}', 'mode': 'rw'}})\n os.remove(path)\n\n\nasync def make_build_unit(user, title, build_unit_name):\n path = f'usr_src/{user.username}/src/Dockerfile'\n username = user.username.lower()\n with open(path, 'w') as f:\n f.write(f'FROM {username}_splash_{title}:0.0\\n')\n f.write('WORKDIR /root/dev_ws\\n')\n f.write('SHELL [\"/bin/bash\", \"-c\"]\\n')\n f.write('RUN echo \\\"source /opt/ros/dashing/setup.bash\\\" >> /root/.bashrc\\n')\n f.write(f'COPY {title} /root/dev/src/{title}\\n')\n f.write('colcon build')\n f.write('RUN echo \\\"source /root/dev_ws/install/setup.bash\\\" >> /root/.bashrc\\n')\n f.write('CMD ros2 run {title} {build_unit_name}')\n\n image = docker_image.build(path=f'{settings.BASE_DIR}/usr_src/{user.username}/src', tag=f'{username}_splash_{title}_{build_unit_name}:0.0')\n docker_container.run(image=f'{username}_splash_{title}_{build_unit_name}:0.0', init=True, tty=True, detach=True, name=f'{username}_splash_{title}_{build_unit_name}')\n os.remove(path)","sub_path":"core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":9603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"162437223","text":"# coding=utf-8\n\n__author__ = 'jamon'\n\nfrom config.globalconfig import GlobalConfig\nfrom obespoir.share.ob_log import logger\nfrom share.message_ids import USER_TEST_ACT\nfrom share.errorcode import CARD_NOT_IN_HAND_CARD, TEST_PARAMS_ERROR, CANT_USE_TEST, CANT_USE_TEST_GAME_STATUS_UN_AGREE, NEED_S_CARD_T_CARD\nfrom service.mahjong.constants.gamedefine import Act, TestActionType\nfrom service.mahjong.controls.notifybridge import notify_single_user\nfrom service.mahjong.constants.gamedefine import GameStatus\nfrom service.mahjong.models.playeract.base_player_act import BasePlayerAct\n\n\nclass TestAct(BasePlayerAct):\n \"\"\"測試接口\"\"\"\n def __init__(self, game_data):\n super(TestAct, self).__init__(game_data=game_data)\n self.game_data = game_data\n self.handlers = {\n TestActionType.HUAN_CARD: self.huan_card, # 換牌\n TestActionType.SURE_NEXT_CARDS: self.sure_next_cards, # 确定接下来的牌\n TestActionType.GET_LAST_CARD: self.get_last_card, # 胡碰碰胡\n TestActionType.INIT_DRAW_CARDS: self.init_draw_cards, # 初始化发牌\n TestActionType.QUICK_DRAW: self.quick_draw # 快速留局\n }\n\n def execute(self, seat_id, act, card_list):\n \"\"\"\n 执行测试换牌\n :param act_params:\n :return:\n \"\"\"\n logger.debug(u\"測試换牌: %s\", str([seat_id, act, card_list]))\n return self.handlers.get(act)(seat_id, card_list)\n\n def huan_card(self, seat_id, test_params):\n if not self.game_data.game_config.test_mode:\n self.notify_player_card_change(seat_id, code=CANT_USE_TEST)\n return\n logger.debug(u\"换牌:%s\", str([seat_id, test_params]))\n old_card = test_params.get(\"source_card\")[0]\n new_card = test_params.get(\"target_card\")[0]\n if not old_card or not new_card:\n self.notify_player_card_change(seat_id, code=NEED_S_CARD_T_CARD)\n return\n if not self.players[seat_id].hand_card.has_card(old_card):\n self.notify_player_card_change(seat_id, code=CARD_NOT_IN_HAND_CARD)\n return\n self.players[seat_id].hand_card.del_hand_card_by_val(card_val=old_card)\n self.players[seat_id].hand_card.add_hand_card_by_vals(card_vals=[new_card])\n self.notify_player_card_change(seat_id)\n\n def sure_next_cards(self, seat_id, test_params):\n if not self.game_data.game_config.test_mode:\n self.notify_player_card_change(seat_id, code=CANT_USE_TEST)\n return\n card_list = test_params.get(\"target_card\")\n if not isinstance(card_list, list):\n self.notify_player_card_change(seat_id, TEST_PARAMS_ERROR)\n return\n logger.debug(u\"确定接下来的牌:%s\", str([seat_id, card_list]))\n if GlobalConfig().test_sure_next_cards.get(self.desk_id):\n GlobalConfig().test_sure_next_cards[self.desk_id][seat_id].extend(card_list)\n else:\n GlobalConfig().test_sure_next_cards[self.desk_id] = [[] for _ in range(self.game_data.max_player_num)]\n GlobalConfig().test_sure_next_cards[self.desk_id][seat_id].extend(card_list)\n\n self.notify_player_next(seat_id)\n\n\n\n def get_last_card(self, seat_id, card_list):\n if not self.game_data.game_config.test_mode:\n self.notify_player_card_change(seat_id, code=CANT_USE_TEST)\n return\n logger.debug(u\"获取最后一张牌:%s\", str([seat_id, card_list]))\n last_card = self.game_data.card_dealer.get_the_last_card()\n self.notify_player(seat_id, [last_card])\n\n def init_draw_cards(self, seat_id, test_params):\n\n self.notify_player_card_change(seat_id, code=CANT_USE_TEST_GAME_STATUS_UN_AGREE)\n\n\n def quick_draw(self, seat_id, test_params):\n if not self.game_data.game_config.test_mode:\n self.notify_player_card_change(seat_id, code=CANT_USE_TEST)\n return\n self.game_data.card_dealer.card_count = 0\n 
self.notify_player_quick_draw(seat_id)\n\n\n def notify_player_card_change(self, seat_id, code=200):\n data = {\"test_type\": TestActionType.HUAN_CARD,\n \"seat_id\": seat_id,\n \"hand_card\": self.players[seat_id].hand_card.hand_card_vals}\n notify_single_user(self.desk_id, seat_id, USER_TEST_ACT, data, code)\n\n def notify_player_next(self, seat_id, code=200):\n data = {\"test_type\": TestActionType.SURE_NEXT_CARDS,\n \"seat_id\": seat_id,\n \"next_cards\": GlobalConfig().test_sure_next_cards[self.desk_id][seat_id]}\n notify_single_user(self.desk_id, seat_id, USER_TEST_ACT, data, code)\n\n def notify_player(self, seat_id, cards, code=200):\n data = {\"test_type\": TestActionType.GET_LAST_CARD, \"seat_id\": seat_id, \"cards\": cards}\n notify_single_user(self.desk_id, seat_id, USER_TEST_ACT, data, code)\n\n def notify_player_quick_draw(self, seat_id, code=200):\n data = {\"test_type\": TestActionType.QUICK_DRAW, \"seat_id\": seat_id, \"remain_cards\": 0}\n notify_single_user(self.desk_id, seat_id, USER_TEST_ACT, data, code)\n","sub_path":"echecs_espoir/service/mahjong/models/playeract/test_act.py","file_name":"test_act.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"261452647","text":"\"\"\"region.py: Region class and region_inst().\"\"\"\nimport logging\n\nfrom aospy_synthetic.__config__ import LAT_STR, LON_STR\n\n\nclass Region(object):\n \"\"\"Geographical region.\"\"\"\n def __init__(self, name='', description='', lon_bounds=[], lat_bounds=[],\n mask_bounds=[], do_land_mask=False):\n \"\"\"Instantiate a Region object.\"\"\"\n self.name = name\n self.description = description\n if lon_bounds and lat_bounds and not mask_bounds:\n self.mask_bounds = [(lat_bounds, lon_bounds)]\n else:\n self.mask_bounds = mask_bounds\n self.do_land_mask = do_land_mask\n\n def __str__(self):\n return 'Geographical region \"' + self.name + '\"'\n\n __repr__ = __str__\n\n @staticmethod\n def _add_to_mask(data, lat_bounds, lon_bounds):\n \"\"\"Add mask spanning given lat-lon rectangle.\"\"\"\n mask_lat = ((data[LAT_STR] > lat_bounds[0]) &\n (data[LAT_STR] < lat_bounds[1]))\n return mask_lat & ((data[LON_STR] > lon_bounds[0]) &\n (data[LON_STR] < lon_bounds[1]))\n\n def make_mask(self, data):\n \"\"\"Construct the mask that defines this region.\"\"\"\n # For each set of bounds add to the conditional.\n mask = False\n for lat_bounds, lon_bounds in self.mask_bounds:\n mask |= self._add_to_mask(data, lat_bounds, lon_bounds)\n return mask\n\n def mask_var(self, data):\n \"\"\"Mask the data of the given variable outside the region.\"\"\"\n return data.where(self.make_mask(data))\n\n @staticmethod\n def _get_land_mask(data, do_land_mask):\n if not do_land_mask:\n return 1\n try:\n land_mask = data.land_mask.copy()\n except AttributeError:\n # TODO: Implement aospy built-in land mask to default to.\n msg = (\"No land mask found. Using empty mask, which amounts to \"\n \"no land or ocean mask being applied. Regions that use a \"\n \"land or ocean mask will therefore NOT be accurately \"\n \"computed.\")\n logging.warning(msg)\n return 1\n try:\n percent_bool = land_mask.units.lower() in ('%', 'percent')\n except AttributeError:\n # Wrong for the edge case where no grid cell is 100% land.\n percent_bool = land_mask.max() == 100\n if percent_bool:\n land_mask *= 0.01\n if do_land_mask in (True, 'land'):\n return land_mask\n if do_land_mask == 'ocean':\n return 1. 
- land_mask\n if do_land_mask in ('strict_land', 'strict_ocean'):\n raise NotImplementedError\n msg = (\"'do_land_mask' value of '{0}' is not one of the valid \"\n \"choices: [True, False, 'land', 'ocean', 'strict_land', \"\n \"'strict_ocean']\").format(do_land_mask)\n raise ValueError(msg)\n\n @staticmethod\n def _sum_over_lat_lon(arr):\n \"\"\"Sum an array over the latitude and longitude dimensions.\"\"\"\n return arr.sum(LAT_STR).sum(LON_STR)\n\n def ts(self, data):\n \"\"\"Create time-series of region-average data.\"\"\"\n data_masked = self.mask_var(data)\n sfc_area = data.sfc_area\n land_mask = self._get_land_mask(data, self.do_land_mask)\n weights = self._sum_over_lat_lon((self.mask_var(sfc_area)*land_mask))\n return (self._sum_over_lat_lon(data_masked*sfc_area*land_mask) /\n weights)\n\n def av(self, data):\n \"\"\"Time average of region-average time-series.\"\"\"\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.mean('year')\n\n def std(self, data):\n \"\"\"Standard deviation of region-average time-series.\"\"\"\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.std('year')\n","sub_path":"aospy_synthetic/region.py","file_name":"region.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"578375222","text":"def cint(s):\n ''' helper to convert strings in constructor of Sudoku '''\n if s == ' ':\n return None\n else:\n return int(s)\n\n\nclass Sudoku:\n\n def __init__(self,rows):\n ''' takes list of 9 strings, each with len of 9 of space or digit '''\n self.board = []\n for i in range(9):\n self.board.append( [cint(x) for x in list(rows[i])] )\n self.empties = self.empty_list()\n\n def __str__(self):\n ''' return board formated for console '''\n s = \"\"\n for r in range(9):\n if r % 3 == 0:\n s += \"+=========+=========+=========+\\n\"\n for c in range(9):\n if c % 3 == 0:\n s += \"|\"\n x = self.board[r][c]\n if x is None:\n s += \" . \"\n else:\n s += \" %d \" % x\n s += \"|\\n\"\n s += \"+=========+=========+=========+\\n\"\n return s\n\n def empty_list(self):\n ''' find list of empty cells in (r,c) form '''\n elist = []\n for r in range(9):\n for c in range(9):\n if self.board[r][c] is None:\n elist.append((r,c))\n return elist\n\n\n def local_set(self,cell):\n ''' return set of number in local block of cell'''\n r,c = cell\n r0 = r - ( r % 3)\n c0 = c - ( c % 3)\n s = set()\n for r in range(r0,r0+3):\n for c in range( c0, c0+3):\n x = self.board[r][c]\n if x is not None:\n s.add(x)\n return s\n\n def row_set(self,cell):\n ''' return set of number in row of cell'''\n r0,c0 = cell\n s = set()\n for c in range(9):\n x = self.board[r0][c]\n if x is not None:\n s.add(x)\n return s\n\n def column_set(self,cell):\n ''' return set of number in column of cell'''\n r0,c0 = cell\n s = set()\n for r in range(9):\n x = self.board[r][c0]\n if x is not None:\n s.add(x)\n return s\n\n\n def possible_for_cell(self,cell):\n ''' return list of pairs for open cell '''\n return set(range(1,10)) - self.local_set(cell) - self.row_set(cell) - self.column_set(cell)\n\n def solve(self):\n ''' recursive solve, return when solution, or raise error\n try each cell combo, back out by clearing cell to none and return\n unless solve, then keep filled board.\n '''\n\n if len(self.empties) == 0:\n print(\"FOUND SOLUTION\")\n print(self)\n return True\n\n r,c = self.empties.pop()\n to_try = self.possible_for_cell((r,c))\n if len(to_try) == 0:\n # can't succeed, no number possible\n self.empties.append((r,c))\n self.board[r][c] = None\n return False;\n\n for n in to_try:\n self.board[r][c] = n\n if self.solve():\n return True; # found number for cell r,c\n \n\t\t# failed if we got here\n self.empties.append((r,c)) # add failed try cell back to empties\n self.board[r][c] = None # clear last try from it\n return False;\n \n \n##----------- solve a soduku puzzle:\nimport time\n\nrows = [\n \" 3 7 \",\n \" 91 32\",\n \" 26 \",\n \" 7 6 4 \",\n \" 6 5 7 \",\n \" 5 3 6 \",\n \" 18 \",\n \"78 46 \",\n \" 3 1 \",\n\n\n ]\ns = Sudoku(rows)\nprint(s)\n\n#for r in range(9):\n# for c in range(9):\n# print(\"cell:\", (r+1,c+1), \" try:\", s.possible_for_cell((r,c)))\n\nstart = time.clock()\ns.solve()\nt = time.clock()-start\nprint( '(%.2f seconds)\\n' % t )\n","sub_path":"Python/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"327596287","text":"# Задача №3. Закрепляем условный оператор и операторы сравнения\n# Напишите программу, предлагающую пользователю ввести три целых числа.\n# Программа выводит их в порядке возрастания, разделяя запятыми.\n# Например, если пользователь вводит числа 10 4 6,\n# то программа должна вывести на экран числа 4, 6, 10.\n# Если два числа совпадают. то они должны идти одно за другим.\n# Например. если пользователь вводит числа 4 5 4, то программа должна вывести на экран 4, 4, 5.\n# Запрещено(!) использовать стандартные функции сортировки!\n# Имя файла: task_01_03.py\n# Входные данные: 10 4 6\n# Выходные данные: 4, 6, 10\n# Входные данные: 4 5 4\n# Выходные данные: 4, 4, 5\n\n\nA = int(input())\n\nB = int(input())\n\nC = int(input())\n\nlst1 = [A, B, C]\n\nif lst1[0] > lst1[1]:\n if lst1[1] > lst1[2]:\n lst1[0] = C\n lst1[1] = B\n lst1[2] = A\n else:\n lst1[0] = B\n lst1[1] = A\n lst1[2] = C\nelif lst1[0] < lst1[1]:\n if lst1[1] > lst1[2]:\n lst1[0] = A\n lst1[1] = C\n lst1[2] = B\n else:\n lst1[0] = C\n lst1[1] = A\n lst1[2] = B\n\nprint(lst1[0], lst1[1], lst1[2], sep=', ')\n\n","sub_path":"task_01_03.py","file_name":"task_01_03.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"495546087","text":"import numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndef get_recommend_data(sim):\n def wrap():\n top10_near = sim()\n drop_data = drop_movie_data()\n mult_rec = DataFrame()\n for i in top10_near.columns:\n if i != guest_id:\n mult_rec[i] = data[i] * (1 + top10_near[i][0])\n\n mult_rec = mult_rec.drop(drop_data)\n return mult_rec.T.sum().sort_values(ascending=False).to_frame('recommend rate').head(10)\n return wrap\n\n@get_recommend_data\ndef get_cos_similarity_data():\n sim = DataFrame()\n for i in data.columns:\n sim[i] = DataFrame(cosine_similarity([data[guest_id]], [data[i]]))\n\n return sim.T.sort_values(by=0, ascending=False).head(11).T\n\ndef drop_movie_data():\n return data[data[guest_id] > 0][guest_id].to_frame('rating').T\n\nguest_id = int(input('insert a user id : '))\n\ndata = pd.read_table('data/u.data', sep='\\t', usecols=['user id', 'movie id', 'rating'])\ndata = data.pivot_table(index='movie id', columns='user id', values='rating', fill_value=0)\ndata = get_cos_similarity_data()\n\nitem = pd.read_table('data/u.item', sep='|', usecols=['movie id', 'movie title', 'release date'])\nitem = item.set_index(['movie id'])\nprint(pd.concat([data, item], axis=1, join_axes=[data.index]))","sub_path":"movie_cf_v0.py","file_name":"movie_cf_v0.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"169571761","text":"\"\"\"\n==============================================================================\nProgram: SpellingCorrector.py\n Author: Kyle Reese Almryde\n Date: Thu 03/28/2013 @ 12:03:42 PM\n\n Description: This program tries to correct the spelling of a word using a\n supplied dictionary and a criteria.\n\n\n\n==============================================================================\n\"\"\"\nimport os\nimport sys\nimport difflib\nimport wave\nfrom pprint import pprint\n\n\ndef getWAVduration(fname):\n \"\"\" Determine the duration of a .WAV file\n\n Params:\n fname -- String: The WAV filename\n\n Returns:\n A Float representing the duration in milliseconds\n \"\"\"\n f = wave.open(fname, 'r')\n frames = f.getnframes()\n rate = f.getframerate()\n duration = frames/float(rate) * 1000\n return duration\n\n\n#=============================== START OF MAIN ===============================\n\ndef main():\n\n SOUNDS = '/usr/local/Utilities/PROJECTS/Russian/sounds'\n inc = os.path.join(SOUNDS, 'IncorrectList.txt')\n corr = os.path.join(SOUNDS, 'CorrectList.txt')\n\n lexicon = {os.path.split(y)[1].strip().lower() for y in open(corr).readlines()}\n\n outFile = os.path.join(SOUNDS, 'FixedList.txt')\n fout = open(outFile, 'w')\n\n template = \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\n\"\n header = [\"Speaker\", \"Gender\", \"Marking\", \"Word\", \"Fix\"]\n fout.write(template.format(*header))\n\n for line in open(inc).readlines():\n mark, l = line.split(':')\n p, l = os.path.split(l)\n speaker, g = os.path.split(p)\n gender = g[2:]\n word = l.strip().lower()\n fix = difflib.get_close_matches(word, lexicon, 1)\n fix = ''.join(fix) if len(fix) > 0 else None\n fout.write(template.format(speaker, gender, mark, word, fix))\n\n fout.close()\n\nif __name__ == '__main__':\n main()","sub_path":"WorkShop/PYTHON/SpellingCorrector.py","file_name":"SpellingCorrector.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"556007973","text":"\r\ndef matching_by_producer(wo, engine, incoming_wine_names, compare_data, distinct_producer_name):\r\n \r\n import pandas as pd\r\n #import numpy as np\r\n# =============================================================================\r\n# import os\r\n# import pyodbc\r\n# =============================================================================\r\n \r\n from datetime import datetime\r\n startTime = datetime.now()\r\n\r\n print(\"Step 1: Reading in Datasets\")\r\n \r\n # Assinging crucial variables from matching call to start process\r\n incomingwn = incoming_wine_names \r\n wo_selected_columns=compare_data \r\n wo_disinct_producer_name = distinct_producer_name\r\n \r\n get_producer_name = 'producer_name'\r\n get_wine_id = 'wine_id'\r\n get_potential_wine_match = 'clean_name'\r\n get_matching_power = 'matching_power'\r\n get_weight = 'weight'\r\n \r\n distinct_producer_name = wo_disinct_producer_name[get_producer_name].str.lower() #wo_producer_name\r\n incomingwn = incomingwn['clean_name'].tolist()\r\n \r\n datetime.now() - startTime\r\n #print(\"Finished at:\" + str(datetime.now() - startTime))\r\n \r\n #####\r\n \r\n item_match_count = 0\r\n yes_list = []\r\n unmatched_wines = []\r\n matched = []\r\n full_row = []\r\n most_likely_prod = []\r\n full_row_df = pd.DataFrame(columns=['incoming_wine_name', 'wine_id', 'potential_wine_name', 'max_count','weight', 'matching_power', 'confidence_score'])\r\n unmatched_wines_df = pd.DataFrame(columns=['wine_name', 'matched_or_not_matched', 'not_matched_reason'])\r\n \r\n print(\"Step 2: Running Main Script\")\r\n \r\n for i in range(len(incomingwn)): #\r\n print('--------')\r\n print(str(i+1)+ '.' + ' Matching: ' + incomingwn[i])\r\n \r\n \r\n inc_name_as_list_lowercase = [inc_name_as_list_lowercase.lower() for inc_name_as_list_lowercase in (str(incomingwn[i]).split(\" \"))]\r\n \r\n inc_name_as_list_lowercase_df_column_name = 'words'\r\n inc_name_as_list_lowercase_df = (pd.DataFrame(inc_name_as_list_lowercase).rename(columns={0: inc_name_as_list_lowercase_df_column_name}))\r\n \r\n for j in range(len(distinct_producer_name)):\r\n \r\n producer_words_column_name = 'producer_words'\r\n #producer_name_words_df_rename = (pd.DataFrame(distinct_producer_name).rename(columns={0: producer_words_column_name}))\r\n found_producer_name = distinct_producer_name.iloc[j]\r\n \r\n found_producer_name = str(found_producer_name)\r\n found_producer_name_after = found_producer_name.split(\" \")\r\n found_producer_name_after = found_producer_name_after\r\n \r\n new_found_producer_list = list(found_producer_name_after)\r\n new_found_producer_list_df = pd.DataFrame(new_found_producer_list)\r\n \r\n \r\n new_found_producer_list_df_rename = new_found_producer_list_df.rename(columns={0: producer_words_column_name})\r\n producer_word_count = new_found_producer_list_df.count().iloc[0]\r\n result = pd.merge(inc_name_as_list_lowercase_df, new_found_producer_list_df_rename, left_on=inc_name_as_list_lowercase_df_column_name, right_on=producer_words_column_name, how='left')\r\n result_count = result.dropna()\r\n \r\n # if all of the words of the producer name exists in the incomnig wine name, compare the weights and mp for best match\r\n # what if there are more than 1 producers / words that could relate to the incoming wine name - why only select the first one found that matches the count?\r\n if (producer_word_count == (result_count.count()).iloc[1]):\r\n \r\n #print('producer found: ' + found_producer_name)\r\n \r\n most_likely_prod_input = 
found_producer_name\r\n most_likely_prod.append(most_likely_prod_input)\r\n \r\n # get all wine name with the name of the found producer\r\n producer_list_df = found_producer_name\r\n get_potential_wine_names = (wo_selected_columns.loc[wo_selected_columns[get_producer_name].str.lower() == producer_list_df])[get_potential_wine_match]\r\n \r\n # loop to compare for each potential wine name\r\n for l in range(len(get_potential_wine_names)):\r\n #print('inside l loop')\r\n \r\n result = pd.merge(inc_name_as_list_lowercase_df, new_found_producer_list_df_rename, left_on=inc_name_as_list_lowercase_df_column_name, right_on=producer_words_column_name, how='left')\r\n nans = lambda result: result[result.isnull().any(axis=1)]\r\n only_nans_df = pd.DataFrame((nans(result))[inc_name_as_list_lowercase_df_column_name])\r\n \r\n result2_icwn_name = 'incwn'\r\n result2 = pd.merge(((pd.DataFrame(str(get_potential_wine_names.iloc[l]).split(\" \"))).rename(columns={0: result2_icwn_name})).apply(lambda x: x.astype(str).str.lower()), only_nans_df, left_on=result2_icwn_name, right_on=inc_name_as_list_lowercase_df_column_name, how='left')\r\n item_match_count = (result2.count()).iloc[1]\r\n \r\n yes_list_input = {'incoming_wine_name': incomingwn[i],\r\n 'potential_wine_name': get_potential_wine_names.iloc[l],\r\n 'wine_id': (wo_selected_columns.loc[wo_selected_columns[get_potential_wine_match] == get_potential_wine_names.iloc[l]])[get_wine_id].iloc[0],\r\n 'matching_power': (wo_selected_columns.loc[wo_selected_columns[get_potential_wine_match] == get_potential_wine_names.iloc[l]])[get_matching_power].iloc[0],\r\n 'weight': (wo_selected_columns.loc[wo_selected_columns[get_potential_wine_match] == get_potential_wine_names.iloc[l]])[get_weight].iloc[0],\r\n 'max_count': (item_match_count+1),\r\n 'confidence_score': ((producer_word_count+item_match_count)/len(inc_name_as_list_lowercase)*100)}\r\n \r\n yes_list.append(yes_list_input)\r\n matched_input = incomingwn[i]\r\n matched.append(matched_input)\r\n \r\n \r\n # Saved in separate list if potential wine name not found by producer name\r\n if ((j == len(distinct_producer_name) -1) # if the iteration for producer name has reached\r\n & (producer_word_count != (result_count.count()).iloc[1]) # if the producer count doesn't equal to the result_count (producer words matched)\r\n & (incomingwn[i] not in matched)): # and the current incoming wine name is not in the matched list\r\n #print('true')\r\n unmatched_wines_input = {'wine_name': incomingwn[i],\r\n 'matched_or_not_matched': 'N',\r\n 'not_matched_reason':'Producer not found'}\r\n unmatched_wines.append(unmatched_wines_input)\r\n \r\n if (unmatched_wines != []):\r\n unmatched_wines_df = pd.DataFrame(unmatched_wines)\r\n \r\n #####\r\n \r\n if (yes_list != []):\r\n yes_list_df = pd.DataFrame(yes_list)\r\n yes_list_df_max = yes_list_df.groupby(['incoming_wine_name'], as_index=False)['max_count'].max()\r\n \r\n #yes_list_df_max_full_data = []\r\n for m in range(len(yes_list_df_max)):\r\n full_row_data = yes_list_df.loc[((yes_list_df['incoming_wine_name'] == (yes_list_df_max.loc[m].iloc[0])) & (yes_list_df['max_count'] == (yes_list_df_max.loc[m].iloc[1])))].head(1)\r\n full_row_input = {'incoming_wine_name': full_row_data.iloc[0].loc['incoming_wine_name'],\r\n 'wine_id': full_row_data.iloc[0].loc['wine_id'],\r\n 'potential_wine_name': full_row_data.iloc[ 0].loc['potential_wine_name'],\r\n 'max_count': full_row_data.iloc[0].loc['max_count'],\r\n 'weight': full_row_data.iloc[0].loc['weight'],\r\n 'matching_power': 
full_row_data.iloc[0].loc['matching_power'],\r\n 'confidence_score':full_row_data.iloc[0].loc['confidence_score']}\r\n full_row.append(full_row_input)\r\n full_row_df = pd.DataFrame(full_row)\r\n \r\n \r\n full_row_df.to_sql('matched_irongate_300819', con=engine, if_exists='replace', index=False)\r\n unmatched_wines_df.to_sql('unmatched_irongate_300819', con=engine, if_exists='replace', index=False)\r\n \r\n\r\n# =============================================================================\r\n# # write df's to excel\r\n# writer = pd.ExcelWriter(r'C:\\Users\\deepa\\Desktop\\results.xlsx', engine='xlsxwriter')\r\n# full_row_df.to_excel(writer, sheet_name='matched_data')\r\n# unmatched_wines_df.to_excel(writer, sheet_name='unmatched_wines')\r\n# \r\n# writer.save()\r\n# os.startfile(r'C:\\Users\\deepa\\Desktop\\results.xlsx')\r\n# \r\n# print(\"Finished at: \" + str(datetime.now() - startTime))\r\n# print(\"Step 3: Exec Excel to View Results\")\r\n# =============================================================================\r\n \r\n # Print end time \r\n datetime.now() - startTime\r\n print(\"Finished: \" + str(datetime.now() - startTime))","sub_path":"PythonMatchingScripts/match_by_producer_as_module.py","file_name":"match_by_producer_as_module.py","file_ext":"py","file_size_in_byte":9440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"625005858","text":"from decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.categories import TELEVISION, CELL, NOTEBOOK\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import session_with_proxy\n\n\nclass ClaroEcuador(Store):\n @classmethod\n def categories(cls):\n return [\n CELL,\n NOTEBOOK,\n TELEVISION\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n url_extensions = [\n ['postpago', CELL],\n ['laptops', NOTEBOOK],\n ['tv', TELEVISION],\n ]\n session = session_with_proxy(extra_args)\n product_urls = []\n for url_extension, local_category in url_extensions:\n if local_category != category:\n continue\n\n url_webpage = 'https://catalogo.claro.com.ec/{}/catalogo'.format(\n url_extension)\n print(url_webpage)\n\n data = session.get(url_webpage).text\n soup = BeautifulSoup(data, 'html.parser')\n product_containers = soup.findAll('div',\n 'box-wrapper-producto')\n\n for product in product_containers:\n if 'LG' not in product.find('h3').text:\n continue\n\n product_link = product.find('a')\n product_url = 'https://catalogo.claro.com.ec' + \\\n product_link['href']\n product_urls.append(product_url)\n\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n session = session_with_proxy(extra_args)\n response = session.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n color_selectors = soup.findAll('input', {'name': 'color'})\n key = color_selectors[0]['value']\n assert len(color_selectors) == 1\n\n name = soup.find('h1').text.strip()\n price = Decimal(soup.find(\n 'meta', {'property': 'product:price:amount'})['content'])\n picture_tags = soup.find('div', 'productoGaleriaShow').findAll('img')\n # The page repeats the pictures, no idea why\n picture_tags = picture_tags[:(len(picture_tags) // 2)]\n picture_urls = ['https://catalogo.claro.com.ec/' + tag['data-src']\n for tag in picture_tags]\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n key,\n -1,\n price,\n price,\n 'USD',\n sku=key,\n picture_urls=picture_urls\n )\n\n return [p]\n","sub_path":"storescraper/stores/claro_ecuador.py","file_name":"claro_ecuador.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"557082984","text":"# python3\n\ndef read_input():\n return (input().rstrip(), input().rstrip())\n\ndef print_occurrences(output):\n print(' '.join(map(str, output)))\n\ndef get_occurrences(pat, txt):\n d=256\n q=13\n \n M = len(pat) \n N = len(txt) \n i = 0\n j = 0\n p = 0 \n t = 0 \n h = 1\n result=[]\n \n for i in range(M-1): \n h = (h * d)% q \n for i in range(M): \n p = (d * p + ord(pat[i]))% q \n t = (d * t + ord(txt[i]))% q \n \n for i in range(N-M + 1): \n if p == t: \n if txt[i:i+M]==pat:\n result.append(str(i))\n \n \n if i < N-M: \n t = (d*(t-ord(txt[i])*h) + ord(txt[i + M]))% q \n \n if t < 0: \n t = t + q \n return result\n\nif __name__ == '__main__':\n print_occurrences(get_occurrences(*read_input()))\n\n","sub_path":"c era/Data Structure/Find_pattern_in_text.py","file_name":"Find_pattern_in_text.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"177689994","text":"def Maneuver(s):\r\n n = len(s)\r\n \r\n res = 0\r\n i = 0\r\n while i < n:\r\n steps = 0\r\n while i < n and s[i] == \"+\":\r\n steps += 1\r\n i += 1\r\n if i == n:\r\n return res\r\n if steps:\r\n res += 1\r\n steps = 0\r\n while i < n and s[i] == \"-\":\r\n steps += 1\r\n i += 1\r\n if i == n:\r\n return res + 1\r\n if steps:\r\n res += 1\r\n return res\r\n \r\n \r\n\r\n\r\ninf = open(\"in.txt\", \"r\")\r\nouf = open(\"out.txt\", \"w\")\r\n\r\nfor case in range(int(inf.readline())):\r\n n = inf.readline().strip()\r\n m = Maneuver(n)\r\n ouf.write(\"Case #\" + str(case + 1) + \": \" + str(m) + \"\\n\")\r\n\r\ninf.close()\r\nouf.close()","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_Pewfangirl_exb.py","file_name":"16_0_2_Pewfangirl_exb.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"68934358","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom flask.ext import login\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask_wtf import Form\nfrom wtforms import TextField, SelectMultipleField, PasswordField, validators\nfrom flask.ext.admin import helpers\n\napp = Flask(__name__)\n\n# Basic configs\n# Need to change SECRET_KEY in production\napp.config['SECRET_KEY'] = '123456790'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///library.db'\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n \"\"\"\n User representation\n \"\"\"\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(255), unique=True)\n password = db.Column(db.String(255))\n\n def __repr__(self):\n return \"\" % self.username\n\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def is_admin(self):\n return self.username == \"admin\"\n\n def get_id(self):\n return self.id\n\n def __unicode__(self):\n return self.username\n\n# Creates association table for many to many relation\nbook_authors = db.Table('book_authors', db.Model.metadata,\n db.Column('authors_id', db.Integer, db.ForeignKey('authors.id')),\n db.Column('books_id', db.Integer, db.ForeignKey('books.id'))\n )\n\nclass Author(db.Model):\n \"\"\"\n SQLAlchemy Author model\n \"\"\"\n __tablename__ = 'authors'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(120))\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return \"\" % self.name\n\nclass Book(db.Model):\n \"\"\"\n SQLAlchemy Books model\n \"\"\"\n __tablename__ = 'books'\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(255))\n authors = db.relationship(\"Author\",\n secondary=book_authors,\n backref=\"books\")\n\n def __init__(self, title):\n self.title = title\n\n def __repr__(self):\n return \"\" % self.title\n\nclass BookForm(Form):\n \"\"\"\n Form for adding and editing books\n \"\"\"\n title = TextField('Book title', [validators.Length(min=2, max=255)])\n author_choices = [(\"\",\"\")]\n author_choices += [(author.id, author.name) for author in Author.query.all()]\n authors = SelectMultipleField('Select authors', coerce=int, choices = author_choices)\n\nclass AuthorForm(Form):\n \"\"\"\n Form for adding and editing authors\n \"\"\"\n name = TextField('Author name', [validators.Length(min=4, max=120)])\n\nclass LoginForm(Form):\n \"\"\"\n User login form\n \"\"\"\n username = TextField('Username', [validators.Required()])\n password = PasswordField('Password', [validators.Required()])\n\n def get_user(self):\n return db.session.query(User).filter_by(username=self.username.data).first()\n\nclass RegistrationForm(Form):\n \"\"\"\n New user form\n \"\"\"\n username = TextField('Username', [validators.Required()])\n password = PasswordField('Password', [validators.Required()])\n\ndef init_login():\n login_manager = login.LoginManager()\n login_manager.setup_app(app)\n\n # Create user loader function\n @login_manager.user_loader\n def load_user(user_id):\n return db.session.query(User).get(user_id)\n\n@app.route(\"/\")\ndef index():\n \"\"\"\n Main Page function. 
Display list of all books here.\n \"\"\"\n if not login.current_user.is_authenticated():\n return redirect(url_for(\"login_view\"))\n books = Book.query.all()\n return render_template(\"index.html\", books=books)\n\n@app.route(\"/admin/\")\ndef admin_view():\n \"\"\"\n Admin's page view\n \"\"\"\n if not login.current_user.is_admin():\n return redirect(\"/\")\n books = Book.query.all()\n authors = Author.query.all()\n return render_template(\"admin.html\", books=books, authors=authors)\n\n@app.route(\"/addbook/\", methods=['GET', 'POST'])\ndef add_book():\n \"\"\"\n Add book action\n \"\"\"\n if not login.current_user.is_admin():\n return redirect(\"/\")\n form = BookForm()\n author_choices = [(\"\",\"\")]\n author_choices += [(author.id, author.name) for author in Author.query.all()]\n form.authors.choices = author_choices\n if form.validate_on_submit():\n book = Book(form.title.data)\n for choice in form.authors.data:\n author = Author.query.get(int(choice))\n book.authors.append(author)\n db.session.add(book)\n db.session.commit()\n return redirect(url_for('admin_view'))\n else:\n return render_template('addbook.html', form=form)\n\n@app.route(\"/editbook//\", methods=['GET','POST'])\ndef edit_book(book_id):\n \"\"\"\n Edit book action\n \"\"\"\n if not login.current_user.is_admin():\n return redirect(\"/\")\n book = Book.query.get(book_id)\n form = BookForm(request.form, book)\n author_choices = [(\"\",\"\")]\n author_choices += [(author.id, author.name) for author in Author.query.all()]\n form.authors.choices = author_choices\n if form.validate_on_submit():\n book.title = form.title.data\n book.authors = []\n for choice in form.authors.data:\n author = Author.query.get(int(choice))\n book.authors.append(author)\n db.session.commit()\n return redirect(url_for('admin_view'))\n return render_template('editbook.html', form=form, books_id = book_id)\n\n@app.route(\"/deletebook//\")\ndef delete_book(book_id):\n \"\"\"\n Delete book by id\n \"\"\"\n if not login.current_user.is_admin():\n return redirect(\"/\")\n book = Book.query.get(book_id)\n db.session.delete(book)\n db.session.commit()\n return redirect(url_for('admin_view'))\n\n@app.route(\"/addauthor/\", methods=['GET', 'POST'])\ndef add_author():\n \"\"\"\n Add author action\n \"\"\"\n if not login.current_user.is_admin():\n return redirect(\"/\")\n form = AuthorForm()\n if form.validate_on_submit():\n author = Author(form.name.data)\n db.session.add(author)\n db.session.commit()\n return redirect(url_for('admin_view'))\n else:\n return render_template('addauthor.html', form=form)\n\n@app.route(\"/editauthor//\", methods=['GET','POST'])\ndef edit_author(author_id):\n \"\"\"\n Author edit action\n \"\"\"\n if not login.current_user.is_admin():\n return redirect(\"/\")\n author = Author.query.get(author_id)\n form = AuthorForm(request.form, author)\n if form.validate_on_submit():\n author.name = form.name.data\n db.session.commit()\n return redirect(url_for('admin_view'))\n return render_template('editauthor.html', form=form, authors_id = author_id)\n\n@app.route(\"/deleteauthor//\")\ndef delete_author(author_id):\n \"\"\"\n Delete author by id\n \"\"\"\n if not login.current_user.is_admin():\n return redirect(\"/\")\n author = Author.query.get(author_id)\n db.session.delete(author)\n db.session.commit()\n return redirect(url_for('admin_view'))\n\n@app.route(\"/search/\", methods=['POST'])\ndef books_search():\n \"\"\"\n Search book by title or by authors name\n \"\"\"\n if not login.current_user.is_authenticated():\n return 
redirect(url_for(\"login_view\"))\n q= request.form['q']\n books = Book.query.filter(Book.title.like(\"%\" + q + \"%\")).all()\n authors = Author.query.filter(Author.name.like(\"%\" + q + \"%\")).all()\n for author in authors:\n books += author.books\n books = set(books)\n return render_template(\"index.html\", books=books)\n\n@app.route(\"/login/\", methods=('GET', 'POST'))\ndef login_view():\n \"\"\"\n Log in page\n \"\"\"\n if login.current_user.is_authenticated():\n return redirect(\"/\")\n form = LoginForm()\n if form.validate_on_submit():\n user = form.get_user()\n if user is None or user.password != form.password.data:\n return redirect(url_for(\"login_view\"))\n login.login_user(user)\n return redirect(\"/\")\n\n return render_template('login.html', form=form)\n\n\n@app.route(\"/register/\", methods=('GET', 'POST'))\ndef register_view():\n \"\"\"\n Registration page\n \"\"\"\n if login.current_user.is_authenticated():\n return redirect(\"/\")\n form = RegistrationForm()\n if form.validate_on_submit():\n if db.session.query(User).filter_by(username=form.username.data).count() > 0:\n return redirect(url_for(\"register_view\"))\n user = User()\n form.populate_obj(user)\n db.session.add(user)\n db.session.commit()\n login.login_user(user)\n return redirect(\"/\")\n\n return render_template('register.html', form=form)\n\n\n@app.route(\"/logout/\")\ndef logout_view():\n \"\"\"\n Log out action\n \"\"\"\n login.logout_user()\n return redirect(\"/\")\n \napp.wsgi_app = ProxyFix(app.wsgi_app)\ninit_login()\n\n#Start website\nif __name__ == \"__main__\":\n app.run()","sub_path":"library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":8962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"615356873","text":"#coding:utf-8\r\n\r\n\r\n\"\"\" scrapy_plus 框架的默认配置文件,可以统一管理所有的配置参数, \"\"\"\r\n\r\n\r\nimport logging\r\n\r\n# 默认的配置\r\nDEFAULT_LOG_LEVEL = logging.INFO # 默认等级\r\nDEFAULT_LOG_FMT = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s: %(message)s' # 默认日志格式\r\nDEFUALT_LOG_DATEFMT = '%Y-%m-%d %H:%M:%S' # 默认时间格式\r\nDEFAULT_LOG_FILENAME = 'log.log' # 默认日志文件名称\r\n\r\n\r\n#ASYNC_TYPE = \"thread\"\r\nASYNC_TYPE = \"coroutine\"\r\nASYNC_COUNT = 10\r\n\r\n\r\n\r\n# redis请求队列默认配置\r\nREDIS_QUEUE_NAME = 'request_queue'\r\nREDIS_QUEUE_HOST = 'localhost'\r\nREDIS_QUEUE_PORT = 6379\r\nREDIS_QUEUE_DB = 10\r\n\r\n# redis请求指纹集合默认配置\r\nREDIS_REDIS_NAME = 'fingerprint_fp'\r\nREDIS_REDIS_HOST = 'localhost'\r\nREDIS_REDIS_PORT = 6379\r\nREDIS_REDIS_DB = 10\r\n\r\n# 需要启用的爬虫\r\nSPIDERS = [\r\n\r\n]\r\n\r\n# 需要启用的管道\r\nPIPELINES = [\r\n\r\n]\r\n\r\n# 需要启用的爬虫中间件\r\nSPIDER_MIDDLEWARES = [\r\n\r\n]\r\n\r\n# 需要启用的下载中间件\r\nDOWNLOADER_MIDDLEWARES = [\r\n\r\n]\r\n\r\n\r\n# 最后导入用户运行环境下的 settings 文件里配置信息,可以覆盖框架里的同名配置参数\r\nfrom settings import *\r\n\r\n","sub_path":"爬虫练手/python爬虫-第15天/scrapy_plus/conf/default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"291027801","text":"import sys\nsys.stdin = open('4013.txt')\n\nTC = int(input())\n\ndef clockwise(wheel):\n a = wheel.pop()\n wheel.insert(0, a)\n\ndef a_clockwise(wheel):\n a = wheel.pop(0)\n wheel.append(a)\n\ndef checkspinr(wheel, wheelr, dir):\n if dir == 1:\n if wheel[2] == wheelr[7]:\n return False\n else:\n return True\n elif dir == -1:\n if wheel[2] == wheelr[5]:\n return False\n else:\n return True\n\ndef checkspinl(wheel, wheell, dir):\n if dir == 1:\n if wheel[6] == wheell[3]:\n return False\n else:\n return True\n if dir == -1:\n if wheel[6] == wheell[1]:\n return False\n else:\n return True\n\n\ndef spin(wheel_number, dir):\n wheel_tf=[0, 0, 0, 0]\n wheel_tf[wheel_number]=1\n if dir == 1 :\n clockwise(wheels[wheel_number])\n if wheel_number+1 < 4 and wheel_tf[wheel_number+1]==0:\n if checkspinr(wheels[wheel_number], wheels[wheel_number+1], 1):\n spin(wheel_number+1, -1)\n else :\n a_clockwise(wheels[wheel_number])\n if wheel_number-1 >= 0 and wheel_tf[wheel_number-1]==0:\n if checkspinl(wheels[wheel_number], wheels[wheel_number-1], -1):\n spin(wheel_number-1, -1)\n\nTC = 1\n\nfor tc in range(1, TC+1):\n K = int(input())\n wheels = [list(map(int, input().split())) for _ in range(4)]\n turns = [list(map(int, input().split())) for _ in range(K)]\n print(wheels)\n spin(turns[0][0], turns[0][1])\n print(wheels)\n\n\n\n\n\n\n","sub_path":"Algorithm/4013. 특이한 자석.py","file_name":"4013. 특이한 자석.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"130777388","text":"\"\"\"Helps managing the get or insert mecanism\"\"\"\nimport collections\nimport functools\n\nimport peewee\n\nimport lasagna.db.models as models\n\nimport lasagna.utils.helpers as helpers\nimport lasagna.utils.exceptions as exc\n\nMULTIPLE = 'Multiple entries for the same element.'\nCORRESPONDING = 'No corresponding id in database.'\n\n\ndef _multiple_entries_error(errors, elts, msg):\n \"\"\"Convenient method for adding error message in elt error list\n\n This function is commented for keeping track of the algorithm\n \"\"\"\n for elt in elts:\n # retrieve the elt and its index\n i, elt = elt['index'], elt['elt']\n # append the error message in the correct field according on\n # element attributes\n errors[i]['id' if 'id' in elt else 'name'].append(msg)\n\n\ndef _get(model, elts):\n \"\"\"Retrieve elts from database\n\n Filters elts, try to retrieve those existing in the database\n (according to name or id).\n If name does not exist, it will be created in the _insert method\n If id does not exist, an error is reported\n\n This function ensure that we keep track of the element index in\n success and errors\n \"\"\"\n dd = collections.defaultdict\n dm = helpers.dict_merge\n\n ids, names, errors = dd(list), dd(list), dd(lambda: dd(list))\n\n for i, elt in enumerate(elts):\n key, struct = (elt['id'], ids) if 'id' in elt else (elt['name'], names)\n struct[key].append({'index': i, 'elt': elt})\n\n id_keys, name_keys = list(ids.keys()), list(names.keys())\n\n if id_keys and name_keys:\n where_clause = (model.id << id_keys) | (model.name << name_keys)\n elif names:\n where_clause = (model.name << name_keys)\n elif ids:\n where_clause = (model.id << id_keys)\n\n for obj in model.select().where(where_clause):\n\n if obj.id in ids and obj.name in names:\n err_msg = {'msg': MULTIPLE, 'id': obj.id, 'name': obj.name}\n for elt in ids.pop(obj.id):\n errors[elt['index']]['id'].append(err_msg)\n for elt in names.pop(obj.name):\n errors[elt['index']]['name'].append(err_msg)\n\n continue\n\n elt = ids.pop(obj.id, []) or names.pop(obj.name, [])\n\n if len(elt) > 1:\n _multiple_entries_error(errors, elt, MULTIPLE)\n continue\n\n elt = elt.pop()\n elts[elt['index']] = dm(obj._data, elt['elt'])\n\n for v in ids.values():\n if len(v) > 1:\n _multiple_entries_error(errors, v, MULTIPLE)\n _multiple_entries_error(errors, v, CORRESPONDING)\n\n for k, v in names.items():\n if len(v) > 1:\n _multiple_entries_error(errors, v, MULTIPLE)\n else:\n names[k] = v[0]\n\n return names, errors\n\n\ndef _insert(model, elts, names):\n \"\"\"Insert names into the database and add them to elts\"\"\"\n req = (model\n .insert_many([elt['elt'] for elt in names.values()])\n .returning())\n\n for obj in req.execute():\n elts[names[obj.name]['index']] = obj._data\n\n\ndef get_or_insert(model, elts):\n \"\"\"Get the elements from a model or create them if they not exist\"\"\"\n names, errors = {}, {}\n\n if elts:\n names, errors = _get(model, elts)\n\n if names and not errors:\n _insert(model, elts, names)\n\n return elts, errors\n\n\ndef get(model, pk):\n \"\"\"Get a specific elt or raise 404 if it does not exists\"\"\"\n try:\n return model.get(model.id == pk)\n except peewee.DoesNotExist:\n raise exc.APIException('%s not found' % model._meta.name, 404)\n\n\ndef update(model, value):\n \"\"\"Update an elt and return it\"\"\"\n try:\n return (model\n .update(**value)\n .where(model.id == value.pop('id'))\n .returning()\n .execute()\n .next())\n except StopIteration:\n raise exc.APIException('%s not 
found' % model._meta.name, 404)\n\n\nget_or_insert_utensils = functools.partial(get_or_insert, models.Utensil)\nget_or_insert_ingrs = functools.partial(get_or_insert, models.Ingredient)\n\nupdate_utensil = functools.partial(update, models.Utensil)\nupdate_ingredient = functools.partial(update, models.Ingredient)\n\n\ndef select_recipes(where_clause=None):\n \"\"\"Select recipes according to where_clause if provided\"\"\"\n\n recipes = (\n models.Recipe\n .select(models.Recipe, models.RecipeIngredients, models.Ingredient,\n models.RecipeUtensils, models.Utensil)\n .join(models.RecipeIngredients, peewee.JOIN.LEFT_OUTER)\n .join(models.Ingredient, peewee.JOIN.LEFT_OUTER)\n .switch(models.Recipe)\n .join(models.RecipeUtensils, peewee.JOIN.LEFT_OUTER)\n .join(models.Utensil, peewee.JOIN.LEFT_OUTER)\n .switch(models.Recipe)\n )\n if where_clause:\n recipes = recipes.where(where_clause)\n\n return recipes.aggregate_rows().execute()\n\n\ndef select_utensils(recipe_id):\n \"\"\"Retrieve recipe utensils from database\"\"\"\n return list(\n models.Utensil\n .select()\n .join(models.RecipeUtensils)\n .where(models.RecipeUtensils.recipe == recipe_id)\n .dicts()\n )\n\n\ndef select_ingredients(recipe_id):\n \"\"\"Retrieve recipe ingredients from database\"\"\"\n return list(\n models.RecipeIngredients\n .select(\n models.RecipeIngredients.quantity,\n models.RecipeIngredients.measurement,\n models.Ingredient\n )\n .join(models.Ingredient)\n .where(models.RecipeIngredients.recipe == recipe_id)\n .dicts()\n )\n\n\ndef recipe_insert_utensils(recipe_id, utensils):\n \"\"\"Insert the utensils of a recipe into its intermediary table\"\"\"\n recipe_utensils = [\n {'recipe': recipe_id, 'utensil': utensil['id']} for utensil in utensils\n ]\n if recipe_utensils:\n models.RecipeUtensils.insert_many(recipe_utensils).execute()\n\n\ndef recipe_insert_ingredients(recipe_id, ingredients):\n \"\"\"Insert the ingredients of a recipe into its intermediary table\"\"\"\n def ingr_builder(ingr):\n return {\n 'recipe': recipe_id, 'ingredient': ingr['id'],\n 'quantity': ingr['quantity'], 'measurement': ingr['measurement']\n }\n\n recipe_ingredients = [ingr_builder(ingr) for ingr in ingredients]\n if recipe_ingredients:\n models.RecipeIngredients.insert_many(recipe_ingredients).execute()\n\n\ndef _update_recipe(recipe):\n \"\"\"Update a recipe\"\"\"\n\n def delete_old_entries(model, recipe_id):\n \"\"\"Delete entries on model given a specific recipe id\"\"\"\n model.delete().where(model.recipe == recipe_id).execute()\n\n ingredients = recipe.pop('ingredients', None)\n utensils = recipe.pop('utensils', None)\n\n recipe = update(models.Recipe, recipe)\n\n if utensils is None:\n utensils = select_utensils(recipe.id)\n else:\n delete_old_entries(models.RecipeUtensils, recipe.id)\n recipe_insert_utensils(recipe.id, utensils)\n\n if ingredients is None:\n ingredients = select_ingredients(recipe.id)\n else:\n delete_old_entries(models.RecipeIngredients, recipe.id)\n recipe_insert_ingredients(recipe.id, ingredients)\n\n recipe._data.update({'ingredients': ingredients, 'utensils': utensils})\n return recipe\n\nupdate_recipe = functools.partial(_update_recipe)\n","sub_path":"lasagna/lasagna/db/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
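helpers.py above layers batching, per-element error reporting and peewee specifics on top of a plain get-or-insert. Reduced to its core, the idea looks roughly like this sqlite3 stand-in, with a hypothetical utensil table rather than the lasagna schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE utensil (id INTEGER PRIMARY KEY, name TEXT UNIQUE)")

def get_or_insert(name):
    # cheap lookup first; insert only when the name is not there yet
    row = conn.execute("SELECT id, name FROM utensil WHERE name = ?", (name,)).fetchone()
    if row is not None:
        return row
    cur = conn.execute("INSERT INTO utensil (name) VALUES (?)", (name,))
    return (cur.lastrowid, name)

print(get_or_insert("spoon"))   # inserted -> (1, 'spoon')
print(get_or_insert("spoon"))   # found    -> (1, 'spoon')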
+{"seq_id":"341860489","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: terrplant_parameters\n :synopsis: A useful module indeed.\n\"\"\"\nfrom django import forms\nfrom django.utils.safestring import mark_safe\n\nfrom pram_app.models.forms import validation\n\nSELECT_INCORPORATION = (('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'))\n\nSELECT_DRIFT = (('0.01', '0.01'), ('0.05', '0.05'), ('0', '0'))\n\nSELECT_RUN = (('0.01', '0.01'), ('0.02', '0.02'), ('0.05', '0.05'))\n\nSELECT_VERSION = (('1.2.2', '1.2.2'),)\n\n\nclass TerrplantInp(forms.Form):\n version = forms.ChoiceField(\n choices=SELECT_VERSION,\n label='Version',\n initial='1.2.2')\n chemical_name = forms.CharField(\n widget=forms.Textarea(attrs={'cols': 20, 'rows': 1}),\n label='Chemical Name',\n initial='Terrplant Example')\n pc_code = forms.CharField(\n widget=forms.Textarea(attrs={'cols': 20, 'rows': 1}),\n label='PC Code',\n initial='90501')\n use = forms.CharField(\n widget=forms.Textarea(attrs={'cols': 20, 'rows': 2}),\n label='Use',\n initial='Corn')\n application_method = forms.CharField(\n widget=forms.Textarea(attrs={'cols': 20, 'rows': 2}),\n label='Application Method',\n initial='Ground')\n application_form = forms.CharField(\n widget=forms.Textarea(attrs={'cols': 20, 'rows': 2}),\n label='Application Form',\n initial='Spray')\n solubility = forms.FloatField(\n label='Solubility (ppm)',\n initial=240,\n validators=[validation.validate_positive])\n incorporation_depth = forms.ChoiceField(\n choices=SELECT_INCORPORATION,\n label='Incorporation Depth (in)')\n application_rate = forms.FloatField(\n label='Application rate (lbs ai/A)',\n initial=4)\n drift_fraction = forms.ChoiceField(\n choices=SELECT_DRIFT,\n label='Drift Fraction',\n initial=0.01,\n validators=[validation.validate_positive])\n runoff_fraction = forms.ChoiceField(\n choices=SELECT_RUN,\n label='Runoff Fraction',\n initial=0.05)\n ec25_nonlisted_seedling_emergence_monocot = forms.FloatField(\n label=mark_safe('EC25 for Non-listed Seedling Emergence Monocot (lbs ai/A)'),\n initial=0.0067,\n validators=[validation.validate_positive])\n ec25_nonlisted_seedling_emergence_dicot = forms.FloatField(\n label=mark_safe('EC25 for Non-listed Seedling Emergence Dicot (lbs ai/A)'),\n initial=0.034,\n validators=[validation.validate_positive])\n noaec_listed_seedling_emergence_monocot = forms.FloatField(\n label=mark_safe('noaec for Non-listed Seedling Emergence Monocot (lbs ai/A)'),\n initial=0.0023,\n validators=[validation.validate_positive])\n noaec_listed_seedling_emergence_dicot = forms.FloatField(\n label=mark_safe('noaec for Non-listed Seedling Emergence Dicot (lbs ai/A)'),\n initial=0.019,\n validators=[validation.validate_positive])\n ec25_nonlisted_vegetative_vigor_monocot = forms.FloatField(\n label=mark_safe('EC25 for Non-listed Vegetative Vigor Monocot (lbs ai/A)'),\n initial=0.068,\n validators=[validation.validate_positive])\n ec25_nonlisted_vegetative_vigor_dicot = forms.FloatField(\n label=mark_safe('EC25 for Non-listed Vegetative Vigor Dicot (lbs ai/A)'),\n initial=1.4,\n validators=[validation.validate_positive])\n noaec_listed_vegetative_vigor_monocot = forms.FloatField(\n label=mark_safe('noaec for Non-listed Vegetative Vigor Monocot (lbs ai/A)'),\n initial=0.037,\n validators=[validation.validate_positive])\n noaec_listed_vegetative_vigor_dicot = forms.FloatField(\n label=mark_safe('noaec for Non-listed Vegetative Vigor Dicot (lbs ai/A)'),\n initial=0.67,\n 
validators=[validation.validate_positive])\n","sub_path":"models/terrplant/terrplant_parameters.py","file_name":"terrplant_parameters.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"263265194","text":"import win32com.client as win32\r\n\r\ndef sendmail(emailaddress,subject,body,attachmentpath):\r\n \"\"\" function to send email from MS Outlook. File path\r\n should be specified in the format: r'H:\\Desktop\\' \"\"\"\r\n\r\n outlook = win32.Dispatch('outlook.application')\r\n mail = outlook.CreateItem(0)\r\n \r\n mail.To = emailaddress\r\n mail.Subject = subject\r\n mail.body = body\r\n mail.Attachments.Add(attachmentpath)\r\n \r\n mail.Send()\r\n ","sub_path":"sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"44918043","text":"#coding:utf-8\n'''\nCreated on 2018年5月12日\n\n@author: Administrator\n'''\nimport unittest\nfrom selenium.webdriver.firefox.webdriver import WebDriver as Firefox\nimport time\nclass Test(unittest.TestCase):\n\n\n def testName(self):\n driver=Firefox()\n driver.get(\"https://www.baidu.com/\")\n driver.implicitly_wait(10)\n driver.maximize_window()\n driver.set_page_load_timeout(30)\n driver.find_element_by_id('kw').send_keys('selenium')\n driver.find_element_by_id('su').click()\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","sub_path":"test001.py","file_name":"test001.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"513051654","text":"'''\r\nUn robottino deve muoversi su di una scacchiera di 15 x 15 celle con celle bianche e nere ciascuna di lato 40. \r\nPer rendere il percorso accidentato alcune delle celle della scacchiera contengono ostacoli (queste celle sono colorate di rosso).\r\n\r\nUn esempio di scacchiera con ostacoli e' dato dall'immagine 'I1.png'\r\n\r\nPer caricare e salvare immagini PNG usate le funzioni load e save che abbiamo preparato nel modulo immagini.py .\r\n\r\nAl'inizio il robottino e' posizionato sulla prima cella in altro a sinistra della scacchiera ed e' rivolto verso destra (x crescente). \r\nAd ogni step tenta di ragiungere una delle celle adiacenti in orizzontale o verticale. \r\nLe regole di movimento del robottino sono le seguenti: \r\n- al generico step, si sposta sulla cella che ha di fronte se questa e' libera da ostacoli e non ci e' gia transitato in passato. \r\n- se invece la cella risulta occupata o e' una cella su cui ha gia transitato, ruota di 90 gradi in senso orario ed aspetta lo step successivo. \r\n- dopo aver ruotato di 360 gradi senza essere riuscito a spostarsi si ferma. \r\n\r\nProgettare la funzione percorso(fname, fname1) che presi in input:\r\n- il percorso di un file (fname) contenente l'immagine in formato .png di una scacchiera con ostacoli\r\n- il percorso di un file di tipo .png (fname1) da creare\r\nlegge l'immagine della scacchiera in fname, colora di verde le celle della scacchiera percorse dal robottino prima di fermarsi, \r\ncolora di blu la cella in cui il robottino si ferma e registra l'immagine ricolorata nel file fname1. \r\nInoltre restituisce una stringa dove in sequanza sono codificati i passi effettuati dal robottino prima di fermarsi. \r\nLa codifica e' a seguente: \r\n '0' per un passo verso destra (x crescenti)\r\n '1' per un passo verso il basso (y crescenti)\r\n '2' per un passo verso sinistra (x decrescenti)\r\n '3' per un passo verso l'alto (y decrescenti)\r\n\r\nSi puo' assumere che la cella in alto a sinistra sia priva di ostacoli. 
\r\n\r\nPer esempi di scacchiere con ostacoli e relativi cammini vedere il file grade02.txt \r\n\r\nNOTA: il timeout per la esecuzione del grader e' fissato a 10*N secondi (per N test eseguiti dal grader)\r\n'''\r\nfrom immagini import load,save\r\ndef matrice(f,s):\r\n i=[[]for x in range(s)]\r\n r=0\r\n while r in range(0,len(f)):\r\n for x, c in enumerate(range(0,len(f[0]),40)):\r\n i[x]+=[int((f[c][r]==(255,255,255))or(f[c][r]==(0,0,0)))]\r\n r+=40\r\n if r>len(f):\r\n break\r\n i[0][0]=0\r\n return i\r\ndef color(f,pat,posi):\r\n for a,r in enumerate(pat):\r\n for b,v in enumerate(r):\r\n if v == 2:\r\n cposi = (a * 40, b * 40)\r\n for imga in range(0, 40):\r\n for imgb in range(0, 40):\r\n if (a, b) == posi:\r\n f[cposi[0] + imgb][cposi[1] + imga] = (0,0,255)\r\n elif (a, b)!= posi:\r\n f[cposi[0] + imgb][cposi[1] + imga] = (0,255,0)\r\n else:\r\n break\r\n return f\r\ndef cammino(fname, fname1):\r\n f=load(fname)\r\n m=matrice(f, 15)\r\n pat=matrice(f, 15)\r\n posi=(0, 0) \r\n dire=(0, 1)\r\n p= \"\"\r\n pat[0][0] = 2\r\n c=int(pat[posi[0]][max(0,min(posi[1]-1,14))]==1)+\\\r\n int(pat[max(0,min(posi[0]-1,14))][posi[1]]==1)+\\\r\n int(pat[max(0,min(posi[0]+1,14))][posi[1]]==1)+\\\r\n int(pat[posi[0]][max(0,min(posi[1]+1,14))]==1)\r\n while c>0:\r\n newposi = (max(0,min(posi[0] + dire[0], 14)),max(0,min(posi[1] + dire[1], 14)))\r\n nextc = m[newposi[0]][newposi[1]]\r\n patc = pat[newposi[0]][newposi[1]]\r\n if nextc*patc == 1:\r\n pat[newposi[0]][newposi[1]] = 2\r\n posi = (posi[0] + dire[0], posi[1] + dire[1])\r\n if dire==(1,0):\r\n p += '1'\r\n elif dire==(0,1):\r\n p += '0'\r\n elif dire==(-1,0):\r\n p += '3'\r\n else:\r\n p += '2'\r\n else:\r\n if dire==(1,0):\r\n dire = (0,-1)\r\n elif dire==(0,1):\r\n dire = (1,0)\r\n elif dire==(0,-1):\r\n dire = (-1,0)\r\n else:\r\n dire = (0,1)\r\n if int(pat[posi[0]][max(0,min(posi[1]-1,14))]==1)+\\\r\n int(pat[max(0,min(posi[0]-1,14))][posi[1]]==1)+\\\r\n int(pat[max(0,min(posi[0]+1,14))][posi[1]]==1)+\\\r\n int(pat[posi[0]][max(0,min(posi[1]+1,14))]==1)<=0:\r\n break\r\n f=color(f,pat,posi)\r\n save(f,fname1)\r\n return p \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"students/1806338/homework03/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"138575357","text":"from functools import lru_cache\n\nN = int(input())\n\nclass Combination:\n def __init__(self, n):\n self.n = n\n fact = [1] * (n + 1)\n for i in range(1, n + 1):\n fact[i] = fact[i - 1] * i\n self.fact = fact\n\n def ncr(self, n, r):\n if n < 0 or r < 0 or n < r:\n return 0\n return self.fact[n] // self.fact[n - r] // self.fact[r]\n\ncomb = Combination(N + 10)\n\ndef prob(n, k): # n人→k人の確率\n if n == k:\n ret = 0\n if n % 3 == 0:\n ret += q(n // 3, n // 3, n // 3)\n ret += q(n, 0, 0) * 3\n return ret\n ret = 0\n for G in range(n + 1):\n for C in range(n + 1):\n P = n - G - C\n if P < 0:\n break\n g, c, p = sorted([G, C, P])\n\n if g == c == 0 or g == c == p:\n continue\n elif g == 0:\n if c == k:\n ret += q(g, c, p)\n elif g == k:\n ret += q(g, c, p)\n\n return ret\n\n@lru_cache(maxsize=None)\ndef q(g, c, p):\n n = g + c + p\n return comb.ncr(n, g) * comb.ncr(n - g, c) / pow(3, n)\n\n@lru_cache(maxsize=None)\ndef search(n):\n if n == 1:\n return 0\n ret = 0\n for i in range(1, n):\n ret += prob(n, i) * (search(i) + 1)\n ret += prob(n, n)\n return ret / (1 - prob(n, n))\n\nans = search(N)\nprint(ans)\n\n\n","sub_path":"AtCoder/other/dowangoプログラミングコンテスト/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"357968012","text":"def ler_num(): #verifica se o input é numérico\n while True:\n n1 = input('Digite um número inteiro: ')\n if n1.isnumeric():\n return n1\n print('Valor invalido\\n')\n\n\ndef separar(n1: str): #separa o valor lido em uma lista\n l1 = list()\n for c in n1:\n l1.append(c)\n return l1\n\n\ndef ishappy(l1: list):\n ciclos = 0\n while True:\n for pos, val in enumerate(l1):\n l1[pos] = int(val)\n l1[pos] = l1[pos] ** 2\n\n soma = sum(l1[:])\n\n if soma == 1:\n return True\n\n elif ciclos > 500:\n return False\n del l1\n l1 = separar(str(soma))\n ciclos += 1\n #print(f'ciclos = {ciclos}\\n soma = {soma}\\n lista = {l1}')\n\n\ndef main():\n print('NÚMEROS MÁGICOS!!')\n print('São números em que a soma do quadrado de cada digito é igual a 1')\n print('Exemplo de números felizes: 1, 7, 10, 13, 19, 23, 28, 31, 32, 44')\n z = ler_num()\n x = separar(z)\n if ishappy(x):\n print(f'O número {z} é feliz!')\n else:\n print(f'Depois de muitatas tentativas o numero {z} ficou triste por não conseguir atingir o numero 1')\n\n\nmain()\n","sub_path":"Solutions-Python/Numbers/Happy_Numbers/happy_numbers.py","file_name":"happy_numbers.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"32922929","text":"import pytest\n\nfrom catinabox.food_truck import (\n FoodTruck, FoodQuantityCannotBeNegativeError,\n)\n\n\n@pytest.fixture\ndef food_truck():\n return FoodTruck()\n\n\ndef test_no_food_in_new_food_truck(food_truck):\n assert food_truck.inventory == {}\n\n\n@pytest.mark.parametrize('food,quantity', (\n ('tacos', 10),\n ('sushi', 1),\n ('curry', 0),\n))\ndef test_stocking_new_food(food_truck, food, quantity):\n food_truck.stock(food, quantity)\n assert food_truck._inventory[food] == quantity\n\n\n@pytest.mark.parametrize('food,quantity', (\n ('tacos', -10),\n ('sushi', -1337),\n ('curry', -2),\n))\ndef test_stocking_new_food_invalid_quantity(food_truck, food, quantity):\n with pytest.raises(FoodQuantityCannotBeNegativeError):\n food_truck.stock(food, quantity)\n","sub_path":"tests/unit/test_food_truck.py","file_name":"test_food_truck.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"137037303","text":"from django.conf.urls import patterns, include, url\nfrom qa import views\nfrom django.contrib import admin\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'ask.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^ask/', views.ask, name='ask'),\n url(r'login/', views.index, name='login'),\n url(r'signup/', views.index, name='signup'),\n url(r'popular/', views.popular, name='popular'),\n url(r'new/', views.index, name='new'),\n url(r'^answer/', view.answer, name='answer'),\n url(r'question/(?P[0-9]+)/', views.single_question, name='question'),\n url(r'^$', views.index, name='root')\n)\n","sub_path":"ask/ask/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"631605696","text":"from accounts.models import CustomUser\nfrom django.http import request\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Quest, QuestComment, Review\nfrom .forms import QuestForm, CommentForm, ReviewForm\nfrom django.urls import reverse\nimport datetime\n\ndef board(request):\n page = request.GET.get('page', '1')\n quests = Quest.objects.filter(duedate__gte=datetime.datetime.now()).order_by('-id')\n for quest in quests:\n quest.remainingdays = (quest.duedate - datetime.datetime.now().date()).days\n quest.save()\n paginator = Paginator(quests, 6)\n currentpage = paginator.get_page(page)\n context = {'quests':currentpage}\n return render(request,'board.html', context)\n\ndef sort_date(request):\n quests = Quest.objects.all().order_by('-id')\n return render(request,'board.html', {'quests':quests})\n\ndef sort_bounty(request):\n quests = Quest.objects.all().order_by('-bounty')\n return render(request,'board.html', {'quests':quests})\n\ndef newquest(request):\n return render(request, 'newquest.html')\n\ndef questdetail(request, quest_id):\n quest_detail = get_object_or_404(Quest, pk=quest_id)\n comments = QuestComment.objects.filter(quest=quest_detail.id)\n return render(request, 'questdetail.html', context = {'quest':quest_detail, 'comments':comments})\n\ndef newcomment(request):\n if not request.user.is_authenticated:\n return redirect('/accounts/login/')\n if request.method == 'POST':\n quest_id = request.POST.get('quest_id','').strip()\n body = request.POST.get('body','').strip()\n author = request.user\n comment = QuestComment.objects.create(\n author = author,\n quest_id = quest_id,\n body = body\n )\n return redirect(reverse('questdetail', kwargs={'quest_id':comment.quest_id}))\n\ndef createquest(request):\n if not request.user.is_authenticated:\n return redirect('/accounts/login/')\n if request.method == 'POST':\n form = QuestForm(request.POST, request.FILES)\n if form.is_valid():\n quest = form.save(commit=False)\n quest.author = request.user\n quest.save()\n form.save_m2m()\n return redirect('board')\n else:\n return redirect('board')\n else:\n form = QuestForm()\n return render(request, 'newquest.html', {'form':form})\n\n\ndef matching(request, quest_id):\n quest = get_object_or_404(Quest, pk=quest_id)\n return render(request, 'matching.html', {'quest':quest})\n\ndef select_reviewer(request, quest_id):\n if request.method == 'POST':\n quest = Quest.objects.filter(pk=quest_id)\n reviewer_id = request.POST.get('reviewer')\n reviewer = CustomUser.objects.get(pk=reviewer_id)\n quest.update(reviewer=reviewer, status='SOLVING')\n return redirect(reverse('questdetail', kwargs={'quest_id':quest_id}))\n\ndef review(request, quest_id):\n if not request.user.is_authenticated:\n return redirect('/accounts/login/')\n if request.method == 'POST':\n form = ReviewForm(request.POST, request.FILES)\n if form.is_valid():\n review = form.save(commit=False)\n review.author = request.user\n review.quest_id = quest_id\n review.save()\n quest = Quest.objects.filter(pk=quest_id)\n quest.update(status='SOLVED')\n return redirect(reverse('questdetail', kwargs={'quest_id':quest_id}))\n else:\n return redirect(reverse('questdetail', kwargs={'quest_id':quest_id}))\n else:\n form = ReviewForm()\n quest_detail = get_object_or_404(Quest, pk=quest_id)\n context = {'quest':quest_detail, 'form':form}\n return render(request, 'createreview.html', context)\n \ndef apply(request, quest_id):\n if not 
request.user.is_authenticated:\n return redirect('/accounts/login/')\n current_user = request.user\n current_quest = Quest.objects.get(id=quest_id)\n current_quest.applicants.add(CustomUser.objects.get(id=current_user.id))\n return redirect(reverse('questdetail', kwargs={'quest_id':quest_id}))\n \n\n\n ","sub_path":"Code_Hunter/wanted/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"431382048","text":"# -*- coding: utf-8 -*-\n\"\"\"\nZurich Instruments LabOne Python API Example\n\nDemonstrate how to save and load Zurich Instruments device settings\nasynchronously using the ziDeviceSettings class.\n\nNote: This example is intended for experienced users who require a non-blocking\n(asynchronous) interface for loading and saving settings. In general, the\nutility functions save_settings() and load_settings() are more appropriate; see\n`example_save_device_settings_simple`.\n\n\"\"\"\n\n# Copyright 2016 Zurich Instruments AG\n\nfrom __future__ import print_function\nimport time\nimport os\nimport zhinst.utils\n\n\ndef run_example(device_id, settings_file_path=None):\n \"\"\"\n Run the example: Connect to a Zurich Instruments instrument, save the\n instrument's settings to file, toggle the signal output enable and reload\n the settings file.\n\n Note: This example is intended for experienced users who require a\n non-blocking (asynchronous) interface for loading and saving settings. In\n general, the utility functions save_settings() and load_settings() are more\n appropriate; see `example_save_device_settings_simple'.\n\n Arguments:\n\n device_id (str): The ID of the device to run the example with. For\n example, `dev2006` or `uhf-dev2006`.\n\n setting_file_path (str, optional): Specify the path where to save the\n settings file.\n\n\n Returns:\n\n filename (str) : the name (with path) of the XML file where the settings\n were saved.\n\n Raises:\n\n RuntimeError: If the device is not \"discoverable\" from the API.\n\n See the \"LabOne Programing Manual\" for further help, available:\n - On Windows via the Start-Menu:\n Programs -> Zurich Instruments -> Documentation\n - On Linux in the LabOne .tar.gz archive in the \"Documentation\"\n sub-folder.\n\n \"\"\"\n\n apilevel_example = 6 # The API level supported by this example.\n # Call a zhinst utility function that returns:\n # - an API session `daq` in order to communicate with devices via the data server.\n # - the device ID string that specifies the device branch in the server's node hierarchy.\n (daq, device, _) = zhinst.utils.create_api_session(device_id, apilevel_example)\n zhinst.utils.api_server_version_check(daq)\n\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n filename_noext = timestr + '_example_save_device_settings_expert' # Change this to the filename you want to save.\n\n device_settings = daq.deviceSettings()\n device_settings.set('deviceSettings/device', device)\n device_settings.set('deviceSettings/filename', filename_noext)\n if settings_file_path:\n device_settings.set('deviceSettings/path', settings_file_path)\n # Set the path to '.' 
save to the current directory.\n # device_settings.set('deviceSettings/path', '.')\n # NOTE: in this case, this example will have to be executed from a folder\n # where you have write access.\n\n toggle_device_setting(daq, device)\n\n # Save the instrument's current settings.\n print(\"Saving settings...\")\n device_settings.set('deviceSettings/command', 'save')\n device_settings.execute()\n while not device_settings.finished():\n time.sleep(0.2)\n print(\"Done.\")\n\n data = device_settings.get('deviceSettings/path')\n path = data['path'][0]\n filename_full_path = os.path.join(path, filename_noext) + '.xml'\n assert os.path.isfile(filename_full_path), \"Failed to save settings file '%s'\" % filename_full_path\n print(\"Saved file '{}'.\".format(filename_full_path))\n\n toggle_device_setting(daq, device)\n\n # Load the settings.\n print(\"Loading settings...\")\n device_settings.set('deviceSettings/command', 'save')\n device_settings.execute()\n while not device_settings.finished():\n time.sleep(0.2)\n print(\"Done.\")\n device_settings.clear()\n\n return filename_full_path\n\n\ndef toggle_device_setting(daq, device):\n \"\"\"\n Toggle a setting on the device: If it's enabled, disable the setting, and\n vice versa.\n \"\"\"\n path = '/%s/sigouts/0/on' % device\n is_enabled = daq.getInt(path)\n print(\"Toggling setting '{}'.\".format(path))\n daq.setInt(path, not is_enabled)\n daq.sync()\n","sub_path":"Drivers/python_libs/linux/zhinst/examples/common/example_save_device_settings_expert.py","file_name":"example_save_device_settings_expert.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"593786425","text":"\"\"\"\nState for the gameplay.\n\"\"\"\n\nimport random\nimport pygame as pg\n\nfrom .. import statemachine, game\nfrom ..resource import RESOURCE_MANAGER\n\nclass Paddle(pg.sprite.Sprite):\n \"\"\"\n Represents a paddle controlled by a player or CPU.\n \"\"\"\n\n cpu = False\n ball = None\n direction = 0\n speed = .5\n\n def __init__(self, x, cpu=False, ball=None):\n super().__init__()\n self.image = RESOURCE_MANAGER.graphics[\"paddle\"]\n self.image = pg.transform.scale(self.image, (self.image.get_rect().w * 6, self.image.get_rect().h * 6))\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.centery = game.SCREEN_SIZE[1] / 2\n self.cpu = cpu\n self.ball = ball\n \n def update(self, delta_time):\n if self.cpu:\n self.cpu_input()\n else:\n self.human_input()\n\n self.rect.move_ip((0, self.speed * self.direction * delta_time))\n\n # Prevent from going off screen\n if self.rect.bottom >= game.SCREEN_SIZE[1]:\n self.rect.bottom = game.SCREEN_SIZE[1]\n elif self.rect.top <= 0:\n self.rect.top = 0\n\n def human_input(self):\n keys = pg.key.get_pressed()\n\n if keys[pg.K_UP]:\n self.direction = -1\n elif keys[pg.K_DOWN]:\n self.direction = 1\n else:\n self.direction = 0\n \n def cpu_input(self):\n if self.ball.rect.centerx > game.SCREEN_SIZE[0] / 2: \n if self.ball.rect.centery > self.rect.centery:\n self.direction = 1\n elif self.ball.rect.centery < self.rect.centery:\n self.direction = -1\n else:\n self.direction = 0\n\n\n \nclass Ball(pg.sprite.Sprite):\n \"\"\"\n Represents a ball players can hit.\n \"\"\"\n\n last_collide = None\n\n def __init__(self, color, score):\n super().__init__()\n self.image = RESOURCE_MANAGER.graphics[\"ball\"]\n self.image = pg.transform.scale(self.image, (32, 32))\n self.rect = self.image.get_rect()\n self.direction = [1, 1]\n self.speed = 0.5\n self.score = score\n self.bounce = pg.mixer.Sound(\"assets/sounds/ballbounce.wav\")\n\n self.image.fill(color, special_flags=pg.BLEND_MULT)\n self.restart()\n \n def update(self, delta_time):\n self.rect.move_ip((self.direction[0] * self.speed * delta_time, self.direction[1] * self.speed * delta_time))\n\n # Left bounce - Point for player two\n if self.rect.left < 0:\n self.score.add_to(2)\n self.restart()\n \n # Right bounce - Point for player one\n if self.rect.right > game.SCREEN_SIZE[0]:\n self.score.add_to(1)\n self.restart()\n\n # Top or bottom bounce\n if self.rect.top < 0 or self.rect.bottom > game.SCREEN_SIZE[1]:\n self.direction[1] = -self.direction[1]\n self.bounce.play()\n\n def restart(self):\n self.last_collide = None\n self.rect.centerx = game.SCREEN_SIZE[0] / 2\n self.rect.centery = game.SCREEN_SIZE[1] / 2\n self.direction[0] = random.choice([-1, 1])\n self.direction[1] = random.choice([-1, 1])\n\n def collision(self, other):\n if self.rect.colliderect(other.rect) and self.last_collide != other:\n self.last_collide = other\n self.direction[0] = -self.direction[0]\n self.bounce.play()\n\nclass Score(object):\n\n Y_POS = 40\n MIDDLE_OFFSET = 200\n p1_score = 0\n p2_score = 0\n font = None\n\n p1_score_text = None\n p2_score_text = None\n\n def __init__(self):\n self.font = pg.font.Font(\"assets/fonts/retro.ttf\", 100)\n self.update_score()\n \n def update_score(self):\n self.p1_score_text = self.font.render(str(self.p1_score), True, (255, 255, 255))\n self.p2_score_text = self.font.render(str(self.p2_score), True, (255, 255, 255))\n \n def add_to(self, ply):\n if ply == 1:\n self.p1_score = self.p1_score + 1\n if ply == 2:\n self.p2_score = self.p2_score + 
1\n \n self.update_score()\n \n def draw(self, surface):\n surface.blit(self.p1_score_text, ((game.SCREEN_SIZE[0] / 2) - self.MIDDLE_OFFSET - self.p1_score_text.get_width() / 2, self.Y_POS))\n surface.blit(self.p2_score_text, ((game.SCREEN_SIZE[0] / 2) + self.MIDDLE_OFFSET - self.p2_score_text.get_width() / 2, self.Y_POS))\n\n\n\nclass Gameplay(statemachine.State):\n\n ball = None\n p1_paddle = None\n p2_paddle = None\n score = None\n LINE_COL = (200, 200, 200)\n BALL_COL = (255, 0, 0)\n\n def __init__(self):\n super().__init__() \n\n def startup(self, persist, current_time):\n super().startup(persist, current_time) \n self.score = Score() \n self.ball = Ball(self.BALL_COL, self.score)\n self.p1_paddle = Paddle(80)\n self.p2_paddle = Paddle(1200, True, self.ball)\n \n\n def handle_event(self, event):\n pass\n\n def update(self, delta_time):\n self.ball.update(delta_time)\n self.p1_paddle.update(delta_time)\n self.p2_paddle.update(delta_time)\n\n self.ball.collision(self.p1_paddle)\n self.ball.collision(self.p2_paddle)\n \n def draw(self, surface):\n surface.fill((0, 0, 0))\n self.score.draw(surface)\n\n # Draw separating line\n for i in range(0, 30):\n if (i % 2) == 0:\n pg.draw.rect(surface, self.LINE_COL, (630, 25 * i, 20, 25))\\\n\n surface.blit(self.p1_paddle.image, self.p1_paddle.rect)\n surface.blit(self.p2_paddle.image, self.p2_paddle.rect)\n surface.blit(self.ball.image, self.ball.rect)\n","sub_path":"source/states/gameplay.py","file_name":"gameplay.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"183943620","text":"import requests\nimport json\nimport configparser\n\nimport shutil\nimport datetime\nimport os\nimport argparse\nimport polling\n\n\n\n\nconfig = configparser.ConfigParser()\nconfig.read('settings.cfg')\nTOKEN = config.get('authentication','token')\nround_id = config.get('authentication','round_id')\n\nSOURCE_DIR = config.get('project','source_dir')\n\ndataset_ids = [config.get('datasets','dataset'+str(i)) for i in range(4)]\nsolutions = [config.get('project','solutions'+str(i)) for i in range(4)]\n\ntopscore_dir = \"topscores\"\n\n\ndef check_submission(token,round_id, submitted):\n print('.', sep=' ', end='', flush=True)\n url = \"https://hashcode-judge.appspot.com/api/judge/v1/submissions/\"+round_id\n headers = {\n 'authorization': \"Bearer \" + token,\n 'content-type': \"application/json;charset=utf-8\",\n }\n try:\n init_res = requests.get(url, headers=headers, allow_redirects=False)\n if init_res.status_code == 200:\n items = init_res.json()['items']\n current = [t for t in items if t[\"id\"] == submitted]\n if len(current) != 0:\n return {'scored':current[0][\"scored\"],'valid':current[0][\"valid\"],'best':current[0][\"best\"],'score':current[0][\"score\"]}\n print(\"Could not retrieve result\")\n return None\n\n else:\n print(\"URL has not been created, your token might be expired.\")\n return None\n except Exception as ce:\n print(\"ERROR: \" + str(ce))\n\n\ndef zipdir(path, outputfilename):\n try:\n shutil.make_archive(outputfilename, 'zip', path)\n return True\n except Exception as ce:\n return False\n\n\ndef createUrl(token):\n url = \"https://hashcode-judge.appspot.com/api/judge/v1/upload/createUrl\"\n headers = {\n 'authorization': \"Bearer \" + token,\n 'content-type': \"application/json;charset=utf-8\",\n }\n\n try:\n init_res = requests.get(url, headers=headers, allow_redirects=False)\n if init_res.status_code == 200:\n return init_res.json()['value']\n else:\n print(\"URL has not been created, your token might be expired.\")\n return None\n except Exception as ce:\n print(\"ERROR: \" + str(ce))\n\ndef upload(url,filename):\n try:\n with open(filename, 'rb') as file:\n response = requests.post(url, files={filename:file})\n if response.status_code == 200:\n return response.json()[filename]\n else:\n print(\"Something went wrong while uploading a file\")\n except Exception as ce:\n print(ce)\n\ndef submit(sourcesBlobKey,submissionBlobKey, token ,dataSet):\n url = \"https://hashcode-judge.appspot.com/api/judge/v1/submissions\"\n data={\"dataSet\":dataSet,\"submissionBlobKey\":submissionBlobKey,\"sourcesBlobKey\":sourcesBlobKey}\n headers = {\n 'authorization': \"Bearer \" + token,\n 'content-type': \"application/json;charset=utf-8\",\n }\n try:\n response = requests.post(url,headers=headers,params=data)\n if response.status_code == 200:\n return response.json()\n else:\n print(\"Something went wrong while submitting\")\n print(response.json())\n except Exception as ce:\n print(ce)\n\ndef uploadFile(filename):\n uploadURL = createUrl(TOKEN)\n if uploadURL is not None:\n blobKey = upload(uploadURL,filename)\n return blobKey\n else:\n print(str(filename) + \" has not been uploaded.\")\n return None\n\ndef poll_submission(TOKEN,round_id,submitted):\n print('Awaiting results', sep=' ', end='', flush=True)\n try:\n polling.poll(\n lambda: check_submission(TOKEN, round_id, submitted).get(\"scored\") == True,\n step=5,\n timeout=30\n )\n return check_submission(TOKEN, round_id, submitted)\n except polling.TimeoutException as e:\n print(\"\\nTimed 
out...\")\n return None\n\n\nif __name__ == '__main__':\n if not os.path.exists(topscore_dir):\n os.mkdir(topscore_dir)\n\n source_zipfile = 'source_'+str(datetime.datetime.now().isoformat())\n zipped = zipdir(SOURCE_DIR, source_zipfile )\n parser = argparse.ArgumentParser(description='Submit a solution')\n parser.add_argument('dataset_id', metavar='ID', type=int, nargs=1,\n help='the ID of the dataset')\n args = parser.parse_args()\n\n solution_id = args.dataset_id[0]\n if zipped:\n sources=uploadFile(source_zipfile+\".zip\")\n solution=uploadFile(solutions[solution_id])\n\n if sources is not None and solution is not None:\n submitted = submit(sources,solution,TOKEN,dataset_ids[solution_id])['id']\n score = poll_submission(TOKEN,round_id,submitted)\n if score is not None:\n print(\"\\n=== SUBMISSION REPORT FOR DATASET \"+str(solution_id) + \" ===\")\n if score.get(\"best\"):\n print(\"You have increased your top score!!\")\n shutil.move(source_zipfile+\".zip\", topscore_dir + \"/\"+str(solution_id)+\"-[\" + score.get(\"score\") + \"].zip\") # MOVE TO TOPSCORES\n if not score.get(\"valid\"):\n print(\"The submitted solution was declared invalid.\")\n os.remove(source_zipfile+\".zip\") # cleanup\n else:\n print(\"Score: \" + score.get(\"score\"))\n else:\n print(\"Files have not been submitted.\")\n os.remove(source_zipfile+\".zip\") # cleanup\n\n else:\n print(\"Something went wrong when zipping the source directory, exiting now...\")\n","sub_path":"HashCodeCI/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"150966057","text":"import numpy as np\nimport pandas as pd\n\nfp=\"d:/data.xlsx\"\nepsilon = 1e-5\ndata=pd.read_excel(fp,index_col=None,header=None,encoding='utf8')\ndata = (data - data.min())/(data.max() - data.min())\nm,n=data.shape\n#第一步读取文件,如果未标准化,则标准化\ndata=data.values\nprint(m,n)\ndata2=np.zeros((4983,4))\nfor i in range(4983):\n for j in range(4):\n if data[i,j]-data[i,5]==0:\n data2[i,j]=0\n else:\n data2[i,j]=1/abs(data[i,j]-data[i,5])\ndata3=data2.sum(axis=0)\nwi=data3/np.sum(data3)\nprint(wi)","sub_path":"node importance/weight2.py","file_name":"weight2.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"589444005","text":"\"\"\"\nTest creation of basic plot elements\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .. import fig_to_dict, fig_to_html\nfrom numpy.testing import assert_equal\n\n\ndef test_line():\n fig, ax = plt.subplots()\n ax.plot(np.arange(10), np.random.random(10),\n '--k', alpha=0.3, zorder=10, lw=2)\n rep = fig_to_dict(fig)\n axrep = rep['axes'][0]\n line = axrep['lines'][0]\n\n assert_equal(list(sorted(line.keys())),\n ['alpha', 'color', 'coordinates', 'dasharray', 'data',\n 'drawstyle', 'id', 'linewidth', 'xindex', 'yindex',\n 'zorder'])\n assert_equal(line['alpha'], 0.3)\n assert_equal(line['color'], \"#000000\")\n assert_equal(line['coordinates'], 'data')\n assert_equal(line['dasharray'], '7.4,3.2')\n assert_equal(line['zorder'], 10)\n assert_equal(line['linewidth'], 2)\n\n\ndef test_markers():\n fig, ax = plt.subplots()\n ax.plot(np.arange(10), np.random.random(10),\n '^k', alpha=0.3, zorder=10, mec='r', mew=2, c='b')\n rep = fig_to_dict(fig)\n axrep = rep['axes'][0]\n markers = axrep['markers'][0]\n\n assert_equal(list(sorted(markers.keys())),\n ['alpha', 'coordinates', 'data', 'edgecolor', 'edgewidth',\n 'facecolor', 'id', 'markerpath', 'xindex', 'yindex',\n 'zorder'])\n assert_equal(markers['alpha'], 0.3)\n assert_equal(markers['zorder'], 10)\n assert_equal(markers['coordinates'], 'data')\n assert_equal(markers['edgecolor'], '#FF0000')\n assert_equal(markers['edgewidth'], 2)\n assert_equal(markers['facecolor'], '#0000FF')\n assert_equal(markers['markerpath'][0],\n [[0.0, -3.0], [-3.0, 3.0], [3.0, 3.0]])\n assert_equal(markers['markerpath'][1],\n ['M', 'L', 'L', 'Z'])\n\n\ndef test_scatter():\n fig, ax = plt.subplots()\n ax.scatter(np.arange(10), np.random.random(10), c='r', s=30,\n marker='^', alpha=0.3, lw=2, edgecolors='b', zorder=10)\n rep = fig_to_dict(fig)\n axrep = rep['axes'][0]\n points = axrep['collections'][0]\n\n assert_equal(list(sorted(points.keys())),\n ['alphas', 'edgecolors', 'edgewidths', 'facecolors', 'id',\n 'offsetcoordinates', 'offsets', 'pathcoordinates', 'paths',\n 'pathtransforms', 'xindex', 'yindex', 'zorder'])\n assert_equal(points['alphas'], [0.3])\n assert_equal(points['zorder'], 10)\n assert_equal(points['edgecolors'], ['rgba(0, 0, 255, 0.3)'])\n assert_equal(points['facecolors'], ['rgba(255, 0, 0, 0.3)'])\n assert_equal(points['edgewidths'], (2.0,))\n assert_equal(points['paths'][0][0],\n [[0.0, 0.5], [-0.5, -0.5], [0.5, -0.5]])\n assert_equal(points['paths'][0][1],\n ['M', 'L', 'L', 'Z'])\n assert_equal(points['pathtransforms'],\n [[7.607257743127308, 0.0, 0.0, 7.607257743127308, 0.0, 0.0]])\n\n\ndef test_patch():\n fig, ax = plt.subplots()\n ax.add_patch(plt.Rectangle((0, 0), 1, 2, alpha=0.2, linewidth=2,\n edgecolor='green', facecolor='red', zorder=3))\n rep = fig_to_dict(fig)\n axrep = rep['axes'][0]\n path = axrep['paths'][0]\n\n assert_equal(list(sorted(path.keys())),\n ['alpha', 'coordinates', 'dasharray', 'data', 'edgecolor',\n 'edgewidth', 'facecolor', 'id', 'pathcodes',\n 'xindex', 'yindex', 'zorder'])\n\n assert_equal(path['alpha'], 0.2)\n assert_equal(path['edgecolor'], \"rgba(0, 128, 0, 0.2)\")\n assert_equal(path['facecolor'], \"rgba(255, 0, 0, 0.2)\")\n assert_equal(path['edgewidth'], 2)\n assert_equal(path['zorder'], 3)\n\n\ndef test_text():\n fig, ax = plt.subplots()\n ax.text(0.1, 0.1, \"abcde\", size=14, color='red', alpha=0.7,\n rotation=15, ha='center', va='center')\n rep = fig_to_dict(fig)\n axrep = rep['axes'][0]\n text = axrep['texts'][0]\n\n 
assert_equal(list(sorted(text.keys())),\n ['alpha', 'color', 'coordinates', 'fontsize', 'h_anchor',\n 'id', 'position', 'rotation', 'text', 'v_baseline',\n 'zorder'])\n assert_equal(text['alpha'], 0.7)\n assert_equal(text['color'], \"#FF0000\")\n assert_equal(text['text'], \"abcde\")\n assert_equal(text['rotation'], -15)\n assert_equal(text['fontsize'], 14)\n assert_equal(text['position'], [0.1, 0.1])\n assert_equal(text['h_anchor'], 'middle')\n assert_equal(text['v_baseline'], 'central')\n assert_equal(text['zorder'], 3)\n assert_equal(text['coordinates'], \"data\")\n\n\ndef test_image():\n fig, ax = plt.subplots()\n ax.imshow(np.random.random((20, 20)), cmap=plt.cm.binary,\n alpha=0.2, zorder=4, extent=(2, 4, 3, 5))\n rep = fig_to_dict(fig)\n axrep = rep['axes'][0]\n image = axrep['images'][0]\n\n # TODO: how to test data?\n assert_equal(list(sorted(image.keys())),\n ['alpha', 'coordinates', 'data', 'extent', 'id', 'zorder'])\n assert_equal(image['alpha'], 0.2)\n assert_equal(image['extent'], (2, 4, 3, 5))\n assert_equal(image['zorder'], 4)\n assert_equal(image['coordinates'], \"data\")\n\n\ndef test_ticks():\n plt.xticks([1,2,3])\n rep = fig_to_html(plt.gcf())\n # TODO: use casperjs here if available to confirm that the xticks\n # are rendeder as expected\n\n # pandas tslib generates ticks with unusual dtypes\n # test that they are converted to html successfully\n plt.xticks(np.array([1,2,3], dtype=np.int32))\n rep = fig_to_html(plt.gcf())\n\n # custom ticks should appear in the correct place, with the\n # correct text\n positions, labels = [0, 1, 10], ['A','B','C']\n rep = fig_to_html(plt.gcf())\n # TODO: use casperjs here if available to confirm that the xticks\n # are rendeder as expected\n","sub_path":"mpld3/tests/test_elements.py","file_name":"test_elements.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"589216323","text":"\"\"\"This module provides tests for Way model\"\"\"\n\nimport datetime\n\nfrom django.test import TestCase\nfrom django.db.models import signals\n\nfrom notification.models import Notification\nfrom notification.signals import create_notification_task, revoke_notification_task\nfrom way.models import Way\nfrom custom_user.models import CustomUser\nfrom place.models import Place\nfrom route.models import Route\n\n\nclass WayModelTestCase(TestCase):\n \"\"\"TestCase for providing Way model testing\"\"\"\n\n def setUp(self):\n \"\"\"Method that provides preparation before testing Way model's features.\"\"\"\n signals.post_save.disconnect(create_notification_task, sender=Notification)\n signals.post_delete.disconnect(revoke_notification_task, sender=Notification)\n\n self.user = CustomUser.objects.create(id=100, email='mail@gmail.com', password='Password1234', is_active=True)\n start_place = Place.objects.create(id=100, longitude=111.123456, latitude=84.123456)\n end_place = Place.objects.create(id=200, longitude=120.123456, latitude=89.123456)\n\n self.way = Way.objects.create(\n id=100,\n name='test_name',\n user=self.user\n )\n self.route = Route.objects.create(\n id=100,\n time='23:58:59',\n transport_name='',\n position=0,\n way=self.way,\n start_place=start_place,\n end_place=end_place\n )\n self.notification = Notification.objects.create(\n id=100,\n way=self.way,\n start_time=datetime.date(2019, 10, 29),\n end_time=datetime.date(2019, 12, 29),\n week_day=6,\n time=datetime.time(23, 58, 59)\n )\n\n def test_get_by_id(self):\n \"\"\"Provide tests for `get_by_id` method of certain Way instance.\"\"\"\n expected_way = Way.objects.get(id=self.way.id)\n actual_way = Way.get_by_id(obj_id=self.way.id)\n self.assertEqual(expected_way, actual_way)\n\n unexisting_way = Way.get_by_id(obj_id=999)\n self.assertIsNone(unexisting_way)\n self.assertRaises(Way.DoesNotExist, Way.objects.get, id=999)\n\n def test_delete_by_id(self):\n \"\"\"Provide tests for `delete_by_id` method of certain Way instance.\"\"\"\n is_deleted = Way.delete_by_id(obj_id=self.way.id)\n self.assertTrue(is_deleted)\n self.assertRaises(Way.DoesNotExist, Way.objects.get, id=self.way.id)\n\n is_deleted = Way.delete_by_id(obj_id=999)\n self.assertFalse(is_deleted)\n\n def test_to_dict(self):\n \"\"\"Provide tests for `to_dict` method of certain Way instance.\"\"\"\n way = Way.objects.get(id=self.way.id)\n\n expected_dict = {\n 'id': 100,\n 'name': 'test_name',\n 'user_id': 100\n }\n actual_dict = way.to_dict()\n self.assertDictEqual(expected_dict, actual_dict)\n\n def test_create(self):\n \"\"\"Provide tests for `create` method of Way model.\"\"\"\n way = Way.create(user=self.user, name='name')\n self.assertIsInstance(way, Way)\n self.assertIsNotNone(Way.objects.get(id=way.id))\n\n way = Way.create(user=CustomUser())\n self.assertIsNone(way)\n\n def test_update(self):\n \"\"\"Provide tests for `update` method of certain Way instance.\"\"\"\n new_name = 'new_test_name'\n is_updated = self.way.update(name=new_name)\n self.assertTrue(is_updated)\n\n way = Way.objects.get(id=self.way.id)\n self.assertEqual(way.name, new_name)\n\n def test_get_way_with_routes(self):\n \"\"\"Provide tests for `get_way_with_routes` method of certain Way instance.\"\"\"\n way = Way.objects.get(id=self.way.id)\n\n expected_dict = {\n 'id': 100,\n 'name': 'test_name',\n 'user_id': 100,\n 'routes': [\n {\n 'id': 100,\n 'time': datetime.time(23, 58, 59),\n 'transport_name': '',\n 'position': 0,\n 'way': 100,\n 'start_place': 
100,\n 'end_place': 200\n }\n ]\n\n }\n\n actual_dict = way.get_way_with_routes()\n self.assertDictEqual(expected_dict, actual_dict)\n\n def test_str(self):\n \"\"\"Provide tests for `__str__` method of certain Way instance.\"\"\"\n expected_string = f'Way id: {self.way.id}, user id: {self.way.user.id}'\n actual_string = self.way.__str__()\n\n self.assertEqual(expected_string, actual_string)\n\n def test_get_route_by_position(self):\n \"\"\"Provide tests for `get_route_by_position` method of certain Way instance.\"\"\"\n expected_route = self.way.get_route_by_position(position=0)\n self.assertEqual(expected_route, self.route)\n\n way_without_routes = Way.objects.create(user=self.user)\n expected_route = way_without_routes.get_route_by_position(position=0)\n self.assertIsNone(expected_route)\n\n def test_get_by_notification(self):\n \"\"\"Provide tests for `get_by_notification` method of certain Way instance.\"\"\"\n expected_way = self.way.get_by_notification(notification_id=100)\n self.assertEqual(expected_way, self.way)\n","sub_path":"way_to_home/tests/unittests/way/testmodel.py","file_name":"testmodel.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"120726188","text":"#!usr/bin/env python\n\n\"\"\"plot_scale.py: This file plots the running time of minisat on a set of\ninstances that have constant clause-to-variable ratio and increasing number of\nvariables. It is used to compare the running time of instances with the same\nnumber of variables per community.\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#-----------------------------------------------------------------------------#\n# #\n# Global Variables #\n# #\n#-----------------------------------------------------------------------------#\n\n#source of plotting data\nDATA_FILE1 = \"../output_data/1_community.out\"\nDATA_FILE5 = \"../output_data/5_communities.out\"\nDATA_FILE10 = \"../output_data/10_communities.out\"\nDATA_FILE15 = \"../output_data/15_communities.out\"\nDATA_FILE20 = \"../output_data/20_communities.out\"\n\n_DATA_LIST = [DATA_FILE1, DATA_FILE5, DATA_FILE10, DATA_FILE15, DATA_FILE20]\n\n#delimiter for splitting data\nSPLIT = \" \"\n\n#boolean to skip the first line of input file, necessary for files with headers\n_FIRST_LINE = True\n\n#x-axis for plotting\n_X_AXIS = [x for x in range(10, 301, 10)]\n\n#y-axis for plotting\n_Y_AXIS1 = [0 for _ in range(10, 301, 10)]\n_Y_AXIS5 = [0 for _ in range(10, 301, 10)]\n_Y_AXIS10 = [0 for _ in range(10, 301, 10)]\n_Y_AXIS15 = [0 for _ in range(10, 301, 10)]\n_Y_AXIS20 = [0 for _ in range(10, 301, 10)]\n\nY_AXES = [_Y_AXIS1, _Y_AXIS5, _Y_AXIS10, _Y_AXIS15, _Y_AXIS20]\n\n#format for plot\nFORMAT = \"r\"\n\n#-----------------------------------------------------------------------------#\n# #\n# Data Gathering Function #\n# #\n#-----------------------------------------------------------------------------#\n\ndef get_data(inst):\n axis_index = int(inst[6]) / 5\n val_index = int(inst[4]) / int(inst[6]) / 10 - 1\n Y_AXES[axis_index][val_index] += float(inst[1])\n\ndef reform_data():\n for axis in Y_AXES:\n for i in range(len(axis)):\n axis[i] /= 5 \n\n\n#-----------------------------------------------------------------------------#\n# #\n# Read Input and Plot #\n# #\n#-----------------------------------------------------------------------------#\n\nfor j in range(len(_DATA_LIST)):\n data = open(_DATA_LIST[j], \"r\")\n for line in data:\n if _FIRST_LINE:\n _FIRST_LINE = False\n else:\n inst = [i.strip() for i in line.split(SPLIT)]\n get_data(inst)\n _FIRST_LINE = True\n\nreform_data()\nplt.plot(_X_AXIS, _Y_AXIS1, FORMAT,\n _X_AXIS, _Y_AXIS5, \"b\",\n _X_AXIS, _Y_AXIS10, \"g\",\n _X_AXIS, _Y_AXIS15, \"m\",\n _X_AXIS, _Y_AXIS20, \"y\")\nplt.show()\n","sub_path":"base_files/plot_files/plot_scale.py","file_name":"plot_scale.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"107739159","text":"#Resolution d'une equation du second degré\r\nfrom math import sqrt #importation de la fonction racine (sqrt) depuis le module math\r\nprint('entrez a, b et c \\n')\r\na=float(input())\r\nb=float(input())\r\nc=float(input())\r\nif a==0 :\r\n if b==0 :\r\n if c==0 :\r\n print('tout reel est solution')\r\n else:\r\n print('pas de solutions')\r\n else:\r\n print(-c/b)\r\nelse:\r\n d=b*b-4*a*c #calcul du discriminant\r\n if d==0:\r\n print(-b/2*a)\r\n elif d>0:\r\n print('les solutions sont :\\n')\r\n print((-b-sqrt(d))/2/a,(-b+sqrt(d))/2/a,sep=' et ') #l'interpreteur seclanchera une erreur si on n'importe pas sqrt\r\n else:\r\n print('pas de solution dans R')\r\ninput() #pour que le programme ne se ferme pas apres l'execution\r\n","sub_path":"TP/TP1/exo9.py","file_name":"exo9.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"581013213","text":"import random\nnum = random.randint(1,21)\ncount = 0\nerror = 0\nname = input('Hello! What is your name?')\n#print (\"Well, %s, I am thinking of a number between 1 and 20. Take a guess.\" % name))\nprint (\"Well, \"+name+\", I am thinking of a number between 1 and 20. Take a guess.\")\nwhile 1>0 :\n guess = input()\n if guess.isnumeric():\n guess=int(guess)\n if guess>=1 and guess<=20:\n count+=1\n if guess > num :\n print (\"Your guess is too high\\nTake a guess.\")\n elif guess < num :\n print (\"Your guess is too low\\nTake a guess.\")\n else:\n break\n else:\n error+=1\n print (\"Error! Your guess is out of range... Try again! :'(\")\n else:\n error+=1\n print (\"ValueError! Try again! :'(\")\n\nprint(\"Good job, \"+name+\"! You guessed my number in %d guesses! (error:%d)\" % (count,error))\n","sub_path":"nuri1.py","file_name":"nuri1.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"223502194","text":"import cv2\nimport os\nimport numpy as np\n\ndef AddOrigMask(orig_img_filename, mask_img_filename, dst_img_filename):\n orig_img = cv2.imread(orig_img_filename)\n mask_img = cv2.imread(mask_img_filename)\n\n mask_img = cv2.cvtColor(mask_img, cv2.COLOR_BGR2GRAY);\n\n #mask_img[mask_img == 255] = 40;\n\n #print (mask_img.shape)\n #print (orig_img.shape)\n\n orig_img[mask_img == 255] = [255,0,0]\n\n cv2.imwrite( dst_img_filename, orig_img);\n return ;\n\ntif_dir = '../Tissue images/'\nmask_dir = '../Mask/'\ndst_dir = '../OrigMask/'\npng_files = os.listdir(tif_dir);\nfor file in png_files:\n if not os.path.isdir(file):\n tiffilename = tif_dir + file;\n pngfilename = mask_dir + file[0:file.find('.tif')] + '.png';\n dstfilename = dst_dir + file[0:file.find('.tif')] + '.png';\n\n AddOrigMask(tiffilename, pngfilename, dstfilename)\n\n","sub_path":"AddAnnotations.py","file_name":"AddAnnotations.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"334996068","text":"from setuptools import setup, find_packages\r\n\r\nwith open('README.md') as readme:\r\n long_description = readme.read()\r\n\r\nversion = __import__('wooey').__version__\r\n\r\nsetup(\r\n name='Wooey',\r\n version=version,\r\n url='http://github.com/mfitzp/Wooey',\r\n author='Martin Fitzpatrick',\r\n author_email='martin.fitzpatrick@gmail.com',\r\n description='Simple Web UIs for Python Scripts',\r\n license='MIT',\r\n packages=find_packages(),\r\n include_package_data=True,\r\n classifiers=[\r\n 'Development Status :: 2 - Pre-Alpha',\r\n 'Intended Audience :: Developers',\r\n 'Topic :: Desktop Environment',\r\n 'Topic :: Software Development :: Build Tools',\r\n 'Topic :: Software Development :: Widget Sets',\r\n 'Programming Language :: Python :: 2.7',\r\n 'Programming Language :: Python :: 3.4'\r\n ],\r\n long_description=long_description,\r\n)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"211273223","text":"\"\"\"A very simple MNIST classifier.\nSee extensive documentation at\nhttp://tensorflow.org/tutorials/mnist/beginners/index.md\n\nEdited by: Vijay Ravichandran\nThis code was adapted from the Tensorflow website.\nThe changes to it include the change of optimizer and the loss function\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport tensorflow as tf\nimport time\n\n# Import data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# Placeholders for input and labels\nx = tf.placeholder(tf.float32, [None, 784])\ny_ = tf.placeholder(tf.float32, [None, 10])\n\n# Define model\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.matmul(x, W) + b\n\n# Define loss and optimizer\ncross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n\nadam_optimizer = tf.train.AdamOptimizer(learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n use_locking=False)\n\n# Define optimization problem\ntrain_step = adam_optimizer.minimize(cross_entropy)\n\n# Create session\nsess = tf.InteractiveSession()\n\n# initialize all variables\ntf.global_variables_initializer().run()\n\n# Iteration = Train for 100 steps and check accuracy on validation set\nfor _100sep in range(20):\n start_time = time.time()\n for _step in range(100):\n # read next batch of input\n batch_xs, batch_ys = mnist.train.next_batch(100)\n\n # Run the training step\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n # Test trained model\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n duration = time.time() - start_time\n print('Iteration {}, Validation accuracy: {:3f}, Time for step: {:3f}'.format(_100sep,\n sess.run(accuracy,\n feed_dict={\n x: mnist.test.images,\n y_: mnist.test.labels}),\n duration))\n","sub_path":"mnist/mnist_adam.py","file_name":"mnist_adam.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"613848381","text":"#!/usr/bin/env python\n\nimport sys\nimport os\n\n_defaultPrefix = \"\"\n\ndef change_default_prefix():\n global _defaultPrefix\n validInputs = [\"0B\", \"0X\", \"0O\", \"DEC\"]\n\n print(\"\\nChange default prefix.\\nEnter 0b, 0x, 0o, or dec.\")\n\n _defaultPrefix = raw_input(\"Enter new default prefix: \")\n\n while True:\n try:\n validInputs.index(_defaultPrefix.upper())\n break\n except:\n _defaultPrefix = raw_input(\"Invalid input!\\nEnter new default prefix: \")\n\n print(\"Default prefix changed to {0}\".format(_defaultPrefix))\n if (_defaultPrefix.upper() == \"DEC\"):\n _defaultPrefix = \"\"\n inputNum = raw_input(\"Input: {0}\".format(_defaultPrefix))\n\n return _defaultPrefix + inputNum\n\ndef get_input():\n global _defaultPrefix\n\n if (len(sys.argv) < 2):\n baseInput = raw_input(\"Input: {0}\".format(_defaultPrefix))\n inputNum = _defaultPrefix + baseInput\n\n if (baseInput.upper() == \"EXIT\"):\n sys.exit()\n elif (baseInput.upper() == \"PREFIX\"):\n inputNum = change_default_prefix()\n\n else:\n inputNum = sys.argv[1]\n\n while True:\n try:\n if (int(eval(inputNum))):\n break\n except:\n inputNum = raw_input(\"Invalid input!\\nInput: {0}\".format(_defaultPrefix))\n\n inputNum = _defaultPrefix + inputNum\n\n prefix = inputNum[:2].upper()\n decInputNum = eval(inputNum)\n\n return prefix, decInputNum\n\ndef binary_output(decimal):\n binary = bin(decimal)\n bits = str(len(binary[2:]))\n print(\"\\tBin: {0} ({1} bits)\".format(binary, bits))\n\ndef hex_output(decimal):\n hexadecimal = hex(decimal)\n print(\"\\tHex: {0}\".format(hexadecimal))\n\ndef oct_output(decimal):\n octal = oct(decimal)\n print(\"\\tOct: {0}\".format(octal))\n\ndef dec_output(decimal):\n print(\"\\tDec: {0}\".format(decimal))\n\ndef main():\n if (len(sys.argv) < 2):\n # clear the console - comment out to disable\n os.system('cls' if os.name == 'nt' else 'clear')\n\n # Heading runs when no command line argument supplied\n print(\"\\n******\\nNumber System Converter\")\n print(\"0x = Hex, 0b = Bin, 0o = Oct\")\n print(\"To change default prefix type \\\"prefix\\\"\")\n print(\"Ctrl + c or \\\"exit\\\" to quit\\n******\")\n\n while True:\n prefix, decimal = get_input()\n\n if (prefix == \"0X\"):\n binary_output(decimal)\n dec_output(decimal)\n oct_output(decimal)\n\n elif (prefix == \"0B\"):\n print(\"\\t{0} bits\".format(str(len(bin(decimal)[2:]))))\n dec_output(decimal)\n hex_output(decimal)\n oct_output(decimal)\n\n elif (prefix == \"0O\"):\n binary_output(decimal)\n dec_output(decimal)\n hex_output(decimal)\n\n else:\n binary_output(decimal)\n hex_output(decimal)\n oct_output(decimal)\n\n # exit if run from command line\n if (len(sys.argv) > 1):\n sys.exit()\n\nif __name__ == '__main__':\n main()\n","sub_path":"numSysConverter.py","file_name":"numSysConverter.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"399975469","text":"import random\n\n# Guess the Word Application\nprint(\"**\"*50)\n\n# Welcome message.\nprint(\"Welcome To Guess the Word Application.\")\n\ngame_dict = {\"sports\": ['basketball', 'baseball', 'soccer', 'football', 'tennis', 'curling'],\n \"colors\": ['orange', 'yellow', 'purple', 'aquamarine', 'violet', 'gold'],\n \"fruits\": ['apple', 'banana', 'watermelon', 'peach', 'mango', 'strawberry'],\n \"classes\": ['english', 'history', 'science', 'mathematics', 'art', 'health'],\n }\n\ngame_keys = [key for key in game_dict.keys()] # List comprehension\n\n# Main loop\nactive = True\nwhile active:\n game_category = game_keys[random .randint(0, len(game_keys)-1)]\n game_word = game_dict[game_category][random.randint(0, len(game_category)-1)]\n\n blank_word = []\n for letter in game_word:\n blank_word.append(\"-\")\n\n print(f\"Guess a {len(blank_word)} letter word from the following category: {game_category}\")\n\n # Convert list into string by using different methods\n \"\"\" \n >>> Using .join method of strings\n print(\" \".join(blank_word))\n\n >>> Using list comprehension\n blank_word = \" \".join([str(element) for element in blank_word]) \n\n >>> Using list comprehension with map function...>>>\n blank_word = \" \".join(map(str, blank_word))\n \"\"\"\n\n guess_count = 0\n guess = \" \"\n\n while guess != game_word:\n word=\"\".join(map(str, blank_word))\n print(word)\n guess = input(\"\\nEnter your guess: \").strip().lower()\n guess_count += 1\n\n # Guess is incorrect,continue\n if guess != game_word:\n print(\"That is not correct. Let us reveal a letter to help you!\")\n on = True\n while on:\n letter_index = random.randint(0, len(blank_word)-1)\n if blank_word[letter_index] == \"-\":\n blank_word[letter_index] = game_word[letter_index]\n on = False\n\n # Guess is correst,break\n else:\n print(f\"Correct! You guessed the word in {guess_count} guesses.\")\n break\n\n # Continue or not\n Choice = input(\n \"\\nWould you like to run the program again (y/n): \").lower().strip()\n if Choice.startswith(\"y\"):\n continue\n elif Choice.startswith(\"n\"):\n active = False\n else:\n print(\"Please choose y/n only!\")\n break\n\n# End of programm.\nprint(\"\\n\\nThank you for using the Guess the Word Application. Goodbye.\\n\")\nprint(\"**\"*50)\n","sub_path":"APPLICATIONS/6 While Loops/29 Guess the Word Application.py","file_name":"29 Guess the Word Application.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"333685690","text":"### Mason Brewer\n### April 5th, 2019\n### Algebra Calculator - Flask Project\n\nfrom flask import Flask, render_template, request, redirect\n\napp = Flask(__name__) \n\ndef removeSpaces(inpt):\n str = \"\"\n for i in range(len(inpt)):\n if(inpt[i] != ' '):\n str += inpt[i]\n return str\n# 4/5/19 - For now, assuming no problamatic answers.\ndef parseVars(eqn):\n y = 0\n idx = 0\n while(eqn[idx] != \"=\"):\n y = 10 * y + int(eqn[idx])\n idx += 1\n idx += 1 # Getting past the '='\n a = 0\n while(eqn[idx] != \"x\"):\n a = 10 * a + int(eqn[idx])\n idx += 1\n idx += 2 # Getting past the 'x+'\n b = 0\n while(idx < len(eqn)):\n b = 10 * b + int(eqn[idx])\n idx += 1\n # Returning [y, a, b]\n return [y, a, b]\n\n@app.route('/') \ndef hello_world():\n return render_template('welcome.html')\n\n@app.route('/process', methods=['POST'])\ndef calculate():\n inpt = request.form['equation']\n # Starting with the easiest equation: y = ax + b\n equation = removeSpaces(inpt)\n vars = parseVars(equation)\n x = (vars[0] - vars[2]) / vars[1]\n return render_template('calculated.html', solution=x)\n\n# Execute this program and this will be runnning in \n# localhost:5000 on your web browser.\nif __name__==\"__main__\": \n app.run(debug=True)","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"151032301","text":"from threeML.models.tablemodel import NumpyTableModel\nfrom threeML.models.Parameter import Parameter\nimport numpy\nimport math\nimport scipy.integrate\nimport operator\nimport numexpr\n\nimport collections\n\nclass BandTable(NumpyTableModel):\n def setup(self):\n\n self.SetTableFile(\"/Users/jburgess/Research/3ML/threeml/models/fluxModels/band.npz\")\n\n\n \n self.functionName = \"Band function [Band et al. 1993]\"\n self.formula = r'''\n \\[f(E) = \\left\\{ \\begin{eqnarray}\n K \\left(\\frac{E}{100 \\mbox{ keV}}\\right)^{\\alpha} & \\exp{\\left(\\frac{-E}{E_{c}}\\right)} & \\mbox{ if } E < (\\alpha-\\beta)E_{c} \\\\\n K \\left[ (\\alpha-\\beta)\\frac{E_{c}}{100}\\right]^{\\alpha-\\beta}\\left(\\frac{E}{100 \\mbox{ keV}}\\right)^{\\beta} & \\exp{(\\beta-\\alpha)} & \\mbox{ if } E \\ge (\\alpha-\\beta)E_{c}\n \\end{eqnarray}\n \\right.\n \\]\n '''\n\n self.parameters = collections.OrderedDict()\n self.parameters['alpha'] = Parameter('alpha',-1.0,-10,10,0.1,fixed=False,nuisance=False,dataset=None)\n self.parameters['beta'] = Parameter('beta',-2.0,-10,10,0.1,fixed=False,nuisance=False,dataset=None)\n self.parameters['E0'] = Parameter('E0',500,10,1e5,50,fixed=False,nuisance=False,dataset=None,unit='keV')\n self.parameters['K'] = Parameter('K',1,1e-4,1e3,0.1,fixed=False,nuisance=False,dataset=None,normalization=True)\n \n \n \n \n def __call__(self,e):\n #The input e can be either a scalar or an array\n #The following will generate a wrapper which will\n #allow to treat them in exactly the same way,\n #as they both were arrays, while keeping the output\n #in line with the input: if e is a scalar, the output\n #will be a scalar; if e is an array, the output will be an array\n \n alpha = self.parameters['alpha'].value\n beta = self.parameters['beta'].value\n E0 = self.parameters['E0'].value\n K = self.parameters['K'].value\n \n return self._interpFunc((K,E0,alpha,beta,e))\n \n \n \n","sub_path":"threeML/models/fluxModels/bandTable.py","file_name":"bandTable.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"175241479","text":"#! /usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nfrom controller import MSGHandler\nfrom src.db import Room\nfrom .. import router\nfrom .wsclass import CoursesWSC\n\nRoom.defaults['courses'] = []\n\"\"\"This patches the ``Room`` class, so that it has a\ndefault attribute named ``courses``.\"\"\"\n\nMSGHandler._course = None\n\n\n@property\ndef course(self):\n \"\"\"Current course asociated with this MSGHandler.\"\"\"\n return self._course\n\n\n@course.setter\ndef course(self, new_course):\n self._course = new_course\n self.course_msg_type = \\\n 'courseMessage({})'.format(new_course.id)\n\n router_object = self.ws_objects[\n router.RouterWSC]\n courses_object = self.ws_objects[CoursesWSC]\n\n courses_object.register_action_in(\n self.course_msg_type,\n action=router_object.to_local,\n channels={'d'}\n )\nMSGHandler.course = course\n","sub_path":"backend_modules/courses/patches.py","file_name":"patches.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"616641038","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\n\r\nimport psutil\r\nimport os\r\n\r\nimport sys\r\nimport time\r\n\r\n#from sklearn.datasets.samples_generator import make_classification\r\n#X, y = make_classification(n_samples=1000, n_features=3, n_redundant=0, n_classes=3, n_informative=2,\r\n # n_clusters_per_class=1,class_sep =0.5, random_state =10)\r\nstart = time.clock()\r\n\r\nfilename = \"data-p2p.txt\"\r\nscanin=np.loadtxt(filename)\r\nbb=-48\r\nee=0\r\nfor i in range(32):\r\n bb=bb+48\r\n ee=ee+48\r\n X=scanin[bb:ee,[0,2,3]]\r\n y=scanin[bb:ee,7]\r\n \r\n \r\n# print(X,y)\r\n \r\n# ax = Axes3D(fig, rect=[0, 0, 1, 1], elev=30, azim=20)\r\n# ax.scatter(X[:, 0], X[:, 1], X[:, 2],marker='o',c=y)\r\n \r\n lda = LinearDiscriminantAnalysis(n_components=1)\r\n lda.fit(X,y)\r\n X_new = lda.transform(X)\r\n# print(X_new)\r\n \r\n# ax = Axes3D(fig, rect=[0, 0, 1, 1], elev=30, azim=20)\r\n# ax.scatter(X_new[:, 0], X_new[:, 1], X_new[:, 2],marker='o',c=y)\r\n# plt.scatter(X_new[:, 0], X_new[:, 1],X_new[:,2],marker='o',c=y)\r\n \r\n# plt.scatter(X[0:24,0],X[0:24,1],marker='o',color='r',s=10)\r\n# plt.scatter(X[25:48,0],X[25:48,1],marker='o',color='b',s=10)\r\n# \r\n# plt.scatter(100000*X_new[0:24, 0],100000*X_new[0:24, 0],marker='o',color='y')\r\n# plt.scatter(100000*X_new[25:48, 0],100000*X_new[25:48, 0],marker='o',color='g')\r\n# \r\n \r\n plt.show()\r\nelapsed = (time.clock() - start)\r\nprint(\"Time used:\",elapsed)\r\n\r\ninfo = psutil.virtual_memory()\r\nprint ('内存占用',psutil.Process(os.getpid()).memory_info().rss)\r\nprint(sys.getsizeof(X_new))\r\n#print (\"总内存\",info.total)\r\n#print ('内存占比',info.percent)\r\n#print ('cpu个数',psutil.cpu_count())\r\n\r\n\r\n\r\n","sub_path":"lianxi/scikit-lda.py","file_name":"scikit-lda.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"547168477","text":"import os\n#import sys\n#import math\nimport numpy as np\n#import subprocess\nimport time\n\nfrom config import *\n\n\n########## . ##########\n\n\n\n########## settings ##########\ngamma=2 # 1 =enable, 0 =disable\nbright=0.1 # 0 to 1 linear brightness\nxs=1 # horizontal scale, integer\nys =xs # vertical scale, integer\nbl=8 # bit depth per color, max 8\nbb=8 # brightness for bitplane zero. Like 1, 2, 4, 8 or so! each next bitplane is half\n\n\n\n\nstartx=100 # pixels from top of the input screen (fb0)\nstarty=100 # pixels from left of the input screen\nwidth=64 # input window will be this times scale\nheight=32 # same as above\n\n\nxl=64 # columns of the LED screen\nyl=16 # pjysical rows of the LED screen\nzl=2 # screens, typically 2 per panel\n\n\n\n\n\npower_lut = np.zeros((8))\npower_lut[0] = bb\nfor n in range (1, bl):\n power_lut[n] = power_lut[n-1] /2\nout_multiply = np.ceil(power_lut).astype(\"uint8\") # roundup\n\n\n\n\n########## apply brightness to output data ##########\ndef set_brightness(value, array):\n out = np.full((8, 16, xf), clk, dtype=\"uint32\") # CLK all high for stability reason\n # ^ bit, y, x\n out[:, :, 1:xl*2+1:2] = 0\n out[:, :, 2:xl*2+2:2] = clk # apply CLK to data section\n\n for n in range (8):\n if (int(array[n])):\n out[n,:,0:int(value*xf)] += enable # add brightness bit for FULL lines (don't multplay)\n else:\n out[n,:,0:int(value*xf*array[n])] += enable # add brightness bit for HALF lines\n \n # little hack to shift color and brightness one line off\n # MSB (brightest bitplane) at begining of the array\n out = np.concatenate((out[0, 0, :].ravel(), out.ravel())) # flatten the array and add extra column to move brightness bits one row away from data bits (added later) \n out = np.resize(out, (8, 16, xf)) #, refcheck=False) # resize back to expected size, cut off extra bytes\n \n # print some stats\n print(\"Total lines: \", (out_multiply.sum()+1)*16, \", allowed maximum: \", yf, \" (FB height)\")\n max_bri = power_lut.sum()*16 / (yf+3) * xf / (xf+30)\n print(\"Set brightness: \", \"%.3f\" % (value*100), \"%, real brightness: \", \"%.3f\" % (value*max_bri*100), \"%, maximum: \", \"%.3f\" % (max_bri*100), \"%\")\n return(out) \n\n\n\n########## prepare color look-up table, 8 because 2^3 (RGB) ##########\nout_lut = np.zeros((zl, 8), dtype=\"uint32\")\nfor i in range(zl):\n for n in range(8):\n d=0\n if(n&(1<<0)):\n d += out_map[i][0]\n if(n&(1<<1)):\n d += out_map[i][1]\n if(n&(1<<2)):\n d += out_map[i][2]\n out_lut[i, n]=d\n \n \n########## spawn arrays ##########\narr_input= np.zeros((1,32, 64, 3), dtype=\"uint8\")\n# ^ bitplane, y, x, color\narr_gamma= np.zeros((32, 64, 3), dtype=\"uint16\")\n# ^ y, x, color\narr_bitplanes= np.zeros((8, 32, 64, 3), dtype=\"uint8\")\n# ^ bit, y, x, color\narr_sum= np.zeros((8, 32, 64), dtype=\"uint8\")\n# ^ bit, y, x\narr_out_data = np.zeros((8, 16, xf), dtype=\"uint32\")\n# ^ bit, y, x\nd3 = np.zeros((8, 16, xl), dtype=\"uint32\")\n\n#arr_bitmask = np.zeros((8, 32, 64, 3), dtype=\"uint8\") # full size array\n#arr_bitmask = np.zeros((8, 1, 64, 3), dtype=\"uint8\") # doesn't hurt, doesn't help\narr_bitmask = np.zeros((8, 1, 1, 1), dtype=\"uint8\") # 2% speed improve\nfor n in range(0,8):\n arr_bitmask[n, :, :, :] = (1<<(7-n)) \n\n\narr_out_br_clk = np.full((8, 16, xf), clk, dtype=\"uint32\")\n# ^ bit, y, x\narr_out_br_clk[:, :, 1:xl*2+1:2] = 0\narr_out_br_clk[:, :, 2:xl*2+2:2] = clk\n\ntimer=40\ndata=([0, 0, 0, 0, 0])\n\n\n\n########## prepare black line + clock ##########\nfront_blank = 
np.full((13, xf), clk, dtype=\"uint32\")\nfront_blank[:, ::2] = 0\n\narr_out_br_clk = set_brightness(bright, power_lut).copy()\n\n########## apply same brightness to last empty line ##########\nend_bright = np.full((1, xf), clk, dtype=\"uint32\")\nend_bright[:, ::2] = 0 # generate clock, clear every second word\nend_bright[0,0:int(bright*xf*power_lut[bl-1])] += enable # set brightness\n\n\n\n\nwhile 1:\n time1 = time.time()\n\n\n ########## import image ##########\n with open('/dev/fb0','rb') as s:\n s.seek(xhost*starty*4, 0)\n raw=s.read(height*ys*xhost*4)\n buffer = np.frombuffer(raw, dtype=\"uint8\").reshape((1, height*ys,xhost,4))\n \n arr_input = buffer[0, ::ys, startx:startx+xs*width:xs, 0:3]\n s.close()\n \n ########## super simple gamma correction ##########\n if(gamma):\n arr_gamma = arr_input.astype(\"uint16\")\n arr_gamma = arr_gamma**2\n arr_input = np.right_shift(arr_gamma, 8).astype(\"uint8\")\n\n time2 = time.time()\n \n \n ########## convert image to bitplanes ##########\n arr_bitplanes = np.bitwise_and(arr_bitmask, arr_input) # split imported image into 8 bitplanes\n arr_bitplanes[:,:,:,0] = np.where(arr_bitplanes[:,:,:,0], 4, 0) # apply weights to colors\n arr_bitplanes[:,:,:,1] = np.where(arr_bitplanes[:,:,:,1], 2, 0)\n arr_bitplanes[:,:,:,2] = np.where(arr_bitplanes[:,:,:,2], 1, 0)\n \n arr_sum = np.bitwise_or(np.bitwise_or(arr_bitplanes[:,:,:,0], arr_bitplanes[:,:,:,1]), arr_bitplanes[:,:,:,2])\n # ^ sum colors into bitplanes \n d3 = np.bitwise_or(out_lut[0, arr_sum[:, 0:16, :]], out_lut[1, arr_sum[:, 16:32, :]])\n # ^ translate color codes (weights 1-2-4) to correct bits, different codes (bits) for each screen\n arr_out_data = arr_out_br_clk.copy()\n # ^ borrow copy of pre-generated array, with brightness bits\n arr_out_data[:, :, 1:xl*2+1:2] += d3 # drop image data into buffer\n arr_out_data[:, :, 2:xl*2+2:2] += d3 # clock and brightness already inside\n \n\n time3 = time.time()\n \n ########## write bitplanes to output framebuffer ##########\n a7 = arr_out_data[7,:,:]\n a6 = arr_out_data[6,:,:]\n a5 = arr_out_data[5,:,:]\n a4 = arr_out_data[4,:,:]\n a3 = arr_out_data[3,:,:]\n a2 = arr_out_data[2,:,:]\n a1 = arr_out_data[1,:,:]\n a0 = arr_out_data[0,:,:]\n \n a = np.concatenate((front_blank, np.repeat(arr_out_data, out_multiply, axis=0).reshape((-1, xf)), end_bright))\n # ^ easy to adjust by poking wth power_lut\n a = np.concatenate((front_blank, a0, a0, a0, a0, a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a3, a4, a5, a6, a7, end_bright))\n # ^ acually faster, but fixed and need manual tweaks\n #a = np.concatenate((front_blank, a0, a1, a0, a2, a0, a1, a0, a3, a6, a7, a0, a1, a0, a2, a0, a1, a0, a4, a5, end_bright))\n # ^ ideal data sequence, but brightness line-off data doesn't match\n \n with open('/dev/fb1','wb') as f:\n a.tofile(f)\n f.close()\n \n \n ########## calculate total fps and possible fps if divided into parallel tasks ##########\n time4 = time.time()\n \n data[0]+= (1.0/(time2-time1))\n data[1]+= (1.0/(time3-time2))\n data[2]+= (1.0/(time4-time3))\n data[3]+= (1.0/(time4-time1))\n \n #time.sleep(0.05)\n time5 = time.time()\n data[4]+= (1.0/(time5-time1))\n \n if (timer):\n timer-=1\n else:\n timer=39\n print(\"fps:\", \"%.3f\" % (data[3]/40), \"imp\", \"%.3f\" % (data[0]/40), \"conv\", \"%.3f\" % (data[1]/40), \"send\", \"%.3f\" % (data[2]/40), \"real\", \"%.3f\" % (data[4]/40))\n data = ([0, 0, 0, 0, 0])\n 
\n\n\n","sub_path":"6-improvements.py","file_name":"6-improvements.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"286895101","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 27 22:55:02 2019\n\n@author: carl\n\"\"\"\n\nimport traceback\nimport os.path\nfrom collections import namedtuple\nimport numpy as np\nimport scipy.signal, scipy.io\nfrom scipy.interpolate import interp1d\nfrom PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot\n\nfrom octavvs.io import SpectralData, Parameters\nfrom octavvs.algorithms import baseline, correction, normalization, ptir\n\nclass PrepParameters(Parameters):\n \"\"\"\n A class representing all the settings that can be made in the\n preprocessing UI and used to start a batch job.\n \"\"\"\n\n def setAlgorithm(self, algo: str):\n algs = ['bassan', 'konevskikh', 'rasskazov']\n alg = next((a for a in algs if a in algo.lower()), None)\n if alg is None:\n raise ValueError('Algorithm must be one of '+str(algs))\n self.scAlgorithm = alg\n\n def load(self, filename):\n super().load(filename)\n self.setAlgorithm(self.scAlgorithm)\n\n\n\nclass PrepWorker(QObject):\n \"\"\"\n Worker thread class for the heavy parts of the preprocessing: Scattering\n correction and the multiple-file batch processing.\n \"\"\"\n # Signals for when processing is finished or failed, and for progress indication\n done = pyqtSignal(np.ndarray, np.ndarray, np.ndarray)\n stopped = pyqtSignal()\n failed = pyqtSignal(str, str)\n progress = pyqtSignal(int, int)\n progressPlot = pyqtSignal(np.ndarray, tuple)\n\n fileLoaded = pyqtSignal(int)\n loadFailed = pyqtSignal(str, str, str)\n\n batchProgress = pyqtSignal(int, int)\n batchDone = pyqtSignal(bool)\n\n def __init__(self, parent=None):\n QObject.__init__(self, parent)\n self.halt = False\n\n @pyqtSlot(int)\n def loadFile(self, data, num):\n \"Load file number num in the data object, emitting a signal on failure\"\n try:\n file = data.filenames[num]\n if file == data.curFile:\n return True\n data.read_matrix(file)\n except (RuntimeError, FileNotFoundError) as e:\n self.loadFailed.emit(file, str(e), '')\n except Exception as e:\n self.loadFailed.emit(file, str(e), traceback.format_exc())\n else:\n self.fileLoaded.emit(num)\n return True\n return False\n\n def emitProgress(self, *pargs):\n \"Combined progress signal and check for user interruption\"\n if self.halt:\n raise InterruptedError('interrupted by user')\n self.progress.emit(*pargs)\n\n def loadReference(self, data, ref, otherref='', percentile=50):\n \"\"\"\n Load an RMieSC reference spectrum, at wavenumbers that match the\n currently loaded file.\n \"\"\"\n if ref == 'Mean':\n return data.raw.mean(0)\n elif ref == 'Percentile':\n return np.percentile(data.raw, percentile, axis=0)\n elif ref == 'Other':\n if otherref == '':\n raise RuntimeError('Specify a reference spectrum file')\n return correction.load_reference(data.wavenumber,\n matfilename=otherref)\n else:\n return correction.load_reference(data.wavenumber, what=ref.lower())\n\n def callACandSC(self, data, params, wn, y):\n \"\"\"\n Run atmospheric correction and/or CRMieSC and/or mIRage correction\n \"\"\"\n if params.mcDo:\n self.emitProgress(-5, 100)\n y = ptir.normalize_mirage(wn, y,\n endpoints=params.mcEndpoints,\n slopefactor=params.mcSlopefactor)[0]\n\n if params.acDo:\n self.emitProgress(-1, 100)\n y = correction.atmospheric(wn, y, cut_co2=params.acSpline,\n extra_iters=5 if params.acLocal else 0,\n smooth_win=9 if params.acSmooth else 0,\n atm=params.acReference,\n progressCallback=self.emitProgress)[0]\n\n if params.scDo:\n self.emitProgress(-2, 100)\n ref = self.loadReference(data, 
params.scRef,\n otherref=params.scOtherRef,\n percentile=params.scRefPercentile)\n yold = y\n clust = params.scClusters * (-1 if params.scStable else 1) if \\\n params.scClustering else 0\n# print(params.scAmin, params.scAmax, params.scResolution)\n modelparams=dict(\n n_components=params.scPCAMax if \\\n params.scPCADynamic else params.scPCA,\n variancelimit=params.scPCAVariance*.01 if \\\n params.scPCADynamic else 0,\n a=np.linspace(params.scAmin, params.scAmax, params.scResolution),\n d=np.linspace(params.scDmin, params.scDmax, params.scResolution),\n bvals=params.scResolution,\n model=params.scAlgorithm,\n constantcomponent=params.scConstant,\n linearcomponent=params.scLinear)\n y = correction.rmiesc(\n wn, y, ref,\n iterations=params.scIters,\n clusters=clust,\n modelparams=modelparams,\n weighted=False,\n autoiterations=params.scAutoIters,\n targetrelresiduals=1-params.scMinImprov*.01,\n zeroregionpenalty=params.scPenalizeLambda if\n params.scPenalize else None,\n prefit_reference=params.scPrefitReference,\n verbose=True,\n progressCallback=self.emitProgress,\n progressPlotCallback=self.progressPlot.emit)\n self.done.emit(wn, yold, y)\n\n return y\n\n def callSGFandSRandBC(self, params, wn, y):\n \"\"\"\n Run three more preprocessing steps\n \"\"\"\n if params.sgfDo:\n self.emitProgress(-3, 100)\n y = scipy.signal.savgol_filter(\n y, params.sgfWindow, params.sgfOrder, axis=1)\n\n if params.srDo:\n a = len(wn) - wn[::-1].searchsorted(params.srMax, 'right')\n b = len(wn) - wn[::-1].searchsorted(params.srMin, 'left')\n wn = wn[a:b]\n y = y[:, a:b]\n\n if params.bcDo:\n self.emitProgress(-4, 100)\n if params.bcMethod == 'rubberband':\n y -= baseline.rubberband(\n wn, y, progressCallback=self.emitProgress)\n elif params.bcMethod == 'concaverubberband':\n y -= baseline.concaverubberband(\n wn, y, iters=params.bcIters,\n progressCallback=self.emitProgress)\n elif params.bcMethod == 'asls':\n y -= baseline.asls(\n y, lam=params.bcLambda, p=params.bcP,\n progressCallback=self.emitProgress)\n elif params.bcMethod == 'arpls':\n y -= baseline.arpls(\n y, lam=params.bcLambdaArpls,\n progressCallback=self.emitProgress)\n else:\n raise ValueError('unknown baseline correction method '+str(params.bcMethod))\n\n return wn, y\n\n @pyqtSlot(SpectralData, dict)\n def rmiesc(self, data, params):\n \"\"\" Run RMieSC, possibly preceded by atmospheric correction, on\n all or a subset of the raw data.\n Parameters:\n data: SpectralData object with raw data\n params: dictionary, mostly with things from PrepParameters (see code)\n \"\"\"\n try:\n params['scDo'] = True\n params = namedtuple('rmiescParams', params.keys())(*params.values())\n if params.selected is not None:\n y = data.raw[params.selected]\n else:\n y = data.raw\n self.callACandSC(data, params, data.wavenumber, y)\n\n except InterruptedError:\n self.stopped.emit()\n except Exception as e:\n traceback.print_exc()\n self.failed.emit(repr(e), traceback.format_exc())\n\n\n def saveCorrected(self, outfile, fmt, data, wn, y):\n if fmt == 'Quasar.mat':\n out = {'y': y, 'wavenumber': wn}\n if data.pixelxy is not None:\n map_x = np.array([x for (x,y) in data.pixelxy])\n map_y = np.array([y for (x,y) in data.pixelxy])\n else:\n map_x = np.tile(data.wh[0], data.wh[1])\n map_y = np.repeat(range(data.wh[0]), data.wh[1])\n out['map_x'] = map_x[:, None]\n out['map_y'] = map_y[:, None]\n scipy.io.savemat(outfile, out)\n else:\n ab = np.hstack((wn[:, None], y.T))\n scipy.io.savemat(outfile, {'AB': ab, 'wh': data.wh } )\n\n @pyqtSlot(SpectralData, PrepParameters, 
str, bool)\n def bigBatch(self, data, params, folder, preservepath):\n \"\"\"\n Run the batch processing of all the files listed in 'data'\n Parameters:\n data: SpectralData object with one or more files\n params: PrepParameters object from the user\n folder: output directory\n preservepath: if True, all processed files whose paths are under\n data.foldername will be placed in the corresponding subdirectory\n of the output directory.\n \"\"\"\n try:\n for fi in range(len(data.filenames)):\n self.batchProgress.emit(fi, len(data.filenames))\n if not self.loadFile(data, fi):\n continue\n\n wn = data.wavenumber\n y = data.raw\n\n y = self.callACandSC(data, params, wn, y)\n wn, y = self.callSGFandSRandBC(params, wn, y)\n\n if params.normDo:\n y = normalization.normalize_spectra(\n params.normMethod, y, wn, wavenum=params.normWavenum)\n\n # Figure out where to save the file\n filename = data.curFile\n if preservepath and filename.startswith(data.foldername):\n filename = filename[len(data.foldername):]\n filename = folder + filename\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n else:\n filename = os.path.join(folder, os.path.basename(filename))\n # Add the extension\n filename = os.path.splitext(filename)\n# filename = filename[0] + params.saveExt + filename[1]\n filename = filename[0] + params.saveExt + '.mat'\n\n self.saveCorrected(filename, params.saveFormat, data, wn, y)\n self.batchDone.emit(True)\n return\n\n except InterruptedError:\n self.stopped.emit()\n except Exception as e:\n traceback.print_exc()\n self.failed.emit(repr(e), traceback.format_exc())\n self.batchDone.emit(False)\n\n\n @pyqtSlot(SpectralData, PrepParameters, str)\n def createReference(self, data, params, outfile):\n \"\"\"\n Create reference spectrum from all the files listed in 'data', using\n a subset of the processing steps.\n Parameters:\n data: SpectralData object with one or more files\n params: PrepParameters object from the user\n outfile: name of output file\n \"\"\"\n params.scDo = False\n params.srDo = False\n wns = []\n ys = []\n try:\n for fi in range(len(data.filenames)):\n self.batchProgress.emit(fi, len(data.filenames))\n if not self.loadFile(data, fi):\n continue\n\n wn = data.wavenumber\n y = data.raw\n\n if params.scRef == 'Percentile':\n y = np.percentile(y, params.scRefPercentile, axis=0)[None, :]\n else:\n y = y.mean(0)[None, :]\n\n y = self.callACandSC(data, params, wn, y)\n wn, y = self.callSGFandSRandBC(params, wn, y)\n if params.normDo:\n y = normalization.normalize_spectra(\n params.normMethod, y, wn,\n wavenum=params.normWavenum)\n wns.append(wn)\n ys.append(y[0])\n\n # Do all images have the same wavenumbers?\n if all(np.array_equal(v, wns[0]) for v in wns):\n ys = np.median(ys, axis=0)\n else:\n w1 = min(v.min() for v in wns)\n w2 = max(v.max() for v in wns)\n maxres = max((len(v) - 1) / (v.max() - v.min()) for v in wns)\n wn = np.linspace(w1, w2, num=maxres * (w2 - w1) + 1)\n interpol = interp1d(np.concatenate(wns), np.concatenate(ys))\n ys = interpol(wn)\n\n ab = np.hstack((wn[:, None], ys[:, None]))\n scipy.io.savemat(outfile, {'AB': ab } )\n self.batchDone.emit(True)\n return\n\n except InterruptedError:\n self.stopped.emit()\n except Exception as e:\n traceback.print_exc()\n self.failed.emit(repr(e), traceback.format_exc())\n self.batchDone.emit(False)\n\n\nclass ABCWorker(QObject):\n \"\"\"\n A smaller worker thread class for atmospheric and baseline correction only.\n \"\"\"\n acDone = pyqtSignal(np.ndarray, np.ndarray, np.ndarray, np.ndarray)\n acFailed = 
pyqtSignal(str)\n bcDone = pyqtSignal(np.ndarray, np.ndarray, np.ndarray)\n bcFailed = pyqtSignal(str)\n\n def __init__(self, parent=None):\n QObject.__init__(self, parent)\n self.haltBC = False\n\n @pyqtSlot(np.ndarray, np.ndarray, dict)\n def ac(self, wn, y, params):\n \"\"\"\n Run baseline correction, emitting the processed data\n \"\"\"\n try:\n corr, factors = correction.atmospheric(\n wn, y, cut_co2=params['cut_co2'],\n extra_iters=5 if params['extra'] else 0,\n smooth_win=9 if params['smooth'] else 0,\n atm=params['ref'])\n self.acDone.emit(wn, y, corr, factors)\n except Exception:\n self.acFailed.emit(traceback.format_exc())\n\n def checkHaltBC(self, a, b):\n if self.haltBC:\n raise InterruptedError('interrupted by user')\n\n @pyqtSlot(np.ndarray, np.ndarray, str, dict)\n def bc(self, wn, y, method, params):\n try:\n self.checkHaltBC(0, 1)\n if method in {'rubberband', 'concaverubberband'}:\n corr = getattr(baseline, method)(wn, y, **params,\n progressCallback=self.checkHaltBC)\n else:\n corr = getattr(baseline, method)(y, **params,\n progressCallback=self.checkHaltBC)\n self.bcDone.emit(wn, y, corr)\n except InterruptedError:\n self.bcFailed.emit('')\n except Exception:\n self.bcFailed.emit(traceback.format_exc())\n\n","sub_path":"octavvs/prep/prepworker.py","file_name":"prepworker.py","file_ext":"py","file_size_in_byte":15150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"86582287","text":"from django.test import TestCase\n\nfrom binder import models, helpers\n\nclass HelperTests(TestCase):\n def test_ipinfo_ResolutionFail(self):\n response = helpers.ip_info(\"foobar.doesnotexist.local\")\n self.assertEqual([['Error', u'Unable to resolve foobar.doesnotexist.local: [Errno -2] Name or service not known']],\n response)\n # The following is currently the first globally unique IPv4 and IPv6 address I could find\n # that did not change based upon your geography.\n # http://test-ipv6.com/\n response = helpers.ip_info(\"ds.test-ipv6.com\")\n self.assertEqual([['IPv4 (1)', u'216.218.228.114'], ['IPv6 (1)', u'2001:470:1:18::2']],\n response)\n\n","sub_path":"binder/tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"65605351","text":"def solution(triangle):\n sumriangle = [[0]*len(line) for line in triangle]\n sumriangle[-1] = triangle[-1]\n\n for i in reversed(range(len(sumriangle)-1)):\n for j in range(i+1):\n sumriangle[i][j] = triangle[i][j] + max(sumriangle[i + 1][j], sumriangle[i + 1][j + 1])\n\n return sumriangle[0][0]\n\n\nif __name__ == '__main__':\n triangle = [[7], [3, 8], [8, 1, 0], [2, 7, 4, 4], [4, 5, 2, 6, 5]]\n print(solution(triangle))","sub_path":"python/dynamic_programming/03_triangle.py","file_name":"03_triangle.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"200992075","text":"import logging\nimport time\n\n\ndef log(level='info', title='log', message='logout'):\n # 创建一个logger\n logger = logging.getLogger('[{}]'.format(title))\n\n logger.setLevel(logging.DEBUG)\n\n # 创建一个handler,用于写入日志文件\n log_name = time.strftime('%Y-%m-%d', time.localtime(time.time())) # 日志名\n fh = logging.FileHandler('{}.log'.format(log_name), encoding='utf-8') # 文件日志\n\n # 定义handler的输出格式\n formatter = logging.Formatter(\n '%(asctime)+s %(name)+s %(levelname)+s %(message)+s')\n fh.setFormatter(formatter)\n\n # 给 logger 添加 handler\n logger.addHandler(fh)\n\n # 写入日志\n if level == 'debug':\n logger.debug(message)\n elif level == 'warning':\n logger.warning(message)\n elif level == 'error':\n logger.error(message)\n else:\n logger.error(message)\n\n # 添加下面一句,在记录日志之后移除句柄.\n # 这句是必须要加的,如果不加,会重复写log\n logger.removeHandler(fh)\n\n\nif __name__ == '__main__':\n log(level='debug', title='recv', message='json文件的数据数量有问题')\n log('warning', 'recv', '%s' % ('json文件的数据数量有问题',))\n log('error', 'recv', '%s' % ('json文件的数据数量有问题',))\n log('error', 'recv', '%s' % 'json文件的数据数量有问题',)\n","sub_path":"内置方法/log/use_log.py","file_name":"use_log.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"387354658","text":"from datetime import datetime\n\nfrom flask import request, g\nfrom flask import session\n\nfrom Ihome.models import Area, House, Facility, HouseImage, User, Order\nfrom Ihome.utils import constants\nfrom Ihome.utils.commons import login_required\nfrom Ihome.utils.image_storage import storage\nfrom Ihome.utils.response_code import RET\nfrom . import api\nfrom flask import jsonify, current_app, json\nfrom Ihome import redis_store, db\n\n\n@api.route(\"/areas\")\ndef get_area_info():\n \"\"\"获取城区信息\"\"\"\n try:\n # 先查询redis缓存,如果没有再从数据库查询\n area_li = redis_store.get(\"area_info\")\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据库查询出错\")\n\n if area_li:\n # 用日志记录一下命中redis缓存\n current_app.logger.error(\"成功击中redis缓存\")\n return area_li, 200, {\"Content-Type\": \"application/json\"}\n\n # 查询数据\n try:\n area_list = Area.query.all()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据库错误\")\n\n # 把数据集对象转化为单个的字典对象\n area_dict_list = []\n for area in area_list:\n area_dict_list.append(area.to_dict())\n\n response_dict = dict(errno=RET.OK, errmsg=\"OK\", data=area_dict_list)\n response_json = json.dumps(response_dict)\n # 把数据保存到redis缓存中\n try:\n redis_store.setex(\"area_info\", constants.AREA_INFO_TIME, response_json)\n except Exception as e:\n current_app.logger.error(e)\n\n return response_json, 200, {\"Content-Type\": \"application/json\"}\n\n\n@api.route(\"/houses/info\", methods=[\"POST\"])\n@login_required\ndef save_house_info():\n \"\"\"保存房源基本信息\"\"\"\n # 获取数据\n\n user_id = g.user_id\n house_data = request.get_json()\n\n title = house_data.get(\"title\") # 房屋名称标题\n price = house_data.get(\"price\") # 房屋单价\n area_id = house_data.get(\"area_id\") # 房屋所属城区的编号\n address = house_data.get(\"address\") # 房屋地址\n room_count = house_data.get(\"room_count\") # 房屋包含的房间数目\n acreage = house_data.get(\"acreage\") # 房屋面积\n unit = house_data.get(\"unit\") # 房屋布局(几室几厅)\n capacity = house_data.get(\"capacity\") # 房屋容纳人数\n beds = house_data.get(\"beds\") # 房屋卧床数目\n deposit = house_data.get(\"deposit\") # 押金\n min_days = house_data.get(\"min_days\") # 最小入住天数\n max_days = house_data.get(\"max_days\") # 最大入住天数\n\n # 检验参数\n if not all([title,price,area_id,address,room_count,acreage,unit,\n capacity,beds,deposit,min_days,max_days]):\n\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数不全\")\n\n # 判断金额是否正确\n try:\n price = int(float(price) * 100)\n deposit = int(float(deposit)*100)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n\n # 判断城区id是否存在\n try:\n area = Area.query.get(area_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据库异常\")\n\n if area is None:\n return jsonify(errno=RET.NODATA, errmsg=\"数据不存在\")\n\n # 保存房屋信息\n house = House(\n user_id=user_id,\n area_id=area_id,\n title=title,\n price=price,\n address=address,\n room_count=room_count,\n acreage=acreage,\n unit=unit,\n capacity=capacity,\n beds=beds,\n deposit=deposit,\n min_days=min_days,\n max_days=max_days\n )\n # try:\n # db.session.add(house)\n # except Exception as e:\n # current_app.logger.error(e)\n # return jsonify(errno=RET.DBERR, errmsg=\"数据保存错误\")\n #\n # 处理房屋设施\n facility_ids = house_data.get(\"facilities\")\n\n if facility_ids:\n try:\n facilities = Facility.query.filter(Facility.id.in_(facility_ids)).all()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, 
errmsg=\"数据库查询异常\")\n\n # 表示填写的设施是正确的存在的并保存数据!\n if facilities:\n house.facilities = facilities\n\n try:\n db.session.add(house)\n db.session.commit()\n except Exception as e:\n # 事务回滚\n db.session.rollback()\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据保存错误\")\n\n # 保存数据成功\n return jsonify(errno=RET.OK, errmsg=\"OK\", data={\"house_id\": house.id})\n\n\n@api.route(\"/houses/image\", methods=[\"POST\"])\n@login_required\ndef save_house_image():\n \"\"\"保存房屋的图片\n 参数 图片 房屋的id\n \"\"\"\n image_file = request.files.get(\"house_image\")\n # 多媒体表单传参\n house_id = request.form.get(\"house_id\")\n\n if not all([image_file, house_id]):\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n\n # 判断house_id正确性\n try:\n house = House.query.get(house_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据库异常\")\n\n if house is None: # if not house:\n return jsonify(errno=RET.NODATA, errmsg=\"房屋不存在\")\n\n image_data = image_file.read()\n # 保存图片到七牛中\n try:\n file_name = storage(image_data)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.THIRDERR, errmsg=\"保存图片失败\")\n\n # 保存图片信息到数据库中\n house_image = HouseImage(house_id=house_id, url=file_name)\n db.session.add(house_image)\n\n # 处理房屋的主图片\n if not house.index_image_url:\n\n\n house.index_image_url = file_name\n db.session.add(house)\n\n try:\n db.session.commit()\n except Exception as e:\n current_app.logger.error(e)\n db.session.rollback()\n return jsonify(errno=RET.DBERR, errmsg=\"保存图片数据异常\")\n\n image_url = constants.QI_NIU_URL + file_name\n\n return jsonify(errno=RET.OK, errmsg=\"OK\", data={\"image_url\": image_url})\n\n\n@api.route(\"/user/houses\", methods=[\"GET\"])\n@login_required\ndef get_user_home():\n \"\"\"获取房东的房屋列表\"\"\"\n user_id = g.user_id\n\n # 从数据库中获取用户的房屋列表数据\n try:\n # user = User.query.filter_by(id=user_id).first()\n user = User.query.get(user_id)\n print(user)\n houses = user.houses\n print(houses)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据库查询错误\")\n\n # 把数据对象转换成字典格式的列表数据\n houses_list = []\n if houses:\n for house in houses:\n houses_list.append(house.to_dict())\n\n # 转成json格式 如: \"data\":[{},{},{}]\n print(houses_list)\n house_data = json.dumps(houses_list)\n print(house_data)\n\n # return '{\"errno\": \"0\", \"errmsg\": \"OK\", \"data\": \"%s\"}' % house_data, 200, {\"Content-Type\": \"application/json\"}\n return jsonify(errno=RET.OK, errmsg=\"OK\", data={\"houses\": house_data})\n\n\n@api.route(\"/houses/index\", methods=[\"GET\"])\ndef get_home_index():\n \"\"\"\n 获取首页图片展示\n 参数:index_image_url, \n \"\"\"\n # 从缓存中获取数据\n try:\n image_list = redis_store.get(\"home_page_image\")\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据获取错误\")\n\n if image_list:\n return '{\"errno\": 0, \"errmsg\": \"OK\", \"data\": %s}' % image_list, 200, {\"Content-Type\": \"application/json\"}\n\n # 缓存理没有数据,从数据库里获取\n try:\n # 以订单量最多房屋图片来作为首页图片推荐\n home_list = House.query.order_by(House.order_count.desc()).limit(constants.home_page_max)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据库查询错误\")\n\n if home_list is None:\n return jsonify(errno=RET.NODATA, errmsg=\"无数据\")\n\n # 把图片对象转换成一个个字典数据\n home_json_list = []\n for home in home_list:\n # 如果该房子没有设置过主页图片则跳过\n if home.index_image_url is None:\n continue\n home_json_list.append(home.to_dict())\n\n # 把字典数据转换成json格式\n home_json_data = 
json.dumps(home_json_list)\n\n # 把数据保存到redis缓存中\n try:\n redis_store.setex(\"home_page_image\", constants.HOME_PAGE_CACHE_TIME, home_json_data)\n except Exception as e:\n current_app.logger.error(e)\n\n return '{\"errno:\"0\", \"errmsg\":\"ok\", \"data\": %s}' % home_json_data, 200, \\\n {\"Content-Type\": \"application/json\"}\n\n\n# --------------------------------------------------------------------------------------------------------------------\n@api.route(\"/houses/\", methods=[\"GET\"])\ndef get_house_detail(house_id):\n \"\"\"获取房屋详情\"\"\"\n # 前端在房屋详情页面展示时,如果浏览页面的用户不是该房屋的房东,则展示预定按钮,否则不展示,\n # 所以需要后端返回登录用户的user_id\n # 尝试获取用户登录的信息,若登录,则返回给前端登录用户的user_id,否则返回user_id=-1\n user_id = session.get(\"user_id\", \"-1\")\n\n # 校验参数\n if not house_id:\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数缺失\")\n\n # 先从redis缓存中获取信息\n try:\n ret = redis_store.get(\"house_info_%s\" % house_id)\n except Exception as e:\n current_app.logger.error(e)\n ret = None\n if ret:\n current_app.logger.info(\"击中缓存\")\n return '{\"errno\":\"0\", \"errmsg\":\"OK\", \"data\":{\"user_id\":%s, \"house\":%s}}' % (user_id, ret), \\\n 200, {\"Content-Type\": \"application/json\"}\n\n # 查询数据库\n try:\n house = House.query.get(house_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"查询数据失败\")\n\n if not house:\n return jsonify(errno=RET.NODATA, errmsg=\"房屋不存在\")\n\n # 将房屋对象数据转换为字典\n try:\n house_data = house.to_full_dict()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DATAERR, errmsg=\"数据出错\")\n\n # 存入到redis中\n json_house = json.dumps(house_data)\n try:\n redis_store.setex(\"house_info_%s\" % house_id, constants.HOUSE_LIST_CACHE_DATA_TIME, json_house)\n except Exception as e:\n current_app.logger.error(e)\n\n resp = '{\"errno\":\"0\", \"errmsg\":\"OK\", \"data\":{\"user_id\":%s, \"house\":%s}}' % (user_id, json_house), \\\n 200, {\"Content-Type\": \"application/json\"}\n return resp\n\n\n# GET /api/v1.0/houses/houses?sd=xxxx&ed=xxxxx&aid=xxxx&sk=new&p=1\n@api.route(\"/houses\")\ndef get_house_list():\n \"\"\"获取搜索的房屋列表数据\"\"\"\n # 获取参数\n start_date = request.args.get(\"sd\")\n end_date = request.args.get(\"ed\")\n area_id = request.args.get(\"aid\")\n sort_key = request.args.get(\"sk\", \"new\")\n page = request.args.get(\"p\", \"1\")\n\n # 校验参数\n try:\n\n if start_date:\n datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if end_date:\n datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if start_date and end_date:\n assert start_date <= end_date\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.PARAMERR, errmsg=\"日期参数错误\")\n\n if area_id:\n try:\n area = Area.query.get(area_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据查询错误\")\n\n try:\n page = int(page)\n except Exception as e:\n current_app.logger.error(e)\n page = 1\n\n # 尝试从缓存中获取数据\n redis_key = \"house_%s_%s_%s_%s\" % (start_date, end_date, area_id, sort_key)\n\n try:\n response_json = redis_store.hget(redis_key, page)\n except Exception as e:\n current_app.logger.error(e)\n\n else:\n if response_json:\n return response_json, 200, {\"Content-Type\": \"application\"}\n\n\n # 过滤时间条件\n # 设置过滤条件的容器\n filter_params_list = []\n # 设置冲突的订单为None\n conflict_order = None\n\n try:\n if start_date and end_date:\n conflict_order = Order.query.filter(end_date >= Order.begin_date, start_date <= Order.end_date).all()\n\n elif start_date:\n conflict_order = Order.query.filter(start_date <= Order.end_date).all()\n\n elif 
end_date:\n conflict_order = Order.query.filter(end_date >= Order.begin_date).all()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"数据查询错误\")\n\n if conflict_order:\n # 获取冲突的房屋id\n conflict_houseids = [order.house_id for order in conflict_order]\n\n if conflict_houseids:\n # 获取可预定的房屋id\n houseids = House.query.filter(House.id.notin_(conflict_houseids))\n filter_params_list.append(houseids)\n\n # 地区条件 过滤不正确的地区\n if area_id:\n filter_params_list.append(House.area_id == area_id)\n\n # 先通过过滤条件得到想要的数据再进行各种方式排序\n # 各种条件进行排序\n if sort_key == \"booking\":\n houseids = House.query.filter(*filter_params_list).order_by(House.order_count.desc())\n\n elif sort_key == \"price-inc\":\n houseids = House.query.filter(*filter_params_list).order_by(House.price.asc())\n\n elif sort_key == \"price-des\":\n houseids = House.query.filter(*filter_params_list).order_by(House.price.desc())\n\n else:\n houseids = House.query.filter(*filter_params_list).order_by(House.create_time.desc())\n\n # 进行分页处理\n # paginate(page=想要返回第几页 默认第一页, per_page=按多少数据进行分页,默认20条, error_out=查询的页面超出总页数是否要抛出异常, 默认True )\n try:\n # 得到一个分页后的数据对象\n page_object = houseids.paginate(page=page, per_page=constants.HOUSE_PAGE_MAX_NUMBER, error_out=False)\n\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.UNKOWNERR, errmsg=\"分页出现异常\")\n\n # 获取每一页的数据\n house_list = page_object.items\n houses = []\n\n for house in house_list:\n houses.append(house.to_det_dict())\n\n # 获取总页数\n total_pages = page_object.pages\n\n response_dict = dict(errno=RET.OK, errmsg=\"OK\", data={\"total_page\": total_pages,\n \"houses\": houses, \"current_page\": page})\n\n response_json = json.dumps(response_dict)\n\n # 设置redis缓存数据,以查询日期+地区id+排序作为key\n if page <= total_pages:\n redis_key = \"house_%s_%s_%s_%s\" % (start_date, end_date, area_id, sort_key)\n\n try:\n # 获取管道对象, 可以存储多个执行命令\n pipeline = redis_store.pipeline()\n # 开启事务\n pipeline.multi()\n # 加入缓存\n pipeline.hset(redis_key, page, response_json)\n # 设置有效期\n pipeline.expire(redis_key, constants.HOUSE_LIST_CACHE_DATA_TIME)\n\n pipeline.execute()\n\n except Exception as e:\n current_app.logger.error(e)\n\n return response_json, 200, {\"Content-Type\": \"application/json\"}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Ihome/api_1_0/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":15913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"650275178","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 28 08:52:48 2015\n调整向辰的top_match和sim_person,添加美赛国赛获奖信息\nsim_person修改:缺失值不比较\ntop_match修改:\n@author: Aurora\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats.stats import pearsonr\n\n\n#----------------------rm function------------- \n\ndef sim_pearson(data,s1,s2):\n #s1 和 s2是两个人的编号,提取需要的数据\n p1=data[data.index==s1]\n p2=data[data.index==s2]\n #去掉Final这一列\n comparison=data.columns.drop('Final')\n #比较对象为p1和p2\n p1=p1[comparison]#相当于删除Final\n p2=p2[comparison] \n #权重向量单位化\n# W=np.array([w[k] for k in p1.columns])/np.array([w[k] for k in p1.columns]).sum()\n #归一化,W的各元素之和为1\n# l1=p1.values.astype(float)[0]*W#加权\n# l2=p2.values.astype(float)[0]*W\n l1=p1.values.astype(float)[0].tolist()#变成list\n l2=p2.values.astype(float)[0].tolist()\n col_nan_check = (np.isnan(l1)|np.isnan(l2)).tolist()\n col_nan = index_more(col_nan_check,True)\n np.delete(l1,col_nan,0)\n np.delete(l2,col_nan,0)\n if len(l1) < 1: #没有任何标签可以相比较\n r = 0\n else:\n r=pearsonr(l1,l2)[0]#计算pearson相关性\n return r\n\n#返回相关性最高的n个学校\ndef top_match(data,person,n=5,similarity=sim_pearson):\n l=data.index\n score=[(similarity(data,person,other),other) for other in l if other!=person]#计算出自己外的和别人的相关性\n score.sort()\n score.reverse()#按顺序排列\n school=[data[data.index==k[1]].Final.values[0] for k in score[0:n]]\n return school\n\ndef target(data,ran=20):\n #final school在算法推荐的学校列表中的位置\n #如果final school在算法推荐的学校列表中排名超出range,则不考虑\n idx = data.index\n result=[top_match(data,person,ran) for person in data.index]\n cnt1 = 0#预测排位累加\n cnt2 = 0 #预测失败\n i = 0\n for row in result:\n final_cur = data['Final'].loc[idx[i]]\n if(row.count(final_cur)>0):\n cnt1 = cnt1 + row.count(final_cur)\n else:\n cnt2 = cnt2 + 1\n i = i + 1\n return cnt1, cnt2\n \ndef target_weight(data,weight_array,ran=20):\n weight = weight_array\n weight2 = weight/sum(weight)\n weight2 = np.diagflat(weight2)\n col_exc_final = data.columns.drop('Final')\n data[col_exc_final] = data[col_exc_final].dot(weight2)\n res1, res2 = target(data,ran)\n return res1, res2\n\ndef mydecode(data_frame,code_gbk_or_utf8):\n #input is a dataframe, output is a dataframe\n data = data_frame\n code = code_gbk_or_utf8\n # by = by_col_or_row\n result = []\n for col_cur in data:\n data_cur = data[col_cur].values.tolist()\n result.append(map(lambda str: str.decode(code),data_cur))\n result_dataframe = pd.DataFrame(np.array(result).T,index = data.index,columns = data.columns)\n return result_dataframe\n\ndef discrete_label(lis,label_number_dictionary, unknown = 'nan'):\n #return a lsit\n #可以重写加快效率\n dic = label_number_dictionary\n key = dic.keys()\n res = []\n if unknown == 'nan' :\n unknown_dis = np.float(unknown)\n for i in range(len(lis)):\n for key_cur in key:\n if lis[i] == key_cur:\n res.append(dic[key_cur])\n else:\n res.append(unknown_dis)\n return res\n \ndef index_more(lis,element):\n res = []\n i = 0 \n total = lis.count(element)\n cnt = 0\n while cnt < total:\n if(lis.count(element)>0):\n index_cur = lis.index(element,i)\n res.append(index_cur)\n i = index_cur+1\n cnt = cnt + 1\n else:\n i = len(lis)\n return res\n \n\n\n#----------------------read the data-------------------------\ndef getdata(filename,variable_name=None):\n #missing value is nan\n data=pd.read_csv(filename,names=variable_name)\n return data.values #array\n \ndef load_data(filename):\n #missing value is ''\n\tnum_feat = len(open(filename).readline().split(','))\n\tdata_mat = [];\n\tfr = open(filename)\n\tfor line in 
fr.readlines():#readlines read all the lines in character\n\t\tline_arr = []\n\t\tcur_line = line.strip().split(',')\n #strip removes '\\n', split gets a char list\n\t\tfor i in range(num_feat):\n\t\t\tline_arr.append(unicode(cur_line[i],'gbk'))\n\t\tdata_mat.append(line_arr)\n\treturn data_mat\n\ndef removemore(a, element):\n for i in a:\n if i == element:\n a.remove(element)\n return a\n \n","sub_path":"rs_func.py","file_name":"rs_func.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"164712498","text":"import setuptools\n\nwith open('README.md', 'r') as f:\n long_description = f.read()\nwith open('requirements.txt', 'r') as f:\n requirements = f.read().strip('\\n').split('\\n')\n\npackage_data = {\n '': ['data/*'],\n }\n\nsetuptools.setup(\n name='planetary-spip',\n version='1.1.3',\n author='Aurélien Stcherbinine',\n author_email='aurelien.stcherbinine@nau.edu',\n description='Spacecraft Pixel footprint Projection',\n long_description=long_description,\n long_description_content_type='text/markdown',\n project_urls={\n 'Source' : 'https://github.com/NAU-PIXEL/spip',\n },\n packages=setuptools.find_packages(),\n package_data=package_data,\n python_requires='>=3.6',\n setup_requires=['wheel'],\n install_requires=requirements,\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Astronomy'\n ]\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"204163597","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport matplotlib.pyplot as plt\r\nimport torch.nn.functional as F\r\nfrom my_op import MLP\r\nfrom sklearn import metrics\r\nfrom load_corrupted_data import Adult_dv\r\n\r\n'''\r\n此函数的作用是按照v_list的权重分数,在训练集中去除budget比例的低价值(高价值数据用上面的排序)的数据后,\r\n用剩下的数据集训练一个模型再验证他的性能。\r\n性能\r\nv_list: 数据的权重分数列表\r\nbudget: 去除数据的比例,如20%\r\nx_train: 训练集的x\r\ny_train: 训练集的y\r\nargs: 各种参数\r\ntest_loader: 测试集\r\n'''\r\n\r\n\r\ndef data_deletion(v_list, args, x_train, y_train , valid_loader, test_loader):\r\n # plt.hist(v_list)\r\n # plt.show()\r\n\r\n # sort the weight from the lowest to the highest\r\n index_list = np.argsort(v_list).tolist()\r\n\r\n\r\n # ===================1.构建数据集================\r\n # build the dataset according to the compression_rate\r\n train_loader_dv = build_dataset_dv(x_train, y_train, index_list, args)\r\n\r\n # ===================2.模型及初始化=======================\r\n MLP_dv = MLP(108, 250, 250, classes=args.num_classes).cuda()\r\n for m in MLP_dv.modules():\r\n if isinstance(m, (nn.Conv2d, nn.Linear)):\r\n nn.init.xavier_uniform_(m.weight)\r\n\r\n # ===================3.优化=============================\r\n DV_optimizer_model = torch.optim.SGD(MLP_dv.parameters(), args.meta_lr,\r\n momentum=args.momentum, weight_decay=args.weight_decay)\r\n\r\n # ===================4.训练==============================\r\n best_acc = 0\r\n for epoch in range(args.dv_epochs):\r\n # adjust_learning_rate(DV_optimizer_model, epoch)\r\n train_dv(train_loader_dv, MLP_dv, DV_optimizer_model, epoch)\r\n valid_acc = test(meta_MLP=MLP_dv, test_loader=valid_loader)\r\n if valid_acc >= best_acc:\r\n best_acc = valid_acc\r\n torch.save(MLP_dv.state_dict(), \"./MLP_dv.pkl\")\r\n\r\n MLP_dv.load_state_dict(torch.load(\"./MLP_dv.pkl\"))\r\n test_acc = test(meta_MLP=MLP_dv, test_loader=test_loader)\r\n\r\n\r\n\r\ndef train_dv(train_loader, model, optimizer_model, epoch):\r\n print('\\nEpoch: %d' % epoch)\r\n\r\n for batch_idx, (inputs, targets) in enumerate(train_loader):\r\n model.train() # 启用 BatchNormalization 和 Dropout\r\n inputs, targets = inputs.float().cuda(), targets.cuda() # 将tensor变量copy一份到device所指定的GPU上去,之后的运算都在GPU上进行。\r\n outputs = model(inputs)\r\n targets = targets.long()\r\n loss = F.cross_entropy(outputs, targets, reduce=False)\r\n loss1 = torch.mean(loss)\r\n # prec_train = accuracy(outputs.data, targets.data)\r\n\r\n optimizer_model.zero_grad()\r\n loss1.backward()\r\n optimizer_model.step()\r\n print(\"loss:\",loss1.data)\r\n return loss\r\n # if (batch_idx + 1) % 50 == 0:\r\n # print('Epoch: [%d/%d]\\t'\r\n # 'Iters: [%d/%d]\\t'\r\n # 'Loss: %.4f\\t'\r\n # 'Prec@1 %.2f\\t' % (\r\n # (epoch + 1), epochs, batch_idx + 1, len(train_loader.dataset) / args.batch_size,\r\n # (loss / (batch_idx + 1)), prec_train))\r\n\r\n\r\ndef build_dataset_dv(x_data, y_data, index_list, args):\r\n '''\r\n function: build the train dataset according to the compression_rate\r\n '''\r\n train_data_dv = Adult_dv(x_data, y_data, index_list, args.compression_rate)\r\n train_loader_dv = torch.utils.data.DataLoader(\r\n train_data_dv, batch_size=args.batch_size, shuffle=True,\r\n num_workers=args.prefetch, pin_memory=True)\r\n\r\n return train_loader_dv\r\n\r\n\r\ndef accuracy(outputs, targets):\r\n output = np.array(outputs.cpu())\r\n target = np.array(targets.cpu())\r\n tmp = []\r\n for a in output:\r\n if a[0] < 0.5:\r\n tmp.append(1)\r\n else:\r\n tmp.append(0)\r\n\r\n return metrics.accuracy_score(np.array(tmp), target)\r\n\r\n\r\ndef 
test(meta_MLP, test_loader):\r\n meta_MLP.eval()\r\n correct = 0\r\n test_loss = 0\r\n\r\n with torch.no_grad():\r\n for batch_idx, (inputs, targets) in enumerate(test_loader):\r\n inputs, targets = inputs.float().cuda(), targets.cuda()\r\n outputs = meta_MLP(inputs)\r\n test_loss += F.cross_entropy(outputs, targets).item()\r\n _, predicted = outputs.max(1)\r\n correct += predicted.eq(targets).sum().item()\r\n\r\n test_loss /= len(test_loader.dataset)\r\n accuracy = 100. * correct / len(test_loader.dataset)\r\n\r\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\r\n test_loss, correct, len(test_loader.dataset),\r\n accuracy))\r\n\r\n return accuracy\r\n","sub_path":"expts/meta-weight-net-master-adult - 副本/data_deletion_budget.py","file_name":"data_deletion_budget.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"388827484","text":"#! /usr/bin/env python3\n#\n# ./cdel.py canvas_course_page_url\n# \n# delete a new Canvas course page with a given URL\n#\n#\n# Example:\n# cdel.py https://kth.instructure.com/courses/11/pages/test-3\n#\n# deletes the page https://kth.instructure.com/courses/11/pages/test-3\n#\n# G. Q: Maguire Jr.\n#\n# 2016.07.19\n#\n\nimport csv, requests, time\nfrom pprint import pprint\nimport optparse\nimport sys\n\nfrom lxml import html\n\nimport json\n#############################\n###### EDIT THIS STUFF ######\n#############################\n\n# styled based upon https://martin-thoma.com/configuration-files-in-python/\nwith open('config.json') as json_data_file:\n configuration = json.load(json_data_file)\n canvas = configuration['canvas']\n access_token= canvas[\"access_token\"]\n # access_token=configuration[\"canvas\"][\"access_token\"]\n #baseUrl = 'https://kth.instructure.com/api/v1/courses/' # changed to KTH domain\n baseUrl = 'https://%s/api/v1/courses/' % canvas.get('host', 'kth.instructure.com')\n header = {'Authorization' : 'Bearer ' + access_token}\n\n\n\n#modules_csv = 'modules.csv' # name of file storing module names\nlog_file = 'log.txt' # a log file. it will log things\n\n\ndef write_to_log(message):\n with open(log_file, 'a') as log:\n log.write(message + \"\\n\")\n pprint(message)\n\n\ndef details_of_external_tools_for_course(course_id, external_tool_id):\n # Use the Canvas API to GET the tool's detailed information\n # GET /api/v1/courses/:course_id/external_tools/:external_tool_id\n # GET /api/v1/accounts/:account_id/external_tools/:external_tool_id\n\n url = baseUrl + '%s/external_tools/%s' % (course_id, external_tool_id)\n if Verbose_Flag:\n print(url)\n payload={}\n r = requests.get(url, headers = header, data=payload)\n if r.status_code == requests.codes.ok:\n tool_response = r.json() \n pprint(tool_response)\n return tool_response\n else:\n print(\"No details for tool_id {1} for course_id: {2}\".format(external_tool_id, course_id))\n return False\n\ndef list_external_tools_for_course(course_id):\n list_of_all_tools=[]\n # Use the Canvas API to get the list of external tools for this course\n # GET /api/v1/courses/:course_id/external_tools\n # GET /api/v1/accounts/:account_id/external_tools\n # GET /api/v1/groups/:group_id/external_tools\n\n url = baseUrl + '%s/external_tools' % (course_id)\n if Verbose_Flag:\n print(\"url: \" + url)\n\n r = requests.get(url, headers = header)\n if Verbose_Flag:\n write_to_log(\"result of getting list of external tools: \" + r.text)\n if r.status_code == requests.codes.ok:\n tool_response=r.json()\n else:\n print(\"No external tools for course_id: {}\".format(course_id))\n return False\n\n\n for t_response in tool_response: \n list_of_all_tools.append(t_response)\n\n # the following is needed when the reponse has been paginated\n # i.e., when the response is split into pieces - each returning only some of the list of modules\n # see \"Handling Pagination\" - Discussion created by tyler.clair@usu.edu on Apr 27, 2015, https://community.canvaslms.com/thread/1500\n while r.links['current']['url'] != r.links['last']['url']: \n r = requests.get(r.links['next']['url'], headers=header) \n tool_response = r.json() \n for t_response in tool_response: \n list_of_all_tools.append(t_response)\n\n for t in list_of_all_tools:\n print(\"about to prettyprint tool: {}\".format(t['name']))\n pprint(t)\n\n\n return list_of_all_tools\n\n\n# canvas_course_page_url will be of the form: 
https://kth.instructure.com/courses/11/pages/notes-20160716\ndef get_course_page(canvas_course_page_url):\n # Use the Canvas API to GET the page\n #GET /api/v1/courses/:course_id/pages/:url\n\n #extract course_id from URL\n course_id=canvas_course_page_url[canvas_course_page_url.find(\"courses/\")+8:canvas_course_page_url.find(\"pages/\")-1]\n if Verbose_Flag:\n print(\"course_id: {}\".format(course_id))\n\n #extract the file name portion of the URL\n page_url=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]\n if Verbose_Flag:\n print(\"page_url: {}\".format(page_url))\n\n new_file_name=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]+'.html'\n if Verbose_Flag:\n print(\"new_file_name: {}\".format(new_file_name))\n\n\n url = baseUrl + '%s/pages/%s' % (course_id, page_url)\n if Verbose_Flag:\n print(url)\n payload={}\n r = requests.get(url, headers = header, data=payload)\n if Verbose_Flag:\n print(\"r.status_code: {}\".format(r.status_code))\n if r.status_code == requests.codes.ok:\n page_response = r.json()\n\n # write out body of response as a .html page\n with open(new_file_name, 'wb') as f:\n encoded_output = bytes(page_response[\"body\"], 'UTF-8')\n f.write(encoded_output)\n return True\n\n else:\n print(\"No such page: {}\".format(canvas_course_page_url))\n return False\n return False\n\n# canvas_course_page_url will be of the form: https://kth.instructure.com/courses/11/pages/notes-20160716\ndef put_course_page(canvas_course_page_url):\n # Use the Canvas API to GET the page\n #GET /api/v1/courses/:course_id/pages/:url\n\n #extract course_id from URL\n course_id=canvas_course_page_url[canvas_course_page_url.find(\"courses/\")+8:canvas_course_page_url.find(\"pages/\")-1]\n if Verbose_Flag:\n print(\"course_id: {}\".format(course_id))\n\n #extract the file name portion of the URL\n page_url=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]\n if Verbose_Flag:\n print(\"page_url: {}\".format(page_url))\n\n new_file_name=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]+'.html'\n if Verbose_Flag:\n print(\"new_file_name: {}\".format(new_file_name))\n\n # read .html page\n with open(new_file_name, 'rb') as f:\n file_input=f.read()\n \n url = baseUrl + '%s/pages/%s' % (course_id, page_url)\n if Verbose_Flag:\n print(url)\n payload={\"wiki_page[body]\": file_input}\n r = requests.put(url, headers = header, data=payload)\n if Verbose_Flag:\n print(\"r.status_code: {}\".format(r.status_code))\n if r.status_code == requests.codes.ok:\n page_response = r.json()\n pprint(page_response)\n else:\n print(\"No such page: {}\".format(canvas_course_page_url))\n return False\n return False\n\n# canvas_course_page_url will be of the form: https://kth.instructure.com/courses/11/pages/notes-20160716\ndef create_course_page(canvas_course_page_url, page_title):\n # Use the Canvas API to GET the page\n #GET /api/v1/courses/:course_id/pages/:url\n\n #extract course_id from URL\n course_id=canvas_course_page_url[canvas_course_page_url.find(\"courses/\")+8:canvas_course_page_url.find(\"pages/\")-1]\n if Verbose_Flag:\n print(\"course_id: {}\".format(course_id))\n\n #extract the file name portion of the URL\n page_url=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]\n if Verbose_Flag:\n print(\"page_url: {}\".format(page_url))\n\n new_file_name=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]+'.html'\n if Verbose_Flag:\n print(\"new_file_name: {}\".format(new_file_name))\n\n # read .html page\n with open(new_file_name, 'rb') as f:\n 
file_input=f.read()\n \n # note that you cannot provide the page_url for the page - as this page_url does not yet exist\n url = baseUrl + '%s/pages' % (course_id)\n if Verbose_Flag:\n print(url)\n payload={'wiki_page[title]': page_title, 'wiki_page[published]': False, \"wiki_page[body]\": file_input}\n r = requests.post(url, headers = header, data=payload)\n if Verbose_Flag:\n write_to_log(r.text) \n if Verbose_Flag:\n print(\"r.status_code: {}\".format(r.status_code))\n if r.status_code == requests.codes.ok:\n page_response = r.json()\n if Verbose_Flag:\n print(\"Created page\")\n pprint(page_response)\n return True\n elif r.status_code == requests.codes['unprocessable_entity']:\n # a list of all the status codes is at https://github.com/kennethreitz/requests/blob/master/requests/status_codes.py\n print(\"unprocessable_entity - probably because you specified a page_url, but this page does not yet exist\")\n print(\"Error when creating page: {}\".format(canvas_course_page_url))\n return False\n else:\n print(\"Error when creating page: {}\".format(canvas_course_page_url))\n return False\n return False\n\n# canvas_course_page_url will be of the form: https://kth.instructure.com/courses/11/pages/notes-20160716\ndef del_course_page(canvas_course_page_url):\n # Use the Canvas API to get the list of pages for this course\n # DELETE /api/v1/courses/:course_id/pages/:url\n\n #extract course_id from URL\n course_id=canvas_course_page_url[canvas_course_page_url.find(\"courses/\")+8:canvas_course_page_url.find(\"pages/\")-1]\n if Verbose_Flag:\n print(\"course_id: {}\".format(course_id))\n\n #extract the file name portion of the URL\n page_url=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]\n if Verbose_Flag:\n print(\"page_url: {}\".format(page_url))\n\n new_file_name=canvas_course_page_url[canvas_course_page_url.rfind(\"/\")+1:]+'.html'\n if Verbose_Flag:\n print(\"new_file_name: {}\".format(new_file_name))\n\n\n url = baseUrl + '%s/pages/%s' % (course_id, page_url)\n if Verbose_Flag:\n print(url)\n payload={}\n r = requests.delete(url, headers = header, data=payload)\n if Verbose_Flag:\n write_to_log(\"result of deleting page: \" + r.text)\n\n if Verbose_Flag:\n print(\"r.status_code: {}\".format(r.status_code))\n if r.status_code == requests.codes.ok:\n page_response = r.json()\n\n print(\"{} deleted\".format(canvas_course_page_url))\n return True\n else:\n print(\"error when deleteing page: {}\".format(canvas_course_page_url))\n return False\n return False\n\n\n\ndef main():\n global Verbose_Flag\n\n parser = optparse.OptionParser()\n\n parser.add_option('-v', '--verbose',\n dest=\"verbose\",\n default=False,\n action=\"store_true\",\n help=\"Print lots of output to stdout\"\n )\n\n options, remainder = parser.parse_args()\n\n Verbose_Flag=options.verbose\n if Verbose_Flag:\n print('ARGV :', sys.argv[1:])\n print('VERBOSE :', options.verbose)\n print('REMAINING :', remainder)\n\n # add time stamp to log file\n log_time = str(time.asctime(time.localtime(time.time())))\n if Verbose_Flag:\n write_to_log(log_time) \n\n if (len(remainder) < 1):\n print(\"Inusffient arguments\\n must provide url\\n\")\n else:\n canvas_course_page_url=remainder[0]\n\n output=del_course_page(canvas_course_page_url)\n if (output):\n if Verbose_Flag:\n pprint(output)\n\n # add time stamp to log file\n log_time = str(time.asctime(time.localtime(time.time())))\n if Verbose_Flag:\n write_to_log(log_time) \n write_to_log(\"\\n--DONE--\\n\\n\")\n\nif __name__ == \"__main__\": 
main()\n\n","sub_path":"connecting_silos_kththesis_TCOMK_CINTE/mysite/polls/library/Canvas-master/cdel.py","file_name":"cdel.py","file_ext":"py","file_size_in_byte":12184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"210676599","text":"import math\n\n\ndef compute():\n x = 1\n for i in range(1, 21):\n x *= i // math.gcd(i, x)\n return x\n # moje prvo rješenje!!! ne baš dobro!!!\n # while True:\n # if all(x % y == 0 for y in range(1, 11)):\n # break\n # else:\n # x += 1\n\n\nif __name__ == \"__main__\":\n print(compute())\n","sub_path":"Practice/project_euler/problem_0005.py","file_name":"problem_0005.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"400209963","text":"from flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\nveggies = [\n\t{ \n\t'Vegetable' :'carrot', \n\t'Quantity':15\n\t},\n{ \n\t'Vegetable' : 'onion', \n\t'Quantity': 17\n\t}\n]\n\n\n@app.route('/groceries', methods=['GET'])\ndef veg():\n return jsonify(veggies)\n\n@app.route('/groceries/', methods=['GET'])\ndef vegg(name):\n if request.method == 'GET':\n for veg in veggies:\n if name == veg['Vegetable']:\n return veg\n else:\n return \"Vegetable not found\"\n \n@app.route('/groceries', methods=['POST'])\ndef veggie():\n if request.method == 'POST':\n dic = {}\n dic['Vegetable'] = request.json['Vegetable']\n dic['Quantity'] = request.json['Quantity']\n veggies.append(dic)\n return request.json\n\n\n@app.route('/groceries/', methods=['DELETE'])\ndef vegdel(name):\n if request.method == 'DELETE':\n count=1\n for veg in veggies:\n if name == veg['Vegetable']:\n count = count+1\n break\n veggies.pop(count) \n return veg\n \n","sub_path":"veggie.py","file_name":"veggie.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"238924955","text":"\"\"\"\nauthor: Peter Huang, Antonio Cuni\nemail: hbd730@gmail.com, anto.cuni@gmail.com\nlicense: BSD\nPlease feel free to use and modify this, but keep the above information. Thanks!\n\"\"\"\n\nfrom rl_quad.utils.quadPlot import plot_quad_3d\n\nfrom rl_quad.conventional_control import lqr_controller as lqr\nfrom rl_quad.conventional_control import pid_controller as pid\nfrom rl_quad.conventional_control import df_controller as df1\nfrom rl_quad.conventional_control import df_controller_rotor_drag as df2\nfrom rl_quad.conventional_control import df_controller_rotor_drag_VT as df3\nimport torch\nfrom stable_baselines import PPO1, PPO2\nimport rl_quad.utils.trajGen as trajGen\nimport rl_quad.utils.trajGen3D as trajGen3D\nimport rl_quad.utils.utils as utils\nimport rl_quad.environment.model.params as params\nfrom rl_quad.environment.model.quadcopter import Quadcopter\nfrom rl_quad.environment.continous import QuadEnvCont\nimport numpy as np\nfrom stable_baselines.ddpg.policies import MlpPolicy\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nfrom stable_baselines import DDPG\n\nimport matplotlib.pyplot as plt\n\nanimation_frequency = 50\ncontrol_frequency = 200 # Hz for attitude control loop\ncontrol_iterations = control_frequency / animation_frequency\ndt = 1.0 / control_frequency\ntime = [0.0]\nsim_time = 2*np.pi\n\n# variables to plot\nF_t = list() # Thrust\nM_t = list() # Torque\nt_s = list() # simulation time\nd_s = list() # desired states\nq_s = list() # quadrotor states\nw_i = list() # rotor speeds\n\n# Test: sim_time = 2pi, v = 1.5, helix\n\ndef record(name):\n fig0 = plt.figure(figsize=(20,10))\n fig0.tight_layout()\n fig0ax0 = fig0.add_subplot(3,2,1)\n fig0ax1 = fig0.add_subplot(3,2,2)\n fig0ax2 = fig0.add_subplot(3,2,3)\n fig0ax3 = fig0.add_subplot(3,2,4)\n fig0ax4 = fig0.add_subplot(3,2,5)\n fig0ax5 = fig0.add_subplot(3,2,6)\n\n fig1 = plt.figure(figsize=(20,10))\n fig1.tight_layout()\n fig1ax0 = fig1.add_subplot(3,2,1)\n fig1ax1 = fig1.add_subplot(3,2,2)\n fig1ax2 = fig1.add_subplot(3,2,3)\n fig1ax3 = fig1.add_subplot(3,2,4)\n fig1ax4 = fig1.add_subplot(3,2,5)\n fig1ax5 = fig1.add_subplot(3,2,6)\n\n weight = params.mass*params.g*np.ones_like(t_s)\n fig0ax0 = utils.add_plots(fig0ax0,t_s,[F_t,weight],[\"-\",\"--\"],[\"r\",\"k\"],[\"F\",\"m*g\"],\"Rotor Thrust -F- over time\",'t {s}','F {N}')\n fig0ax0.legend(loc='lower right', shadow=True, fontsize='small') \n\n # Torques\n u2 = map(lambda a: a[0],M_t) # extract ux for all points in time\n u3 = map(lambda a: a[1],M_t)\n u4 = map(lambda a: a[2],M_t)\n\n fig0ax1 = utils.add_plots(fig0ax1,t_s,[u2,u3,u4],[\"-\",\"-\",\"-\"],[\"r\",\"g\",\"b\"],[\"u2\",\"u3\",\"u4\"],\"Components of torque vector M over time\",\"t {s}\",\"{N*m}\")\n fig0ax1.legend(loc='lower right', shadow=True, fontsize='small')\n\n # X position\n q_x = map(lambda a: a[0][0], q_s) # get quad x position\n d_x = map(lambda a: a.pos[0], d_s) # get desired x position\n x_e = map(lambda a,b: 10*(a-b),d_x,q_x) # compute error\n\n fig0ax2 = utils.add_plots(fig0ax2,t_s,[q_x,d_x,x_e],[\"-\",\"--\",\"-\"],[\"g\",\"r\",\"b\"],[\"quad -x\",\"des x\",\"x error (x10)\"],\"X - axis position of quadrotor\",\"t {s}\",\"x {m}\")\n fig0ax2.legend(loc='lower right', shadow=True, fontsize='small')\n\n # Y position\n q_y = map(lambda a: a[0][1], q_s)\n d_y = map(lambda a: a.pos[1], d_s)\n y_e = map(lambda a,b: 10*(a-b),d_y,q_y)\n\n fig0ax3 = 
utils.add_plots(fig0ax3,t_s,[q_y,d_y,y_e],[\"-\",\"--\",\"-\"],[\"g\",\"r\",\"b\"],[\"quad -y\",\"des y\",\"y error (x10)\"],\"Y - axis position of quadrotor\",\"t {s}\",\"y {m}\")\n fig0ax3.legend(loc='lower right', shadow=True, fontsize='small')\n\n # Z position\n q_z = map(lambda a: a[0][2], q_s)\n d_z = map(lambda a: a.pos[2], d_s)\n z_e = map(lambda a,b: 10*(a-b),d_z,q_z)\n\n fig0ax4 = utils.add_plots(fig0ax4,t_s,[q_z,d_z,z_e],[\"-\",\"--\",\"-\"],[\"g\",\"r\",\"b\"],[\"quad z\",\"des z\",\"z error (x10)\"],\"Z - axis position of quadrotor\",\"t {s}\",\"z {m}\")\n fig0ax4.legend(loc='lower right', shadow=True, fontsize='small')\n\n # Euler angles\n q_phi = map(lambda a: a[2][0]*180.0/np.pi, q_s)\n q_theta = map(lambda a: a[2][1]*180.0/np.pi, q_s)\n q_psi = map(lambda a: a[2][2]*180.0/np.pi, q_s)\n\n fig0ax5 = utils.add_plots(fig0ax5,t_s,[q_phi,q_theta,q_psi],[\"-\",\"-\",\"-\"],[\"r\",\"g\",\"b\"],[\"phi\",\"theta\",\"psi\"],\"Angular position of quadrotor\",'t {s}','phi, theta, psi {degree}')\n fig0ax5.legend(loc='lower right', shadow=True, fontsize='small')\n\n # X Linear velocity\n q_vx = map(lambda a: a[1][0], q_s)\n d_vx = map(lambda a: a.vel[0], d_s) \n vx_e = map(lambda a,b: 10*(a-b),d_vx,q_vx)\n\n fig1ax0 = utils.add_plots(fig1ax0,t_s,[q_vx,d_vx,vx_e],[\"-\",\"--\",\"-\"],[\"g\",\"r\",\"b\"],[\"quad Vx\",\"des Vx\",\"Vx error (x10)\"],\"X axis linear Velocities of quadrotor\",'t {s}','Vx {m/s}')\n fig1ax0.legend(loc='lower right', shadow=True, fontsize='small') \n\n # Y Linear velocity\n q_vy = map(lambda a: a[1][1], q_s)\n d_vy = map(lambda a: a.vel[1], d_s) \n vy_e = map(lambda a,b: 10*(a-b),d_vy,q_vy)\n\n fig1ax1 = utils.add_plots(fig1ax1,t_s,[q_vy,d_vy,vy_e],[\"-\",\"--\",\"-\"],[\"g\",\"r\",\"b\"],[\"quad Vy\",\"des Vy\",\"Vy error (x10)\"],\"Y axis linear Velocities of quadrotor\",'t {s}','Vy {m/s}')\n fig1ax1.legend(loc='lower right', shadow=True, fontsize='small') \n\n # Z Linear velocity\n q_vz = map(lambda a: a[1][2], q_s)\n d_vz = map(lambda a: a.vel[2], d_s) \n vz_e = map(lambda a,b: 10*(a-b),d_vz,q_vz)\n\n fig1ax2 = utils.add_plots(fig1ax2,t_s,[q_vz,d_vz,vz_e],[\"-\",\"--\",\"-\"],[\"g\",\"r\",\"b\"],[\"quad Vz\",\"des Vz\",\"Vz error (x10)\"],\"Z axis linear Velocities of quadrotor\",'t {s}','Vz {m/s}')\n fig1ax2.legend(loc='lower right', shadow=True, fontsize='small') \n\n # Angular velocities\n q_wx = map(lambda a: a[3][0]*180.0/np.pi, q_s)\n q_wy = map(lambda a: a[3][1]*180.0/np.pi, q_s)\n q_wz = map(lambda a: a[3][2]*180.0/np.pi, q_s)\n\n fig1ax3 = utils.add_plots(fig1ax3,t_s,[q_wx,q_wy,q_wz],[\"-\",\"-\",\"-\"],[\"r\",\"g\",\"b\"],[\"wx\",\"wy\",\"wz\"],\"Angular velocities of quadrotor\",'t {s}','wx, wy, wz {degree/s}')\n fig1ax3.legend(loc='lower right', shadow=True, fontsize='small')\n\n # rotor speeds\n w_0 = map(lambda a: np.sqrt(a[0][0]) if a[0][0] > 0 else -np.sqrt(-a[0][0]),w_i)\n w_1 = map(lambda a: np.sqrt(a[1][0]) if a[1][0] > 0 else -np.sqrt(-a[1][0]),w_i)\n w_2 = map(lambda a: np.sqrt(a[2][0]) if a[2][0] > 0 else -np.sqrt(-a[2][0]),w_i)\n w_3 = map(lambda a: np.sqrt(a[3][0]) if a[3][0] > 0 else -np.sqrt(-a[3][0]),w_i)\n\n fig1ax4 = utils.add_plots(fig1ax4, t_s, [w_0,w_1,w_2,w_3],[\"-\",\"-\",\"-\",\"-\"],[\"r\",\"g\",\"b\",\"c\"],[\"w0\",\"w1\",\"w2\",\"w3\"],\"Rotor Speeds\",'t {s}','{rpm}')\n fig1ax4.legend(loc='lower right', shadow=True, fontsize='small')\n # save\n fig0.savefig(\"t_\"+name, dpi = 300) #translation variables\n fig1.savefig(\"r_\"+name, dpi = 300) #rotation variables\n print(\"Saved t_{} and r_{}.\".format(name,name))\n\nif 
__name__ == \"__main__\":\n env = QuadEnvCont()\n model = PPO1.load(\"quad-ppo-v4\")\n obs = env.reset()\n\n env.quadcopter.state=np.array(np.array([0,0,0,0,0,0,0,0,0,1,0,0,0]))\n waypoints = trajGen3D.get_helix_waypoints(sim_time, 8)\n (coeff_x, coeff_y, coeff_z) = trajGen3D.get_MST_coefficients(waypoints)\n env.set_goal([-coeff_x[0], -coeff_y[0], -coeff_z[0]-10])\n print(\"ASDDDDSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSs\")\n print(env.get_state())\n\n def control_loop(i):\n \n for _ in range(int(control_iterations)):\n global obs\n action, _states = model.predict(obs)\n print(action)\n obs, rewards, dones, info = env.step(action)\n \n return env.quadcopter.world_frame()\n\n plot_quad_3d(waypoints, control_loop)\n print(\"Closing.\")\n","sub_path":"rl_quad/rl_scripts/test/rl_test_ppo.py","file_name":"rl_test_ppo.py","file_ext":"py","file_size_in_byte":7713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"348860900","text":"from setuptools import setup, find_packages\n\n__author__ = 'trezorg@gmail.com'\n__version__ = '0.0.2'\n\nsetup(\n name=\"pyadmitad-unknown\",\n version=__version__,\n author='Igor Nemilentsev',\n author_email='trezorg@gmail.com',\n description='A Python wrapper around the Admitad API',\n license='MIT',\n url='https://github.com/trezorg/admitad-python-api.git',\n keywords='admitad',\n packages=find_packages(exclude='tests'),\n install_requires=['requests', 'simplejson'],\n test_suite='unittest2.collector',\n tests_require=['mocker', 'unittest2'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications',\n 'Topic :: Internet',\n ],\n)\n\n","sub_path":"pypi_install_script/pyadmitad-unknown-0.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"561015126","text":"from .initializer import Initializer\nfrom .lobby import Lobby\nfrom .game_players import GamePlayers\nfrom queue import Queue\nimport threading\n\nclass Game:\n\n def __init__(self):\n self.initializer = Initializer()\n self.initializer.init()\n self.lobby = Lobby()\n self.lobby_lock = threading.Lock()\n self.in_progress = False\n self.turn_number = 0\n\n def start_game(self):\n self.lobby_lock.acquire()\n if self.all_ready():\n self.in_progress = True\n self.game_players = GamePlayers(self.lobby.to_dict(), self.lobby.get_player_count())\n self.lobby_lock.release()\n return True\n else:\n self.lobby_lock.release()\n return False\n\n def add_player(self, player):\n self.lobby_lock.acquire()\n pnum = self.lobby.add_player(player)\n self.lobby_lock.release()\n return pnum\n\n def remove_player(self, player):\n self.lobby_lock.acquire()\n pnum = self.lobby.remove_player(player)\n self.lobby_lock.release()\n return pnum\n\n def update_player_profession(self, number, profession):\n self.lobby_lock.acquire()\n result = self.lobby.update_player_profession(number, profession)\n self.lobby_lock.release()\n return result\n\n def get_player_profession(self, number):\n self.lobby_lock.acquire()\n profession = self.lobby.get_player_profession(number)\n self.lobby_lock.release()\n return profession\n\n def update_player_ready(self, number, ready):\n self.lobby_lock.acquire()\n result = self.lobby.update_player_ready(number, ready)\n self.lobby_lock.release()\n return result\n\n def all_ready(self):\n return self.lobby.all_ready()\n\n def get_lobby_dict(self):\n self.lobby_lock.acquire()\n l = self.lobby.to_dict()\n self.lobby_lock.release()\n return l\n\n def to_dict(self):\n self.lobby_lock.acquire()\n game = {}\n game[\"turn_number\"] = self.turn_number\n game[\"player_number\"] = self.player_number\n game[\"players\"] = self.game_players.to_dict()\n self.lobby_lock.release()\n return game","sub_path":"server/logic/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"108000860","text":"class Matrix:\n def __init__(self, lst):\n self.y_len = len(lst)\n self.x_len = len(lst[0])\n for i in lst:\n if len(i) != self.x_len:\n raise IndexError(\"Размерность матрицы не должна меняться\")\n con_lst = [list(map(int, x_line)) for x_line in lst]\n self.elements = con_lst\n\n def __str__(self):\n st_out = ''\n for i in self.elements:\n line = (\" \".join(map(str, i)))\n st_out = f\"{st_out}{line}\\n\"\n return st_out\n\n def __add__(self, other):\n if (self.x_len != other.x_len) or (self.y_len != other.y_len):\n raise IndexError(\"Складывать можно только матрицы одинаковых размерностей\")\n total_lst = [list(map(lambda x, y: x + y, self.elements[i], other.elements[i])) for i in\n range(0, len(self.elements))]\n new_mat = Matrix(total_lst)\n return new_mat\n\n\nmat = Matrix([[1, -5, 3], [3, 4, 5], [6, 7, 8]])\nmat2 = Matrix([['2', '3', '4'], ['5', ' 6', '+7'], [8, 9, 10]])\n\nmat3 = mat + mat2\nprint(mat3)\n","sub_path":"Nikolskiy_Aleksey_dz_10/task10_1.py","file_name":"task10_1.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"614571771","text":"#Emmanuel Galeana\r\n#04/09/2020\r\n#Diseña e implementa un programa que transforme una clave en un color.\r\n\r\nx = int(input())\r\ndef clave_color(x):\r\n if x==1:\r\n return('ROJO')\r\n elif x==2:\r\n return('AZUL')\r\n elif x==3:\r\n return('BLANCO')\r\n elif x==4:\r\n return('NEGRO')\r\n elif x==5:\r\n return('AMARILLO')\r\n else:\r\n return('ERROR')\r\n\r\nprint(clave_color(x))\r\n \r\n \r\n ","sub_path":"Ejercicios #5/Clave color.py","file_name":"Clave color.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"76960555","text":"import csv\nfrom afajycal.config import Config\n\n\nclass SaveCalendar:\n \"\"\"試合スケジュールデータを永続化するクラス\n\n 取得した旭川地区サッカー協会第3種事業委員会Webサイトの\n 試合スケジュールデータをデータベースまたはファイルへ格納する。\n\n \"\"\"\n\n def __init__(self, args: dict):\n \"\"\"\n Args:\n args[\"calendar\"] (:obj:`Calendar`): Scheduleクラスのオブジェクトを\n 要素とするリストを持つオブジェクト。\n args[\"db\"] (:obj:`DB`): データベース操作をラップしたオブジェクト。\n\n \"\"\"\n\n self.__calendar = args[\"calendar\"]\n self.__db = args[\"db\"]\n self.__table_name = Config.TABLE\n\n def _schedules(self):\n return self.__calendar.schedules\n\n def _cursor(self):\n return self.__db.cursor()\n\n def _commit(self):\n return self.__db.commit()\n\n def to_csv(self, csv_path):\n \"\"\"CSVへ試合スケジュールデータを保存\n\n 旭川地区サッカー協会第3種事業委員会Webサイトからダウンロードした\n データを、CSVファイルへ出力する。\n\n Args:\n csv_path (str): 出力��るCSVのパス\n\n Returns:\n bool: データの登録が成功したらTrueを返す。\n\n \"\"\"\n with open(csv_path, \"w\") as f:\n writer = csv.writer(f)\n for schedule in self._schedules():\n writer.writerow(\n [\n schedule.number,\n schedule.category,\n schedule.match_number,\n schedule.match_date,\n schedule.kickoff_time,\n schedule.home_team,\n schedule.away_team,\n schedule.studium,\n ]\n )\n return True\n\n def to_db(self):\n \"\"\"データベースへ試合スケジュールデータを保存\n\n 旭川地区サッカー協会第3種事業委員会Webサイトからダウンロードした\n データを、データベースへ登録する。登録の時、既存のデータベースの\n データは一旦全て削除してから処理を行う。\n\n Returns:\n bool: データの登録が成功したらTrueを返す。\n\n \"\"\"\n cur = self._cursor()\n cur.execute(\"DELETE FROM \" + self.__table_name)\n for schedule in self._schedules():\n cur.execute(\n \"INSERT OR IGNORE INTO\"\n + \" \"\n + self.__table_name\n + \" \"\n + \"(number, category, match_number, match_date, kickoff_time, \"\n + \"home_team, away_team, studium, updated)\"\n + \" \"\n + \"VALUES\"\n + \" \"\n + \"(?, ?, ?, ?, ?, ?, ?, ?, datetime('now', 'localtime'));\",\n (\n schedule.number,\n schedule.category,\n schedule.match_number,\n schedule.match_date,\n schedule.kickoff_time,\n schedule.home_team,\n schedule.away_team,\n schedule.studium,\n ),\n )\n self._commit()\n return True\n","sub_path":"afajycal/save_calendar.py","file_name":"save_calendar.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"13656107","text":"#!/usr/bin/python\n\nimport collections\nfrom frc971.analysis.logentry import LogEntry\n\nclass Dataset(object):\n def __init__(self):\n self.time = []\n self.data = []\n\n def Add(self, time, data):\n self.time.append(time)\n self.data.append(data)\n\nclass CollectingLogReader(object):\n \"\"\"\n Reads log files and collected requested data.\n \"\"\"\n def __init__(self):\n self.signal = collections.OrderedDict()\n\n def Add(self, binary, struct_instance_name, *data_search_path):\n \"\"\"\n Specifies a specific piece of data to collect\n\n Args:\n binary: str, The name of the executable that generated the log.\n struct_instance_name: str, The name of the struct instance whose data\n contents should be collected.\n data_search_path: [str], The path into the struct of the exact piece of\n data to collect.\n\n Returns:\n None\n \"\"\"\n self.signal[(binary, struct_instance_name, data_search_path)] = Dataset()\n\n def HandleFile(self, f):\n \"\"\"\n Parses the specified log file.\n\n Args:\n f: str, The filename of the log whose data to parse.\n\n Returns:\n None\n \"\"\"\n with open(f, 'r') as fd:\n for line in fd:\n try:\n self.HandleLine(line)\n except Exception as ex:\n # It's common for the last line of the file to be malformed.\n print(\"Ignoring malformed log entry: \", line, ex)\n\n def HandleLine(self, line):\n \"\"\"\n Parses a line from a log file and adds the data to the plot data.\n\n Args:\n line: str, The line from the log file to parse\n\n Returns:\n None\n \"\"\"\n pline = LogEntry(line)\n\n for key in self.signal:\n value = self.signal[key]\n binary = key[0]\n struct_instance_name = key[1]\n data_search_path = key[2]\n boolean_multiplier = False\n multiplier = 1.0\n\n # If the plot definition line ends with a \"-b X\" where X is a number then\n # that number gets drawn when the value is True. Zero gets drawn when the\n # value is False.\n if len(data_search_path) >= 2 and data_search_path[-2] == '-b':\n multiplier = float(data_search_path[-1])\n boolean_multiplier = True\n data_search_path = data_search_path[:-2]\n\n if len(data_search_path) >= 2 and data_search_path[-2] == '-m':\n multiplier = float(data_search_path[-1])\n data_search_path = data_search_path[:-2]\n\n # Make sure that we're looking at the right binary structure instance.\n if binary == pline.name:\n if pline.msg.startswith(struct_instance_name + ': '):\n # Traverse the structure as specified in `data_search_path`.\n # This lets the user access very deeply nested structures.\n _, _, data = pline.ParseStruct()\n for path in data_search_path:\n data = data[path]\n\n if boolean_multiplier:\n if data == 'T':\n value.Add(pline.time, multiplier)\n else:\n value.Add(pline.time, 0)\n else:\n value.Add(pline.time, float(data) * multiplier)\n","sub_path":"frc971/analysis/logreader.py","file_name":"logreader.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"580436597","text":"from setuptools import setup, find_packages\nimport os\nimport subprocess\nimport codecs\nimport sys\n\n\nbase_dir = os.path.abspath(os.path.dirname(__file__))\nsrc_dir = os.path.join(base_dir, 'src')\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\nabout = {}\nwith open(os.path.join(src_dir, 'repovisor', '__about__.py')) as f:\n exec(f.read(), about)\n\n\ndef genRST():\n pandoc_call = ['pandoc', '--from=markdown', '--to=rst', 'README.md']\n try:\n output = subprocess.run(pandoc_call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if output.returncode:\n print(output.stderr)\n sys.exit()\n output = output.stdout\n except AttributeError:\n try:\n output = subprocess.check_output(pandoc_call)\n except subprocess.CalledProcessError:\n sys.exit()\n return output.decode()\n\n\n# get the dependencies and installs\nwith codecs.open(os.path.join(base_dir, 'requirements.txt'), encoding='utf-8') as f:\n all_reqs = f.read().split('\\n')\n\ninstall_requires = [x.strip() for x in all_reqs if 'git+' not in x]\ndependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]\n\nsetup(\n name=about['__title__'],\n version=about['__version__'],\n description=about['__summary__'],\n long_description=genRST(),\n url=about['__uri__'],\n license=about['__license__'],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3',\n ],\n keywords='git, repository, manager',\n packages=find_packages(where='src', exclude=['docs', 'tests*']),\n package_dir={'': 'src'},\n include_package_data=True,\n author=about['__author__'],\n install_requires=install_requires,\n dependency_links=dependency_links,\n author_email=about['__email__'],\n entry_points={\n 'console_scripts': ['repovise=repovisor.__main__:main'],\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"614427090","text":"#!/usr/bin/env python3\n\"\"\"\n scan all 6 reading frames of a given line in a fasta of contigs for ORFs, extract longest nonoverlapping ORFs from each reading frame in each contig\n input: fasta of contigs\n output: in $PWD: all_orfs.fna -- longest non overlapping orfs in each contig\n all_proteins.faa -- all_orfs sequences translated to amino acid sequence\n usage: call_orfs.py -f \n\"\"\"\nimport sys\nimport argparse\n\ndef main(argv):\n \"\"\" main method\n :param argv: cmd line argument\n \"\"\"\n # parse cmd line arguments\n args = parseArgs(argv)\n # extract longest orf from each contig\n longest_orf_dict = createLongestOrfDict(args.fasta_path)\n # translate longest_orf_dict sequences to amino acid sequences\n translated_longest_orf_dict = {fasta_header: translateSequenceToAminoAcid(nuc_seq)\n for fasta_header, nuc_seq in longest_orf_dict.items()}\n # write both to file\n writeFasta(longest_orf_dict, 'all_orfs.fna')\n writeFasta(translated_longest_orf_dict, 'all_proteins.faa')\n\ndef parseArgs(argv):\n parser = argparse.ArgumentParser(\n description=\"scan all 6 reading frames of a given line in a fasta of contigs for ORFs, extract longest nonoverlapping ORFs from each contig\")\n parser.add_argument(\"-f\", \"--fasta_path\", required=True,\n help=\"[Required] Directory path of fastq files.\\n\")\n\n args = parser.parse_args(argv[1:])\n return args\n\ndef writeFasta(longest_orf_dict, filename):\n \"\"\" write longest_orf_dict to file in fasta format. header lines for each open reading frame will be:\n > longest_orf_dict[key] (see longestOpenReadingFrame for details)\n :param longest_orf_dict: see function longestOpenReadingFrame()\n :param filename: the name of the fasta file (if only basename.fasta given, output in $PWD. Give relative or absolute path if want to output elsewhere)\n :return: None. write to file. Any call to this function will overwrite a pre-existing file of the same name\n \"\"\"\n with open(filename, 'w') as fasta_output:\n for key, sequence in longest_orf_dict.items():\n fasta_output.write('%s\\n%s\\n' % (key, sequence))\n\ndef translateSequenceToAminoAcid(orf_sequence):\n \"\"\" translate a sequence to amino acid. Assume sequence starts from first nucleotide in sequence\n :param orf_sequence: a sequence that starts with a start codon\n :return: translated sequence\n \"\"\"\n # aa_dict stores {codon: amino_acid_symbol}\n aa_dict = {'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L',\n 'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',\n 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',\n 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',\n 'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S',\n 'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',\n 'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',\n 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',\n 'TAT': 'Y', 'TAC': 'Y', 'TAA': 'STOP', 'TAG': 'STOP',\n 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',\n 'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',\n 'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E',\n 'TGT': 'C', 'TGC': 'C', 'TGA': 'STOP', 'TGG': 'W',\n 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',\n 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',\n 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G'}\n # instantiate a list to hold translated amino acid sequence\n aa_seq_list = []\n # check that orf_sequence is a string and starts with a start codon\n if not isinstance(orf_sequence, str):\n raise TypeError('the item passed to translateSequenceToAminoAcid string is not a string. 
Try again.')\n if not orf_sequence.startswith('ATG'):\n raise Exception('sequence does not start with start codon')\n if not aa_dict[orf_sequence[-3:]] == 'STOP':\n raise Exception('sequence does not end with a stop codon. Are you sure this is an ORF?')\n # if the sequence passes the tests above, translate\n else:\n # iterate over sequence in chunks of 3\n for index in range(0, len(orf_sequence) - 2, 3):\n # check to make sure there is not a stop codon before the end of the orf\n if aa_dict[orf_sequence[index:index + 3]] == 'STOP' and not index + 3 == len(orf_sequence):\n raise Exception('There is a stop codon in the middle of your ORF! Something is fishy here...')\n # append amino acid to list\n aa_seq_list.append(aa_dict[orf_sequence[index:index + 3]])\n aa_seq_list.pop(-1) # pop the stop codon off the list\n return ''.join(aa_seq_list) # turn list into string, return\n\ndef createLongestOrfDict(fasta_path):\n \"\"\" read fasta of contigs, extract longest reading frame from each. Return dict in form\n {longest_orf_fasta[key] : nucleotide_sequence} <-- see longestOpenReadingFrame for dict details\n :param fasta_path: path to input fasta of contigs to evaluate for longest ORF\n :return: dict of longest ORF from each contig\n \"\"\"\n longest_nonoverlapping_orf_dict = {}\n\n with open(fasta_path, 'r') as fasta_file:\n line = fasta_file.readline()\n while line:\n if line.startswith('>'):\n # extract relevant contig data from fasta header\n orf_fasta_contig_description = line.split('_')[0:5]\n contig_header = \"_\".join(orf_fasta_contig_description)\n # extract sequence\n sequence = fasta_file.readline().strip()\n orfs = longestOpenReadingFrame(contig_header, sequence)\n # add orfs to longest_orf_dict\n longest_nonoverlapping_orf_dict.update(orfs)\n\n # read next line and continue while loop, if line exists\n line = fasta_file.readline()\n\n return longest_nonoverlapping_orf_dict\n\ndef longestOpenReadingFrame(contig_header, sequence):\n \"\"\" read sequence, evaluate all 6 reading frames, return longest ORF\n :param sequence: a nucleotide sequence\n :param contig_header: extracted from input fasta. see main method\n :return: non overlapping ORF as dictionary {[__] : ATACC..}\n where orf_number is arbitrary sequential number of orfs found in a given sequence\n \"\"\"\n # instantiate dictionary for forward and reverse orfs\n orf_dict = {'forward': [], 'reverse': []}\n # instantiate a dict to hold the orfs in the structure described in the :return: statement in docstring\n fasta_orf_dict = {}\n\n # forward reading frames\n for index in range(0, 3):\n orf_dict['forward'].extend(findLongestOpenReadingFrame(sequence[index:]))\n\n # take reverse complement of the sequence\n reverse_complement = reverseComplement(sequence)\n for index in range(0, 3):\n orf_dict['reverse'].extend(findLongestOpenReadingFrame(reverse_complement[index:]))\n\n for direction, orfs_list in orf_dict.items():\n orf_number = 0\n for orf in orfs_list:\n if not orf == '': # skip empty strings -- these are from reading frames that did not contain orfs above the threshold\n orf_number = orf_number + 1 # increment orf_number\n fasta_header = '%s_%s_%s_%s' % (contig_header, orf_number, direction, len(orf)) # create fasta header\n fasta_orf_dict.setdefault(fasta_header, orf) # set entry in dict\n\n return fasta_orf_dict\n\ndef findLongestOpenReadingFrame(sequence, **kwargs):\n \"\"\"\n recursive method to return non overlapping ORFs (the longest ORFs with unique stop codons) on a given sequence. 
This does not account for reading frames.\n To account for reading frame, feed in string as follows: findOpenReadingFrames(sequence), findOpenReadingFrames(sequence[1:), findOpenReadingFrames(sequence[2:])\n Do the same with the reverse complement of the sequence\n alternative method: iterate over 1 nucleotide at a time. This has the advantage that it could be run in parallel. also easier to test -- could be wrapped in method to look at all 6\n :param sequence: a sequence to search for longest ORF\n :param kwargs: used to pass longest_orf and orf_list during recursion\n :return: the longest ORF in sequence (note: this does not account for reading frames -- starts at position 0)\n \"\"\"\n threshold = 100 # threshold below which orfs will be filtered out\n # store stop codons\n stop_codons = ['TAA', 'TAG', 'TGA']\n # store longest_orf and orf_list if passed. else, initiate variables\n try:\n longest_orf = kwargs['longest_orf']\n except KeyError:\n longest_orf = 1\n try:\n orf_list = kwargs['orf_list']\n except KeyError:\n orf_list = ['']\n\n # discard nucleotides in chunks of 3 if a start codon has not yet been found\n while not sequence[0:3] == 'ATG' and len(sequence) > 3:\n sequence = sequence[3:]\n # if it is still possible to find a longer ORF in the sequence, look for it.\n if not len(sequence) < threshold:\n # scan for stop codons\n for i in range(3, len(sequence) - 2, 3):\n # if a stop codon is found\n if sequence[i : i+3] in stop_codons:\n orf = sequence[0 : i+3]\n if len(orf) > threshold: # filter orfs less than threshold\n orf_list.append(orf)\n # if this orf is longer than longest_orf, update longest_orf\n if len(orf) > longest_orf:\n longest_orf = len(orf)-3 # do not count stop codon\n # recursive step\n return findLongestOpenReadingFrame(sequence[i+3:], longest_orf = longest_orf, orf_list = orf_list)\n # return statement for base case\n if len(orf_list) > 1:\n orf_list.pop(0)\n return orf_list\n\ndef reverseComplement(sequence):\n \"\"\"\n :return: reverse complement\n :raises TypeError: if sequence is not a str\n \"\"\"\n if not isinstance(sequence, str):\n raise TypeError('the item passed to reverseComplement string is not a string. Try again.')\n return reverse(complement(sequence))\n\ndef complement(sequence):\n \"\"\" return complement of sequence\n :param sequence: a nucleotide sequence\n :return: base pair complement of string, all upper case\n :raises TypeError: if sequence is not a string\n \"\"\"\n if not isinstance(sequence, str):\n raise TypeError('the item passed to reverseComplement string is not a string. Try again.')\n # complement dictionary\n complement_dict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n # cast sequence to upper case\n sequence_uppercase = sequence.upper()\n # instantiate an empty list\n sequence_complement = []\n # take complement of sequence\n for base in sequence_uppercase:\n sequence_complement.append(complement_dict[base]) # collect as list b/c strings are immutable\n # turn list into string, return\n return ''.join(sequence_complement)\n\ndef reverse(sequence):\n \"\"\" reverse the string\n :param sequence: any string, but expected to be a string of nucleotide bases\n :return: a reversed string\n :raises TypeError: if sequence is not a string\n \"\"\"\n if not isinstance(sequence, str):\n raise TypeError('the item passed to reverse string is not a string. 
Try again.')\n return sequence[::-1]\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"assignment10/call_orfs.py","file_name":"call_orfs.py","file_ext":"py","file_size_in_byte":11511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"423145201","text":"from google.cloud import mediatranslation\r\nimport os\r\n\r\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'media-translation-key.json'\r\n\r\n\r\ndef translate_from_file(file_path):\r\n client = mediatranslation.SpeechTranslationServiceClient()\r\n\r\n # The `sample_rate_hertz` field is not required for FLAC and WAV (Linear16)\r\n # encoded data. Other audio encodings must provide the sampling rate.\r\n audio_config = mediatranslation.TranslateSpeechConfig(\r\n audio_encoding='linear16',\r\n source_language_code='en-US',\r\n target_language_code='fr-FR')\r\n\r\n streaming_config = mediatranslation.StreamingTranslateSpeechConfig(\r\n audio_config=audio_config, single_utterance=True)\r\n\r\n def request_generator(config, audio_file_path):\r\n\r\n # The first request contains the configuration.\r\n # Note that audio_content is explicitly set to None.\r\n yield mediatranslation.StreamingTranslateSpeechRequest(\r\n streaming_config=config, audio_content=None)\r\n\r\n with open(audio_file_path, 'rb') as audio:\r\n while True:\r\n chunk = audio.read(4096)\r\n if not chunk:\r\n break\r\n yield mediatranslation.StreamingTranslateSpeechRequest(\r\n audio_content=chunk,\r\n streaming_config=config)\r\n\r\n requests = request_generator(streaming_config, file_path)\r\n responses = client.streaming_translate_speech(requests)\r\n\r\n for response in responses:\r\n # Once the transcription settles, the response contains the\r\n # is_final result. The other results will be for subsequent portions of\r\n # the audio.\r\n result = response.result\r\n translation = result.text_translation_result.translation\r\n source = result.recognition_result\r\n\r\n print('result : {}'.format(result))\r\n print('translation : {}'.format(translation))\r\n print('source : {}'.format(source))\r\n\r\n if result.text_translation_result.is_final:\r\n print(u'\\nFinal translation: {0}'.format(translation))\r\n print(u'Final recognition result: {0}'.format(source))\r\n break\r\n\r\n print(u'\\nPartial translation: {0}'.format(translation))\r\n print(u'Partial recognition result: {0}'.format(source))\r\n\r\n\r\nfile = 'data\\\\testaudio.mp3'\r\n\r\ntranslate_from_file(file)\r\n","sub_path":"media-translation.py","file_name":"media-translation.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"527452401","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals, absolute_import\n\n\n######## NOT SETTINGS, JUST BOILER PLATE ##############\nimport os\n\nVERSION = '0.5'\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nLIBS_DIR = os.path.join(os.path.dirname(ROOT_DIR), 'libs')\n\n######## END OF BOILER PLATE ##############\n\n# debug will get you error message and auto reload\n# don't set this to True in production\nDEBUG = False\n\n# Should the application serve static files on it's own ?\n# IF yes, set the absolute path to the static files.\n# If no, set it to None\n# In dev this is handy, in prod you probably want the HTTP servers\n# to serve it, but it's OK for small traffic to set it to True in prod too.\nSTATIC_FILES_ROOT = os.path.join(ROOT_DIR, 'static')\n\n# If True, will link the compressed verion of the js and css files,\n# otherwise, will use the ordinary files\nCOMPRESSED_STATIC_FILES = False\n\n# absolute path where the paste files should be store\n# default in projectdirectory/static/content/\n# use \"/\" even under Windows\nPASTE_FILES_ROOT = os.path.join(STATIC_FILES_ROOT, 'content')\n\n# a tuple of absolute paths of directory where to look the template for\n# the first one will be the first to be looked into\n# if you want to override a template, create a new dir, write the\n# template with the same name as the one you want to override in it\n# then add the dir path at the top of this tuple\nTEMPLATE_DIRS = (\n os.path.join(ROOT_DIR, 'views'),\n)\n\n# Port and host the embeded python server should be using\n# You can also specify them using the --host and --port script options\n# which have priority on these settings\nHOST = \"127.0.0.1\"\nPORT = \"8000\"\n\n# User and group the server should run as. Set to None if it should be the\n# current user. Some OS don't support it and if so, it will be ignored.\nUSER = None\nGROUP = None\n\n# Display a tiny counter for pastes created.\n# Be carreful if your site have to many pastes this can hurt your hard drive performances.\n# Refresh counter interval. Default to every minute after a paste.\nDISPLAY_COUNTER = True\nREFRESH_COUNTER = 60 * 1\n\n# Names/links to insert in the menu bar.\n# Any link with \"mailto:\" will be escaped to prevent spam\nMENU = (\n ('Home', '/'), # internal link. First link will be highlited\n ('Download 0bin', 'https://github.com/sametmax/0bin'), # external link\n ('Faq', '/faq/'), # faq\n ('Contact', 'mailto:your@email.com') # email\n)\n\n# limit size of pasted text in bytes. Be careful allowing too much size can\n# slow down user's browser\nMAX_SIZE = 1024 * 500\n\n# length of base64-like paste-id string in the url, int from 4 to 27 (length of sha1 digest)\n# total number of unique pastes can be calculated as 2^(6*PASTE_ID_LENGTH)\n# for PASTE_ID_LENGTH=8, for example, it's 2^(6*8) = 281 474 976 710 656\nPASTE_ID_LENGTH = 8\n","sub_path":"zerobin/default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"365949495","text":"import tensorflow\nimport os\n\n# TODO: read the binary files\nclass binary(object):\n    def __init__(self):\n        # set the image dimensions\n        self.width = 32\n        self.height = 32\n        self.channel = 3\n        # set the number of bytes per image\n        self.image = self.height * self.width * self.channel\n        self.label = 1\n        self.sample = self.image + self.label\n\n    # TODO: method that reads the binary files\n    def read_binary(self,file_path):\n        # 1. build the filename queue\n        file_quece = tensorflow.train.string_input_producer(file_path)\n        print(\"file_quece:\\n\",file_quece)\n        # 2. read and decode\n        # read\n        reader = tensorflow.FixedLengthRecordReader(self.sample)\n        key,value = reader.read(file_quece)\n        print(\"key:\\n\",key)\n        print(\"value:\\n\",value)\n        # decode\n        image_decode = tensorflow.decode_raw(value,tensorflow.uint8)\n        print(\"image_decode:\\n\",image_decode)\n        # slice to separate the label from the image\n        label = tensorflow.slice(image_decode,[0],[1])\n        image = tensorflow.slice(image_decode,[self.label],[self.image])\n        print(\"label:\\n\",label)\n        print(\"image:\\n\",image)\n\n        # adjust the shape of the image\n        image_reshape = tensorflow.reshape(image,[self.channel,self.height,self.width])\n        print(\"image_reshape:\\n\",image_reshape)\n\n        # transpose the image into the layout needed for batching\n        # [1,2,0]: axis 1 of the original data moves to position 0, axis 2 to position 1, axis 0 to position 2\n        image_transpose = tensorflow.transpose(image_reshape,[1,2,0])\n        print(\"image_transpose:\\n\",image_transpose)\n        # 3. batching\n        image_batch,label_batch = tensorflow.train.batch([image_transpose,label],batch_size=100,num_threads=1,capacity=100)\n\n        # open a session\n        with tensorflow.Session() as sess:\n            # build the thread coordinator\n            coord = tensorflow.train.Coordinator()\n            threads = tensorflow.train.start_queue_runners(sess=sess,coord=coord)\n            key, value, image_decode, label, image, image_reshape, image_transpose, image_batch,label_batch = sess.run([key,value,image_decode,label,image,image_reshape,image_transpose,image_batch,label_batch])\n            print(\"key:\\n\",key)\n            print(\"value:\\n\",value)\n            print(\"image_decode:\\n\",image_decode)\n            print(\"label:\\n\",label)\n            print(\"image:\\n\",image)\n            print(\"image_reshape:\\n\",image_reshape)\n            print(\"image_transpose:\\n\",image_transpose)\n            print(\"image_batch:\\n\",image_batch)\n            # stop and reclaim the threads\n            coord.request_stop()\n            coord.join(threads)\n        return image_batch,label_batch\n\n    # TODO: write the image data that was read into a TFRecords file\n    def write_TFRecords(self,image_value,label_value):\n        with tensorflow.python_io.TFRecordWriter(\"./TFRecords/cifar10.tfrecords\") as writer:\n            # loop to build example objects, serialize them and write them to the file\n            for i in range(100):\n                image = image_value[i].tostring()\n                label = label_value[i][0]\n                example = tensorflow.train.Example(features=tensorflow.train.Features(feature={\n                    \"image\":tensorflow.train.Feature(bytes_list=tensorflow.train.BytesList(value=[image])),\n                    \"label\":tensorflow.train.Feature(int64_list=tensorflow.train.Int64List(value=[label]))\n                }))\n                # write the serialized example to the file\n                writer.write(example.SerializeToString())\n        return None\n\n    # TODO: read the TFRecords file\n    def read_TFRecords_demo(self):\n        # 1. build the filename queue\n        file_queue = tensorflow.train.string_input_producer([\"./TFRecords/cifar10.tfrecords\"])\n        # 2. read and decode\n        # read\n        reader = tensorflow.TFRecordReader()\n        key,value = reader.read(file_queue)\n\n        # parse the example\n        feature = tensorflow.parse_single_example(value,features={\n            \"image\":tensorflow.FixedLenFeature([],tensorflow.string),\n            \"label\":tensorflow.FixedLenFeature([],tensorflow.int64)\n        })\n        image_feature_value = feature[\"image\"]\n        label_feature_value = feature[\"label\"]\n        # decode\n        image_decode = tensorflow.decode_raw(image_feature_value,tensorflow.uint8)\n        # adjust the image shape\n        image_reshape = tensorflow.reshape(image_decode,[self.height,self.width,self.channel])\n\n        # 3. batching\n        
image_batch,label_batch = tensorflow.train.batch([image_reshape,label_feature_value],batch_size=100,num_threads=1,capacity=100)\n        # open a session\n        with tensorflow.Session() as sess:\n            coord = tensorflow.train.Coordinator()\n            # start the threads\n            threads = tensorflow.train.start_queue_runners(sess=sess,coord=coord)\n            image_batch,label_batch = sess.run([image_batch,label_batch])\n            print(\"label_batch:\\n\",label_batch)\n            print(\"image_batch:\\n\",image_batch)\n            # stop the threads\n            coord.request_stop()\n            # reclaim the threads\n            coord.join(threads=threads)\nif __name__ == \"__main__\":\n    # get all the files under the directory\n    file_list = os.listdir(\"E:/python大数据资料/黑马-03-3天带你玩转Python深度学习/资料/深度学习day2资料/02-代码/cifar-10-batches-bin\")\n    # join the filenames with the path and keep only the .bin files\n    path = \"E:/python大数据资料/黑马-03-3天带你玩转Python深度学习/资料/深度学习day2资料/02-代码/cifar-10-batches-bin/\"\n    file_path = [os.path.join(path,fileName) for fileName in file_list if fileName[-3:] == \"bin\"]\n    obj = binary()\n    # image_batch, label = obj.read_binary(file_path)\n    # obj.write_TFRecords(image_batch, label)\n    obj.read_TFRecords_demo()","sub_path":"itheimaPythonAnalysis/deeplearning/day02/day02_binary_read_demo.py","file_name":"day02_binary_read_demo.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"455171828","text":"\"\"\"\n@author: Payam Dibaeinia\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\n\nclass CNN(nn.Module):\n\n def __init__(self):\n super(CNN,self).__init__()\n self.conv = self._conv_layers()\n self.fc = self._fc_layers()\n\n\n def _conv_layers(self):\n ret = nn.Sequential(OrderedDict([\n ('conv1_1', nn.Conv2d(in_channels = 3, out_channels = 32, kernel_size = 3, padding = 1)),\n ('batchnorm1_1', nn.BatchNorm2d(32)),\n ('relu1_1', nn.ReLU(inplace = True)),\n ('conv1_2', nn.Conv2d(in_channels = 32, out_channels = 32, kernel_size = 3, padding = 1)),\n ('batchnorm1_2', nn.BatchNorm2d(32)),\n ('relu1_2', nn.ReLU(inplace = True)),\n ('maxpool1_1', nn.MaxPool2d(kernel_size=2, stride=2)),\n ('dropout1', nn.Dropout2d(p = 0.05)),\n\n ('conv2_1', nn.Conv2d(in_channels = 32, out_channels = 64, kernel_size = 3, padding = 1)),\n ('batchnorm2_1', nn.BatchNorm2d(64)),\n ('relu2_1', nn.ReLU(inplace = True)),\n ('conv2_2', nn.Conv2d(in_channels = 64, out_channels = 64, kernel_size = 3, padding = 1)),\n ('batchnorm2_2', nn.BatchNorm2d(64)),\n ('relu2_2', nn.ReLU(inplace = True)),\n ('maxpool2_1', nn.MaxPool2d(kernel_size=2, stride=2)),\n ('dropout2', nn.Dropout2d(p = 0.05)),\n\n ('conv3_1', nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 3, padding = 1)),\n ('batchnorm3_1', nn.BatchNorm2d(128)),\n ('relu3_1', nn.ReLU(inplace = True)),\n ('conv3_2', nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1)),\n ('batchnorm3_2', nn.BatchNorm2d(128)),\n ('relu3_2', nn.ReLU(inplace = True)),\n ('conv3_3', nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1)),\n ('batchnorm3_3', nn.BatchNorm2d(128)),\n ('relu3_3', nn.ReLU(inplace = True)),\n ('maxpool3_1', nn.MaxPool2d(kernel_size=2, stride=2)),\n\n ]))\n\n return ret\n\n def _fc_layers(self):\n ret = nn.Sequential(OrderedDict([\n ('dropout2', nn.Dropout(p = 0.05)),\n ('fc1', nn.Linear(2048,512)),\n ('relu4_1', nn.ReLU(inplace = True)),\n ('fc2', nn.Linear(512,128)),\n ('relu4_2', nn.ReLU(inplace = True)),\n ('last', nn.Linear(128,10)),\n\n ]))\n\n return ret\n\n def forward(self, x):\n\n conv_out = self.conv(x)\n conv_out_flatten = conv_out.view(conv_out.size(0),-1)\n forw = self.fc(conv_out_flatten)\n\n return forw\n","sub_path":"DeepCNN/DeepCNN.py","file_name":"DeepCNN.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"122299645","text":"\"\"\"\nRequirements analysis:\nTwo roles: player (p) vs computer (c)\nFrom the player's point of view -> win, lose, draw\nThe moves: scissors (0), rock (1), paper (2)\n# win\n - p = scissors, c = paper\n - p = rock, c = scissors\n - p = paper, c = rock\n# draw\n - p == c\n# lose\n anything that is neither a win nor a draw is a loss\n\"\"\"\n# import the random-number module\nimport random\n\n# ask the player to enter their move\np = int(input(\"请输入:剪刀(0) 石头(1) 布(2):\"))\n# let the computer pick its move (at random)\n# randint(0, 2) -> [0, 2]\nc = random.randint(0, 2)\nprint(\"玩家:%d------电脑:%d\" % (p, c))\n# win\nif (p == 0 and c == 2) or (p == 1 and c == 0) or (p == 2 and c == 1):\n    print(\"玩家取得了胜利...\")\n# draw\nelif (p == c):\n    print(\"玩家和电脑平局...\")\n# lose\nelse:\n    print(\"玩家失败...\")","sub_path":"pythonstage1/day03/04-if应用猜拳游戏.py","file_name":"04-if应用猜拳游戏.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"208763756","text":"\"\"\"\nGiven an n-ary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level).\n\nFor example, given a 3-ary tree:\n\nWe should return its level order traversal:\n\n [\n [1],\n [3,2,4],\n [5,6]\n ]\n\n\nNote:\n The depth of the tree is at most 1000.\n The total number of nodes is at most 5000.\n\"\"\"\n\ndef levelOrder(root):\n # define a function which can merge two 2-D list\n def merge2DLists(list1,list2):\n l = len(list1)\n for i in range(len(list2)):\n if i < l:\n list1[i] += list2[i]\n else:\n list1.append(list2[i])\n return list1\n\n # if root is None, return None\n if root == None:\n return None\n\n # merge its children\n two_d_list = list()\n if len(root.children) != 0:\n for child in root.children:\n two_d_list = merge2DLists(two_d_list, levelOrder(child))\n # return the root and its merged children list\n return [[root.val]] + two_d_list\n","sub_path":"LeetCode-Python/429 N-ary Tree Level Order Traversal.py","file_name":"429 N-ary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"374923777","text":"# Copyright 2020 Canonical Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For further info, check https://github.com/canonical/charmcraft\n\n\"\"\"Set up logging.\"\"\"\n\nimport logging\nimport os\nimport tempfile\n\nfrom charmcraft import __version__\n\nFORMATTER_SIMPLE = \"%(message)s\"\nFORMATTER_DETAILED = \"%(asctime)s %(name)-30s %(levelname)-8s %(message)s\"\n\n_logger = logging.getLogger(\"charmcraft\")\n_logger.setLevel(logging.DEBUG)\n\n\nclass _MessageHandler:\n \"\"\"Handle all the messages to the user.\n\n This class deals with several combination of the following dimensions:\n\n - the mode: quiet, normal or verbose\n - the output: sometimes, some messages, to the terminal; always to the file\n - the execution result: what happens if succeeded, raise a controlled error, or crashed\n \"\"\"\n\n _modes = {\n \"quiet\": (logging.WARNING, FORMATTER_SIMPLE),\n \"normal\": (logging.INFO, FORMATTER_SIMPLE),\n \"verbose\": (logging.DEBUG, FORMATTER_DETAILED),\n }\n\n def __init__(self):\n self._stderr_handler = logging.StreamHandler()\n _logger.addHandler(self._stderr_handler)\n\n # autoset modes constants for simpler interface\n for k in self._modes:\n setattr(self, k.upper(), k)\n\n def init(self, initial_mode):\n \"\"\"Initialize internal structures; this must be done before start logging.\"\"\"\n self._set_filehandler()\n self.set_mode(initial_mode)\n\n def set_mode(self, mode):\n \"\"\"Set logging in different modes.\"\"\"\n self.mode = mode\n level, format_string = self._modes[mode]\n self._stderr_handler.setFormatter(logging.Formatter(format_string))\n self._stderr_handler.setLevel(level)\n if mode == self.VERBOSE:\n _logger.debug(\"Starting charmcraft version %s\", __version__)\n\n def _set_filehandler(self):\n \"\"\"Set the file handler to log everything to the temp file.\"\"\"\n _, self._log_filepath = tempfile.mkstemp(prefix=\"charmcraft-log-\")\n\n file_handler = logging.FileHandler(self._log_filepath)\n file_handler.setFormatter(logging.Formatter(FORMATTER_DETAILED))\n file_handler.setLevel(0) # log eeeeeverything\n _logger.addHandler(file_handler)\n\n # a logger for only the file\n self._file_logger = logging.getLogger(\"charmcraft.guard\")\n self._file_logger.propagate = False\n self._file_logger.addHandler(file_handler)\n self._file_logger.debug(\"Starting charmcraft version %s\", __version__)\n\n def ended_ok(self):\n \"\"\"Cleanup after successful execution.\"\"\"\n os.unlink(self._log_filepath)\n\n def ended_interrupt(self):\n \"\"\"Clean up on keyboard interrupt.\"\"\"\n if self.mode == self.VERBOSE:\n _logger.exception(\"Interrupted.\")\n else:\n _logger.error(\"Interrupted.\")\n os.unlink(self._log_filepath)\n\n def ended_cmderror(self, err):\n \"\"\"Report the (expected) problem and (maybe) logfile location.\"\"\"\n if err.argsparsing:\n print(err)\n else:\n msg = \"{} (full execution logs in {})\".format(err, self._log_filepath)\n _logger.error(msg)\n\n def ended_crash(self, err):\n \"\"\"Report the internal error and 
logfile location.\n\n Show just the error to the user, but send the whole traceback to the log file.\n \"\"\"\n msg = \"charmcraft internal error! {}: {} (full execution logs in {})\".format(\n err.__class__.__name__, err, self._log_filepath\n )\n if self.mode == self.VERBOSE:\n # both to screen and file!\n _logger.exception(msg)\n else:\n # the error to screen and file, plus the traceback to the file\n _logger.error(msg)\n self._file_logger.exception(\"\")\n\n\nmessage_handler = _MessageHandler()\n","sub_path":"charmcraft/logsetup.py","file_name":"logsetup.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"201902263","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom hermes_python.hermes import Hermes\n\ndef action_wrapper(hermes, intentMessage):\n \"\"\" Write the body of the function that will be executed once the intent is recognized.\n In your scope, you have the following objects :\n - intentMessage : an object that represents the recognized intent\n - hermes : an object with methods to communicate with the MQTT bus following the hermes protocol.\n - conf : a dictionary that holds the skills parameters you defined\n Refer to the documentation for further details.\n \"\"\"\n \n if len(intentMessage.slots.objectLocation) > 0:\n myobjectLocation = ((intentMessage.slots.objectLocation.first().value))\n result_sentence = u\"Schalte das Licht {} an\".format(myobjectLocation) \n else:\n result_sentence = \t\"Schalte das Licht an\"\n \n current_session_id = intentMessage.session_id\n hermes.publish_end_session(current_session_id, result_sentence)\n\n\nif __name__ == \"__main__\":\n with Hermes(\"localhost:1883\") as h:\n h.subscribe_intent(\"Jamoth:LampenAnSchalten\", action_wrapper).start()\n","sub_path":"action-LichtAn.py","file_name":"action-LichtAn.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"268446361","text":"import pandas as pd\nimport tensorflow as tf\n\nfrom keras_fsl.dataframe.operators.abstract_operator import AbstractOperator\n\n\nclass ToKShotDataset(AbstractOperator):\n \"\"\"\n Create tf.data.Dataset with random groups of k_shot consecutive images with the same label\n \"\"\"\n\n def __init__(self, k_shot, preprocessing, label_column='label_one_hot'):\n \"\"\"\n\n Args:\n k_shot (int): number of consecutive crops from the same class\n preprocessing (function): to be applied onto the image after opening\n label_column (str): either \"label_one_hot\" or \"label\" depending on the expected form of the network\n \"\"\"\n self.k_shot = k_shot\n self.preprocessing = preprocessing\n self.label_column = label_column\n\n @staticmethod\n def load_img(annotation):\n \"\"\"\n Args:\n annotation (dict): with keys 'image_name': path to the image and 'crop_window' to be passed to tf.io.decode_and_crop_jpeg\n Returns:\n dict: the input dict with an extra image key.\n \"\"\"\n return (\n {\n 'image': tf.io.decode_and_crop_jpeg(\n tf.io.read_file(annotation['image_name']),\n crop_window=annotation['crop_window'],\n channels=3,\n ),\n **annotation,\n }\n )\n\n def repeat_k_shot(self, index):\n return tf.data.Dataset.from_tensors(index).repeat(self.k_shot)\n\n def to_dataset(self, group):\n \"\"\"\n Transform a pd.DataFrame into a tf.data.Dataset and load images\n \"\"\"\n return (\n tf.data.Dataset.from_tensor_slices(group.to_dict('list'))\n .map(self.load_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n .cache()\n .shuffle(buffer_size=len(group), reshuffle_each_iteration=True)\n .repeat()\n )\n\n def __call__(self, input_dataframe):\n return (\n tf.data.experimental.choose_from_datasets(\n datasets=(\n input_dataframe\n .assign(\n label_one_hot=lambda df: pd.get_dummies(df.label).values.tolist(),\n crop_window=lambda df: df[[\"crop_y\", \"crop_x\", \"crop_height\", \"crop_width\"]].values.tolist(),\n )\n .groupby('label')\n .apply(self.to_dataset)\n ),\n choice_dataset=(\n tf.data.Dataset.range(len(input_dataframe.label.unique()))\n .shuffle(buffer_size=len(input_dataframe.label.unique()), reshuffle_each_iteration=True)\n .flat_map(self.repeat_k_shot)\n ),\n )\n .map(\n lambda annotation: (self.preprocessing(annotation['image']), tf.cast(annotation[self.label_column], tf.float32)),\n num_parallel_calls=tf.data.experimental.AUTOTUNE,\n )\n )\n","sub_path":"keras_fsl/dataframe/operators/to_k_shot_dataset.py","file_name":"to_k_shot_dataset.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"97520119","text":"'''\nImplementation of the homeportal \"home-page\"\n'''\nfrom flask import Blueprint\nfrom flask import render_template\n\n# Blueprint Configuration\nhome_bp = Blueprint('home_bp', __name__,\n template_folder='templates', static_folder='static')\n\n\n@home_bp.route('/', methods=['GET'])\ndef home():\n '''homepage'''\n return render_template('home.html')\n","sub_path":"home_portal/home/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"34802102","text":"import os\nimport unittest\nimport json\nimport networkx as nx\nfrom graphene import graphgen\n\nnode_mapper_filename = './node_deep_mapper.json'\nedge_mapper_filename = './edge_deep_mapper.json'\ndata_filename = './test_deep_data.txt'\n\nclass TestDeepData(unittest.TestCase):\n\n def setUp(self):\n self.node_mapper = None\n self.clique_mapper = None\n self.data = []\n with open(node_mapper_filename) as f:\n self.node_mapper = json.load(f)\n with open(edge_mapper_filename) as f:\n self.edge_mapper = json.load(f)\n with open(data_filename) as f:\n for item in f:\n self.data.append(json.loads(item))\n\n def test_genDeepNodes(self):\n g = nx.Graph()\n g = graphgen.create_graph(g, \n graph_mapper = self.node_mapper, \n data_provider = self.data, add_type_to_key = True)\n # print('\\nNODES1:', g.nodes(data = True))\n self.assertEqual(nx.number_of_nodes(g), 21)\n # get node with key.\n key1 = ('TypeA', 'a_val2_1')\n key2 = ('TypeB', 'b_val2_21', 'b_val1_21')\n self.assertTrue(key1 in g.nodes)\n self.assertTrue(key2 in g.nodes)\n # print(g.node[key1])\n # print(g.node[key2])\n keyU = ('TypeA', '_UNKNOWN_')\n self.assertFalse(keyU in g.nodes)\n\n def test_genEdges(self):\n g = nx.Graph()\n g = graphgen.create_graph(g, \n graph_mapper = self.edge_mapper, \n data_provider = self.data, add_type_to_key = True)\n # print('\\nNODES2:', g.nodes(data = True))\n self.assertEqual(nx.number_of_nodes(g), 21)\n # print('\\nEDGES2:', g.edges())\n self.assertEqual(nx.number_of_edges(g), 38)\n # get node with key.\n key1 = ('TypeA', 'a_val2_1')\n key2 = ('TypeB', 'b_val2_1', 'b_val1_1')\n key3 = ('TypeB', 'b_val2_3', 'b_val1_3')\n key4 = ('TypeC', 'c_val1_3')\n self.assertTrue(key1 in g.nodes)\n self.assertTrue(key2 in g.nodes)\n # print(g.node[key1])\n # print(g.node[key2])\n # check eges with data\n self.assertTrue(g.has_edge(key1, key2))\n edge_data = g.get_edge_data(key1, key2)\n self.assertTrue(edge_data != {})\n # print('e1:', edge_data)\n self.assertTrue(g.has_edge(key3, key4))\n edge_data = g.get_edge_data(key3, key4)\n self.assertTrue(edge_data != {})\n # print('e2:', edge_data)\n key5 = ('TypeC', 'ABCDEF') # invalid node key\n self.assertFalse(key5 in g)\n self.assertFalse(g.has_edge(key2, key5))\n \n\n# if __name__ == '__main__':\n# unittest.main()\n","sub_path":"tests/test_deep_data.py","file_name":"test_deep_data.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"602477758","text":"from typing import List, Dict, TypeVar, Tuple\n\nfrom ExperimentalSpectrumPeptideMassNoise import experimental_spectrum_peptide_mass_noise\nfrom PeptideType import PeptideType\nfrom SequenceTester import TestResult, SequenceTesterSet, SequenceTester\nfrom SpectrumConvolution import spectrum_convolution\nfrom SpectrumConvolutionNoise import spectrum_convolution_noise\nfrom SpectrumScore import score_spectrums\n\nAA = TypeVar('AA')\n\n\n# MARKDOWN\ndef sequence_peptide(\n exp_spec: List[float], # must be sorted asc\n aa_mass_table: Dict[AA, float], # amino acid mass table\n aa_mass_tolerance: float, # amino acid mass tolerance\n peptide_mass_candidates: List[Tuple[float, float]], # mass range candidates for mass of peptide\n peptide_type: PeptideType, # linear or cyclic\n score_backlog: int, # backlog of top scores\n leaderboard_size: int,\n leaderboard_initial: List[List[AA]] = None # bootstrap candidate peptides for leaderboard\n) -> SequenceTesterSet:\n tester_set = SequenceTesterSet(\n exp_spec,\n aa_mass_table,\n aa_mass_tolerance,\n peptide_mass_candidates,\n peptide_type,\n score_backlog\n )\n if leaderboard_initial is None:\n leaderboard = [[]]\n else:\n leaderboard = leaderboard_initial[:]\n while len(leaderboard) > 0:\n # Branch candidates\n expanded_leaderboard = []\n for p in leaderboard:\n for m in aa_mass_table:\n new_p = p[:] + [m]\n expanded_leaderboard.append(new_p)\n # Test candidates to see if they match exp_spec or if they should keep being branched\n removal_idxes = set()\n for i, p in enumerate(expanded_leaderboard):\n res = set(tester_set.test(p))\n if {TestResult.MASS_TOO_LARGE} == res:\n removal_idxes.add(i)\n expanded_leaderboard = [p for i, p in enumerate(expanded_leaderboard) if i not in removal_idxes]\n # Set leaderboard to the top n scoring peptides from expanded_leaderboard, but include peptides past n as long\n # as those peptides have a score equal to the nth peptide. The reason for this is that because they score the\n # same, there's just as much of a chance that they'll end up as the winner as it is that the nth peptide will.\n # NOTE: Why get the theo spec of the linear version even if the peptide is cyclic? For similar reasons as to\n # why it's done in the branch-and-bound variant: If we treat candidate peptides as cyclic, their theo spec\n # will include masses for wrapping subpeptides of the candidate peptide. These wrapping subpeptide masses\n # may end up inadvertently matching masses in the experimental spectrum, meaning that the candidate may get\n # a better score than it should, potentially pushing it forward over other candidates that would ultimately\n # branch out to a more optimal final solution. 
As such, even if the exp spec is for a cyclic peptide,\n # treat the candidates as linear segments of that cyclic peptide (essentially linear peptides).\n theo_specs = [\n SequenceTester.generate_theroetical_spectrum_with_tolerances(\n p,\n peptide_type,\n aa_mass_table,\n aa_mass_tolerance\n )\n for p in expanded_leaderboard\n ]\n scores = [score_spectrums(exp_spec, theo_spec) for theo_spec in theo_specs]\n scores_paired = sorted(zip(expanded_leaderboard, scores), key=lambda x: x[1], reverse=True)\n leaderboard_tail_idx = min(leaderboard_size, len(scores_paired)) - 1\n leaderboard_tail_score = 0 if leaderboard_tail_idx == -1 else scores_paired[leaderboard_tail_idx][1]\n for j in range(leaderboard_tail_idx + 1, len(scores_paired)):\n if scores_paired[j][1] < leaderboard_tail_score:\n leaderboard_tail_idx = j - 1\n break\n leaderboard = [p for p, _ in scores_paired[:leaderboard_tail_idx + 1]]\n return tester_set\n# MARKDOWN\n\n\ndef main():\n print(\"\", end=\"\\n\\n\")\n print(\"`{bm-disable-all}`\", end=\"\\n\\n\")\n try:\n exp_spec = [float(m) for m in input().strip().split()]\n exp_spec_mass_noise = float(input().strip())\n exp_spec_final_mass_in_last_n = int(input().strip())\n aa_mass_noise = spectrum_convolution_noise(exp_spec_mass_noise)\n aa_mass_round = int(input().strip())\n aa_top_n = int(input().strip())\n aa_masses = spectrum_convolution(exp_spec, aa_mass_noise)\n aa_mass_table = {round(k, aa_mass_round): round(k, aa_mass_round) for k, v in aa_masses.most_common(aa_top_n)}\n peptide_type = input().strip()\n peptide_expected_len = int(input().strip())\n peptide_mass_noise = experimental_spectrum_peptide_mass_noise(exp_spec_mass_noise, peptide_expected_len)\n peptide_mass_range_candidates = [(m - peptide_mass_noise, m + peptide_mass_noise) for m in exp_spec[-exp_spec_final_mass_in_last_n:]]\n score_backlog = int(input().strip())\n leaderboard_size = int(input().strip())\n testers = sequence_peptide(\n exp_spec,\n aa_mass_table,\n aa_mass_noise,\n peptide_mass_range_candidates,\n {'cyclic': PeptideType.CYCLIC, 'linear': PeptideType.LINEAR}[peptide_type],\n score_backlog,\n leaderboard_size\n )\n print(f'Given the ...', end='\\n\\n')\n print(f' * experimental spectrum: {exp_spec}')\n print(f' * experimental spectrum mass noise: ±{exp_spec_mass_noise}')\n print(f' * assumed peptide type: {peptide_type}')\n print(f' * assumed peptide length: {peptide_expected_len}')\n print(f' * assumed peptide mass: any of the last {exp_spec_final_mass_in_last_n} experimental spectrum masses')\n print(f' * score backlog: {score_backlog}')\n print(f' * leaderboard size: {leaderboard_size}', end='\\n\\n')\n print(f'Top {aa_top_n} captured mino acid masses (rounded to {aa_mass_round}): {list(aa_mass_table.keys())}',\n end='\\n\\n')\n for tester in testers.testers:\n print(f'For peptides between {tester.peptide_min_mass} and {tester.peptide_max_mass}...', end='\\n\\n')\n for score, peptides in tester.leader_peptides.items():\n for peptide in peptides:\n print(f' * Score {score}: {\"-\".join([str(aa) for aa in peptide])}')\n finally:\n print(\"\", end=\"\\n\\n\")\n print(\"`{bm-enable-all}`\", end=\"\\n\\n\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docs/data/learn/Bioinformatics/input/ch4_code/src/SequencePeptide_Leaderboard.py","file_name":"SequencePeptide_Leaderboard.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"539819387","text":"\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Table\n# getSampleStyleSheet: built-in preset stylesheet library\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib.pagesizes import letter\n# register the font\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\n\nfrom reportlab.lib.units import inch\nfrom reportlab.lib import colors\n\npdfmetrics.registerFont(TTFont('song', r'E:\\\PythonObject\\\python提升\\\ascension\\\ascension\\\python\\\excel_python\\\STSONG.ttf'))\n\n\ndoc = SimpleDocTemplate(r'E:/test.pdf', pagesize = letter)\nwidth, height = letter\n\n# preset styles\nstyles = getSampleStyleSheet()\ntitle = styles['Title']\ntitle.fontName = 'song'\n\nelement = []\nelement.append(Paragraph('Python webserver 依赖库', style = title))\ndata = [['依赖名', '版本', '描述'],\n        ['requests', '2.22.0', '请求'],\n        ['Flask', '1.0.2', 'web框架'],\n        ['Flask-SocketIO', '3.3.2', 'Websocket'],\n        ['Python','3.6.5',None]]\nct = [('FONTNAME', (0, 0), (-1, -1), 'song'),\n      ('SPAN', (-2, -1), (-1, -1)),\n      ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n      # ('GRID', (0, 0), (-1, -1), 0.5, colors.grey),\n      ('FONTSIZE', (0, 0), (-1, 0), 11),\n      # ('BACKGROUND', (0, 0), (-1, 0), colors.Color(0.86, 0.86, 0.86)),\n      ('LINEBELOW', (0, 0), (-1, 0), 1, colors.Color(0.27, 0.5, 0.7)),\n      ('LINEABOVE', (0, -1), (-1, -1), 1, colors.Color(0.27, 0.5, 0.7))]\n\n\nelement.append(Table(data = data, colWidths = (width - 2 * inch)/len(data[0]), style = ct))\n\ndoc.build(element)\n\n\n\n\n","sub_path":"python_up/python_vidobook_test/流畅的python/excel_python/pdf_table.py","file_name":"pdf_table.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"411356170","text":"# plot the displacements\n\n# imports\nimport matplotlib.pylab as plt\nimport numpy as np\n\n# initial size of plot window\nplt.rcParams['figure.figsize'] = 20, 12\n\n\n# input file\nfilename = (\"dataFiles/hist.data\")\ndata = np.loadtxt(filename, skiprows=1)\n\ndata12=np.concatenate((data[:,0],data[:,1]))\n\nhist=np.histogram(data12,100,normed=True)\n\ndef dist(r):\n\treturn 2**5*(r)**2*np.exp(-4*r)\n\n# plot 1\nplt.subplot(2,3,1)\nplt.subplots_adjust(wspace=0.3)\nplt.hist(data12,100,normed=True)\nx=np.linspace(0,3.5)\nplt.plot(x,dist(x))\n\n# labels\nplt.xlabel('Radius', fontsize=20)\nplt.ylabel('Counts', fontsize=20)\nplt.title('Normed histogram',fontsize=30)\n\n\n\n# plot 2\nplt.subplot(2,3,2)\nplt.subplots_adjust(wspace=0.3)\nplt.plot(data12,'-')\n\n\n# labels\nplt.xlabel('step', fontsize=20)\nplt.ylabel('r', fontsize=20)\nplt.title('Accepted r',fontsize=30)\n\n# plot 3\nplt.subplot(2,3,3)\nplt.subplots_adjust(wspace=0.3)\nplt.plot(data[:,2],'-')\n\n\n\n# labels\nplt.xlabel('step', fontsize=20)\nplt.ylabel('EL', fontsize=20)\nplt.title('EL fluct',fontsize=30)\n\n# plot 4\nplt.subplot(2,3,4)\nplt.subplots_adjust(wspace=0.3)\nplt.plot(data[:,3],'-')\n\n\n\n# labels\nplt.xlabel('step', fontsize=20)\nplt.ylabel('E', fontsize=20)\nplt.title('E',fontsize=30)\n\nk=100\n\nEL=data[:,2]\nl = len(EL)\nEL_mean_square = (sum(EL)/l)**2\nEL_square_mean = (sum(EL*EL)/l)\nEL_corr = []\nfor i in range(0,k):\n EL_corr.extend([0])\n for j in range(0,l-i):\n EL_corr[i] += EL[j]*EL[j+i]\n EL_corr[i] = (EL_corr[i]/(l-i)-EL_mean_square)/(EL_square_mean-EL_mean_square)\n\n\n# plot 5\nplt.subplot(2,3,5)\nplt.subplots_adjust(wspace=0.3)\nplt.plot(EL_corr,'-')\n\n# labels\nplt.xlabel('step', fontsize=20)\nplt.ylabel('corr', fontsize=20)\nplt.title('corr func',fontsize=30)\n\nk=500\nblock_av = []\nvarf = EL_square_mean-EL_mean_square\nfor B in range(1,k):\n MB=l/B\n block_av.extend([0])\n F_mean_square=(sum(EL[0:MB*B])/(MB*B))**2\n F_square_mean=0\n for i in range(0,MB):\n F_square_mean += ((sum(EL[i*B:(i+1)*B])/B)**2)/MB\n varF=F_square_mean-F_mean_square\n block_av[B-1] = B*varF/varf\n\n# plot 6\nplt.subplot(2,3,6)\nplt.subplots_adjust(wspace=0.3)\nplt.plot(range(1,k),block_av,'-')\n\n# labels\nplt.xlabel('step', fontsize=20)\nplt.ylabel('corr', fontsize=20)\nplt.title('block average',fontsize=30)\n\n\n\n# tick fontsize\nplt.xticks(fontsize=10)\nplt.yticks(fontsize=10)\n\n\n\n# display the plot\nplt.show()\n","sub_path":"H2/task1/H2plot.py","file_name":"H2plot.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"385850781","text":"# -*- coding: utf-8 -*-\nimport sqlite3\nfrom os import path\n \nfrom scrapy import signals\nfrom scrapy.xlib.pydispatch import dispatcher\n\n\nclass SQLiteStorePipeline(object):\n \"\"\"A pipeline for filtering out items which contain certain words in their\n description\"\"\"\n filename = '-data.db'\n \n def __init__(self):\n self.conn = None\n dispatcher.connect(self.initialize, signals.engine_started)\n dispatcher.connect(self.finalize, signals.engine_stopped)\n \n def initialize(self):\n self.conn = sqlite3.connect(self.filename)\n \n def finalize(self):\n if self.conn is not None:\n self.conn.commit()\n self.conn.close()\n self.conn = None\n \n def create_table(self, filename):\n conn = sqlite3.connect(filename)\n #conn.execute(u\"create table moive( id int identity(1, 1) not null primary key, 名字 text, 导演 text, 豆瓣链接 text, 描述 text)\")\n #conn.commit()\n return conn\n\n def sql_dianying_yinren(self, dianying_id, list_yingren):\n for item in list_yingren:\n x = conn.execute(u'select \"演员id\" from \"演员id-姓名\" where \"姓名\"%s\";' % item).fetchone()\n if x:\n pass\n \n def insert_dianying(self, item):\n self.conn.execute(u'insert into 电影信息(名称,豆瓣链接,发行地区,语言,描述,封面链接,上映日期,时长,评分,评分人数) \\\n values(?,?,?,?,?,?,?,?,?,?)',(item[\"name\"], item[\"douban_url\"], item[\"quyu\"], item[\"yuyan\"], item[\"description\"],\\\n item[\"imgae_url\"], item[\"date\"], item[\"runtime\"], item[\"pingfen\"], item[\"ping_num\"])).fetchone()\n x = self.conn.execute(\"select last_insert_rowid()\").fetchone()\n if x:\n return x[0]\n else:\n exit()\n \n\n def process_item(self, item, spider):\n item.pr()\n dianying_id = self.insert_dianying(item)\n #处理影人\n \n #\n \n \n return item\n\n\n\"\"\"\nCREATE TABLE \"电影信息\" (\n [id] INTEGER PRIMARY KEY AUTOINCREMENT, \n [名称] VARCHAR(256) NOT NULL, \n [豆瓣链接] VARCHAR(1024) NOT NULL, \n [发行地区] VARCHAR(64), \n [语言] VARCHAR(64), \n [描述] VARCHAR, \n [封面链接] VARCHAR(1024), \n [上映日期] DATE, \n [时长] INT, \n [评分] DECIMAL, \n [评分人数] INT);\n\nCREATE INDEX [豆瓣链接索引] ON \"电影信息\" ([豆瓣链接]);\n\nCREATE TABLE \"影人信息\" (\n [id] INTEGER PRIMARY KEY AUTOINCREMENT, \n [出生年月] DATE);\n\nCREATE TABLE \"别名-电影id\" (\n [电影名称] VARCHAR(256) NOT NULL, \n [电影id] BIGINT NOT NULL REFERENCES [电影信息]([id]));\n\nCREATE INDEX [电影名称索引] ON \"别名-电影id\" ([电影名称]);\n\nCREATE TABLE \"演员id-姓名\" (\n [演员id] BIGINT NOT NULL REFERENCES \"影人信息\"([id]), \n [姓名] VARCHAR(256) NOT NULL);\n\nCREATE INDEX [姓名索引] ON \"演员id-姓名\" ([姓名]);\n\nCREATE TABLE \"电影id-导演id\" (\n [电影id] BIGINT REFERENCES [电影信息]([id]) NOT NULL, \n [导演id] BIGINT REFERENCES [影人信息]([id]) NOT NULL);\n\n\nCREATE TABLE \"电影id-演员id\" (\n [电影id] BIGINT REFERENCES \"电影信息\"([id]) NOT NULL, \n [演员id] BIGINT REFERENCES \"影人信息\"([id]) NOT NULL);\n\n\nCREATE TABLE \"电影id-编剧id\" (\n [电影id] BIGINT REFERENCES [电影信息]([id]) NOT NULL, \n [编剧id] BIGINT REFERENCES [影人信息]([id]) NOT NULL);\n\n\nCREATE TABLE \"类型-电影id\" (\n [类型] VARCHAR(64) NOT NULL, \n [电影id] BIGINT NOT NULL REFERENCES [电影信息]([id]));\n\nCREATE INDEX [类型索引] ON \"类型-电影id\" ([类型]);\n\n\"\"\"","sub_path":"mbot/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"303001558","text":"from random import randint, random\n\n\nclass Ant:\n def __init__(self, tasks, machines, problem):\n \"\"\"\n Initialize a possible schedule for tasks on given machines\n self.tasks: Integer = number of tasks\n self.machines: Integer = number of machines\n self.problem: Problem -> used for evaluating fitness\n self.solution = tasks * machines matrix having 1 where task i is executed by machine j, 0 otherwise\n \"\"\"\n self.tasks = tasks\n self.machines = machines\n self.problem = problem\n self.solution = [[0 for j in range(machines)] for i in range(tasks)]\n self.initialize()\n\n def initialize(self):\n \"\"\"\n Initialize the solution with a first task given to all machines\n :return:\n \"\"\"\n for i in range(self.machines):\n combo = (randint(0, self.problem.tasks - 1), i)\n self.update(combo)\n\n def update(self, task_machine):\n \"\"\"\n Update solution\n task_machine = (task, machine)\n :return:\n \"\"\"\n task, machine = task_machine\n self.solution[task][machine] = 1\n\n def get_available_tasks(self):\n \"\"\"\n :return: list containing available tasks for the given machine\n \"\"\"\n available_tasks = []\n for t, task_line in enumerate(self.solution):\n ok = True\n for m in task_line:\n if m == 1:\n ok = False\n if ok:\n available_tasks.append(t)\n return available_tasks\n\n def add_move(self, pheromone_matrix, alpha, beta):\n \"\"\"\n Add a task for each machine\n :param pheromone_matrix: Matrix having lines and columns\n :param alpha:\n :param beta:\n \"\"\"\n for machine_index in range(self.machines):\n # get list of available tasks\n available_tasks = self.get_available_tasks()\n # if list is empty then break\n if len(available_tasks) == 0:\n break\n\n # compute the sum of indices given by all paths\n divider = sum((pheromone_matrix[task_index][machine_index] ** alpha) *\n (self.problem.cost_matrix[task_index][machine_index] ** beta)\n for task_index in available_tasks)\n\n # compute probabilities for all tasks\n probabilities = []\n for task in available_tasks:\n p = ((pheromone_matrix[task][machine_index] ** alpha) *\n (self.problem.cost_matrix[task][machine_index] ** beta)) / divider\n probabilities.append(p)\n\n # compute a cumulative sum for probabilities\n cumulative_sum = self.get_cumulative_sum(probabilities)\n # choose a task using the roulette method and update the solution\n task = available_tasks[self.choose_task(cumulative_sum)]\n self.solution[task][machine_index] = 1\n\n def choose_task(self, cumulative_sum):\n \"\"\"\n Choose random task_index from cumulative_sum array using roulette method\n :param cumulative_sum: list of int [0,1]\n :return: int\n \"\"\"\n r = random()\n cumulative_sum = cumulative_sum[::-1]\n for task_index, x in enumerate(cumulative_sum):\n if r < x:\n return task_index\n\n def get_cumulative_sum(self, probabilities):\n \"\"\"\n Compute the cumulative sum given a list of probabilities\n probabilities: 0.76, 0.19, 0.05\n => cumulative_sum: 1, 0.24, 0.05\n :param probabilities: List\n :return: List\n \"\"\"\n cumulative_sum = []\n for i in range(len(probabilities)):\n suma = sum(probabilities[i:])\n cumulative_sum.append(suma)\n return cumulative_sum\n\n def fitness(self):\n \"\"\"\n Compute fitness by counting the cost for each machine and determining the maximum from all\n :return: int\n \"\"\"\n cost = []\n for machine_index in range(self.machines):\n machine_cost = 0\n for task_index, task_line in enumerate(self.solution):\n if task_line[machine_index] == 1:\n machine_cost += 
self.problem.cost_matrix[task_index][machine_index]\n cost.append(machine_cost)\n return max(cost)\n\n def show_solution(self):\n \"\"\"\n Print the solution 'nicely'\n :return: void\n \"\"\"\n for task_index, task_line in enumerate(self.solution):\n for machine_index, usage_value in enumerate(task_line):\n if usage_value == 1:\n print(\"Task \" + str(task_index) + \" executed by machine \" + str(machine_index))\n\n def __gt__(self, other):\n return self.fitness() > other.fitness()\n","sub_path":"Processes (ACO)/Ant.py","file_name":"Ant.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"143340860","text":"class Problem:\n\n def __init__(self, cell_num, net_num):\n self.cell_num = cell_num\n self.net_num = net_num\n self.net_connect_cell_lists = [[] for n in range(cell_num)]\n self.cell_connect_net_lists = [[] for n in range(net_num)]\n self.cell_size_list = []\n self.lock_a_cell_set = set([])\n self.lock_b_cell_set = set([])\n self.block_size_ratio = 0\n self.block_a_min_cell_num_constraint = 0\n self.block_b_min_cell_num_constraint = 0\n\n\n def add_connection(self, cell, net):\n\n self.net_connect_cell_lists[cell - 1].append(net)\n self.cell_connect_net_lists[net - 1].append(cell)\n","sub_path":"problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"33069759","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Given a string s, find the longest palindromic substring in s. You may assume the maximum length of s is 1000.\n#\n# Example 1:\n#\n# Input: \"babad\"\n# Output: \"bab\"\n# Note: \"aba\" is also a valid answer.\n#\n#\n# Example 2:\n#\n# Input: \"cbbd\"\n# Output: \"bb\"\n#\n# Related Topics: string, dynamic programming\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n\nclass Solution(object):\n    # treat each letter as the center of a palindrome\n    def longestPalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: str\n        \"\"\"\n        n = len(s)\n        self.res = ''\n        def helper(i, j):\n            while i >= 0 and j < n and s[i] == s[j]:\n                i -= 1\n                j += 1\n            if len(self.res) < j - i - 1:\n                self.res = s[i + 1: j]\n        for i in range(n):\n            helper(i, i) # odd-length palindrome, e.g. usu, centred on s\n            helper(i, i + 1) # even-length palindrome, e.g. suus, centred on uu\n        return self.res\n    \n    # treat each letter as the end of a palindrome\n    def longestPalindrome2(self, s):\n        n = len(s)\n        max_str = ''\n        def helper(s, i, j):\n            n = len(s)\n            mid = (i + j) // 2\n            if i == j:\n                return True\n            while j >= 0 and i < n and s[i] == s[j] and j > i:\n                i += 1\n                j -= 1\n            if mid == i or mid == j:\n                return True\n            return False\n        \n        for i in range(n):\n            for j in range(i + 1, n):\n                if s[i] == s[j]:\n                    # print('i = {}, {}, j={}, {}'.format(i, s[i], j, s[j]))\n                    # check whether s[i:j+1] is a palindrome; if not, try the next pair, otherwise update the longest substring\n                    if helper(s, i, j):\n                        temp = s[i:j+1]\n                        # print('i = {}, {}, j={}, {}=={}'.format(i, s[i], j, s[j], temp))\n                        max_str = temp if len(temp) > len(max_str) else max_str\n        \n        return max_str\n\nsolution = Solution()\nstring = 'TTYYEPOPERYPT'\nx = solution.longestPalindrome(string)\nprint('字符串: {}中, 最长子串是: {}'.format(string, x))\n\nstring2 = 'TTYYEPOOOPERYPT'\nx2 = solution.longestPalindrome2(string2)\nprint('字符串: {}中, 最长子串是: {}'.format(string2, x2))\n\ns = [ 1, 2 ,3 ]\nprint('{}, {}'.format(s, s[::-1]))","sub_path":"LeetCode/05-最长的回文子串/05-最长的回文子串.py","file_name":"05-最长的回文子串.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"102705514","text":"from gym.envs.registration import register\nimport gym\n\nregister(\n id='DyCircleEnv-v0',\n entry_point='dygym.envs.robotics.circle:DyCircleEnv',\n max_episode_steps=200,\n kwargs={'velocity': 0.005})\n\nenv = gym.make(\"DyCircleEnv-v0\")\n\nfor i in range(10):\n env.reset()\n env.render()\n for i in range(200):\n action = env.action_space.sample()\n obs, reward, done, info = env.step(action)\n print(obs)\n env.render()\n","sub_path":"dygym/dygym/test/test_dycircle.py","file_name":"test_dycircle.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"312254256","text":"import pprint\nfrom utils import listdir_full_path, fetch_null_response_node\nfrom page_diff import get_page_number_diffs, get_pages\nfrom annotations import are_annotations_present, list_annotations_present\nfrom signature_detection import is_signature_present\nfrom markup_detection import is_markup_present\nimport json\nimport os\n\n\ndef do_the_thing(src_dir):\n \"\"\"\n Driver function\n :param src_dir: Source Directory which contains all files\n :return: Json response with all data\n \"\"\"\n\n files_to_read = listdir_full_path(src_dir)\n\n files_to_read = [file for file in files_to_read if file.endswith('.pdf')]\n\n if len(files_to_read) == 0:\n return fetch_null_response_node(\"No file found\")\n\n files_to_read.sort()\n\n page_count = get_pages(files_to_read)\n page_diff_flag = get_page_number_diffs(files_to_read)\n annotations_list = list_annotations_present(files_to_read)\n annotations_flag = are_annotations_present(files_to_read)\n signature_detected = is_signature_present(files_to_read)\n markup_present = is_markup_present(files_to_read)\n response = dict()\n\n for file in files_to_read:\n file_dict = dict()\n file_name = os.path.basename(file)\n file_dict[\"page_count\"] = page_count[file_name]\n file_dict[\"page_diff_flag\"] = page_diff_flag[file_name]\n file_dict[\"annotations_list\"] = annotations_list[file_name]\n file_dict[\"annotations_flag\"] = annotations_flag[file_name]\n file_dict[\"signature_present\"] = signature_detected[file_name]\n file_dict[\"markup_present\"] = markup_present[file_name]\n\n flag = page_diff_flag[file_name]\n\n if page_diff_flag[file_name] not in (True, False):\n flag = False\n\n if flag and annotations_flag[file_name] and markup_present[file_name]:\n file_dict[\"send_through\"] = True\n else:\n file_dict[\"send_through\"] = False\n\n response[file_name] = file_dict\n\n # pprint.pprint(json.dumps(response))\n pprint.pprint(response)\n\n # print('____________________________________________')\n # print(\"***********Page Diffs***********\")\n # pprint.pprint(get_page_number_diffs(files_to_read))\n\n # print('____________________________________________')\n # print(\"***********Annotations***********\")\n # pprint.pprint(are_annotations_present(files_to_read))\n\n # print('____________________________________________')\n # print(\"***********List Annotations***********\")\n # pprint.pprint(list_annotations_present(files_to_read))\n\n\n","sub_path":"julie.py","file_name":"julie.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"616301743","text":"import json\nfrom argparse import ArgumentParser\n\n\n\nif __name__ == \"__main__\" :\n parser = ArgumentParser()\n parser.add_argument(\"-j\", \"--json\", action=\"store\", type=str, required=True, help=\"json file path\")\n parser.add_argument(\"-m\", \"--model\", action=\"store\", type=str, required=True, help=\"model name\")\n\n with open(parser.parse_args().json, \"r\") as f :\n data = json.load(f)\n\n data = json.loads(data)\n for item in data :\n if item[\"model_name\"] == parser.parse_args().model :\n print (item)\n","sub_path":"hw3/read_record.py","file_name":"read_record.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"494102944","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport sys\n\nprint(\"creating final table ...\")\nout_folder, mark = sys.argv[1], sys.argv[2]\n\nann_uropa = pd.read_csv(\"{}/output_uropa/peaks_Annotation_allhits.bed\".format(out_folder), sep=\"\\t\", header=None)\n\nann_homer = pd.read_csv(\"{}/output_homer/{}_Homer_PeaksAnnotation.annot\".format(out_folder, mark), sep=\"\\t\")\nann_homer = ann_homer[[list(ann_homer)[0],list(ann_homer)[7]]]\nann_homer.columns = [\"PeakID\",\"Annotation_homer\"]\n\n\ndeac = pd.read_csv(\"{}/resLFC_normal.tsv\".format(out_folder), sep=\"\\t\")\ndeac[\"chr\"] = [i.split(\"_\")[0] for i in deac.index]\ndeac[\"start\"] = [int(i.split(\"_\")[1]) for i in deac.index]\ndeac[\"end\"] = [int(i.split(\"_\")[2]) for i in deac.index]\ndeac = deac[[\"chr\",\"start\",\"end\",\"baseMean\",\"log2FoldChange\",\"lfcSE\",\"pvalue\",\"padj\"]]\n\ntr = pd.read_csv(\"{}/TSS.bed\".format(out_folder),sep=\"\\t\", index_col=7, header=None)\ntr_tr = tr[3].to_dict()\ntr.index = tr[3]\ntr_genes = tr[6].to_dict()\n\n\ndixio_symbol=dict()\n\ngr = ann_uropa.groupby(3)\nfor i in set(ann_uropa[3].values):\n# ann.groupby(3).get_group(i)[15].tolist()\n dixio_symbol[i]=list(set(gr.get_group(i)[17].tolist()))\n\ndixio_ens=dict()\n\nfor i in set(ann_uropa[3].values):\n# ann.groupby(3).get_group(i)[15].tolist()\n dixio_ens[i]=list(set(gr.get_group(i)[15].tolist()))\n\n\ninde = []\ngggenes = []\nfor i,k in dixio_ens.items():\n# print(i,k)\n for j in k:\n# print(i,j)\n inde.append(i)\n gggenes.append(j)\n \nfinal_annot = pd.DataFrame({\"peakID\":inde,\"geneID\":gggenes}, index=inde)\n\nfinal_annot[\"symbol\"] = [tr_genes[i] if i in tr_genes else i for i in final_annot[\"geneID\"]]\n\nfinal_annot = final_annot.merge(deac, how=\"right\", left_index=True, right_index=True)\nfinal_annot = final_annot.merge(ann_homer, how=\"right\", left_index=True, right_on=\"PeakID\")\nfinal_annot[\"homer_genes\"] = [str(i).split(\"(\")[1].split(\")\")[0].split(\",\")[0] if len(str(i).split(\"(\"))>=2 else i for i in final_annot['Annotation_homer']]\nfinal_annot[\"homer_genes\"] = [tr_tr[i] if i in tr_tr else i for i in final_annot[\"homer_genes\"]]\nfinal_annot[\"Annotation_homer\"] = [str(i).split(\"(\")[0].strip() for i in final_annot[\"Annotation_homer\"]]\nfinal_annot.sort_values(\"padj\", ascending=True, inplace=True)\n\nfinal_annot = final_annot[[\"chr\",\"start\",\"end\",\"peakID\",\"baseMean\",\"log2FoldChange\",\"pvalue\",\"padj\",\"geneID\",\"symbol\",\"Annotation_homer\",\"homer_genes\"]]\n\nfinal_annot.to_csv(\"{}/REFERENCE_TABLE_{}.bed\".format(out_folder, mark), sep=\"\\t\", index=False)\n#final_annot.to_excel(\"{}/REFERENCE_TABLE_{}.xls\".format(out_folder, mark))\n\n","sub_path":"DiffPeaks_pipe_DESeq2/create_final_table.py","file_name":"create_final_table.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"134756325","text":"import math\nimport numpy as np\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nclass MountainCarEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.counter = 0\n self.min_position = -1.2\n self.max_position = 0.6\n\n self.min_position_y = 0.35\n self.max_position_y = 0.7\n\n self.max_speed = 0.02 # need to multiply by 50\n self.goal_position = 0\n self.goal_position_y = 0.25\n self.goal_velocity = 0\n\n self.record_x = -10\n self.record_y = -10\n\n self.prev_rel_y = 100\n\n self.low = np.array([self.min_position, -self.max_speed])\n self.high = np.array([self.max_position, self.max_speed])\n\n self.viewer = None\n self.action_space = spaces.Discrete(9)\n\n self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)\n\n self.seed()\n self.reset()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n self.counter += 1\n\n if self.counter == 5000:\n self.counter = 0\n reward = -10000\n done = 1\n return np.array(self.state), reward, done, {}\n\n #assert self.action_space.contains(action), \"%r (%s) invalid\" % (action, type(action))\n #assert self.action_space_theta.contains(action_theta), \"%r (%s) invalid\" % (action_theta, type(action_theta))\n\n position, position_y, velocity, theta, goal_position, goal_position_y = self.state\n\n # nine different actions\n if action == 0:\n d_velocity = -1\n d_theta = -1\n elif action == 1:\n d_velocity = 0\n d_theta = -1\n elif action == 2:\n d_velocity = 1\n d_theta = -1\n elif action == 3:\n d_velocity = -1\n d_theta = 0\n elif action == 4:\n d_velocity = 0\n d_theta = 0\n elif action == 5:\n d_velocity = 1\n d_theta = 0\n elif action == 6:\n d_velocity = -1\n d_theta = 1\n elif action == 7:\n d_velocity = 0\n d_theta = 1\n elif action == 8:\n d_velocity = 1\n d_theta = 1\n\n\n velocity += d_velocity*0.001\n velocity = np.clip(velocity, -self.max_speed, self.max_speed)\n\n theta += (d_theta)*(math.pi/180)\n\n position += velocity * math.cos(theta)\n position = np.clip(position, self.min_position, self.max_position)\n\n if (position==self.min_position and velocity<0): velocity = 0\n\n position_y += velocity * math.sin(theta)\n position_y = np.clip(position_y, self.min_position_y, self.max_position_y)\n\n # threshold\n thr_position = 0.2\n thr_position_y_up = 0.15\n thr_position_y_down = 0.11\n thr_velocity = 0.03 # abs(velocity) ~ [0.0 ~ 0.005]\n\n done_pos = bool((position >= self.goal_position-thr_position) & (position <= self.goal_position+thr_position))\n done_pos_y = bool((position_y >= self.goal_position_y-thr_position_y_down) & (position_y <= self.goal_position_y+(thr_position_y_up)))\n\n done = done_pos# & done_pos_y# & done_vel\n\n if done:\n self.counter = 0\n\n rel_y = abs(self.goal_position_y - position_y)\n\n if rel_y < self.prev_rel_y:\n reward = 0\n else:\n reward = -1 - abs(self.goal_position_y - position_y)*50 #- abs(rel_position_y)*100\n\n self.state = (position, position_y, velocity, theta, self.goal_position, self.goal_position_y)\n self.prev_rel_y = rel_y\n return np.array(self.state), reward, done, {}\n\n def reset(self):\n x = -1.0#self.np_random.uniform(low=-1.0, high=-0.8)\n y = self.np_random.uniform(low=0.30, high=0.75)\n self.state = np.array([x, y, 0, 0, self.goal_position, self.goal_position_y])\n return np.array(self.state)\n\n def _height(self, xs):\n return xs*0+0.5\n\n def render(self, 
mode='human'):\n screen_width = 600\n screen_height = 400\n\n world_width = self.max_position - self.min_position\n scale = screen_width/world_width\n carwidth=40\n carheight=20\n\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n\n clearance = 10\n\n l,r,t,b = -carwidth/2, carwidth/2, carheight, 0\n car = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n car.add_attr(rendering.Transform(translation=(0, clearance)))\n self.cartrans = rendering.Transform()\n car.add_attr(self.cartrans)\n self.viewer.add_geom(car)\n\n driver = rendering.make_circle(carheight/2.5)\n driver.set_color(.8,.8,0)\n driver.add_attr(rendering.Transform(translation=(-carwidth/4,clearance+10)))\n driver.add_attr(self.cartrans)\n self.viewer.add_geom(driver)\n\n z,x,c,v = l/3, r/3, t/3, b/3\n topfront_wheel = rendering.FilledPolygon([(z,v), (z,c), (x,c), (x,v)])\n topfront_wheel.set_color(.5, .5, .5)\n topfront_wheel.add_attr(rendering.Transform(translation=(carwidth/4,clearance+carwidth/2)))\n topfront_wheel.add_attr(self.cartrans)\n self.viewer.add_geom(topfront_wheel)\n\n toprear_wheel = rendering.FilledPolygon([(z,v), (z,c), (x,c), (x,v)])\n toprear_wheel.set_color(.5, .5, .5)\n toprear_wheel.add_attr(rendering.Transform(translation=(-carwidth/4-3,clearance+carwidth/2)))\n toprear_wheel.add_attr(self.cartrans)\n self.viewer.add_geom(toprear_wheel)\n\n bottomrear_wheel = rendering.FilledPolygon([(z,v), (z,c), (x,c), (x,v)])\n bottomrear_wheel.add_attr(rendering.Transform(translation=(-carwidth/4-3,clearance-6)))\n bottomrear_wheel.add_attr(self.cartrans)\n bottomrear_wheel.set_color(.5, .5, .5)\n self.viewer.add_geom(bottomrear_wheel)\n\n bottomfront_wheel = rendering.FilledPolygon([(z,v), (z,c), (x,c), (x,v)])\n bottomfront_wheel.add_attr(rendering.Transform(translation=(carwidth/4,clearance-6)))\n bottomfront_wheel.add_attr(self.cartrans)\n bottomfront_wheel.set_color(.5, .5, .5)\n self.viewer.add_geom(bottomfront_wheel)\n\n flagx = (self.goal_position-self.min_position)*scale\n flagy1 = self._height(self.goal_position)*scale\n flagy2 = flagy1+50\n flagpole = rendering.Line((flagx, flagy1+15), (flagx, flagy2))\n self.viewer.add_geom(flagpole)\n flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2-10), (flagx+25, flagy2-5)])\n flag.set_color(.8,.3,0)\n self.viewer.add_geom(flag)\n\n road_top = rendering.Line((self.min_position*scale, self.max_position_y*scale), (self.max_position*scale*3, self.max_position_y*scale))\n self.viewer.add_geom(road_top)\n road_bot = rendering.Line((self.min_position*scale, self.min_position_y*scale), (self.max_position*scale*3, self.min_position_y*scale))\n self.viewer.add_geom(road_bot)\n road_top = rendering.Line((self.min_position*scale, self.max_position_y*scale), (self.max_position*scale*3, self.max_position_y*scale))\n self.viewer.add_geom(road_top)\n road_bot = rendering.Line((self.min_position*scale, self.min_position_y*scale), (self.max_position*scale*3, self.min_position_y*scale))\n self.viewer.add_geom(road_bot)\n\n pos_x = self.state[0]\n pos_y = self.state[1]\n vel = self.state[2]\n theta = self.state[3]\n\n self.cartrans.set_translation((pos_x-self.min_position)*scale, pos_y*scale)\n self.cartrans.set_rotation(theta)\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = 
None\n","sub_path":"carRL_env.py","file_name":"carRL_env.py","file_ext":"py","file_size_in_byte":8199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"579427814","text":"\"\"\" Process OMDB-extracted data \"\"\"\n\nfrom __future__ import division\nfrom collections import defaultdict, namedtuple\nfrom datetime import datetime\nimport json\nimport logging\nimport os\n\nfrom bs4 import BeautifulSoup\nfrom bs4.element import NavigableString\nimport pandas as pd\nfrom textblob import TextBlob\nfrom tqdm import tqdm\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nReviewBase = namedtuple('ReviewBase', ['id', 'title', 'date', 'votes', 'total',\n 'text'])\n\nMOVIE_IDS = json.load(open('movie_ids.json', 'rb'))\nCEREMONIES = json.load(open('../data/ceremonies.json', 'rb'))\n\n\nclass Review(ReviewBase):\n def sentiment(self):\n tb = TextBlob(self.text)\n return tb.sentiment.polarity\n\n\nclass Imdb(object):\n def __init__(self, cache_folder, select=True):\n self.cache_folder = cache_folder\n self.select = select\n\n def preprocess(self):\n path = self.cache_folder + 'imdb.csv'\n with open(path, 'rb') as fi:\n data = pd.read_csv(fi)\n\n imdbVotes = data.ix[:, 'imdbVotes']\n imdbVotes = imdbVotes.apply(lambda x: int(x.replace(',', '')))\n data.ix[:, 'imdbVotes'] = imdbVotes\n return data\n\n\nclass ImdbReviews(object):\n\n title_to_id = {v: k for k, v in MOVIE_IDS.items()}\n title_to_id[\"Precious\"] = u\"472\"\n\n def __init__(self, cache_folder, imdb_df):\n self.cache_folder = cache_folder\n self.imdb_df = imdb_df\n\n def preprocess(self, weighted=True):\n data = defaultdict(lambda: defaultdict(list))\n feats = defaultdict(list)\n for root, dirs, fns in os.walk(self.cache_folder):\n logger.info('Processing: %s' % root)\n for fn in tqdm(fns):\n if os.path.splitext(fn)[1] != '.html':\n continue\n for id, r in self.get_reviews(os.path.join(root, fn)):\n data[id]['sentiment'].append(r.sentiment())\n data[id]['support'].append(r.votes + 1)\n data[id]['total'].append(r.total + 1)\n for id, d in data.items():\n feats['id'].append(int(id))\n if weighted:\n numerator = 0\n denominator = sum(d['total'])\n logger.info('movie id: %s sums: (%d, %d): '\n % (id, sum(d['support']), denominator))\n for support, sentiment in zip(d['support'], d['sentiment']):\n numerator += support * sentiment\n score = numerator / denominator\n else:\n score = sum(d['sentiment']) / len(d['sentiment'])\n feats['review_sentiment'].append(score)\n feats = pd.DataFrame(feats)\n return feats\n\n def get_reviews(self, path):\n logger.debug('Parsing: %s' % path)\n soup = BeautifulSoup(open(path, 'rb'), 'lxml')\n movie = soup.find(id='tn15title').h1.a.text\n id = self.title_to_id[movie]\n tn15 = soup.find(id='tn15content')\n\n release_date = self.imdb_df[self.imdb_df['id'] == int(id)]['Released']\n release_date = release_date.values[0]\n release_date = datetime.strptime(release_date, \"%d %b %Y\").date()\n yr = release_date.year\n if yr == 2016:\n yr = 2015\n cer_date = CEREMONIES[str(yr)]\n cer_date = datetime.strptime(cer_date, \"%d-%b-%y\").date()\n\n for c in tn15.children:\n if isinstance(c, NavigableString):\n continue\n if c.name == 'div' and c.small:\n title = c.h2.text\n small_tags = c.find_all('small')\n s1 = small_tags[0]\n if s1.find_next().find_next().find_next().name == 'img':\n header = c.small.text.split()\n votes, total = int(header[0]), int(header[3])\n else:\n votes, total = 0, 0\n date = small_tags[-1].text\n date = datetime.strptime(date, '%d %B %Y').date()\n if date >= cer_date:\n continue\n body = c.next_sibling.next_sibling.text\n r = Review(id, title, date, votes, total, body)\n yield id, 
r\n","sub_path":"src/preprocess/imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"65412398","text":"import cv2\n\nprint(cv2.ocl.haveOpenCL())\nprint(cv2.ocl.useOpenCL())\ncv2.ocl.setUseOpenCL(True)\nprint(cv2.ocl.useOpenCL())\n\nimg = cv2.UMat(cv2.imread(\"images/cat.jpg\", cv2.IMREAD_COLOR))\nimgUMat = cv2.UMat(img)\ngray = cv2.cvtColor(imgUMat, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (7, 7), 1.5)\ngray = cv2.Canny(gray, 0, 50)\n\ncv2.imshow(\"edges\", gray)\ncv2.waitKey();\n","sub_path":"test-ocl.py","file_name":"test-ocl.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"184240967","text":"# Keras\nfrom keras import backend as K\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv1D, MaxPooling1D, Dropout, CuDNNLSTM, SimpleRNN\nfrom keras.layers.embeddings import Embedding\n\n\n# NLTK\n# import nltk\n# from nltk.corpus import stopwords\n# from nltk.stem import SnowballStemmer\n\nimport numpy as np\nimport pandas as pd\n\nimport os\nos.environ['MKL_NUM_THREADS'] = '12'\nos.environ['GOTO_NUM_THREADS'] = '12'\nos.environ['OMP_NUM_THREADS'] = '12'\nos.environ['openmp'] = 'True'\n\n\nfile = '/home/tianyi/Desktop/yelp/yelp_dataset/yelp_academic_dataset_review.csv'\n\ndf = pd.read_csv(file, usecols=['stars', 'text'], error_bad_lines=False)\n\n# df = df.iloc[1:500]\n\ndf = df.dropna()\ndf = df[df.text.apply(lambda x: x != \"\")]\ndf = df[df.stars.apply(lambda x: x != \"\")]\n\ndf.head(50)\n\ndf.groupby('stars').count()\n\nlabels = df['stars'].map(lambda x: 1 if int(x) > 3 else 0)\n\nK.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=12, inter_op_parallelism_threads=12)))\n\nvocabulary_size = 1000\ntokenizer = Tokenizer(num_words=vocabulary_size)\ntokenizer.fit_on_texts(df['text'])\n\nsequences = tokenizer.texts_to_sequences(df['text'])\ndata = pad_sequences(sequences, maxlen=50)\n\nprint(data.shape)\n\n# RNN\nmodel_rnn = Sequential()\nmodel_rnn.add(Embedding(vocabulary_size, 100, input_length=50))\nmodel_rnn.add(SimpleRNN(100))\nmodel_rnn.add(Dense(1, activation='sigmoid'))\nmodel_rnn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel_rnn.fit(data, np.array(labels), validation_split=0.4, epochs=2)\n\n\n# RNN + LSTM\nmodel_lstm = Sequential()\nmodel_lstm.add(Embedding(vocabulary_size, 100, input_length=50))\nmodel_lstm.add(CuDNNLSTM(100))\nmodel_lstm.add(Dense(1, activation='sigmoid'))\nmodel_lstm.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel_lstm.fit(data, np.array(labels), validation_split=0.4, epochs=2)\n\n\n# RNN + CNN + LSTM\ndef create_conv_model():\n model_conv = Sequential()\n model_conv.add(Embedding(vocabulary_size, 100, input_length=50))\n model_conv.add(Dropout(0.2))\n model_conv.add(Conv1D(64, 5, activation='relu'))\n model_conv.add(MaxPooling1D(pool_size=4))\n model_conv.add(CuDNNLSTM(100))\n model_conv.add(Dense(1, activation='sigmoid'))\n model_conv.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model_conv\n\nmodel_conv = create_conv_model()\nmodel_conv.fit(data, np.array(labels), validation_split=0.4, epochs = 2)\n","sub_path":"LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"368725415","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Bengio2003(nn.Module):\n def __init__(self, vocab_size, embedding_dim, context_size, ngram_size):\n super(Bengio2003, self).__init__()\n self.C = nn.Embedding(vocab_size, embedding_dim)\n self.g = nn.Linear(embedding_dim * context_size, vocab_size)\n self.softmax = nn.Softmax()\n self.ngram_size = ngram_size\n \n def forward(self, inputs):\n out = self.C(inputs)\n out = torch.cat(tuple([out[i] for i in range(0, self.ngram_size)]), 1)\n out = self.g(torch.tanh(out))\n return F.log_softmax(out, dim=1)","sub_path":"language_models/bengio_2003.py","file_name":"bengio_2003.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"332237556","text":"# Danijel Klarin, 6.2.2018.\r\n# vježba 12, zadatak 1\r\n\r\n'''Napišite program vjezba12_zd01.py koji od korisnika traži unos\r\n cijelog broja n većeg od 0 (potrebno je napraviti provjeru), te\r\n generira i ispisuje listu od n nasumičnih (random) prirodnih\r\n brojeva između 1 i 100.\r\n\r\n Potom treba prebrojati i ispisati koliko u toj listi ima parnih, a\r\n koliko neparnih brojeva.\r\n\r\n Npr. za n = 5 jedan mogući ispis bi bio:\r\n\r\n Polazna_lista = [6, 31, 75, 3, 42]\r\n U listi ima 2 parnih i 3 neparnih brojeva.'''\r\n\r\nfrom random import *\r\n\r\nn = int(input('Unesi n: '))\r\n\r\nif n > 0:\r\n\r\n lista = []\r\n parni = neparni = 0\r\n for i in range(n):\r\n brojevi = randint(1, 100)\r\n if brojevi % 2 == 0:\r\n parni += 1\r\n else:\r\n neparni += 1\r\n lista.append(brojevi)\r\n print('Polazna_lista =', lista)\r\n print('U listi ima {} parnih i {} neparnih brojeva.'\r\n .format(parni, neparni))\r\n\r\nelse:\r\n print('Broj je manji od 0')\r\n \r\n \r\n","sub_path":"Vjezba 12 - Priprema za kolokvij II/vjezba12_zd01.py","file_name":"vjezba12_zd01.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"471843869","text":"from PyQt5 import QtCore, QtGui\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import QTableView\n\nfrom classes.bb_converts import *\nimport datetime\n\n\nclass MyTableModel(QtCore.QAbstractTableModel):\n need_save = pyqtSignal()\n need_edit = pyqtSignal()\n sort_col = 0\n\n def __init__(self, head, data=[[]], editable=False, date_col=8):\n super(MyTableModel, self).__init__()\n self.data = data\n self.head = head\n self.editable = editable\n self.date_col = date_col\n self.current_index = (-1, -1)\n\n def headerData(self, section: int, orientation: Qt.Orientation, role: int):\n if role == QtCore.Qt.DisplayRole:\n if orientation == Qt.Horizontal:\n return self.head[section]\n else:\n return ''\n if role == Qt.BackgroundColorRole: # BackgroundRole:\n # See below for the data structure.\n return QtGui.QColor('#c0f0f0')\n if role == Qt.InitialSortOrderRole:\n self.beginResetModel()\n if self.sort_col == section:\n self.data.sort(key=lambda i: i[section], reverse=True)\n self.sort_col = -1\n else:\n self.data.sort(key=lambda i: i[section])\n self.sort_col = section\n self.endResetModel()\n return\n # if role not in [4]:\n # print(section, orientation, role)\n\n def columnCount(self, parent=None):\n return len(self.head)\n\n def rowCount(self, parent=None):\n return len(self.data)\n\n def data(self, index, role):\n ret = None\n if len(self.data[0]) > 0:\n row = index.row()\n col = index.column()\n ret = self.data[row][col]\n if col == self.date_col:\n ret = date_us_ru(ret)\n else:\n ret = ' '\n if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:\n if ret is None:\n return \"\"\n else:\n return str(ret)\n if role == Qt.TextAlignmentRole:\n if isinstance(ret, int) or isinstance(ret, float):\n # Align right, vertical middle.\n return Qt.AlignVCenter + Qt.AlignRight\n if role == Qt.BackgroundRole and index.row() % 2:\n # See below for the data structure.\n return QtGui.QColor('#f0fcfc')\n\n def setData(self, index, value, role): # !!!\n if role == Qt.EditRole:\n if index.column() > 0:\n if index.column() == self.date_col:\n value = date_ru_us(value)\n self.data[index.row()][index.column()] = str(value).strip(' \\n.').replace(':', '-')\n self.current_index = (index.row(), index.column())\n self.need_save.emit()\n return True\n return False\n\n def flags(self, index): # !!!\n if self.editable and index.column() in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n return Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable\n else:\n # if index.column() == 0:\n # self.need_edit.emit()\n # self.current_index = (0, 0)\n return Qt.ItemIsSelectable | Qt.ItemIsEnabled\n","sub_path":"classes/qt_classes.py","file_name":"qt_classes.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"449766556","text":"#!/usr/bin/env python\n\nimport sys\nimport asyncio\nimport websockets\nimport json\nimport time\nimport math\nimport random\nimport logging\n\nlogging.basicConfig(\n filename=\"app.log\",\n level=logging.WARNING,\n format=\"%(asctime)s %(levelname)s %(name)s %(message)s\",\n)\nlogger = logging.getLogger(__name__)\n\nSENSOR_FREQ = int(sys.argv[2])\nALERT_FREQ = int(sys.argv[3])\n\nCLIENTS = set()\n\nparameters = {\n \"rr\": 0,\n \"o2\": 0,\n \"peep\": 0,\n \"tv\": 0,\n}\n\npatient = {\n \"name\": \"\",\n \"medical_history_number\": \"\",\n \"bed_number\": \"\",\n \"atention_number\": \"\",\n}\n\nalerts = {\n \"volume\": [0, 0],\n \"flow\": [0, 0],\n \"presure\": [0, 0],\n}\n\nconfig_versions = [time.time()]\n\nrandom_data = {\n \"parameters\": {\n \"rr\": 0,\n \"o2\": 0,\n \"peep\": 0,\n \"tv\": 0,\n },\n \"alert_id\": 0,\n}\n\n\nasync def manageConn(websocket, path):\n CLIENTS.add(websocket)\n await asyncio.wait([\n respondEvents(websocket, path),\n streamData(websocket, path),\n pushConfig(websocket, path),\n ])\n CLIENTS.remove(websocket)\n\n\nasync def alertGenerator():\n global random_data\n while True:\n random_data[\"alert_id\"] += 1\n for cli in CLIENTS:\n await pushAlert(cli)\n await asyncio.sleep(ALERT_FREQ)\n\n\nasync def sensorGenerator():\n global parameters\n global random_data\n while True:\n for parameter in random_data[\"parameters\"]:\n random_data[\"parameters\"][parameter] = (\n float(parameters[parameter]) + random.random())\n for cli in CLIENTS:\n await pushSensor(cli)\n await asyncio.sleep(SENSOR_FREQ)\n\n\nasync def pushSensor(websocket):\n global random_data\n try:\n data = {\n \"t\": 3,\n \"type\": \"sensors_push\",\n \"data\": random_data[\"parameters\"],\n \"ts\": str(time.time()),\n \"token\": \"10293848129381038109238019380911\",\n }\n data = json.dumps(data)\n await websocket.send(data)\n # print(\"> \" + data)\n except:\n return\n\n\nasync def pushConfig(websocket, path):\n global config_versions\n checker = 0\n while True:\n try:\n if checker < config_versions[0]: # todo: ver\n models = [\"parameters\", \"patient\", \"alerts\"]\n for model in models:\n data = {\n \"t\": 2,\n \"type\": \"push\",\n \"model\": model,\n \"data\": globals()[model],\n \"ts\": str(time.time()),\n \"token\": \"10293848129381038109238019380912\",\n }\n\n await websocket.send(json.dumps(data))\n # print(\"> \" + data)\n checker = config_versions[0]\n await asyncio.sleep(1)\n except:\n break\n\n\nasync def pushAlert(websocket):\n global random_data\n try:\n data = {\n \"t\": 1,\n \"type\": \"alert\",\n \"message\": \"Alerta de prueba \" + str(random_data[\"alert_id\"]),\n \"severity\": 1,\n }\n await websocket.send(json.dumps(data))\n # print(\"> \" + data)\n except:\n return\n\n\nasync def streamData(websocket, path):\n while True:\n try:\n factor = 20\n current_time = time.time()\n data = {\n \"t\": 0,\n \"type\": \"stream\",\n \"data\": {\n \"volume\": str(factor + factor * math.sin(current_time)),\n \"presure\": str(factor + factor * math.cos(current_time)),\n \"flow\": str(factor + factor * math.sin(2 * current_time)),\n },\n }\n\n result = json.dumps(data)\n await websocket.send(result)\n await asyncio.sleep(0.1)\n # print(f\"> {result}\")\n except:\n break\n\n\nasync def respondEvents(websocket, path):\n # global parameters\n global config_versions\n while True:\n try:\n request = await websocket.recv()\n request = json.loads(request)\n print(f\"< {request}\")\n response = \"\"\n model = request[\"model\"]\n\n if request[\"action\"] == \"set\":\n 
response = {\n \"id\": str(request[\"id\"]),\n \"status\": 200,\n \"token\": \"10293848129381038109238019380913\",\n }\n globals()[model] = request[\"data\"]\n config_versions[0] = time.time()\n print(\"new_config= \" + str(config_versions[0]))\n elif request[\"action\"] == \"get\":\n response = {\n \"id\": str(request[\"id\"]),\n \"status\": 300,\n \"model\": model,\n \"data\": globals()[model],\n \"token\": \"10293848129381038109238019380914\",\n }\n else:\n response = {\n \"id\": str(request[\"id\"]),\n \"status\": -1,\n \"token\": \"10293848129381038109238019380915\",\n }\n\n response = json.dumps(response)\n await websocket.send(response)\n print(f\"> {response}\")\n except ZeroDivisionError as err:\n logger.error(err)\n print(\"Connection closed\")\n break\n\n\nstart_server = websockets.serve(manageConn, \"0.0.0.0\", int(sys.argv[1]))\n\nasyncio.get_event_loop().run_until_complete(start_server)\nl = asyncio.get_event_loop()\nl.create_task(alertGenerator())\nl.create_task(sensorGenerator())\nl.run_forever()\n","sub_path":"server-simulador.py","file_name":"server-simulador.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"309911850","text":"import pylab\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#import data\n\nthetc = np.loadtxt('run_thetc.dat').T\nthetc = thetc*180/3.14159\n\nphic = np.loadtxt('run_phic.dat').T\nphic = phic*180/3.14159\n\nthetl = np.loadtxt('run_thetl.dat').T\nthetl = thetl*180/3.14159\n\nphil = np.loadtxt('run_phil.dat').T\nphil = phil*180/3.14159\n\nthetr = np.loadtxt('run_thetr.dat').T\nthetr = thetr*180/3.14159\n\nphir = np.loadtxt('run_phir.dat').T\nphir = phir*180/3.14159\n\n\n\n#do some math\n\nphilp=-1*phil\nphirp=-1*phir\nthetlp=thetl-2*(thetl-90)\nthetrp=thetr-2*(thetr-90)\n\n\n\n#throw down some plots\n\nplt.plot(phil, thetl, 'bo', markersize=10, alpha=0.1)\nplt.plot(phir, thetr, 'yo', markersize=10, alpha=0.1)\n\n\nplt.show()\n#pylab.savefig('scatters.png')\n\n","sub_path":"simulations/comgeant/chris_moller_files/scatters.py","file_name":"scatters.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"172014221","text":"#!/usr/bin/env python\n\"\"\"A short and simple example experiment with restarts.\n\nThe code is fully functional but mainly emphasises on readability.\nHence it neither produces any progress messages (which can be very\nannoying for long experiments) nor provides batch distribution,\nas `example_experiment.py` does.\n\nTo apply the code to a different solver, `fmin` must be re-assigned or\nre-defined accordingly. For example, using `cma.fmin` instead of\n`scipy.optimize.fmin` can be done like::\n\n import cma\n def fmin(fun, x0):\n return cma.fmin(fun, x0, 2, {'verbose':-9})\n\n\"\"\"\nfrom __future__ import division, print_function\nimport cocoex, cocopp # experimentation and post-processing modules\nimport scipy.optimize # to define the solver to be benchmarked\nfrom numpy.random import rand # for randomised restarts\nimport os, webbrowser # to show post-processed results in the browser\nimport solvers\n\n### input\nsuite_name = \"bbob\"\noutput_folder = \"PSO_40D_1000it\"\nfmin = scipy.optimize.fmin\nbudget_multiplier = 1 # increase to 10, 100, ...\n\n# fmin is re-defined to call our solver\ndef fmin(fun, lbounds, ubounds, dim, budget):\n result = solvers.pso(50, fun, lbounds, ubounds, dim, 1000)\n return result\n\n### prepare\nsuite = cocoex.Suite(suite_name, \"\", \"function_indices:20,21,22,24 dimensions:40 instance_indices:1-15\")\nobserver = cocoex.Observer(suite_name, \"result_folder: \" + output_folder)\nminimal_print = cocoex.utilities.MiniPrint()\n\n### go\nfor problem in suite: # this loop will take several minutes or longer\n problem.observe_with(observer) # generates the data for cocopp post-processing\n x0 = problem.initial_solution\n # apply restarts while neither the problem is solved nor the budget is exhausted\n while (problem.evaluations < problem.dimension * budget_multiplier\n and not problem.final_target_hit):\n fmin(problem, problem.lower_bounds, problem.upper_bounds, \n problem.dimension, problem.dimension * budget_multiplier) # here we assume that `fmin` evaluates the final/returned solution\n x0 = problem.lower_bounds + ((rand(problem.dimension) + rand(problem.dimension)) *\n (problem.upper_bounds - problem.lower_bounds) / 2)\n minimal_print(problem, final=problem.index == len(suite) - 1)\n\n### post-process data\ncocopp.main(observer.result_folder) # re-run folders look like \"...-001\" etc\nwebbrowser.open(\"file://\" + os.getcwd() + \"/ppdata/index.html\")\n\n","sub_path":"bbob/example_experiment_for_beginners_pso.py","file_name":"example_experiment_for_beginners_pso.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"399369360","text":"\nfrom peewee import *\nfrom datetime import date\n\ndb = SqliteDatabase('cozzy.db')\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\nclass Number(BaseModel):\n label = CharField()\n\nclass Person(BaseModel):\n last_name = CharField(null = True, max_length = 64)\n first_name = CharField(null = True, max_length = 32)\n middle_name = CharField(null = True, max_length = 32)\n date_of_birth = DateField(null = True)\n contact_full_name = CharField(null = True)\n\nclass Booking(BaseModel):\n number = ForeignKeyField(Number, related_name = \"reservations\")\n person = ForeignKeyField(Person, related_name = \"reservations\")\n start = DateField()\n finish = DateField()\n\ndb.connect()\n\ntables = [Number, Person, Booking]\n\ndb.drop_tables(tables, safe = True)\ndb.create_tables(tables)\n\n# ---\npersons = [\n {'last_name': 'Иванов', 'first_name': 'Иван'},\n {'last_name': 'Петров', 'first_name': 'Петр'},\n {'last_name': 'Сидоров', 'first_name': 'Сидр'},\n]\n\nnumbers = [\n {'label': '11'},\n {'label': '12'},\n {'label': '13'}\n]\n\n\nwith db.atomic():\n Person.insert_many(persons).execute()\n Number.insert_many(numbers).execute()\n\ndb.close()\n","sub_path":"package/db_scripts/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"32318983","text":"#!/usr/bin/env python\n# Copyright 2010 Google Inc. All Rights Reserved.\n#\n\n\"\"\"Top level __init__ for admin package.\"\"\"\n\n\n\n\nimport collections\nimport datetime\nimport logging\nimport os\nimport re\nimport urllib\n\nimport webapp2\n\nfrom google.appengine.ext.webapp import template\n\nfrom simian import settings\nfrom simian.mac.admin import xsrf\nfrom simian.mac.common import auth\n\n\n\nQUERY_LIMITS = [25, 50, 100, 250, 500, 1000, 2000]\n\nDEFAULT_COMPUTER_FETCH_LIMIT = 25\n\n\nclass Error(Exception):\n \"\"\"Base Error.\"\"\"\n\n\ndef GetMenu():\n \"\"\"Returns an OrderedDict with menu contents.\"\"\"\n menu = collections.OrderedDict()\n menu_items = [\n {'type': 'summary', 'url': '/admin', 'name': 'Summary'},\n\n {'type': 'search', 'url': 'javascript:simian.showSearch(); void(0);',\n 'name': 'Search'},\n\n {'type': 'munki_packages', 'name': 'Munki Packages', 'subitems': [\n {'type': 'packages', 'url': '/admin/packages',\n 'name': 'Package Admin'},\n {'type': 'proposals', 'url': '/admin/proposals',\n 'name': 'Pending Proposals'},\n {'type': 'package_logs', 'url': '/admin/packages/logs',\n 'name': 'Package Logs'},\n {'type': 'proposal_logs', 'url': '/admin/proposals/logs',\n 'name': 'Proposal Logs'},\n {'type': 'packages_historical',\n 'url': '/admin/packages?historical=1', 'name': 'Historical List'},\n {'type': 'packages_installs', 'url': '/admin/installs',\n 'name': 'Installs'},\n {'type': 'packages_failures',\n 'url': '/admin/installs?failures=1', 'name': 'Failures'},\n {'type': 'packages_problems', 'url': '/admin/installproblems',\n 'name': 'Other Install Problems'}\n ]},\n\n {'type': 'apple_updates', 'name': 'Apple Updates', 'subitems': [\n {'type': 'apple_applesus', 'url': '/admin/applesus',\n 'name': 'Catalog Admin'},\n {'type': 'apple_logs', 'url': '/admin/applesus/logs',\n 'name': 'Logs'},\n {'type': 'apple_historical', 'url': '/admin/packages?applesus=1',\n 'name': 'Historical List'},\n {'type': 'apple_installs', 'url': '/admin/installs?applesus=1',\n 'name': 'Installs'},\n {'type': 'apple_failures',\n 'url': '/admin/installs?applesus=1&failures=1',\n 'name': 'Failures'}\n ]},\n\n {'type': 'manifests', 'name': 'Manifests', 'subitems': [\n {'type': 'manifests_admin', 'url': '/admin/manifest_modifications',\n 'name': 'Modification Admin'},\n {'type': 'manifests_aliases', 'url': '/admin/package_alias',\n 'name': 'Package Aliases'},\n {'type': 'manifest_stable', 'url': '/admin/manifest/stable',\n 'name': 'View Stable'},\n {'type': 'manifest_testing', 'url': '/admin/manifest/testing',\n 'name': 'View Testing'},\n {'type': 'manifest_unstable', 'url': '/admin/manifest/unstable',\n 'name': 'View Unstable'}\n ]},\n\n {'type': 'admin_tools', 'name': 'Admin Tools', 'admin_only': True,\n 'subitems': [\n {'type': 'acl_groups', 'url': '/admin/acl_groups',\n 'name': 'ACL Groups'},\n {'type': 'config', 'url': '/admin/config',\n 'name': 'Configuration'},\n {'type': 'ip_blacklist', 'url': '/admin/ip_blacklist',\n 'name': 'IP Blacklist'},\n {'type': 'lock_admin', 'url': '/admin/lock_admin',\n 'name': 'Lock Admin'},\n {'type': 'release_report', 'url': '/admin/release_report',\n 'name': 'Release Report'},\n {'type': 'panic', 'url': '/admin/panic', 'name': 'Panic Mode'}\n ]},\n\n {'type': 'tags', 'url': '/admin/tags', 'name': 'Tags'},\n\n {'title': 'Client Reports'},\n\n {'type': 'broken_clients', 'url': '/admin/brokenclients',\n 'name': 'Broken Clients'},\n {'type': 'diskfree', 'url': '/admin/diskfree', 'name': 'Low Disk Space'},\n {'type': 'uptime', 
'url': '/admin/uptime', 'name': 'Long Uptime'},\n {'type': 'offcorp', 'url': '/admin/offcorp', 'name': 'Longest Off Corp'},\n {'type': 'msu_gui_logs', 'url': '/admin/msulogsummary',\n 'name': 'MSU GUI Logs'},\n {'type': 'preflight_exits', 'url': '/admin/preflightexits',\n 'name': 'Preflight Exits'},\n {'type': 'usersettings_knobs', 'url': '/admin/user_settings',\n 'name': 'UserSettings Knobs'}\n ]\n for item in menu_items:\n if 'type' in item:\n if 'subitems' in item:\n menu[item['type']] = {}\n menu[item['type']]['name'] = item['name']\n menu[item['type']]['subitems'] = collections.OrderedDict()\n for subitem in item['subitems']:\n menu[item['type']]['subitems'][subitem['type']] = subitem\n else:\n menu[item['type']] = item\n elif 'title' in item:\n menu[item['title']] = item\n return menu\n\n\nclass AdminHandler(webapp2.RequestHandler):\n \"\"\"Class for Admin UI request handlers.\"\"\"\n\n XSRF_PROTECT = False\n\n def handle_exception(self, exception, debug_mode):\n \"\"\"Handle an exception.\n\n Args:\n exception: exception that was thrown\n debug_mode: True if the application is running in debug mode\n \"\"\"\n if issubclass(exception.__class__, auth.NotAuthenticated):\n self.error(403)\n return\n else:\n super(AdminHandler, self).handle_exception(exception, debug_mode)\n\n def IsAdminUser(self):\n \"\"\"Returns True if the current user is an admin, False otherwise.\"\"\"\n # NOTE(user): this is definitely not threadsafe.\n if not hasattr(self, '_is_admin'):\n self._is_admin = auth.IsAdminUser()\n return self._is_admin\n\n def Paginate(self, query, default_limit):\n \"\"\"Returns a list of entities limited to limit, with a next_page cursor.\"\"\"\n try:\n limit = int(self.request.get('limit', default_limit))\n except ValueError:\n limit = default_limit\n if limit not in QUERY_LIMITS:\n limit = default_limit\n\n cursor = self.request.get('page', '')\n if cursor:\n query.with_cursor(cursor)\n\n entities = list(query.fetch(limit))\n\n if len(entities) == limit:\n next_page = query.cursor()\n else:\n next_page = None\n\n self._page = {\n 'limit': limit,\n 'next_page': next_page,\n 'results_count': len(entities),\n }\n\n return entities\n\n def Render(self, template_path, values, write_to_response=True):\n \"\"\"Renders a template using supplied data values and returns HTML.\n\n Args:\n template_path: str path of template.\n values: dict of template values.\n write_to_response: bool, True to write to response.out.write().\n Returns:\n str HTML of rendered template.\n \"\"\"\n path = os.path.join(\n os.path.dirname(__file__), 'templates', template_path)\n\n if not settings.DEV_APPSERVER:\n values['static_path'] = 'myapp/%s' % os.getenv('CURRENT_VERSION_ID')\n\n values['is_admin'] = self.IsAdminUser()\n\n if not hasattr(self, '_menu'):\n self._menu = GetMenu()\n\n values['menu'] = self._menu\n\n if not settings.APPROVAL_REQUIRED:\n if 'proposals' in values['menu']['munki_packages']['subitems']:\n del values['menu']['munki_packages']['subitems']['proposals']\n\n if 'msg' not in values:\n values['msg'] = self.request.GET.get('msg')\n\n if 'report_type' not in values:\n values['report_type'] = 'undefined_report'\n\n if self.XSRF_PROTECT:\n values['xsrf_token'] = xsrf.XsrfTokenGenerate(values['report_type'])\n\n if hasattr(self, '_page'):\n values['limit'] = self._page.get('limit')\n values['next_page'] = self._page.get('next_page')\n values['results_count'] = self._page.get('results_count')\n values['limits'] = QUERY_LIMITS\n\n values['request_query_params'] = self.request.GET\n 
values['request_path'] = self.request.path\n\n if self._page.get('next_page'):\n # Generate next page link, replacing \"page\" query param with next_page.\n query_params = self.request.GET.copy()\n query_params['page'] = self._page.get('next_page')\n values['next_page_link'] = '%s?%s' % (\n self.request.path, urllib.urlencode(query_params, doseq=True))\n\n html = template.render(path, values)\n if write_to_response:\n self.response.out.write(html)\n return html\n\n\nclass UTCTZ(datetime.tzinfo):\n \"\"\"tzinfo class for the UTC time zone.\"\"\"\n\n def tzname(self, unused_dt):\n return 'UTC'\n\n def dst(self, unused_dt):\n return datetime.timedelta(0)\n\n def utcoffset(self, unused_dt):\n return datetime.timedelta(0)\n\n\ndef AddTimezoneToComputerDatetimes(computer):\n \"\"\"Sets the tzinfo on all Computer.connected_datetimes for use with Django.\n\n Args:\n computer: models.Computer entity.\n Returns:\n Boolean. True if one date is today, false otherwise.\n \"\"\"\n for i in xrange(0, len(computer.connection_datetimes)):\n cdt = computer.connection_datetimes[i]\n # set timezone so Django \"timesince\" template filter works.\n computer.connection_datetimes[i] = datetime.datetime(\n cdt.year, cdt.month, cdt.day,\n cdt.hour, cdt.minute, cdt.second,\n tzinfo=UTCTZ())\n\n\ndef XmlToHtml(xml):\n \"\"\"Convert an XML string into an HTML DOM with styles.\"\"\"\n tags = re.compile(r'\\<(\\/?)(\\w*)([^<>]*)\\>')\n html = tags.sub((r'<\\1\\2'\n r'\\3>'),\n xml)\n html = html.replace(' ', ' ').replace('\\n', '
')\n return '%s' % html\n","sub_path":"src/tests/simian/mac/admin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"139464426","text":"import tensorflow as tf\nimport tensorflow.keras.utils as np_utils\nimport os\n\n# 關閉不必要的 System Warning\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# 關閉不必要的 Tensorflow Warning\ntf.logging.set_verbosity(tf.logging.ERROR)\n\n# 資料集\nfashion_mnist = tf.keras.datasets.fashion_mnist\n\n# 取出訓練與測試集資料\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\n# 資料轉換\nx_train = x_train.reshape(x_train.shape[0], -1, 1).astype('float32')\nx_test = x_test.reshape(x_test.shape[0], 28, -1, 1).astype('float32')\n\n# 將圖片數值從原本的 0~255 正規化成 0~1\nx_train, x_test = x_train / 255, x_test / 255\n\n# 將 Features 進行標準化與 Label 的 Onehot encoding\ny_train = np_utils.to_categorical(y_train, 10)\ny_test = np_utils.to_categorical(y_test, 10)\n\n# 建立模型\nmodel = tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.SimpleRNN(100,\n kernel_initializer=tf.keras.initializers.RandomNormal(\n stddev=0.001),\n recurrent_initializer=tf.keras.initializers.Identity(\n gain=1.0),\n activation='relu',\n input_shape=x_train.shape[1:]))\nmodel.add(tf.keras.layers.Dense(10))\nmodel.add(tf.keras.layers.Activation('softmax'))\n\nrmsprop = tf.keras.optimizers.RMSprop(lr=1e-6)\nmodel.compile(loss='categorical_crossentropy',\n optimizer=rmsprop,\n metrics=['accuracy'])\n\n# 開始訓練\nmodel.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=300)\n\n# 驗證模型\nloss, acc = model.evaluate(x_test, y_test)\nprint('Testing Accurakcy: ', str(acc))\n","sub_path":"Fashion-MNIST/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"97857888","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('f:/images/wu/010002_1.png')\ncv2.namedWindow('img', cv2.WINDOW_AUTOSIZE)\ncv2.imshow('img', img)\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)\ncv2.imshow('gray', gray)\n\nprint(img.shape)\n\nres = np.zeros((100, img.shape[1], 3))\n\n\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"006-图像处理/OpenCV/my-demo/吴-压缩图片.py","file_name":"吴-压缩图片.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"378854532","text":"import numpy\nimport re\nimport operator\nimport feedparser\n\n\ndef loadDataSet():\n \"\"\"\n 创建待训练数据集\n :return:\n \"\"\"\n postingList = [\n ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']\n ] # 训练文档集合\n classVec = [0, 1, 0, 1, 0, 1] # 对应每篇文章的分类\n return postingList, classVec\n\n\ndef createVocabList(dataSet):\n \"\"\"\n 创建词汇表\n :param dataSet: 输入训练文档集\n :return:\n \"\"\"\n vocabSet = set([])\n for document in dataSet:\n vocabSet = vocabSet | set(document)\n return list(vocabSet)\n\n\ndef setOfWord2Vec(vocabList, inputSet):\n \"\"\"\n 根据输入的词汇表和文档,判断文档中是否含有词汇表中各个单词\n :param vocabList: 词汇表\n :param inputSet: 输入文档\n :return:\n \"\"\"\n returnVec = [0] * len(vocabList)\n for word in inputSet:\n if word in vocabList:\n # returnVec[vocabList.index(word)] = 1 # 词集模型\n returnVec[vocabList.index(word)] += 1 # 词袋模型\n else:\n # print(\"the word: %s is not in my Vocabulary!\" % word)\n continue\n return returnVec\n\n\ndef trainNB0(trainMatrix, trainCategory):\n \"\"\"\n 朴素贝叶斯分类器训练函数\n :param trainMatrix:\n :param trainCategory:\n :return:\n \"\"\"\n numTrainDocs = len(trainMatrix)\n numWords = len(trainMatrix[0])\n pAbusive = sum(trainCategory) / float(numTrainDocs)\n\n # p0Num = numpy.zeros(numWords)\n # p1Num = numpy.zeros(numWords)\n # p0Denom = 0\n # p1Denom = 0\n\n p0Num = numpy.ones(numWords)\n p1Num = numpy.ones(numWords)\n p0Denom = 2\n p1Denom = 2\n\n for i in range(numTrainDocs):\n if trainCategory[i] == 1:\n p1Num += trainMatrix[i]\n p1Denom += sum(trainMatrix[i])\n else:\n p0Num += trainMatrix[i]\n p0Denom += sum(trainMatrix[i])\n p0Vect = numpy.log(p0Num / p0Denom)\n p1Vect = numpy.log(p1Num / p1Denom)\n return p0Vect, p1Vect, pAbusive\n\n\ndef classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n \"\"\"\n 分类函数\n :param vec2Classify:\n :param p0Vec:\n :param p1Vec:\n :param pClass1:\n :return:\n \"\"\"\n p1 = sum(vec2Classify * p1Vec) + numpy.log(pClass1)\n p0 = sum(vec2Classify * p0Vec) + numpy.log(1 - pClass1)\n if p1 > p0:\n return 1\n else:\n return 0\n\n\ndef testingNB(testEntry):\n \"\"\"\n\n :param testEntry:\n :return:\n \"\"\"\n listOPosts, listClasses = loadDataSet()\n myVocabList = createVocabList(listOPosts)\n print(myVocabList)\n trainMat = []\n for postinDoc in listOPosts:\n trainMat.append(setOfWord2Vec(myVocabList, postinDoc))\n print(trainMat)\n p0V, p1V, pAb = trainNB0(numpy.array(trainMat), numpy.array(listClasses))\n print(p0V)\n print(p1V)\n print(pAb)\n thisDoc = numpy.array(setOfWord2Vec(myVocabList, testEntry))\n thisType = classifyNB(thisDoc, p0V, p1V, pAb)\n print(testEntry, 'classified as: ', thisType)\n\n\n# doc = ['love', 'my', 'dalmation']\ndoc = ['stupid', 'garbage']\ntestingNB(doc)\n\n\ndef textParse(bigString):\n \"\"\"\n 解析文本,返回小写字母的单词数组\n :param bigString:\n :return:\n \"\"\"\n listOfTokens = re.split(r'\\W*', bigString)\n return [token.lower() for token in listOfTokens if len(token) > 2]\n\n\ndef spamText():\n \"\"\"\n\n :return:\n \"\"\"\n docList = []\n classList = []\n fullText = []\n for i in range(1, 26):\n wordList = textParse(open('email/spam/%d.txt' % i).read())\n docList.append(wordList)\n fullText.extend(wordList)\n classList.append(1)\n wordList = textParse(open('email/ham/%d.txt' % 
i).read())\n docList.append(wordList)\n fullText.extend(wordList)\n classList.append(0)\n vocabList = createVocabList(docList)\n trainingSet = list(range(50))\n testSet = []\n for i in range(10):\n randIndex = int(numpy.random.uniform(0, len(trainingSet)))\n testSet.append(trainingSet[randIndex])\n del (trainingSet[randIndex])\n trainMat = []\n trainClasses = []\n for docIndex in trainingSet:\n trainMat.append(setOfWord2Vec(vocabList, docList[docIndex]))\n trainClasses.append(classList[docIndex])\n p0V, p1V, pSpam = trainNB0(numpy.array(trainMat), numpy.array(trainClasses))\n errorCount = 0\n for docIndex in testSet:\n wordVector = setOfWord2Vec(vocabList, docList[docIndex])\n classType = classifyNB(numpy.array(wordVector), p0V, p1V, pSpam)\n if classType != classList[docIndex]:\n errorCount += 1\n errorRate = float(errorCount) / len(testSet)\n print('the error rate is: ', errorRate)\n\n\nspamText()\n\n\ndef calMostFreq(vocabList, fullText):\n \"\"\"\n\n :param vocabList:\n :param fullText:\n :return:\n \"\"\"\n freqDict = {}\n for token in vocabList:\n freqDict[token] = fullText.count(token)\n sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)\n return sortedFreq[:30]\n\n\ndef localWords(feed1, feed0):\n \"\"\"\n\n :param feed1:\n :param feed0:\n :return:\n \"\"\"\n docList = []\n classList = []\n fullText = []\n minLen = min(len(feed1['entries']), len(feed0['entries']))\n for i in range(minLen):\n wordList = textParse(feed1['entries'][i]['summary'])\n docList.append(wordList)\n fullText.extend(wordList)\n classList.append(1)\n wordList = textParse(feed0['entries'][i]['summary'])\n docList.append(wordList)\n fullText.extend(wordList)\n classList.append(0)\n vocabList = createVocabList(docList)\n top30Words = calMostFreq(vocabList, fullText)\n for pairW in top30Words:\n if pairW[0] in vocabList:\n vocabList.remove(pairW[0])\n trainingSet = list(range(2 * minLen))\n testSet = []\n for i in range(10):\n randIndex = int(numpy.random.uniform(0, len(trainingSet)))\n testSet.append(trainingSet[randIndex])\n del (trainingSet[randIndex])\n trainMat = []\n trainClasses = []\n for docIndex in trainingSet:\n trainMat.append(setOfWord2Vec(vocabList, docList[docIndex]))\n trainClasses.append(classList[docIndex])\n p0V, p1V, pSpam = trainNB0(numpy.array(trainMat), numpy.array(trainClasses))\n errorCount = 0\n for docIndex in testSet:\n wordVector = setOfWord2Vec(vocabList, docList[docIndex])\n classType = classifyNB(numpy.array(wordVector), p0V, p1V, pSpam)\n if classType != classList[docIndex]:\n errorCount += 1\n print('the error rate is: ', float(errorCount) / len(testSet))\n return vocabList, p0V, p1V\n\n\ndef getTopWords(shanghai, beijing):\n \"\"\"\n\n :param shanghai:\n :param beijing:\n :return:\n \"\"\"\n vocabList, p0V, p1V = localWords(shanghai, beijing)\n topShanghai = []\n topBeijing = []\n for i in range(len(p0V)):\n if p0V[i] > -5.0:\n topShanghai.append((vocabList[i], p0V[i]))\n if p1V[i] > -5.0:\n topBeijing.append((vocabList[i], p1V[i]))\n sortedShanghai = sorted(topShanghai, key=lambda pair: pair[1], reverse=True)\n print(\"------------------------SHANGHAI--------------------------\")\n for item in sortedShanghai:\n print(item[0])\n sortedBeijing = sorted(topBeijing, key=lambda pair: pair[1], reverse=True)\n print(\"------------------------BEIJING--------------------------\")\n for item in sortedBeijing:\n print(item[0])\n\n\nshanghai = feedparser.parse('https://shanghai.craigslist.org/search/jjj?format=rss')\nbeijing = 
feedparser.parse('https://beijing.craigslist.org/search/jjj?format=rss')\ngetTopWords(shanghai, beijing)\n","sub_path":"naiveBayes/nb001.py","file_name":"nb001.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"237052021","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\"\"\"Roles sub-commands\"\"\"\n\nfrom airflow.cli.simple_table import AirflowConsole\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import suppress_logs_and_warning\nfrom airflow.www.app import cached_app\n\n\n@suppress_logs_and_warning()\ndef roles_list(args):\n \"\"\"Lists all existing roles\"\"\"\n appbuilder = cached_app().appbuilder # pylint: disable=no-member\n roles = appbuilder.sm.get_all_roles()\n AirflowConsole().print_as(\n data=sorted([r.name for r in roles]), output=args.output, mapper=lambda x: {\"name\": x}\n )\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning()\ndef roles_create(args):\n \"\"\"Creates new empty role in DB\"\"\"\n appbuilder = cached_app().appbuilder # pylint: disable=no-member\n for role_name in args.role:\n appbuilder.sm.add_role(role_name)\n print(f\"Added {len(args.role)} role(s)\")\n","sub_path":"airflow/cli/commands/role_command.py","file_name":"role_command.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"483889016","text":"\nclass SeqAutoEncoderConfig(object):\n def __init__(self):\n # training configuration\n self.lr = 0.001\n self.batch_size = 128\n self.epoch = 100\n\n # dataset configuration\n self.dict_size = 20000\n self.pad_flag_index = 0 # padding在字典中的索引\n self.sos_flag_index = 1 # 开始标志在字典中的索引\n self.eos_flag_index = 2\n self.oov_flag_index = 3 # 超出字典词在字典中的索引\n\n # encoder configuration\n self.en_input_size = 100\n self.en_hidden_size = 100\n self.en_num_layers = 1\n self.en_bidirectional = False\n self.en_dropout_rate = 0\n\n # decoder configuration\n self.de_input_size = 100\n self.de_hidden_size = 100\n self.de_num_layers = 1\n self.de_bidirectional = False\n self.de_dropout_rate = 0\n self.out_feature_num = self.dict_size\n","sub_path":"config/SeqAutoEncoderConf.py","file_name":"SeqAutoEncoderConf.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"446059333","text":"\"\"\"Init file.\"\"\"\nfrom flask import Flask\nfrom flask import render_template\nfrom hyperreal.dashboards import ALL_DASHBOARDS, ALL_COMPONENTS\nfrom hyperreal.model.utils import get_table_html\nfrom flask import request\nimport json\n\napp = Flask(__name__)\nexploded_df = [{'name': d.name,\n 'slug': d.slug,\n 'dash_obj': d} for d in ALL_DASHBOARDS]\n\n\n@app.route('/')\ndef index():\n \"\"\"Root controller.\"\"\"\n return render_template('index.html', dashes=exploded_df)\n\n\n@app.route('/dash/')\ndef dashboards():\n \"\"\"Dashboard root controller.\"\"\"\n dashes = [d['name'] for d in exploded_df]\n return f'{dashes}'\n\n\n@app.route('/dash/')\ndef individual_dash(slug=None):\n \"\"\"Individual dash controller.\"\"\"\n dash = [d['dash_obj'] for d in exploded_df if d['slug'] == slug]\n assert len(dash) == 1\n dash = dash[0]\n return render_template('dashboard.html',\n dash=dash)\n\n\n@app.route('/component//get_chart_data', methods=['POST', 'GET'])\ndef get_chart_data(slug=None):\n \"\"\"Return chart data given certain parameters.\"\"\"\n params = request.form\n print(params)\n component = [c for c in ALL_COMPONENTS if c.slug == slug][0]\n filters = []\n for col in component.dim_cols:\n value = params.get(f'form_{slug}_{col}_filter')\n filters.append((col, f'\"{value}\"'))\n\n req_val_cols = params.getlist(f'form_{slug}_val_cols')\n groupby_cols = params.getlist(f'form_{slug}_grouping')\n return get_table_html(component.get_chart_data(req_val_cols=req_val_cols,\n groupby_cols=groupby_cols,\n filter_cols=filters))\n","sub_path":"hyperreal/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"457342879","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.conf import settings\n\nfrom tuesmon.projects.models import Project\nfrom tuesmon.projects.milestones.models import Milestone\nfrom tuesmon.projects.userstories.models import UserStory\nfrom tuesmon.projects.tasks.models import Task\nfrom tuesmon.projects.issues.models import Issue\nfrom tuesmon.projects.wiki.models import WikiPage\nfrom tuesmon.projects.epics.models import Epic\nfrom tuesmon.users.models import User\nfrom tuesmon.front.urls import urls\nfrom .service import recache_schedule\n\ndef build_url(name, *params):\n return \"{}://{}{}\".format(\n settings.SITES['front']['scheme'],\n settings.SITES['front']['domain'],\n urls[name].format(*params)\n )\n\n\ndef recache_user_profile(user):\n url = build_url(\"user\", user.username)\n recache_schedule(url)\n\n\ndef recache_project_profile(project):\n url = build_url(\"project\", project.slug)\n recache_schedule(url)\n\n\ndef recache_wiki_page(wiki_page):\n url = build_url(\"wiki\", wiki_page.project.slug, wiki_page.slug)\n recache_schedule(url)\n\n\ndef recache_issue(issue):\n url = build_url(\"issue\", issue.project.slug, issue.ref)\n recache_schedule(url)\n\n\ndef recache_task(task):\n url = build_url(\"task\", task.project.slug, task.ref)\n recache_schedule(url)\n\n\ndef recache_epic(epic):\n url = build_url(\"epic\", epic.project.slug, epic.ref)\n recache_schedule(url)\n\n\ndef recache_us(us):\n url = build_url(\"userstory\", us.project.slug, us.ref)\n recache_schedule(url)\n\n\ndef recache_taskboard(milestone):\n url = build_url(\"taskboard\", milestone.project.slug, milestone.slug)\n recache_schedule(url)\n\n\ndef recache_backlog(project):\n url = build_url(\"backlog\", project.slug)\n recache_schedule(url)\n\n\ndef recache_kanban(project):\n url = build_url(\"kanban\", project.slug)\n recache_schedule(url)\n\n\n@receiver(post_save, sender=User)\ndef user_post_save(update_fields, instance, **kwargs):\n # No recache on last_login update\n if update_fields and len(update_fields) == 1 and 'last_login' in update_fields:\n return\n\n recache_user_profile(instance)\n\n\n@receiver(post_save, sender=WikiPage)\ndef wiki_page_post_save(instance, **kwargs):\n recache_project_profile(instance.project)\n recache_wiki_page(instance)\n\n\n@receiver(post_save, sender=Issue)\ndef issue_post_save(instance, **kwargs):\n recache_project_profile(instance.project)\n recache_issue(instance)\n\n\n@receiver(post_save, sender=Task)\ndef task_post_save(instance, **kwargs):\n recache_project_profile(instance.project)\n recache_task(instance)\n if instance.user_story:\n recache_us(instance.user_story)\n if instance.milestone:\n recache_taskboard(instance.milestone)\n recache_backlog(instance.project)\n recache_kanban(instance.project)\n\n\n@receiver(post_save, sender=Epic)\ndef epic_post_save(instance, **kwargs):\n recache_project_profile(instance.project)\n recache_epic(instance)\n for us in instance.user_stories.all():\n recache_us(us)\n recache_backlog(instance.project)\n recache_kanban(instance.project)\n\n\n@receiver(post_save, sender=UserStory)\ndef us_post_save(instance, **kwargs):\n recache_project_profile(instance.project)\n recache_us(instance)\n for task in instance.tasks.all():\n recache_task(task)\n if instance.milestone:\n recache_taskboard(instance.milestone)\n recache_backlog(instance.project)\n recache_kanban(instance.project)\n\n@receiver(post_save, sender=Milestone)\ndef milestone_post_save(instance, 
**kwargs):\n recache_project_profile(instance.project)\n recache_taskboard(instance)\n for task in instance.tasks.all():\n recache_task(task)\n for us in instance.user_stories.all():\n recache_us(us)\n recache_backlog(instance.project)\n recache_kanban(instance.project)\n\n@receiver(post_save, sender=Project)\ndef project_post_save(instance, **kwargs):\n recache_project_profile(instance)\n recache_backlog(instance)\n recache_kanban(instance)\n","sub_path":"prerender_recache/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"92180492","text":"from html import escape\nfrom pprint import pformat\n\nfrom django.http import HttpResponse\n\n\ndef index(request):\n headers = pformat(dict(request.headers))\n params = pformat(dict(request.GET))\n request_context = pformat(request.environ.get(\"apig_wsgi.request_context\", None))\n full_event = pformat(request.environ.get(\"apig_wsgi.full_event\", None))\n environ = pformat(request.environ)\n return HttpResponse(\n f\"\"\"\n Hello World!
\n Headers
\n {escape(headers)}\n Query Params
\n {escape(params)}\n Request Context
\n {escape(request_context)}\n Full event
\n {escape(full_event)}\n WSGI Environ
\n {escape(environ)}\n \"\"\"\n )\n","sub_path":"example/app/testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"23903866","text":"#!/usr/bin/env python\r\n\r\n\r\n\"\"\"\r\nPrint.py: How a Character works.\r\n\"\"\"\r\n\r\n\r\n__author__ = \"Bo Claes\"\r\n__email__ = \"bo.claes@student.kdg.be\"\r\n__status__ = \"Development\"\r\n\r\n\r\ndef main():\r\n\r\n word = input(\"Give me a word user \") # asking the user for a word\r\n\r\n print(\"This is how many characters are in your word \"+str(len(word))) # printing out the characters of that word\r\n\r\n\r\nif __name__ == '__main__': # code to execute if called from command-line\r\n main()","sub_path":"Print1.py","file_name":"Print1.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"295901175","text":"import win32com.client\nimport win32con\nimport win32api\nimport pywintypes\nimport re\n\ndef get_software_updates(update_seeker, installed):\n # Search installed/not installed Software Windows Updates\n search_string = \"IsInstalled=%d and Type='Software'\" % installed\n search_update = update_seeker.Search(search_string)\n _ = win32com.client.Dispatch(\"Microsoft.Update.UpdateColl\")\n updates = []\n categories = []\n update_dict = {}\n # compiles the regex pattern for finding Windows Update codes\n updates_pattern = re.compile(r'KB+\\d+')\n for update in search_update.Updates:\n update_str = str(update)\n # extracts Windows Update code using regex\n update_code = updates_pattern.findall(update_str)\n for category in update.Categories:\n category_name = category.Name\n print(\"[*] Name: \" + update_str + \" - \" +\n \"url: \" + \"https://support.microsoft.com/en-us/kb/{}\".format(\n \"\".join(update_code).strip(\"KB\")) + \" - \" +\n \"Category: \" + category_name)\n updates.append(update_str)\n categories.append(category_name)\n # converts lists to tuples in order to be used as a dictionary key\n hashable = tuple(updates)\n hashable_category = tuple(categories)\n # creates category:update dictionary\n for update in hashable:\n for category_update in hashable_category:\n update_dict[category_update] = str(update)\n return update_dict\n\ndef enum_winupdates():\n wua = win32com.client.Dispatch(\"Microsoft.Update.Session\")\n update_seeker = wua.CreateUpdateSearcher()\n print(\"\\n[+] Enumerating installed Windows or Drivers' Updates...(if any)\\n\")\n installed = get_software_updates(update_seeker, installed=True)\n print(\"\\n[+] Enumerating available Windows or Drivers' Updates not installed...(if any)\\n\")\n available = get_software_updates(update_seeker, installed=False)\n return installed, available\n\nenum_winupdates()\n","sub_path":"updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"450796508","text":"#!/usr/bin/env python\n\"\"\"Functions to build count structures from car-dog hashtag coocurrence data\"\"\"\n\nimport os\nimport json\nfrom collections import defaultdict\nimport csv\nfrom carroperro.util import name_from_hashtag\n\n\ndef build_counts_from_coocs(cooc_path, cars, dogs):\n \"\"\"Walk the tree of json cooc files and build a dict of dicts of the\n form:\n\n {car1: {dog1: count, dog2: count, ...}, car2: {dog1: count,...}...}\n\n The keys of the outer dict are all the car names, and the values are\n dictionaries from dog names to counts. The counts correspond to the\n number of times that dog name occurred as a hashtag in the same user\n timeline as the hashtag for that car name (or vice versa).\n\n Args:\n cooc_path: location of the directory that contains the coocurrence\n data for cars and dogs, with each file being named after a\n hashtag and contain a JSON dict from hashtag: count\n cars: an instance of Cars\n dogs: an instance of Dogs\n\n Returns:\n defaultdict: of (defaultdict(int)), cooc structure described above\n\n \"\"\"\n\n counts = defaultdict(lambda: defaultdict(int))\n\n for root, dirs, files in os.walk(cooc_path):\n for file in files:\n print(file)\n if file[-5:] == '.json':\n print(cooc_path + '/' + file)\n type_a, name_a = name_from_hashtag(file[:-5], cars, dogs)\n print('name_a', name_a)\n with open(cooc_path + '/' + file, 'r') as f:\n coocs = json.loads(f.read())\n for hashtag, count in coocs.items():\n type_b, name_b = name_from_hashtag(hashtag, cars, dogs)\n if type_a != type_b:\n if type_a == 'car':\n counts[name_a][name_b] += count\n else:\n counts[name_b][name_a] += count\n\n # json roundtrip turns nested defaultdict structure into normal dicts\n return json.loads(json.dumps(counts))\n\n\ndef reverse_counts(counts):\n \"\"\"Create a dog-oriented view of the counts:\n\n {dog1: {car1: count, car2: count, ...}, dog2: {car1: count,...}...}\n\n Args:\n counts (dict): the car-oriented counts as created by\n build_counts_from_coocs\n\n Returns:\n dict: the counts re-cast to have dogs as the first-level key\n\n \"\"\"\n\n new = defaultdict(lambda: defaultdict(int))\n\n for car, dog_counts in counts.items():\n for dog, count in dog_counts.items():\n new[dog][car] += count\n\n return json.loads(json.dumps(new))\n\n\ndef write_counts_csv(counts, csv_path):\n \"\"\"Takes a dictionary of dictionaries of counts and writes a CSV file\n from them. 
Could use csvwriter for more safety, but we know we have\n no problematic values in our names and no escaping needed.\n\n Args:\n counts: dict of dicts as generated by build_counts_from_coocs\n csv_path: location to write csv file\n \"\"\"\n\n with open(csv_path, 'w', newline='') as f:\n writer = csv.DictWriter(f, fieldnames=['car', 'dog', 'count'])\n writer.writeheader()\n for car, dog_counts in counts.items():\n for dog, count in dog_counts.items():\n writer.writerow({'car': car, 'dog': dog, 'count': str(count)})\n\n\ndef read_count_csv(csv_path):\n \"\"\"Reads the csv file written by write_csv_from_countsand returns the count\n structure built by build_counts_from_coocs\n \"\"\"\n\n counts = defaultdict(lambda: defaultdict(int))\n\n with open(csv_path, 'r', newline='') as f:\n reader = csv.DictReader(f)\n for row in reader:\n counts[row['car']][row['dog']] += int(row['count'])\n\n return json.loads(json.dumps(counts))\n","sub_path":"carroperro/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"616918427","text":"#coding:utf-8\n\"\"\"\n@file: SearchPageInfoGenerator\n@author: lyn\n@contact: tonylu716@gmail.com\n@python: 3.5\n@editor: PyCharm Mac\n@create: 2016/11/2 02:13\n@description:\n 本模块用于调用淘宝后端的ajax接口,拼接二者的json做转发\n\"\"\"\n\nimport requests,json\n\nclass StoreInfoGenerator:\n '''\n sample url: http://pub.alimama.com/promo/search/index.htm\n 本页的加载流程分为两个ajax,一是基本信息,二是详情\n 初始化此类后直接调用to_json()方法即可得到处理后的完整json\n '''\n def __init__(self,store_url):\n self.url = store_url\n self.user_view_url = None\n self.basic_info = None\n self.detail_info = None\n\n def generate_basic_info(self):\n #得到店铺基本信息\n json_url = 'http://pub.alimama.com/items/search.json?q={}'\\\n .format(self.url)\n self.user_view_url = 'http://pub.alimama.com/promo/search/index.htm?q={}'\\\n .format(self.url)\n #print(json_url)\n full_json = requests.get(json_url).text\n #print(full_json)\n jd = json.loads(full_json)\n #先将拿到的完整json转置为python字典,直接取第一个结果\n self.basic_info = jd['data']['pageList'][0]\n if len(jd['data']['pageList'])!=1:\n raise Exception('[Error] in get basic info: Multi Results')\n\n def generate_detail_info(self,seller_id):\n # 得到店铺细节信息\n if not seller_id:\n raise Exception('[Error] in get promote info: seller_id cannot be empty.')\n json_url = 'http://pub.alimama.com/pubauc/searchPromotionInfo.json?oriMemberId={}'\\\n .format(seller_id)\n self.temp_url = json_url\n full_json = requests.get(json_url).text\n #print(full_json)\n jd = json.loads(full_json)['data']\n #由��淘宝该接口是允许多商铺并行请求的,会返回列表,但我们只需要第一个\n #故对该字典做如下处理\n for key in jd.keys():\n jd[key] = jd[key][0]\n self.detail_info = jd\n\n def run(self):\n try:\n self.generate_basic_info()\n except:\n #此情况是店铺url链接错误,淘宝后端无法匹配得唯一条目(可能给予关键词重定向产生多项条目)\n print('Error in generate_basic_info():\\n\\tMaybe no located result and redirect worked in this page:{}'\\\n .format(self.user_view_url))\n return -1\n try:\n self.generate_detail_info(\n seller_id=self.basic_info['sellerId'])\n except Exception as e:\n print('Error in generate_detail_info(): page in :{}\\n\\t{}'\\\n .format(self.temp_url,str(e)))\n return -2\n #print('basic',self.basic_info)\n #print('detail',self.detail_info)\n for key in self.detail_info.keys():\n self.basic_info[key] = self.detail_info[key]\n return 1\n\n def to_json(self):\n res = self.run()\n if res==1:\n print('Result OK')\n return self.basic_info\n else:\n return res\n\n\nif __name__==\"__main__\":\n url = 'https://detail.tmall.com/item.htm?id=44895723989'\n json = StoreInfoGenerator(url).to_json()\n print(json)","sub_path":"scholar/SearchPageInfoGenerator.py","file_name":"SearchPageInfoGenerator.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"71054689","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport cv2\n\nclass RGBHistogram:\n def __init__(self, bins):\n ''' Store the number of bins ths histogram will use'''\n self.bins = bins\n def describe(self, image):\n ''' Computes a 3D histogram in RGB colourspace then\n Normalize the histogram so that images iwth the same\n content, but either scaled larger or smaller will \n have (roughly) the same histgram'''\n hist = cv2.calcHist([image], [0, 1, 2], None, \n self.bins, [0, 256, 0, 256, 0, 256])\n hist = cv2.normalize(hist)\n \n # return 3D histogram as a flattened array\n return hist.flatten()\n","sub_path":"rgbhistogram.py","file_name":"rgbhistogram.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"4505821","text":"import nltk\nimport random \nfrom nltk.corpus import movie_reviews\nimport pickle\n\ndocuments = [(list(movie_reviews.words(fileid)), category)\n\t\t\tfor category in movie_reviews.categories()\n\t\t\tfor fileid in movie_reviews.fileids(category)]\n\nrandom.shuffle(documents)\n\nall_words = []\n\nfor w in movie_reviews.words():\n\tall_words.append(w.lower())\n\n\nall_words = nltk.FreqDist(all_words)\n\nword_features = list(all_words.keys())[:3000]\n\ndef find_features(document):\n\twords = set (document)\n\tfeatures ={}\n\tfor w in word_features:\n\t\tfeatures[w] =(w in words)\n\n\treturn features\n\n#print((find_features(movie_reviews.words(\"neg/cv000_29416.txt\"))))\n\nfeaturesets = [(find_features(rev),category) for (rev,category) in documents]\n\n#starts from here \ntraining_set = featuresets[:1900]\ntesting_set = featuresets[1900:]\n\n#classirier = nltk.NaiveBayesClassifier.train(training_set)\nclassifier_f = open(\"naivebayes.pickle\",\"rb\")\nclassirier = pickle.load(classifier_f)\nclassifier_f.close()\n\nprint(\"Naive Bayes Algo accuracy percentage:\", (nltk.classify.accuracy(classirier, testing_set))*100)\nclassirier.show_most_informative_features(15)\n\n# save_slassifier = open(\"naivebayes.pickle\",\"wb\")\n# pickle.dump(classirier, save_slassifier)\n# save_slassifier.close()","sub_path":"NLTK tutorial/picklejhk.py","file_name":"picklejhk.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"611449669","text":"\"\"\"\n\tWritten By @searpheon - Arka\n\tUpdated By @nishantcoder97 - Nishant Nahata\n\tBINARY SEARCH\n\"\"\"\n\n\"\"\"\n\tCondition: List should be sorted in ascending order\n\"\"\"\ndef binary_search(item_list, item):\n\t\"\"\"\n\tparam: list item_list: List to be searched\n\tparam: int item: Item to be searched for\n\treturns: int index: Index of the first occurrence of item, or len(tem_list) if not found\n\t\"\"\"\n\tfirst = 0\n\tlast = len(item_list)-1 \n\tindex = len(item_list)\n\twhile first < last:\n\t\tmid = int((first + last) / 2)\n\t\tif item_list[mid] >= item:\n\t\t\tlast = mid\n\t\telse:\n\t\t\tfirst = mid + 1\n\tif item_list[first] == item:\n\t\tindex = first\n\treturn index\n\n\nif __name__ == '__main__':\n\t ### Tests ###\n\tprint(binary_search([1,2,3,5,8], 6)) # returns len(item_list)\n\tprint(binary_search([1,2,3,5,8], 5)) # returns 3\n\tprint(binary_search([1, 2, 3, 3, 3, 4, 4, 5, 10], 4)) # returns 5\n\n\n","sub_path":"Arrays-searching/src/binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"466477023","text":"\ndef solve(k,n):\n max_hour=0\n for _ in range(n):\n k_i,s_i=map(int,(input()).split())\n hour=(k-k_i)/s_i\n if hour>max_hour:\n max_hour=hour\n return k/max_hour\n \n\n\n\nif __name__=='__main__':\n T=int(input())\n for i in range(T):\n k,n=map(int,(input()).split())\n print('Case #%i: %.6f'%(i+1,solve(k,n)))\n","sub_path":"2018/Practice/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"99222422","text":"\"\"\"\nParadrop command line utility.\n\nEnvironment Variables:\n PDSERVER_URL Paradrop controller URL [default: https://paradrop.org].\n\"\"\"\nimport os\n\nimport click\n\nfrom . import chute\nfrom . import device\nfrom . import groups\nfrom . import routers\nfrom . import store\n\n\nPDSERVER_URL = os.environ.get(\"PDSERVER_URL\", \"https://paradrop.org\")\n\nCONTEXT_SETTINGS = dict(\n # Options can be parsed from PDTOOLS_* environment variables.\n auto_envvar_prefix = 'PDTOOLS',\n\n # Respond to both -h and --help for all commands.\n help_option_names = ['-h', '--help'],\n\n obj = {\n 'pdserver_url': PDSERVER_URL\n }\n)\n\n\n@click.group(context_settings=CONTEXT_SETTINGS)\ndef root():\n \"\"\"\n Paradrop command line utility.\n\n Environment Variables\n PDSERVER_URL ParaDrop controller URL [default: https://paradrop.org]\n \"\"\"\n pass\n\n\nroot.add_command(chute.chute)\nroot.add_command(device.device)\nroot.add_command(routers.routers)\nroot.add_command(store.store)\ngroups.register_commands(root)\n\n\ndef main():\n \"\"\"\n Entry point for the pdtools Python package.\n \"\"\"\n root()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/pdtools/pdtools/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"180376348","text":"#!/usr/bin/python\n\n# nnet forward pass and error backpropagation\n\nimport csv\nimport numpy as np\nfrom random import seed, random, choice\n\nX = []\nY = []\n\n# read in feature data\nwith open('wheat-seeds.csv') as csvfile:\n    readCSV = csv.reader(csvfile, delimiter=',')\n    for row in readCSV:\n        X.append([float(f) if f else 0.0 for f in row[:-1]])\n        y = [0, 0, 0]\n        y[int(row[-1])-1] = 1\n        Y.append(y)\n\nX = np.array(X)\nY = np.array(Y)\n\n# partition data in train ({X,Y}tr) and test ({X,Y}te)\nnte = 10\nXtr = X[:-nte]\nYtr = Y[:-nte]\nXte = X[-nte:]\nYte = Y[-nte:]\n\n\n# nonlinearity: Sigmoid, and its derivative\ndef nonlin(x, deriv=False):\n\tif deriv:\n\t\treturn x * (1 - x)\n\telse:\n\t\treturn 1. / (1. + np.exp(-x))\n\n\n# inner layer is a matrix (outputs, inputs+1)\n# to add the bias term\ndef mkinner(inputs, outputs):\n\treturn np.random.rand(outputs, inputs+1)\n\n\n#def softmax(x):\n#    e_x = np.exp(x)\n#    return e_x / e_x.sum()\n\n# compute a forward pass, store the activations\n# forward pass per node is \n# act = nonlin(b + sum_i w_i x_i)\n#     = nonlin(dot([w_0, ..., w_d, bias], [x_0, ..., x_d, 1]))\ndef forward(x, layers):\n\tacts = []\n\tfor l in layers:\n\t\t# apply layer weights (with bias input appended), then the nonlinearity\n\t\tact = np.matmul(l, np.append(x, 1))\n\t\tx = nonlin(act)\n\t\tacts.append(x)\n\treturn acts\n\n\n","sub_path":"06-nnets/nnet.py","file_name":"nnet.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"532377325","text":"from __future__ import unicode_literals\n\nimport datetime\n\nfrom django.test import TestCase\n\nfrom holidays.models import NthXDayHoliday, Holiday\n\nclass NthXDayHolidayTest(TestCase):\n def setUp(self):\n self.mlkday = NthXDayHoliday.objects.create(\n name=\"MLK Day\",\n month=1, # January\n nth=3,\n day_of_week=0, # Monday\n paid_holiday=False,\n )\n self.laborday = NthXDayHoliday.objects.create(\n name=\"Labor Day\",\n month=9, # September\n nth=1,\n day_of_week=0, # Monday\n paid_holiday=True,\n )\n self.thanksgiving = NthXDayHoliday.objects.create(\n name=\"Thanksgiving\",\n month=11, # November\n nth=4,\n day_of_week=3, # Thursday\n paid_holiday=True,\n )\n\n def test_get_holidays(self):\n self.assertListEqual(\n list(Holiday.get_available_holidays()),\n ['MLK Day', 'Labor Day', 'Thanksgiving'])\n\n def test_holiday_json(self):\n self.assertDictEqual(\n self.mlkday.to_json(2016),\n {\n 'id': self.mlkday.pk,\n 'name': 'MLK Day',\n 'date': datetime.date(year=2016, month=1, day=18),\n 'paid': False,\n 'type': 'Nth X Day Holiday',\n })\n self.assertDictEqual(\n self.laborday.to_json(2016),\n {\n 'id': self.laborday.pk,\n 'name': 'Labor Day',\n 'date': datetime.date(year=2016, month=9, day=5),\n 'paid': True,\n 'type': 'Nth X Day Holiday',\n })\n self.assertDictEqual(\n self.thanksgiving.to_json(2016),\n {\n 'id': self.thanksgiving.pk,\n 'name': 'Thanksgiving',\n 'date': datetime.date(year=2016, month=11, day=24),\n 'paid': True,\n 'type': 'Nth X Day Holiday',\n })\n\n def test_get_holidays_year(self):\n holidays = Holiday.get_holidays_for_year(2017)\n self.assertEqual(len(holidays), 3)\n self.assertListEqual(holidays, [\n self.mlkday.to_json(2017),\n self.laborday.to_json(2017),\n self.thanksgiving.to_json(2017),\n ])\n\n holidays = Holiday.get_holidays_for_year(2018)\n self.assertEqual(len(holidays), 3)\n self.assertListEqual(holidays, [\n self.mlkday.to_json(2018),\n self.laborday.to_json(2018),\n self.thanksgiving.to_json(2018),\n ])\n\n holidays = Holiday.get_holidays_for_year(2019)\n self.assertEqual(len(holidays), 3)\n self.assertListEqual(holidays, [\n self.mlkday.to_json(2019),\n self.laborday.to_json(2019),\n self.thanksgiving.to_json(2019),\n ])\n\n def test_holidays_between_dates(self):\n start_date = datetime.date(year=2017, month=1, day=1)\n end_date = datetime.date(year=2017, month=12, day=31)\n holidays = Holiday.holidays_between_dates(start_date, end_date)\n self.assertEqual(len(holidays), 3)\n self.assertListEqual(holidays, [\n self.mlkday.to_json(2017),\n self.laborday.to_json(2017),\n self.thanksgiving.to_json(2017),\n ])\n\n end_date = datetime.date(year=2018, month=9, day=4)\n holidays = Holiday.holidays_between_dates(start_date, end_date)\n self.assertEqual(len(holidays), 5)\n self.assertListEqual(holidays, [\n self.mlkday.to_json(2017),\n self.laborday.to_json(2017),\n self.thanksgiving.to_json(2017),\n self.mlkday.to_json(2018),\n self.laborday.to_json(2018),\n ])\n\n start_date = datetime.date(year=2016, month=1, day=19)\n end_date = datetime.date(year=2016, month=9, day=4)\n holidays = Holiday.holidays_between_dates(start_date, end_date)\n self.assertEqual(len(holidays), 0)\n\n def test_is_holiday(self):\n self.assertEqual(\n Holiday.is_holiday(datetime.date(year=2016, month=11, day=24)),\n [self.thanksgiving.to_json(2016)])\n self.assertEqual(\n Holiday.is_holiday(datetime.date(year=2017, month=11, day=23)),\n [self.thanksgiving.to_json(2017)])\n self.assertEqual(\n Holiday.is_holiday(datetime.date(year=2018, month=11, 
day=22)),\n [self.thanksgiving.to_json(2018)])\n\n self.assertFalse(Holiday.is_holiday(datetime.date(year=2016, month=11, day=23)))\n self.assertFalse(Holiday.is_holiday(datetime.date(year=2017, month=11, day=24)))\n self.assertFalse(Holiday.is_holiday(datetime.date(year=2018, month=11, day=23)))\n\n def test_is_paid_holiday(self):\n self.assertFalse(Holiday.is_paid_holiday(datetime.date(year=2016, month=1, day=18)))\n self.assertFalse(Holiday.is_paid_holiday(datetime.date(year=2017, month=1, day=16)))\n\n self.assertEqual(\n Holiday.is_paid_holiday(datetime.date(year=2017, month=9, day=4)),\n [self.laborday.to_json(2017)])\n\n self.assertEqual(\n Holiday.is_paid_holiday(datetime.date(year=2017, month=11, day=23)),\n [self.thanksgiving.to_json(2017)])\n","sub_path":"holidays/tests/test_nthxday.py","file_name":"test_nthxday.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"140299039","text":"\"\"\"\n回溯法+排序剪枝:\n(1) 每一层往下搜索的时候,只能从这个数的后面开始进行搜索(而不是从这个数的位置开始进行搜索)。\n(2) 还有一点可能引起重复的情况,就是同一层中,如果后面的数和前面的数相同,就会引发重复,这个时候直接continue。\n\"\"\"\n\n\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n res = []\n # 排序便于剪枝去重\n candidates.sort()\n self.helper(candidates, 0, target, [], res)\n return res\n\n def helper(self, array, start, tar, path, res):\n # 剪枝\n if tar < 0:\n return\n if tar == 0:\n res.append(path[:])\n return\n for i in range(start, len(array)):\n # \"剪枝\" 检测到重复分支的条件:\n # (1)不是这一层的第一个分支\n # (2)当前选出来的数和前一个分支相等\n if i > start and array[i] == array[i - 1]:\n continue\n path.append(array[i])\n self.helper(array, i + 1, tar - array[i], path, res)\n path.pop()","sub_path":"Backtracking/40. Combination Sum II.py","file_name":"40. Combination Sum II.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"525477494","text":"from webium.driver import get_driver\r\nfrom webium.driver import close_driver\r\nfrom Login import loginpage\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom activity_hub_page import ActivityHubPage\r\nfrom activity_page import AddEditActivityPage, switcher_OFF\r\nimport time\r\nfrom creds import admin_login, admin_password, server, database, username, password\r\nfrom random import choice\r\nfrom string import digits\r\nimport pyodbc\r\n\r\nclass BaseTest(object):\r\n def teardown_class(self):\r\n close_driver()\r\n\r\nclass Test_GODO600(BaseTest):\r\n\r\n def test_600(self):\r\n get_driver().maximize_window()\r\n page = loginpage()\r\n page.open()\r\n page.login_field.send_keys(admin_login)\r\n page.password_field.send_keys(admin_password)\r\n page.button.click()\r\n page=ActivityHubPage() #STEP1\r\n page.open()\r\n page.add_activity_button.click() #STEP2\r\n page=AddEditActivityPage()\r\n time.sleep(15)\r\n assert page.switchers1[0].get_attribute(\"outerHTML\") == switcher_OFF\r\n page.stop_no_sales.send_keys('-1')#STEP3\r\n page.minimum_not_met_alert.click()\r\n assert page.stop_no_sales.get_attribute('value')=='0'\r\n page.stop_no_sales.send_keys('10000001')#STEP4\r\n page.minimum_not_met_alert.click()\r\n assert page.stop_no_sales.get_attribute('value')=='100'\r\n page.stop_no_sales.clear()#STEP5\r\n page.stop_no_sales.send_keys('1')\r\n page.minimum_not_met_alert.click()\r\n assert page.stop_no_sales.get_attribute('value')=='1'\r\n NewActivityName = (\"AutoTest600_\" + ''.join(choice(digits) for i in range(4)))\r\n page.activity_name.send_keys(NewActivityName)\r\n select = Select(page.activity_status)\r\n NewActivityStatus = \"Inactive\"\r\n select.select_by_visible_text(NewActivityStatus)\r\n select = Select(page.branch)\r\n NewActivityBranch = \"AlexeyBranch\"\r\n select.select_by_visible_text(NewActivityBranch)\r\n select = Select(page.starting_location)\r\n NewActivityLocation = \"Hotel California\"\r\n select.select_by_visible_text(NewActivityLocation)\r\n select = Select(page.time_zone)\r\n NewActivityTimezone = \"Pacific\"\r\n select.select_by_visible_text(NewActivityTimezone)\r\n NewActivityCancellationPolicy = 'We can cancel an event any time we want.'\r\n page.cancellation_policy.send_keys(NewActivityCancellationPolicy)\r\n NewActivityDurationMinutes = '15'\r\n page.activity_duration_minutes.send_keys(NewActivityDurationMinutes)\r\n page.ticket_maximum.clear()\r\n NewActivityMaxTickets = '100'\r\n page.ticket_maximum.send_keys(NewActivityMaxTickets)\r\n NewActivityFirstTicketType = \"Adult\"\r\n page.first_ticket_type.send_keys(NewActivityFirstTicketType)\r\n NewActivityFirstTicketPrice = '9.99'\r\n page.first_ticket_price.send_keys(NewActivityFirstTicketPrice)\r\n page.stop_booking_sold.click()\r\n select = Select(page.stop_booking_sold)\r\n NewActivityStopbookingSold = \"15 m\"\r\n select.select_by_visible_text(NewActivityStopbookingSold)\r\n page.save_button.click()\r\n time.sleep(5)\r\n page = ActivityHubPage()\r\n time.sleep(5)\r\n page.show_inactive.click()\r\n page.search_activity_field.send_keys(NewActivityName) # STEP6\r\n time.sleep(5)\r\n page.activity_actions.click()\r\n page.edit_activity.click()\r\n page = AddEditActivityPage()\r\n time.sleep(15)\r\n assert page.switchers1[0].get_attribute(\"outerHTML\") == switcher_OFF\r\n assert page.stop_no_sales.get_attribute('value') == '1'\r\n cnxn = pyodbc.connect(\r\n 'DRIVER={ODBC Driver 17 for SQL Server};SERVER=' + server + ';DATABASE=' + database + ';UID=' + username + 
';PWD=' + password)# STEP7\r\n cursor = cnxn.cursor()\r\n cursor.execute(\"SELECT TOP 1 activity_stopbooking_hoursbefore, activity_stopbooking_midnightbefore, activity_name FROM activity ORDER BY activity_id DESC\")\r\n row = cursor.fetchone()\r\n assert row[0] == 1\r\n assert row[1] == 0\r\n assert row[2] == NewActivityName\r\n page.switchers1[0].click()#STEP8\r\n assert page.switchers1[0].get_attribute(\"outerHTML\") != switcher_OFF\r\n assert page.stop_no_sales.get_attribute('value') == ''\r\n assert page.stop_no_sales.is_enabled()==False\r\n page.save_button.click()#STEP9\r\n time.sleep(5)\r\n page = ActivityHubPage()\r\n time.sleep(5)\r\n page.show_inactive.click()\r\n page.search_activity_field.send_keys(NewActivityName)\r\n time.sleep(5)\r\n page.activity_actions.click()\r\n page.edit_activity.click()\r\n page = AddEditActivityPage()\r\n time.sleep(15)\r\n assert page.switchers1[0].get_attribute(\"outerHTML\") != switcher_OFF\r\n assert page.stop_no_sales.is_enabled() == False\r\n cursor.execute(\"SELECT TOP 1 activity_stopbooking_hoursbefore, activity_stopbooking_midnightbefore, activity_name FROM activity ORDER BY activity_id DESC\")#STEP10\r\n row = cursor.fetchone()\r\n assert row[0] == 0\r\n assert row[1] == 1\r\n assert row[2] == NewActivityName\r\n","sub_path":"Tests_Activity Hub- Activities/test_GODO-600 Checking Stop booking if no sales field.py","file_name":"test_GODO-600 Checking Stop booking if no sales field.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"417126433","text":"n=int(input())\r\nk=[]\r\nA=[]\r\nfor i in range(n):\r\n expo=0\r\n x=input()\r\n N,M=x.split( )\r\n N=int(N)\r\n M=int(M)\r\n a=[int(y) for y in input().split()]\r\n b=[int(z) for z in input().split()]\r\n ans=0\r\n for b1 in b:\r\n #b1=int(b1)\r\n for a1 in range(len(a)):\r\n if a1+b1<=len(a):\r\n k.append(sum(a[a1:a1+b1]))\r\n m=max(k)\r\n #print(m)\r\n pos=k.index(max(k))\r\n ans=ans+((-1)**(expo))*m\r\n expo=expo+1\r\n k=[]\r\n a=a[pos+1:pos+b1-1]\r\n #print(a)\r\n A.append(ans)\r\nfor q in A:\r\n print(q,end=\"\\n\")\r\n \r\n","sub_path":"ninterval.py","file_name":"ninterval.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"564500393","text":"import numpy as np\nimport pandas_ml\nimport sklearn\nfrom keras.layers import Dense, Dropout\nfrom keras.layers import LSTM, Embedding\nfrom keras.models import Sequential\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\n\nfrom archived import data_helper\n\nMAX_SEQUENCE_LENGTH = 100\nEMBEDDING_DIM = 300\nVALIDATION_SPLIT = 0.1\nTEST_SPLIT = 0.1\n\n# Number of Neurons default: 200\nUNITS = 200\n# Drop out default: 0.2\nDROPOUT = 0.2\n# Activation default: 'softmax' or 'sigmoid'\nACTIVATION = 'softmax'\n# optimizer default: 'rmsprop' or 'Adam'\nOPTIMIZER = 'rmsprop'\n# losses default: 'categorical_crossentropy' or 'binary_crossentropy'\nLOSS = 'categorical_crossentropy'\n# default 2\nEPOCH = 2\nBATCH_SIZE = 128\n\ntrain_texts, train_labels, test_texts, test_labels = data_helper.makeDataForSentences(\n filename='mydata/data_722.xlsx')\nall_texts = train_texts + test_texts\nall_labels = train_labels + test_labels\n\nprint('(2) doc to var...')\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(all_texts)\nsequences = tokenizer.texts_to_sequences(all_texts)\nword_index = tokenizer.word_index\n\ndata = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\nlabels = to_categorical(np.asarray(all_labels))\n\nx_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=TEST_SPLIT, random_state=42)\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=VALIDATION_SPLIT, random_state=42)\n\nmodel = Sequential()\nmodel.add(Embedding(len(word_index) + 1, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH)) # input, output, max_length\nmodel.add(LSTM(UNITS, dropout=DROPOUT, recurrent_dropout=DROPOUT))\nmodel.add(Dropout(DROPOUT))\nmodel.add(Dense(labels.shape[1], activation=ACTIVATION))\nmodel.summary()\n\nmodel.compile(loss=LOSS,\n optimizer=OPTIMIZER,\n metrics=['acc'])\nprint(model.metrics_names)\nmodel.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=EPOCH, batch_size=BATCH_SIZE)\n\nprint('(6) testing model...')\nprint(model.evaluate(x_test, y_test))\n\n# Confusion Matrix\nprediction = model.predict_classes(x_test, verbose=0)\nreal = []\nfor item in y_test:\n real.append(data_helper.matrix2value(item))\nmatrix = pandas_ml.ConfusionMatrix(real, prediction)\nprint(\"Confusion Matrix:\\n%s\\nReport:\\n%s\\n\" % (\n matrix, sklearn.metrics.classification_report(real, prediction, digits=4)))\n","sub_path":"archived/one_LSTM.py","file_name":"one_LSTM.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"242054980","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth import logout,login,authenticate\nfrom .models import *\nfrom .forms import *\n\n# Create your views here.\n\ndef index(request):\n return render(request,'index.html')\n\ndef Details(request):\n profiles=Profile.objects.all()\n return render(request,'details.html',{'profiles':profiles})\n\ndef CreateUser(request):\n context={}\n if request.POST:\n form1=UserForm(request.POST)\n form2=ProfileForm(request.POST)\n if form1.is_valid() and form2.is_valid():\n user=form1.save(commit=False)\n user.save()\n f2=form2.save(commit=False)\n f2.user=user\n f2.save()\n return redirect('index')\n else:\n context['form1']=form1\n context['form2']=form2\n else:\n form1=UserForm()\n form2=ProfileForm()\n context['form1']=form1\n context['form2']=form2\n return render(request,'register.html',context)\n\ndef Edit(request,pk):\n if not request.user.is_authenticated:\n return redirect('login')\n context={}\n user=MyUser.objects.get(pk=pk)\n profile=Profile.objects.get(user=user)\n form1=UserForm(request.POST or None, request.FILES or None,instance=user)\n form2=ProfileForm(request.POST or None, request.FILES or None,instance=profile)\n if request.POST:\n if form1.is_valid() and form2.is_valid():\n form1.save()\n form2.save()\n return redirect('details')\n context={'form1':form1,'form2':form2}\n return render(request,'edit.html',context)\n\ndef Delete(request,pk):\n user=MyUser.objects.get(pk=pk)\n # profile=Profile.objects.get(user=user)\n user.delete()\n return redirect('details')\n\ndef logout_view(request):\n logout(request)\n return redirect('index')\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"52491884","text":"def multiply(num1,num2):\n if num1<10 and num2<10:\n return num1*num2\n else:\n num_length=len(str(num1)) if len(str(num1)) >len(str(num2)) else len(str(num2))\n num1_length=len(str(num1))\n num2_length=len(str(num2))\n num1_partition=num_length//2\n num2_partition=num_length//2\n \n a=num1//(10**num1_partition)\n b=num1%(10**num1_partition)\n c=num2//(10**num2_partition)\n d=num2%(10**num2_partition)\n #print(a,b,c,d)\n # we have to find -> ac* 10^(Na+Nb)/2 + bd + \n # ad\n firstcoeff=multiply(a,c)\n third_coef=multiply(b,d)\n second_coef=multiply(a+b,c+d)-firstcoeff-third_coef\n \n return 10**num1_length*firstcoeff+10**num1_partition*(second_coef)+third_coef\n\ndef karat(x,y):\n if len(str(x))== 1 or len(str(y))==1:\n return x*y\n\n n = max(len(str(x)),len(str(y))) // 2\n\n a = x // 10**(n)\n b = x % 10**(n)\n c = y // 10**(n)\n d = y % 10**(n)\n\n z0 = karat(b,d)\n z1 = karat((a+b), (c+d))\n z2 = karat(a,c)\n\n return ((10**(2*n))*z2)+((10**n)*(z1-z2-z0))+z0\nif __name__==\"__main__\":\n print(karat(3141592653589793238462643383279502884197169399375105820974944592\n\n,2718281828459045235360287471352662497757247093699959574966967627))\n # we have to find ac bd and bd+dc","sub_path":"karatsuba.py","file_name":"karatsuba.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"268185664","text":"import structlog\n\nfrom . import signals\n\n\nlogger = structlog.getLogger(__name__)\n\n\ndef receiver_before_task_publish(sender=None, headers=None, body=None, **kwargs):\n context = structlog.contextvars.get_merged_contextvars(logger)\n if \"task_id\" in context:\n context[\"parent_task_id\"] = context.pop(\"task_id\")\n\n signals.modify_context_before_task_publish.send(\n sender=receiver_before_task_publish, context=context\n )\n\n import celery\n\n if celery.VERSION > (4,):\n headers[\"__django_structlog__\"] = context\n else:\n body[\"__django_structlog__\"] = context\n\n\ndef receiver_after_task_publish(sender=None, headers=None, body=None, **kwargs):\n logger.info(\n \"task_enqueued\",\n child_task_id=headers.get(\"id\") if headers else body.get(\"id\"),\n child_task_name=headers.get(\"task\") if headers else body.get(\"task\"),\n )\n\n\ndef receiver_task_pre_run(task_id, task, *args, **kwargs):\n structlog.contextvars.clear_contextvars()\n structlog.contextvars.bind_contextvars(task_id=task_id)\n metadata = getattr(task.request, \"__django_structlog__\", {})\n structlog.contextvars.bind_contextvars(**metadata)\n signals.bind_extra_task_metadata.send(\n sender=receiver_task_pre_run, task=task, logger=logger\n )\n logger.info(\"task_started\", task=task.name)\n\n\ndef receiver_task_retry(request=None, reason=None, einfo=None, **kwargs):\n logger.warning(\"task_retrying\", reason=reason)\n\n\ndef receiver_task_success(result=None, **kwargs):\n signals.pre_task_succeeded.send(\n sender=receiver_task_success, logger=logger, result=result\n )\n logger.info(\"task_succeeded\")\n\n\ndef receiver_task_failure(\n task_id=None,\n exception=None,\n traceback=None,\n einfo=None,\n sender=None,\n *args,\n **kwargs,\n):\n throws = getattr(sender, \"throws\", ())\n if isinstance(exception, throws):\n logger.info(\n \"task_failed\",\n error=str(exception),\n )\n else:\n logger.exception(\n \"task_failed\",\n error=str(exception),\n exception=exception,\n )\n\n\ndef receiver_task_revoked(\n request=None, terminated=None, signum=None, expired=None, **kwargs\n):\n logger.warning(\n \"task_revoked\", terminated=terminated, signum=signum, expired=expired\n )\n\n\ndef receiver_task_unknown(message=None, exc=None, name=None, id=None, **kwargs):\n logger.error(\"task_not_found\", message=message)\n\n\ndef receiver_task_rejected(message=None, exc=None, **kwargs):\n logger.error(\"task_rejected\", message=message)\n\n\ndef connect_celery_signals():\n from celery.signals import before_task_publish, after_task_publish\n\n before_task_publish.connect(receiver_before_task_publish)\n after_task_publish.connect(receiver_after_task_publish)\n","sub_path":"django_structlog/celery/receivers.py","file_name":"receivers.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"428274677","text":"########Bootstrap config constants\n\nfrom enum import Enum\n\nLEAF_P4_INFO_FILE_PATH = \"./p4src/Build/leaf_p4info.txt\"\nLEAF_BMV2_JSON_FILE_PATH = \"./p4src/Build/leaf.json\"\n\nSPINE_P4_INFO_FILE_PATH = \"./p4src/Build/spine_p4info.txt\"\nSPINE_BMV2_JSON_FILE_PATH = \"./p4src/Build/spine.json\"\n\nSUPER_SPINE_P4_INFO_FILE_PATH = \"./p4src/Build/spine_p4info.txt\"\nSUPER_SPINE_BMV2_JSON_FILE_PATH = \"./p4src/Build/spine.json\"\n\n#This is the file, that contains the topology details. This file is generated from mininet simuator. After starting mininet simulator, this file is used by the\n#controller and result processor\nTOPOLOGY_CONFIG_FILE = \"./MininetSimulator/Build/Internalnetcfg.json\"\nHOST_COMMAND_FOLDER = \"./MininetSimulator/PER_HOST_COMMANDS/\"\n\n# This is the file where all controller logs wil be written\nCONTROLLER_LOG_FILE_PATH = \"./log/CONTROLLER.log\"\nSTATISTICS_LOG_FILE_PATH = \"./log/STATISTICS.log\"\nMAX_LOG_FILE_SIZE = 52428800 #50 MB\nMAX_LOG_FILE_BACKUP_COUNT = 250 # MAximum 25 files will be kept\nIPERF_MAX_FLOW_RATE_FOR_SERVER = \"4K\" #Iperf flow rate is made maximum to 64K. if we keep ubnlimited it swamps the buffer and experiemnts doesn't work really good\nIPERF_DEFAULT_WINDOW_SIZE_FOR_SERVER = \"1.4K\"\nIPERF_PACING_TIMER = 32\n#This is the path where all the counter values from devices will be written. Or we can directly show some data in live view of gnuplot\nLAMBDA = 25\nCONTROLLER_STATISTICS_RESULT_FILE_PATH = \"./result/\"\n#This is the path where all logs while processing the results willl be written\nRESULT_PROCESSOR_LOG_FILE_PATH = \"./log/RESULT_PROCESSOR_LOG.log\"\n\n\n\n\n\n#------------Usually buffer size should be Delay * bandwidth . for bmv2 based testing this have to be represented and configured through Queue depth.\n# ------ So we will multiply port bandwidth by a factor to estimate the Delay * BW . So by this factor we are actually estimating the Delay factor.\nQUEUE_RATE_TO_QUEUE_DEPTH_FACTOR = 2 # this means if for a port queu rate is x it's queue deth will be 5x\nMAX_PORT_NUMBER = 256 # This field means each switch will have maximum 1024 ports. Corresponding value (MAX_PORTS_IN_SWITCH=1024) also needed to be set in P4 constant.p4 file\nMAX_PORT_NUMBER_PLUS_ONE = MAX_PORT_NUMBER+1 # This special number is used for creating multicast sessions\n\n#=======this parameter is required for meters of each port. We have, setup queue rate for each ports. So the CIR will be queue_rate * CIR threshold factor and PIR will be queue rate\n#===== This parameter is not used at this moment\nINGRESS_STATS_METER_CIR_THRESHOLD_FACTOR = 0.6 # This means each port will color packet yellow when it reaches 70% of the queu rate and red when. These are initial rate. 
In runtime we will set them dynamically\nINGRESS_STATS_METER_CBURST_FACTOR = 0.1\nINGRESS_STATS_METER_PIR_FACTOR = 0.8\nINGRESS_STATS_METER_PBURST_FACTOR = 0.2 #--- This 4 parameters are not used at this moment\n\n#==== This is one of our major parameter and used\nEGRESS_STATS_METER_CIR_THRESHOLD_FACTOR = 0.70 # This means each port will color packet yellow when it reaches 70% of the queu rate and red when\nEGRESS_STATS_METER_CBURST_FACTOR = 0.1\nEGRESS_STATS_METER_PIR_FACTOR = 0.9\nEGRESS_STATS_METER_PBURST_FACTOR = 0.1\n\n# === These 2 arrays defines, what portion of total upward traffic processing capcicyt is reserved for which class of trffic\n#========= First array lists the accepted traffic classes, 2 nd array defines corresponding percentage to be configred in meter for ingress rate monitoring\n# ==== at this moment we are setting equal percentage for all 3 types of switches. But may need to make 3 different types of percentage for different types of switches\n# IPTOS_LOWDELAY minimize delay 0x10\n# IPTOS_THROUGHPUT maximize throughput 0x08\n# IPTOS_RELIABILITY maximize reliability 0x04\n# IPTOS_LOWCOST minimize cost 0x02\nTRAFFIC_CLASS_LOW_DELAY = 0x04\nTRAFFIC_CLASS_MAXIMIZE_THROUGHPUT = 0x02\nTRAFFIC_CLASS_MAXIMIZE_PROFIT = 0x08 # LOW cost .. if we want to present our low cost that means profit is maximized. but that will require dvisiion. so we are taking direct maximize profit\nTRAFFIC_CLASS_AS_LIST = [TRAFFIC_CLASS_LOW_DELAY, TRAFFIC_CLASS_MAXIMIZE_THROUGHPUT , TRAFFIC_CLASS_MAXIMIZE_PROFIT]\n#-- for only one category of flow if we prioritize rate for that we can get better perofrmance.\n#here for large flow giving 70%- gives better peprformance compare to ECMP\n#PERCENTAGE_OF_TOTAL_UPWARD_TRAFFIC_FOR_TRAFFIC_CLASS = [40, 70, 10]\nPERCENTAGE_OF_TOTAL_UPWARD_TRAFFIC_FOR_TRAFFIC_CLASS = [10,40, 5] # How much of the link capacity should a traffic class get.\n#======================thread control and timer related\nSTATISTICS_PULLING_INTERVAL = 1 # This meand after each 1 second controller will wake up the StatisticsPuller thread and collect stats from the switches\nPORT_STATISTICS_HISTORY_LENGTH = 1000 # this means the history will be\n#======================= Different Test Scenarios\nclass DataplnaeAlgorithm(Enum):\n DP_ALGO_BASIC_ECMP = \"ecmp\"\n DP_ALGO_BASIC_HULA = \"hula\"\n DP_ALGO_BASIC_CLB = \"clb\"\n\nALGORITHM_IN_USE = DataplnaeAlgorithm.DP_ALGO_BASIC_ECMP #For CLB it will be always ECMP\n\n\nqueueRateForHostFacingPortsOfLeafSwitch = 64\nqueueRateForSpineFacingPortsOfLeafSwitch = 32\nqueueRateForLeafSwitchFacingPortsOfSpineSwitch= 32\nqueueRateForSuperSpineSwitchFacingPortsOfSpineSwitch=512\nqueueRateForSpineSwitchFacingPortsOfSuperSpineSwitch=512\nqueueRateForExternalInternetFacingPortsOfSuperSpineSwitch=2048\n\n\n\n# #============================= Security access==========================\n\n\n#========================= Metrics Level Related configuration-- these are not uused at this moment=======================\n\n#=============================Port to Port delay levels: each tuple are of format (low, hi, level,weight)================\n\n\nPORT_TO_PORT_DELAY_LEVELS_LINEAR = [(0, 1000, 0, 0),(1001,5000,1,0), (5001, 75000,2,00)]\nEGRESS_QUEUE_DEPTH_DELAY_LEVELS_LINEAR = [(0, 2, 0, 0),(3,5,1,0), (6, 
10,2,00)]\n\n\n\n\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n############################################################ All CONFIGURATIONS RELATED TO RESULT PROCESSING ########################################################################\n############################################################# Starts from Here ##########################################################################\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n\nFLOW_TYPE_IDENTIFIER_BY_FLOW_VOLUME_IN_KB = [ 50, 128, 256,1024] # These means in our experiments we will consider 2 types of traffic . one with 50 KB size another 1 MB or 1024 KB\nFLOW_TYPE_LOAD_RATIO = [ 10,5, 5, 80] # This means 80% flows are short and 20# are large\nFLOW_VOLUME_IDENTIFIER_VARIATION_LIMIT_IN_PERCENTAGE = 80 # this means any flow size within range of 15% defined in previous array will be categorized as flow of same type. 
80 percent is configured to acoomdate both 10kb and 50 kb flow\nPACKET_SIZE = 1024 # Each packet will be 1200 Byte size\n\n\n\n\n\n\n\n\n\n\n\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n############################################################ All CONFIGURATIONS RELATED TO TEST EXECUTION ########################################################################\n############################################################# Starts from Here ##########################################################################\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n#######################################################################################################################################################################################\n\n\n\n\n\nIPERF3_SERVER_PORT_START = 42000\nIPERF3_CLIENT_PORT_START = 32000\n#We are forced to pass the absolute path because the Iperf tests are ctually run from mininet hosts. which do not understand\n#the relative path. so please use the absolute path where you want to store your result\nTEST_RESULT_FOLDER = \"/home/deba/Desktop/CLB/testAndMeasurement/TEST_RESULTS\"\nTEST_RESULT_FOLDER_SERVER = \"/server-logs\"\nTEST_RESULT_FOLDER_CLIENT = \"/client-logs\"\nTEST_START_TIME_FILE_NAME =\"/test_start_timer.txt\"\n\nMAX_PORT_COUNT = 4\n\n\nSSH_USER_NAME = \"YOUR user name\"\nSSH_PASSWORD = \"Your pass word\" # These access are not required. we are not using them\nSSH_PORT = 22\n\nLINUX_CC_ALGORITHM_DCTCP = \"dctcp\"\nLINUX_CC_ALGORITHM_CUBIC = \"cubic\"\n\n\n\n#=======================configurations for CLB\nCPU_PORT = 255\nCLB_TESTER_DEVICE_NAME = \"p0l0\" # As out target is only testing algorithm we will only run the CLB from one switch.\n#This parameter defines that name. The algorithm will be only run with that device\nLOAD_DISTRIBUTION_1 = [(5,2),(6,10),(7,1),(8,3)]\nLOAD_DISTRIBUTION_2 = [(5,7),(6,1),(7,6),(8,2)]\n\nDISTRO1_INSTALL_DELAY = 0 # Weight distribution 1 will be installed after 50 second of the controller thread starts\nDISTRO2_INSTALL_DELAY = 110 # Weight distribution 2 will be installed after 50 second of the controller thread starts\n\n\n\n\n\n#======================= Must match with the P4 program for CLB\nMAX_PORTS_IN_SWITCH = 8; #Maximum Supported ports in a switch to reflect the dataplane configuration\nMAX_TOR_SUBNET = 4; #Maximum ToR supported by our simulation\nBITMASK_LENGTH = 16\nPRECISION_OF_LOAD_BALANCING = 8","sub_path":"ConfigConst.py","file_name":"ConfigConst.py","file_ext":"py","file_size_in_byte":11084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"645667157","text":"'''\n\nWrite a script that reads in the words from the words.txt file and finds and prints:\n\n1. The shortest word (if there is a tie, print all)\n2. The longest word (if there is a tie, print all)\n3. The total number of words in the file.\n\n\n'''\n\n\n# empty list\nshortest_words = []\nlongest_words = []\nword_list = []\nnumber = 0\n\n# open with 'with'//'as' and read the lines\nwith open(\"words.txt\", \"r\") as file:\n content = file.read()\n word_list = content.split()\n word_list.sort(key=len)\n\n\n# --- Task 1 ---\n# finding the shortest word overall in the file\n# word_list is sorted, that's why the shortest word is at the front\nshort = word_list[0]\n\n# iterate trough the rest of the file and append every word that is as short as the shortest\nfor word in word_list:\n if len(word) <= len(short):\n shortest_words.append(word)\n\nprint(shortest_words)\n\n# --- Task 2 ---\n# finding the longest word overall in the file\n# word_list is sorted, that's why the longest word is in the back\nlong = word_list[-1]\n\n# iterate trough the rest of the file and append every word that is as long as the shortest\nfor word in word_list:\n if len(word) >= len(long):\n longest_words.append(word)\n\nprint(longest_words)\n\n# --- Task 3 ---\nfor word in word_list:\n number += 1\n\nprint(f\"The total number of words in 'words.txt' is: {number}\")\n\n","sub_path":"07_file_io/07_01_words_analysis.py","file_name":"07_01_words_analysis.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"110720620","text":"\"\"\"The Intel MPI Benchmarks\n https://software.intel.com/en-us/articles/intel-mpi-benchmarks\n\"\"\"\nfrom abc import abstractmethod, abstractproperty\nimport re\n\nfrom cached_property import cached_property\n\nfrom hpcbench.api import (\n Benchmark,\n Metrics,\n MetricsExtractor,\n)\nfrom hpcbench.toolbox.process import find_executable\n\n\nclass IMBExtractor(MetricsExtractor):\n \"\"\"Abstract class for IMB benchmark metrics extractor\n \"\"\"\n @abstractproperty\n def metrics(self):\n \"\"\" The metrics to be extracted.\n This property can not be replaced, but can be mutated as required\n \"\"\"\n\n @abstractproperty\n def stdout_ignore_prior(self):\n \"\"\"Ignore stdout until this line\"\"\"\n\n @cached_property\n def metrics_names(self):\n \"\"\"get metrics names\"\"\"\n return set(self.metrics)\n\n def extract_metrics(self, outdir, metas):\n # parse stdout and extract desired metrics\n with open(self.stdout(outdir)) as istr:\n for line in istr:\n if line.strip() == self.stdout_ignore_prior:\n break\n for line in istr:\n self.process_line(line.strip())\n return self.epilog()\n\n @abstractmethod\n def process_line(self, line):\n \"\"\"Process a line\n \"\"\"\n\n @abstractmethod\n def epilog(self):\n \"\"\":return: extracted metrics as a dictionary\n \"\"\"\n\n\nclass IMBPingPongExtractor(IMBExtractor):\n \"\"\"Metrics extractor for PingPong IMB benchmark\"\"\"\n\n LATENCY_BANDWIDTH_RE = re.compile(\n r'^\\s*(\\d+)\\s+\\d+\\s+(\\d*\\.?\\d+)[\\s]+(\\d*\\.?\\d+)'\n )\n\n def __init__(self):\n super(IMBPingPongExtractor, self).__init__()\n self.s_latency = set()\n self.s_bandwidth = set()\n\n @cached_property\n def metrics(self):\n return dict(\n latency=Metrics.Second,\n bandwidth=Metrics.MegaBytesPerSecond,\n )\n\n @cached_property\n def stdout_ignore_prior(self):\n return \"# Benchmarking PingPong\"\n\n def process_line(self, line):\n search = self.LATENCY_BANDWIDTH_RE.search(line)\n if search:\n byte = int(search.group(1))\n if byte != 0:\n self.s_latency.add(float(search.group(2)))\n self.s_bandwidth.add(float(search.group(3)))\n\n def epilog(self):\n return dict(\n latency=min(self.s_latency),\n bandwidth=max(self.s_bandwidth),\n )\n\n\nclass IMBAllToAllExtractor(IMBExtractor):\n \"\"\"Metrics extractor for AllToAll IMB benchmark\"\"\"\n\n LATENCY_RE = re.compile(\n r'^\\s*(\\d+)\\s+\\d+\\s+\\d*\\.?\\d+[\\s]+\\d*\\.?\\d+[\\s]+(\\d*\\.?\\d+)'\n )\n\n def __init__(self):\n super(IMBAllToAllExtractor, self).__init__()\n self.s_res = set()\n\n @property\n def metrics(self):\n return dict(latency=Metrics.Second)\n\n @cached_property\n def stdout_ignore_prior(self):\n return \"# Benchmarking Alltoallv\"\n\n def process_line(self, line):\n search = self.LATENCY_RE.search(line)\n if search:\n byte = int(search.group(1))\n if byte != 0:\n self.s_res.add(float(search.group(2)))\n\n def epilog(self):\n return dict(latency=min(self.s_res))\n\n\nclass IMBAllGatherExtractor(IMBAllToAllExtractor):\n \"\"\"Metrics extractor for AllGather IMB benchmark\"\"\"\n\n def __init__(self):\n super(IMBAllGatherExtractor, self).__init__()\n\n @cached_property\n def stdout_ignore_prior(self):\n return \"# Benchmarking Allgather\"\n\n\nclass IMB(Benchmark):\n \"\"\"Benchmark wrapper for the IMBbench utility\n \"\"\"\n DEFAULT_EXECUTABLE = 'IMB-MPI1'\n PING_PONG = 'PingPong'\n ALL_TO_ALL = 'Alltoallv'\n ALL_GATHER = 'Allgather'\n DEFAULT_CATEGORIES = [\n PING_PONG,\n ALL_TO_ALL,\n ALL_GATHER,\n ]\n DEFAULT_ARGUMENTS = {\n ALL_GATHER: [\"-nmpmin\", \"{process_count}\"]\n }\n\n 
def __init__(self):\n super(IMB, self).__init__(\n attributes=dict(\n data=\"\",\n executable=IMB.DEFAULT_EXECUTABLE,\n categories=IMB.DEFAULT_CATEGORIES,\n arguments=IMB.DEFAULT_ARGUMENTS,\n )\n )\n name = 'imb'\n\n description = \"Provides latency/bandwidth of the network.\"\n\n @cached_property\n def executable(self):\n \"\"\"Get absolute path to executable\n \"\"\"\n return find_executable(self.attributes['executable'])\n\n def execution_matrix(self, context):\n for category in self.attributes['categories']:\n arguments = self.attributes['arguments'].get(category) or []\n if category == IMB.PING_PONG:\n for pair in IMB.host_pairs(context):\n yield dict(\n category=category,\n command=[self.executable, category] + arguments,\n srun_nodes=pair,\n )\n else:\n yield dict(\n category=category,\n command=[self.executable, category] + arguments,\n srun_nodes=2,\n )\n\n @staticmethod\n def host_pairs(context):\n try:\n pos = context.nodes.index(context.node)\n except ValueError:\n context.logger.error(\n 'Could not find current node %s in nodes %s',\n context.node,\n ', '.join(context.nodes)\n )\n return []\n else:\n return [\n [context.node, context.nodes[i]]\n for i in range(pos + 1, len(context.nodes))\n ]\n\n @cached_property\n def metrics_extractors(self):\n return {\n IMB.PING_PONG: IMBPingPongExtractor(),\n IMB.ALL_TO_ALL: IMBAllToAllExtractor(),\n IMB.ALL_GATHER: IMBAllGatherExtractor(),\n }\n","sub_path":"hpcbench/benchmark/imb.py","file_name":"imb.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"144901197","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 4/28/17 3:45 PM\n# @Author : xiaowa\n\nimport logging\nLOG_FORMAT = logging.Formatter(\"%(asctime)s %(name)s [%(levelname)s] %(filename)s:%(lineno)d - %(message)s\")\n\nCONSOLE_HANDLER = logging.StreamHandler()\nCONSOLE_HANDLER.setFormatter(LOG_FORMAT)\n\n\ndefault_logger = logging.getLogger(\"default_logger\")\ndefault_logger.addHandler(CONSOLE_HANDLER)\ndefault_logger.setLevel(logging.INFO)\n\n\n\n\n\n\n","sub_path":"lml/lib/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"248491615","text":"class Utente:\n def __init__(self, idUtente, morada, telefoneCasa, telefoneTrabalho, nome, sexo, estadoCivil, ssn, nacionalidade,\n cidadania):\n self.idUtente = idUtente\n self.morada = morada\n self.telefoneCasa = telefoneCasa\n self.telefoneTrabalho = telefoneTrabalho\n self.nome = nome\n self.sexo = sexo\n self.estadoCivil = estadoCivil\n self.ssn = ssn\n self.nacionalidade = nacionalidade\n self.cidadania = cidadania\n\n def toString(self):\n string = \"Identificador: \" + str(self.idUtente) + \"\\n\"\n string += \"Morada: \" + self.morada + \"\\n\"\n string += \"Telefone de Casa: \" + self.telefoneCasa + \"\\n\"\n string += \"Telefone do Trabalho: \" + self.telefoneTrabalho + \"\\n\"\n string += \"Nome: \" + self.nome + \"\\n\"\n string += \"Sexo: \" + self.sexo + \"\\n\"\n string += \"Estado Civil: \" + self.estadoCivil + \"\\n\"\n string += \"Número de Utente de Saúde: \" + self.ssn + \"\\n\"\n string += \"Nacionalidade: \" + self.nacionalidade + \"\\n\"\n string += \"Cidadania: \" + self.cidadania + \"\\n\\n\"\n return string\n","sub_path":"FE01/Maquina1/Business/Models/Utente.py","file_name":"Utente.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"626765935","text":"#Name - Jyostna Thanjavur \r\n#importing Python Libraries \r\n\r\nfrom reportlab.lib import colors #reportlab library for mapping colors and matplot library\r\nfrom reportlab.lib.units import cm \r\nfrom Bio.Graphics import GenomeDiagram #biopython library for genomic diagram\r\nfrom Bio import SeqIO #library to read file\r\n\r\n#Reading the genome file using read function \r\nStore_File = SeqIO. read(\"Genome.gb\", \"genbank\")\r\n\r\nGen_Diagram = GenomeDiagram.Diagram(\"Tomato curly stunt virus\", \"complete genome\") #Creating empty diagram\r\nGen_Track = Gen_Diagram.new_track(1, name = \"Annotated Features\") #Adding empty Diagram \r\nGen_newSet = Gen_Track.new_set() #Adding empty feature set \r\n\r\n#Initiating diagram features for the objects in the genome sequence file\r\n#Switching colors between pink and green \r\n\r\nfor feature in Store_File.features:\r\n if feature.type != \"gene\":\r\n continue\r\n if len(feature) % 9 == 0: #if length of genome multiple of 9 - the gene is represented by pink color\r\n color = colors.pink\r\n else:\r\n color = colors.green # else green\r\n\r\n Gen_newSet.add_feature(feature, color = color, label = True, label_size = 20) #Adding and labeling color and size features to the diagram\r\n\r\n #Creating the output file using write and draw functions\r\n Gen_Diagram.draw(format = \"circular\", circular = True, pagesize = (30 * cm, 30 * cm), start = 0, end = len(Store_File), circle_core = 0.8)\r\n Gen_Diagram.write(\"OutputFile.jpg\", \"JPG\")","sub_path":"GenomeCode.py","file_name":"GenomeCode.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"569662751","text":"mins = [1, 2, 3]\nsecs = [m*60 for m in mins]\n\"\"\"\nlist comprehensions always have the same format:\n`new_list = [ function(x) for x in old_list ]` where _x_ is the target identifier and the *function(x)* is the \ntransformation. \n\nThis is somewhat of a _functional programming_ concept. \n\nNOTE that list comprehensions can only deal with situations where each item in the list has to be transformed, and thus\nif the transformation has to occur on the basis of certain criteria, then it doesn't work. \n\"\"\"\nprint(secs)\n\n# Transforming meters into feet:\nmeters = [1, 10, 3]\nfeet = [m*3.281 for m in meters]\nprint(feet)\n\n# Lower and Mixed case to uppercase:\nlower = [\"I\", \"don't\", \"like\", \"spam\"]\nupper = [s.upper() for s in lower] # .upper() method of a string converts it to all-uppercase.\nprint(upper)\n\n# Converting string to float using float() BIF:\ntimeStr = ['2.12', '2.76', '2.35']\ntimeFloat = [float(t) for t in timeStr]\nprint(timeFloat)","sub_path":"HeadFirstPython/listComprehensions.py","file_name":"listComprehensions.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"166150562","text":"'''\nполучить список пользователей с их полом\n\nпройтись по всему списку\nесли пол = 1, то\nполучить номер пользователя в списке\nзанести номер в список\n\nпройтись с этими номерами в read_from_file_to_list.py\nзанести все группы в список\n\nотправить этот список в edit_file.py\n'''\n\nimport urllib.request\nimport json\n\n#https://oauth.vk.com/authorize?client_id=5097800&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=friends&response_type=token&v=5.37\n\n\nACCESS_TOKEN = 'f7bc8dfed9eb001f003a979c5774d153a49ba80cb6e7744bef98d90f05b028571dc05a627fd5418e1653e'\n#GROUPS_ID = 24098940 #83000 m.kala\n#GROUPS_ID = 59142119 #1237 Peri\n#GROUPS_ID = 67824212 #139 zerkalny_mir\nGROUPS_ID = 43817239 #78 v efire\n#GROUPS_ID = 51094072 #25\n#GROUPS_ID = 91843912 #409\n#GROUPS_ID = 66066496 #kasp\n#GROUPS_ID = 68017962 #derbent\n#GROUPS_ID = 19514611 #antimesto\n#GROUPS_ID = 51392247 #friendzone\nurl_begin = 'https://api.vk.com/method/execute?code=return['\n\ndef get_data_from_response(loc_offset, loc_edge):\n url_tmp = 'API.groups.getMembers({\"group_id\":%d,\"offset\":' % (GROUPS_ID)\n url_end = \"\"\n for i in range(loc_offset, loc_edge, 1000):\n url_end += url_tmp + str(i) + \"}),\"\n\n response = urllib.request.urlopen(url_begin + url_end + '];&access_token=' + ACCESS_TOKEN)\n data = json.loads(response.read().decode('utf-8'))['response']\n\n return get_uids_from_data(data)\n\n\ndef get_uids_from_data(loc_data):\n users_list = []\n for i in range(0, 25):\n users_list.extend(loc_data[i]['users'])\n\n return users_list","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"173634798","text":"# -*- coding: utf-8 -*-\nimport logging\nimport random\n\nimport numpy as np\n\n\nclass SBM(object):\n\n def __init__(self, num_vertices, communities, vertex_labels, p_matrix, p_matrix_2):\n logging.info('Initializing SBM Model ...')\n self.num_vertices = num_vertices\n self.communities = communities\n self.vertex_labels = vertex_labels\n self.p_matrix = p_matrix\n self.p_matrix_2 = p_matrix_2\n self.block_matrix, self.block_matrix_2 = self.generate()\n \n\n def generate(self):\n logging.info('Generating SBM ...')\n v_label_shape = (1, self.num_vertices)\n p_matrix_shape = (self.communities, self.communities)\n block_matrix_shape = (self.num_vertices, self.num_vertices)\n block_matrix = np.zeros(block_matrix_shape, dtype=int)\n block_matrix_2 = np.zeros(block_matrix_shape, dtype=int)\n\n for row, _row in enumerate(block_matrix):\n for col, _col in enumerate(block_matrix[row]):\n if row>col:\n community_a = self.vertex_labels[row]\n community_b = self.vertex_labels[col]\n\n p = random.random()\n \n val = self.p_matrix[community_a][community_b]\n\n if p < val:\n block_matrix[row][col] = 1\n block_matrix[col][row] = 1\n\n val = self.p_matrix_2[community_a][community_b]\n\n if p < val:\n block_matrix_2[row][col] = 1\n block_matrix_2[col][row] = 1\n\n return block_matrix, block_matrix_2\n\n\npass","sub_path":"nettack/sbm.py","file_name":"sbm.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"447196924","text":"#!/usr/bin/python\n# ^_^ coding: utf-8 ^_^\nsrc_path = \"/home/wwwroot/test.txt\"\ndst_path = \"/home/wwwroot/test2.txt\"\nsrc_file = open(src_path,\"r\")\ndst_file = open(dst_path,\"w\")\nsize = 1024\nwhile True:\n file = src_file.read(size)\n if file ==\"\":\n break\n dst_file.write(file)\n\nsrc_file.close()\ndst_file.close()\n","sub_path":"class4/copfile1.py","file_name":"copfile1.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"523079281","text":"# Always set X is shorter list, Y is longer list\n# Perform binary search on X, and cut X and Y, to become two balanced pieces since we are calculating median.\n# We want both piece equal number or diff one\n# And all numbers in left side should smaller than any number in right side.\n# Left side piece number = X left side piece plus Y left side piece\n# Right side piece number = X right side piece plus Y right side piece\n# And since we know the total number, if we know X left side piece total number\n# Then we know Y left side size, then know Y cut point index.\n# Then verify if its legal. Special attention to four number besides cut line.\n# xLeft always < xRight, yLeft always < yRight. But xLeft yRight, xRight yLeft is uncertain.\n# If xLeft < yRight, yLeft < xRight, then its a legal cut. Depend on total number is odd or even, we get its median\n# If its illegal, we should move cut point in X either moving left or moving right.\n# If we move X cut point to left, then for the left piece, it contains less number from X and more number from Y.\n# At the same time, for the cutting line four number, cutting number from X is smaller, and cutting number from Y is bigger.\n# If we move X cut point to right, then for the left piece, it contains more number from X and less number from Y.\n# At the same time, for the cutting line four number, cutting number from X is bigger, and cutting number from Y is smaller.\nclass Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n left = 0\n right = len(nums1)\n if len(nums1) > len(nums2):\n return self.findMedianSortedArrays(nums2, nums1)\n while left <= right:\n nums1SplitIndex = left + int((right-left)/2)\n nums2SplitIndex = len(nums1) + len(nums2)\n nums2SplitIndex = int(nums2SplitIndex/2) - nums1SplitIndex\n if nums1SplitIndex > 0:\n xLeft = nums1[nums1SplitIndex -1]\n else:\n xLeft = float(\"-inf\")\n if nums1SplitIndex < len(nums1):\n xRight = nums1[nums1SplitIndex]\n else:\n xRight = float(\"inf\")\n if nums2SplitIndex >0:\n yLeft = nums2[nums2SplitIndex -1]\n else:\n yLeft = float(\"-inf\")\n if nums2SplitIndex < len(nums2):\n yRight = nums2[nums2SplitIndex]\n else:\n yRight = float(\"inf\")\n if xLeft <= yRight and yLeft <= xRight:\n if (len(nums1) + len(nums2))%2 ==0:\n return float(max(xLeft, yLeft) + min(xRight, yRight))/2\n else:\n return min(xRight,yRight)\n if yLeft > xRight:\n left = nums1SplitIndex + 1\n else:\n right = nums1SplitIndex -1\n\n","sub_path":"面试-LeetCode题/基础算法4-二分搜索/LeetCode4(Median of Two Sorted Arrays)/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"548575268","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('participants', '0011_auto_20150726_2337'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='participant',\n name='rnd_str',\n field=models.CharField(help_text=b'Unique string per participant', unique=True, max_length=30, verbose_name=b'random string', blank=True),\n ),\n ]\n","sub_path":"participants/migrations/0012_auto_20150726_2348.py","file_name":"0012_auto_20150726_2348.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"333787674","text":"from __future__ import print_function\n\n# *-* coding: utf-8 *-*\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom HodaDatasetReader import read_hoda_dataset\nfrom sklearn.metrics import classification_report\n\nimport keras\nfrom keras import backend as K\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\n\ndef recall_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\ndef precision_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\ndef f1_m(y_true, y_pred):\n precision = precision_m(y_true, y_pred)\n recall = recall_m(y_true, y_pred)\n return 2*((precision*recall)/(precision+recall+K.epsilon()))\n\nbatch_size = 128\nnum_classes = 10\nepochs = 5\n\nprint('Reading train dataset (Train 60000.cdb)...')\nx_train, y_train = read_hoda_dataset(dataset_path='./DigitDB/Train 60000.cdb',\n images_height=32,\n images_width=32,\n one_hot=False,\n reshape=True)\n\nprint('Reading test dataset (Test 20000.cdb)...')\nx_test, y_test = read_hoda_dataset(dataset_path='./DigitDB/Test 20000.cdb',\n images_height=32,\n images_width=32,\n one_hot=False,\n reshape=True)\n\nprint()\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_shape=(1024,)))\n#model.add(Dropout(0.2))\n#model.add(Dense(512, activation='relu'))\n#model.add(Dropout(0.2))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer=RMSprop(),\n metrics=['accuracy', f1_m, precision_m, recall_m])\n\nhistory = model.fit(x_train, y_train,\n epochs=epochs,\n verbose=1,\n validation_split=0.1)\n\ny_pred = model.predict(x_test, batch_size=64, verbose=1)\ny_pred_max = np.argmax(y_pred, axis=1)\ny_test_max = np.argmax(y_test, axis=1)\n\nprint(classification_report(y_test_max, y_pred_max))\n\nscore = model.evaluate(x_test, y_test, verbose=0)\n\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nprint('Test f1_score:', score[2])\nprint('Test precision:', score[3])\nprint('Test recall:', score[4])\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.savefig('loss.png')\nplt.show()\n","sub_path":"HomeWork2/MLP_b.py","file_name":"MLP_b.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"39511418","text":"import time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass BaseAction:\n\n def __init__(self, driver):\n self.driver = driver\n\n def click(self, loc, time=10, poll=1):\n self.find_element(loc, time, poll).click()\n\n def input_text(self, loc, text, time=10, poll=1):\n self.find_element(loc, time, poll).send_keys(text)\n\n def clear_text(self, loc, time=10, poll=1):\n self.find_element(loc, time, poll).clear()\n\n def back(self):\n self.driver.keyevent(4)\n\n def find_element(self, loc, time=10, poll=1):\n loc_by, loc_value = loc\n if loc_by == By.XPATH:\n loc_value = self.make_xpath_with_feature(loc_value)\n return WebDriverWait(self.driver, time, poll).until(lambda x: x.find_element(loc_by, loc_value))\n\n def find_elements(self, loc, time=10, poll=1):\n loc_by, loc_value = loc\n if loc_by == By.XPATH:\n loc_value = self.make_xpath_with_feature(loc_value)\n return WebDriverWait(self.driver, time, poll).until(lambda x: x.find_elements(loc_by, loc_value))\n\n def scroll_page_one_time(self, direction=\"down\"):\n window_size = self.driver.get_window_size()\n window_height = window_size[\"height\"]\n window_width = window_size[\"width\"]\n end_y = window_height * 0.25\n start_y = end_y * 3\n center_x = window_width * 0.5\n\n if direction == \"down\":\n self.driver.swipe(center_x, start_y, center_x, end_y)\n elif direction == \"up\":\n self.driver.swipe(center_x, end_y, center_x, start_y)\n else:\n raise Exception(\"请输入正确的direction参数\")\n\n\n # 滑动当前页面到某个元素出现\n def scroll_page_until_loc(self, loc, direction=\"down\"):\n while True:\n try:\n self.find_element(loc)\n break\n except Exception:\n\n self.scroll_page_one_time(direction)\n time.sleep(1)\n\n def make_xpath_with_feature(self, feature):\n xpath_start = \"//*[\"\n xpath_end = \"]\"\n xpath = \"\"\n if isinstance(feature, str):\n xpath = self.make_xpath_with_unit_feature(feature)\n else:\n for i in feature:\n xpath = xpath + self.make_xpath_with_unit_feature(i)\n xpath = xpath.rstrip(\"and\")\n xpath = xpath_start + xpath + xpath_end\n return xpath\n\n def make_xpath_with_unit_feature(self, unit_feature):\n xpath = \"\"\n args = unit_feature.split(\",\")\n if len(args) == 2:\n xpath = xpath + \"@\" + args[0] + \"='\" + args[1] + \"'and\"\n elif len(args) == 3:\n if args[2] == \"1\":\n xpath = xpath + \"contains(@\" + args[0] + \",'\" + args[1] + \"')and\"\n elif args[2] == \"0\":\n xpath = xpath + \"@\" + args[0] + \"='\" + args[1] + \"'and\"\n return xpath\n\n","sub_path":"base/base_action.py","file_name":"base_action.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"552305927","text":"from selenium import webdriver\nfrom selenium.webdriver.support.select import By\nfrom features.pageobjects.page import Page\nfrom hamcrest import assert_that, contains_string, equal_to\n\ndriver = webdriver.Chrome(\"E:\\Soft\\chromedriver_win32\\chromedriver.exe\")\npage = Page (driver)\npage.maximize_window()\npage.navigate('http://localhost:8080/Shop/')\nassert_that (driver.title, contains_string ('opencart'))\n\nSEARCH_BOX = driver.find_element_by_name('search')\nSEARCH_RESULT = (By.CSS_SELECTOR,'#content > div:nth-child(8)')\n\npage.send_key_and_press_enter(SEARCH_BOX, 'phone')\nhas_result = page. is_element_visible(SEARCH_RESULT)\nassert_that(has_result,equal_to(True))\n\npage.quit()\n","sub_path":"exercises/linhhoang16/search_product.py","file_name":"search_product.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"126086564","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import (\n\tCommentDetailAPIView, \n\tCommentListAPIView,\n\t) \n\nurlpatterns = [\n\turl(r'^$', CommentListAPIView.as_view(), name= \"list\"),\n url(r'^(?P\\d+)/$', CommentDetailAPIView.as_view(), name= \"thread\"),\n # url(r'^(?P\\d+)/delete/$', comment_delete, name = \"delete\"),\n]\n","sub_path":"comments/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"228002128","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ntemp_graph.py\nwritten by Joseph Metrailler\nto send file to dropbox\n\n--> Envoie sur dropbox un graphique des températures moyennées sur la dernière heure (chaque engistrement s'ajoute à al liste)\n exécuté à chaque heure 7/7 365/365\n\n\"\"\"\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nplt.switch_backend('Agg')\nfrom matplotlib.dates import DateFormatter\n\nimport pymysql\npymysql.install_as_MySQLdb()\nimport MySQLdb as mdb\n\nimport datetime\nfrom datetime import datetime, timedelta, date\nimport time\n\nfrom math import floor\nimport os\nimport dropbox \n\nclass transfer_data:\n def __init__(self, access_token):\n self.access_token = access_token\n\n def upload_file(self, file_from, file_to):\n \"\"\"upload a file to Dropbox using API v2\n \"\"\"\n dbx = dropbox.Dropbox(self.access_token)\n with open(file_from, 'rb') as f:\n dbx.files_upload(f.read(), file_to, mode=dropbox.files.WriteMode.overwrite)\n\nclass new_record:\n def __init__(self):\n self.databaseUsername=\"pi\" #YOUR MYSQL USERNAME, USUALLY ROOT\n self.databasePassword=\"mablonde\" #YOUR MYSQL PASSWORD \n self.databaseName=\"tlogger\" #YOUR DATABASE NAME\n self.record = \"\"\n def read_temp(self):\n timeEndMesure = datetime.today()\n timeBeginMesure = timeEndMesure - timedelta(minutes = 60)\n # connect the db and create the cursor to access the database \n con=mdb.connect(\"localhost\", self.databaseUsername, self.databasePassword, self.databaseName)\n cur=con.cursor()\n with con:\n sqlTxt = \"\".join([\"SELECT timeAcquis, AVG(t10), AVG(t11), AVG(t12) FROM tLog WHERE timeAcquis \" \\\n \"BETWEEN '\", str(timeBeginMesure), \"' AND '\", str(timeEndMesure), \"';\"])\n cur.execute(sqlTxt)\n row = cur.fetchall()\n tMes, v10, v11, v12 = row[0]\n dtMes = datetime.strptime(str(tMes), \"%Y-%m-%d %H:%M:%S\")\n dh = \":\".join([str(date.toordinal(dtMes)),str(dtMes.hour)])\n self.record = \"/\".join([dh, str(round(v10,1)), str(round(v11,1)), str(round(v12,1))])\n vRet = self.record\n con.close\n return vRet\n\ndef send_temp_file(fileFrom, pathFileTo):\n access_token = 'rVV5iiHMNaAAAAAAAAAAcAdj9YnAUBcfWTD7kLGflYPXVt1dypwYg4G_eTaS-DXh'\n transferData = transfer_data(access_token)\n # API v2\n transferData.upload_file(fileFrom, pathFileTo)\n \ndef plot_temperatures (newData, xMin, xMax, yMin, yMax):\n\n yMin = int(yMin) - 2\n yMax = int(yMax) + 2\n\n xdates = []\n salon = []\n bureau = []\n exterieur = []\n\n for l in newData:\n xdates.append(datetime.strptime(l[0], \"%Y-%m-%d %H:%M:%S\"))\n salon.append(l[1])\n bureau.append(l[2])\n exterieur.append(l[3])\n\n fig, ax = plt.subplots()\n\n plt.title('Temperatures maison')\n plt.plot(xdates, salon,'--',label='Salon')\n plt.plot(xdates, bureau,'--',label='Bureau')\n plt.plot(xdates, exterieur,'--',label='Exterieur')\n\n formatter = mpl.dates.DateFormatter('%d.%m.%y %H:%M')\n ax.xaxis.set_major_formatter(formatter)\n labels = ax.get_xticklabels()\n plt.setp(labels, rotation=90)\n\n plt.legend(loc=2)\n plt.axis([xMin, xMax, yMin, yMax])\n plt.ylabel('Temperature [C]')\n plt.xlabel(datetime.today().strftime(\"%Y-%m-%d %H:%M\"))\n plt.tight_layout()\n plt.grid()\n plt.savefig(graphFrom)\n## plt.show()\n\nprint (\"lecture de la DB\")\nc = new_record()\nvRecord = c.read_temp()\n\npathFileTo = \"/temp_graph.txt\"\npathGraphTo = \"/temp_graph.png\"\nappPath = \"/home/pi/Documents/projets_jo/tempLogger\"\n\nfileFrom = \"\".join([appPath, pathFileTo])\ngraphFrom = 
\"\".join([appPath,pathGraphTo])\n\nwith open (fileFrom, \"a\") as f:\n f.write(\"\".join([vRecord,\"\\r\\n\"]))\n\nprint (\"Traitement des données\")\nnewData = []\n\nwith open(fileFrom, \"r\") as f:\n data = f.readlines()\n\nxMin = 9999999999\nxMax=-xMin\nyMin=xMin\nyMax=-yMin\n\nfor l in data:\n x = l.split(\"/\")\n## \n## ui=0\n## for u in x :\n## print(x[ui])\n## ui += 1\n## \n a1 = x[0].split(\":\")\n dt=int(a1[0])\n xMin = min(xMin, dt)\n xMax = max(xMax, dt)\n if xMax == xMin : xMax += 1\n \n t =a1[1]\n dtx = date.fromordinal(dt)\n tx = \"\".join([str(t),\":30:00\"])\n d1 = \" \".join([str(dtx),tx])\n s = float(x[1])\n b = float(x[2])\n e = float(x[3].replace(\"\\n\",\"\"))\n yMin = min(s,yMin)\n yMin = min(b,yMin)\n yMin = min(e,yMin)\n yMax = max(s,yMax)\n yMax = max(b,yMax)\n yMax = max(e,yMax)\n newData.append([d1,s,b,e])\n## \n## print (d1,s,b,e,xMin,xMax,yMin,yMax)\n\nprint (\"Création du graphique et copie des fichiers\")\nplot_temperatures(newData, xMin, xMax, yMin, yMax)\n\nsend_temp_file(fileFrom, pathFileTo)\nprint (\" \".join([\"file\", fileFrom, \"uploadded to dropbox\"]))\n\nsend_temp_file(graphFrom, pathGraphTo)\nprint (\" \".join([\"file\", graphFrom, \"uploadded to dropbox\"]))\nprint (\"done bye\")\n\n\n","sub_path":"temp_graph.py","file_name":"temp_graph.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"489576624","text":"# from django import forms\r\n\r\n# # class UploadFileForm(forms.Form):\r\n# # title = forms.CharField(max_length=50)\r\n# # file = forms.FileField()\r\n\r\n\r\n# class DocumentForm(forms.Form):\r\n# docfile = forms.FileField(\r\n# label='Select a file',\r\n# help_text='max. 42 megabytes'\r\n# )\r\n\r\n\r\nfrom django import forms\r\n\r\nclass DocumentForm(forms.Form):\r\n docfile = forms.FileField(\r\n label='Select a file',\r\n help_text='[NOTE : \".wav\" file only, max 42 megabytes]'\r\n )\r\n\r\n\r\n\r\n\r\ngenre_list= [\r\n (1, 'Blues'),\r\n (2, 'Classical'),\r\n (3, 'Country'),\r\n (4, 'Disco'),\r\n (5, 'Classical'),\r\n (6, 'Jazz'),\r\n (7, 'Metal'),\r\n (8, 'Pop'),\r\n (9, 'Reggae'),\r\n (10, 'Rock')\r\n]\r\n\r\n\r\n\r\nclass UserForm(forms.Form):\r\n selected_genre = forms.CharField(label='Select a genre to convert to : ', widget=forms.Select(choices=genre_list))","sub_path":"xlsite/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"291928856","text":"import tqdm\n\nif __name__ == '__main__':\n\n #with open('nt_hmmsearch_default_sorted.tbl') as in_fh:\n with open('aa_hmmsearch_default_sorted.tbl') as in_fh:\n with open('aa_hmmsearch_default_sorted_evalue.tbl', 'w') as out_fh:\n\n #skip first four lines\n next(in_fh)\n next(in_fh)\n next(in_fh)\n next(in_fh)\n\n first_line = next(in_fh).strip().split()\n\n current_read = first_line[0]\n hits = [first_line]\n\n for line in tqdm.tqdm(in_fh):\n while line.startswith('#'):\n try:\n line = next(in_fh)\n except StopIteration:\n break\n\n\n\n line = line.strip().split()\n read = line[0]\n if read != current_read:\n # sort and output reads\n try:\n hits = sorted(hits, key=lambda x: float(x[4]))\n except IndexError:\n print(hits)\n assert False\n for hit in hits:\n out_fh.write(\"\\t\".join(hit) + '\\n')\n # make a new store\n hits = [line]\n current_read = read\n else:\n hits.append(line)\n\n # for final set of reads\n try:\n hits = sorted(hits, key=lambda x: float(x[4]))\n for hit in hits:\n out_fh.write(\"\\t\".join(hit) + '\\n')\n except IndexError:\n print(hits)\n","sub_path":"aa/orfM/hmmsearch_aa/sort_hmmsearch_by_evalue.py","file_name":"sort_hmmsearch_by_evalue.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"587104861","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /local/hd1/home1/data/acue/rd/p-open-deploy-optional/xmlschema_acue/components/xmlschema_acue/tests/xmlschema_acue_tests/libs/resources/Case010/CallCase.py\n# Compiled at: 2019-05-19 16:21:25\n# Size of source mod 2**32: 12574 bytes\n\"\"\"\nThis module runs tests concerning resources.\n\"\"\"\nimport unittest, os\nfrom filesysobjects.apppaths import splitapppathx, normapppathx\ntry:\n from pathlib import PureWindowsPath, PurePath\nexcept ImportError:\n from pathlib2 import PureWindowsPath, PurePath\n\nfrom xmlschema_acue import fetch_namespaces, fetch_resource, normalize_url, fetch_schema, fetch_schema_locations, load_xml_resource, XMLResource, XMLSchemaURLError\nfrom tests.xmlschema_acue_tests.testtools.TestCaseXMLSchema import XMLSchemaTestCase, SKIP_REMOTE_TESTS\nfrom xmlschema_acue.compat import urlsplit, uses_relative\nfrom xmlschema_acue.etree import PyElementTree, is_etree_element, etree_element, py_etree_element\nfrom testdata.xmlschema_acue_testdata import testdata_dir\n\ndef is_windows_path(path):\n \"\"\"Checks if the path argument is a Windows platform path.\"\"\"\n return '\\\\' in path or ':' in path or '|' in path\n\n\ndef add_leading_slash(path):\n if path:\n if path[0] not in ('/', '\\\\'):\n return '/' + path\n return path\n\n\nclass TestResources(XMLSchemaTestCase):\n\n def check_url(self, url, expected):\n url_parts = urlsplit(url)\n url_parts2 = normapppathx(url, appsplit=True)\n url_parts3 = splitapppathx(url, appsplit=True)\n print('4TEST:url_parts = ' + str(url_parts))\n print('4TEST:url_parts2 = ' + str(url_parts2))\n print('4TEST:url_parts2 = ' + str(url_parts3))\n if urlsplit(expected).scheme not in uses_relative:\n expected = add_leading_slash(expected)\n else:\n expected_parts = urlsplit(expected, scheme='file')\n self.assertEqual(url_parts.scheme, expected_parts.scheme, 'Schemes differ.')\n self.assertEqual(url_parts.netloc, expected_parts.netloc, 'Netloc parts differ.')\n self.assertEqual(url_parts.query, expected_parts.query, 'Query parts differ.')\n self.assertEqual(url_parts.fragment, expected_parts.fragment, 'Fragment parts differ.')\n if is_windows_path(url_parts.path) or is_windows_path(expected_parts.path):\n path = PureWindowsPath(url_parts.path)\n expected_path = PureWindowsPath(add_leading_slash(expected_parts.path))\n else:\n path = PurePath(url_parts.path)\n expected_path = PurePath(expected_parts.path)\n self.assertEqual(path, expected_path, 'Paths differ.')\n\n def test_normalize_url(self):\n url1 = 'https://example.com/xsd/other_schema.xsd'\n self.assertEqual(normapppathx(url1), url1)\n self.check_url(normalize_url(url1, base_url='/path_my_schema/schema.xsd'), url1)\n parent_dir = os.path.dirname(os.getcwd())\n self.check_url(normalize_url('../dir1/./dir2'), os.path.join(parent_dir, 'dir1/dir2'))\n self.check_url(normalize_url('../dir1/./dir2', '/home', keep_relative=True), 'file:///dir1/dir2')\n self.check_url(normalize_url('../dir1/./dir2', 'file:///home'), 'file:///dir1/dir2')\n self.check_url(normalize_url('other.xsd', 'file:///home'), 'file:///home/other.xsd')\n self.check_url(normalize_url('other.xsd', 'file:///home/'), 'file:///home/other.xsd')\n self.check_url(normalize_url('file:other.xsd', 'file:///home'), 'file:///home/other.xsd')\n cwd_url = 'file://{}/'.format(add_leading_slash(os.getcwd()))\n self.check_url(normalize_url('file:other.xsd', 
keep_relative=True), 'file:other.xsd')\n self.check_url(normalize_url('file:other.xsd'), cwd_url + 'other.xsd')\n self.check_url(normalize_url('file:other.xsd', 'http://site/base', True), 'file:other.xsd')\n self.check_url(normalize_url('file:other.xsd', 'http://site/base'), cwd_url + 'other.xsd')\n self.check_url(normalize_url('dummy path.xsd'), cwd_url + 'dummy path.xsd')\n self.check_url(normalize_url('dummy path.xsd', 'http://site/base'), 'http://site/base/dummy%20path.xsd')\n self.check_url(normalize_url('dummy path.xsd', 'file://host/home/'), 'file://host/home/dummy path.xsd')\n win_abs_path1 = 'z:\\\\Dir_1_0\\\\Dir2-0\\\\schemas/XSD_1.0/XMLSchema.xsd'\n win_abs_path2 = 'z:\\\\Dir-1.0\\\\Dir-2_0\\\\'\n self.check_url(normalize_url(win_abs_path1), win_abs_path1)\n self.check_url(normalize_url('k:\\\\Dir3\\\\schema.xsd', win_abs_path1), 'file:///k:\\\\Dir3\\\\schema.xsd')\n self.check_url(normalize_url('k:\\\\Dir3\\\\schema.xsd', win_abs_path2), 'file:///k:\\\\Dir3\\\\schema.xsd')\n self.check_url(normalize_url('schema.xsd', win_abs_path2), 'file:///z:\\\\Dir-1.0\\\\Dir-2_0/schema.xsd')\n self.check_url(normalize_url('xsd1.0/schema.xsd', win_abs_path2), 'file:///z:\\\\Dir-1.0\\\\Dir-2_0/xsd1.0/schema.xsd')\n\n def test_fetch_resource(self):\n wrong_path = self.casepath(testdata_dir + 'resources/dummy_file.txt')\n self.assertRaises(XMLSchemaURLError, fetch_resource, wrong_path)\n right_path = self.casepath(testdata_dir + 'resources/dummy file.txt')\n self.assertTrue(fetch_resource(right_path).endswith('dummy file.txt'))\n\n def test_fetch_namespaces(self):\n self.assertFalse(fetch_namespaces(self.casepath(testdata_dir + 'resources/malformed.xml')))\n\n def test_fetch_schema_locations(self):\n locations = fetch_schema_locations(self.col_xml_file)\n self.assertEqual(splitapppathx(locations[0])[0], self.col_xsd_file)\n self.assertEqual(locations[1][0][0], 'http://example.com/ns/collection')\n self.assertEqual(splitapppathx(locations[1][0][1])[0], self.col_xsd_file)\n self.assertEqual(splitapppathx(fetch_schema(self.vh_xml_file))[0], self.vh_xsd_file)\n\n def test_load_xml_resource(self):\n self.assertTrue(is_etree_element(load_xml_resource((self.vh_xml_file), element_only=True)))\n root, text, url = load_xml_resource((self.vh_xml_file), element_only=False)\n self.assertTrue(is_etree_element(root))\n self.assertEqual(root.tag, '{http://example.com/vehicles}vehicles')\n self.assertTrue(text.startswith('').namespace, '')\n\n def test_xml_resource_defuse(self):\n resource = XMLResource((self.vh_xml_file), defuse='never')\n self.assertEqual(resource.defuse, 'never')\n self.assertRaises(ValueError, XMLResource, (self.vh_xml_file), defuse='all')\n self.assertRaises(ValueError, XMLResource, (self.vh_xml_file), defuse=None)\n self.assertIsInstance(resource.root, etree_element)\n resource = XMLResource((self.vh_xml_file), defuse='always')\n self.assertIsInstance(resource.root, py_etree_element)\n xml_file = self.casepath(testdata_dir + 'resources/with_entity.xml')\n self.assertIsInstance(XMLResource(xml_file), XMLResource)\n self.assertRaises((PyElementTree.ParseError), XMLResource, xml_file, defuse='always')\n xml_file = self.casepath(testdata_dir + 'resources/unused_external_entity.xml')\n self.assertIsInstance(XMLResource(xml_file), XMLResource)\n self.assertRaises((PyElementTree.ParseError), XMLResource, xml_file, defuse='always')\n xml_file = self.casepath(testdata_dir + 'resources/external_entity.xml')\n self.assertIsInstance(XMLResource(xml_file), XMLResource)\n 
self.assertRaises((PyElementTree.ParseError), XMLResource, xml_file, defuse='always')\n\n def test_xml_resource_timeout(self):\n resource = XMLResource((self.vh_xml_file), timeout=30)\n self.assertEqual(resource.timeout, 30)\n self.assertRaises(ValueError, XMLResource, (self.vh_xml_file), timeout='100')\n self.assertRaises(ValueError, XMLResource, (self.vh_xml_file), timeout=0)\n\n def test_xml_resource_is_lazy(self):\n resource = XMLResource(self.vh_xml_file)\n self.assertTrue(resource.is_lazy())\n resource = XMLResource((self.vh_xml_file), lazy=False)\n self.assertFalse(resource.is_lazy())\n\n def test_xml_resource_is_loaded(self):\n resource = XMLResource(self.vh_xml_file)\n self.assertFalse(resource.is_loaded())\n resource.load()\n self.assertTrue(resource.is_loaded())\n\n def test_xml_resource_open(self):\n resource = XMLResource(self.vh_xml_file)\n xml_file = resource.open()\n data = xml_file.read().decode('utf-8')\n self.assertTrue(data.startswith('')\n self.assertRaises(ValueError, resource.open)\n\n def test_xml_resource_tostring(self):\n resource = XMLResource(self.vh_xml_file)\n self.assertTrue(resource.tostring().startswith('= 0:\n loop_combination_idx_list = []\n for i in range(len(list_dict_categories_idx)):\n loop_combination_idx_list.append(dict_categories[list_dict_categories_keys[i]][list_dict_categories_idx[i]])\n\n nested_list_idx.append(loop_combination_idx_list)\n list_dict_categories_idx[(-1)] -= 1\n for idx in range(len(list_dict_categories_idx) - 1, 0, -1):\n if -1 in list_dict_categories_idx:\n list_dict_categories_idx[idx] = dict_categories_lens[list_dict_categories_keys[idx]] - 1\n list_dict_categories_idx[(idx - 1)] -= 1\n else:\n break\n\n return (\n nested_list_idx, list_dict_categories_keys)\n\n\ndef _is_vable(optimize, scatter_object):\n numpy = False\n if isinstance(scatter_object, np.ndarray):\n if scatter_object.dtype.kind in ('u', 'i', 'f', 'b'):\n numpy = True\n return optimize & numpy | (type(scatter_object) == type(None))\n\n\ndef _merge_objects(list_objects, delete=False):\n \"\"\"\n Given a list with objects, merges them into one single object, provided all elements of the list\n are of the same type.\n\n :param list_objects: List with objects to merge.\n :param delete: If `True`, deletes list_objects\n :return: merged object\n \"\"\"\n if list_objects == None:\n return\n else:\n if type(list_objects[0]) in [pd.DataFrame, pd.Series]:\n table_return = pd.concat(list_objects, copy=False)\n else:\n if isinstance(list_objects[0], np.ndarray):\n n_rows = sum([len(i) for i in list_objects])\n i_row = 0\n if list_objects[0].ndim >= 2:\n table_return = np.empty(((n_rows,) + np.shape(list_objects[0])[1:]), dtype=(list_objects[0].dtype))\n else:\n table_return = np.empty(n_rows, dtype=(list_objects[0].dtype))\n for i in list_objects:\n if len(i) > 0:\n if list_objects[0].ndim > 1:\n table_return[i_row:i_row + len(i), :] = i\n else:\n table_return[i_row:i_row + len(i)] = i\n i_row += len(i)\n\n else:\n if isinstance(list_objects[0], list):\n table_return = list(itertools.chain.from_iterable(list_objects))\n for i in reversed(table_return):\n if _return_set_type((i,)) == 0:\n pass\n elif len(i) == 0:\n table_return.remove(i)\n\n if delete:\n del list_objects\n return table_return\n\n\ndef _return_slice(object_slice, row_0=0, row_f=None, col_0=0, col_f=None, string_cols=False):\n if isinstance(object_slice, (pd.DataFrame, pd.Series, np.ndarray)):\n if row_f == None:\n row_f = object_slice.shape[0]\n elif col_f == None:\n col_f = object_slice.shape[1]\n if 
isinstance(object_slice, pd.DataFrame):\n if string_cols & (col_0 in object_slice.columns.tolist()):\n return object_slice[col_0].iloc[row_0:row_f]\n else:\n return object_slice.iloc[row_0:row_f, col_0:col_f]\n else:\n if isinstance(object_slice, np.ndarray):\n if object_slice.ndim > 1:\n return object_slice[row_0:row_f, col_0:col_f]\n else:\n return object_slice[row_0:row_f]\n else:\n if isinstance(object_slice, pd.Series):\n return object_slice.iloc[row_0:row_f]\n if isinstance(object_slice, list):\n return object_slice[row_0:row_f]\n\n\ndef _return_idx_list_k1(idx_list, scatter_object, size, size_limit):\n \"\"\"\n Given a scatter object and a idx list of locations to cut that object, return\n the maximum k value of all the objects in the division list, as well as the new division of\n the object in a list.\n\n :param idx_list: List with indexes of object cutting.\n :type idx_list: list\n :param scatter_object: Object to be cut.\n :type scatter_object: Defined in `scatter()`function.\n :param size: comm.size, number of processors.\n :type size: int\n :param size_limit: limit of memory allocated to each element.\n :type size_limit: int\n :return: k value.\n :type return: int\n :return: idx list.\n :type return: list\n \"\"\"\n idx_list_sizes = [_get_size_object(_return_slice(scatter_object, idx_list[i], idx_list[(i + 1)])) for i in range(len(idx_list) - 1)]\n k = int(max([i / size_limit for i in idx_list_sizes])) + 1\n idx_list_k = []\n for i in range(len(idx_list) - 1):\n idx_list_k += [int(z) for z in np.linspace(idx_list[i], idx_list[(i + 1)], k + 1)][:-1]\n\n idx_list_k += [len(scatter_object)]\n return (\n k, idx_list_k)\n\n\ndef _return_k2(scatter_object, size_limit):\n \"\"\"\n Returns the k2 value of an object; concretely, a list with objects.\n\n :param scatter_object: list from which k2 must be obtained.\n :type scatter_object: list\n\n :param size_limit: limit of memory for each processor.\n :type size_limit: int\n\n :return: k2 value\n :type return: int\n \"\"\"\n list_object_sizes = [_get_size_object(i) for i in scatter_object]\n k2 = int(max([i / size_limit for i in list_object_sizes])) + 1\n return k2\n\n\ndef _cut_in_k_parts(scatter_object, k, size, idx_list_k1=[]):\n \"\"\"\n Given a scatter object and a k value, cuts the object into k parts, trying to keep the same\n length for all the objects.\n\n :param scatter_object: Object to be cut.\n :type scatter_object: Defined in `scatter()`function.\n :param k: k value.\n :type k: list\n :param size: number of processors\n :type size: int\n :param idx_list_k1: list with custom cutting indexes\n :type idx_list_k1: list\n :return: cut object.\n :type return: list\n \"\"\"\n k1, k2 = k[0], k[1]\n if len(idx_list_k1) == 0:\n idx_list_k1 = [int(i) for i in np.linspace(0, len(scatter_object), size * k1 + 1)]\n else:\n scatter_list_objects_k1 = [_return_slice(scatter_object, idx_list_k1[i], idx_list_k1[(i + 1)]) for i in range(len(idx_list_k1) - 1)]\n if k[1] <= 1:\n scatter_list_objects = scatter_list_objects_k1\n else:\n scatter_list_objects = [[\n [dict_emptythings[type(scatter_object[0])] for y in range(k2)]] for x in range(k1 * size)]\n k_i = 0\n while scatter_list_objects_k1:\n obj_k2 = []\n for obj in scatter_list_objects_k1[0]:\n idx_list_k2 = [int(i) for i in np.linspace(0, len(obj), k2 + 1)]\n obj_k2.append([_return_slice(obj, idx_list_k2[i], idx_list_k2[(i + 1)]) for i in range(len(idx_list_k2) - 1)])\n\n if len(obj_k2) > 0:\n scatter_list_objects[k_i] = obj_k2\n del scatter_list_objects_k1[0]\n k_i += 1\n\n return 
scatter_list_objects\n\n\ndef _scatterv(object, comm, root=0):\n \"\"\"\n Generalized function that automatically uses `comm.Scatterv()` or `comm.scatter()` depending\n on the data type. If the array is numeric, it does so. If the array is not numeric, or it\n is not an array of the class `numpy.ndarray`, it redirects the scattering to do `comm.scatter()`.\n\n :param object: object to be scattered.\n :type object: `scatter()` object type.\n :param comm: MPI.COMM_WORLD object\n :type comm: MPI.COMM_WORLD object\n :param root: root process\n :type root: int\n :param optimize_scatter: If True, uses `comm.Scaterv()` command with numerical arrays.\n :type optimize_scatter: bool\n :return: Scattered object\n \"\"\"\n if comm.rank == root:\n if isinstance(object, list):\n counts = [i.size for i in object]\n displs = [0] + list(np.cumsum(counts))\n lens = [len(i) for i in object]\n object = _merge_objects(object)\n else:\n if object.ndim > 1:\n displs = [int(i) * int(object.size / object.shape[0]) for i in np.linspace(0, object.shape[0], comm.size + 1)]\n else:\n displs = [int(i) for i in np.linspace(0, object.shape[0], comm.size + 1)]\n counts = [displs[(i + 1)] - displs[i] for i in range(len(displs) - 1)]\n if object.ndim > 1:\n lens = [int((displs[(i + 1)] - displs[i]) / object.shape[1]) for i in range(len(displs) - 1)]\n else:\n lens = [displs[(i + 1)] - displs[i] for i in range(len(displs) - 1)]\n displs = displs[:-1]\n shape = object.shape\n object_type = object.dtype\n if object.ndim > 1:\n object = object.ravel().astype((np.float64), copy=False)\n else:\n object, counts, displs, shape, lens, object_type = (None, None, None, None,\n None, None)\n counts = comm.bcast(counts, root=root)\n displs = comm.bcast(displs, root=root)\n lens = comm.bcast(lens, root=root)\n shape = list(comm.bcast(shape, root=root))\n object_type = comm.bcast(object_type, root=root)\n shape[0] = lens[comm.rank]\n shape = tuple(shape)\n x = np.zeros(counts[comm.rank])\n comm.Scatterv([object, counts, displs, MPI.DOUBLE], x, root=root)\n del object\n if len(shape) > 1:\n return np.reshape(x, (-1, ) + shape[1:]).astype(object_type, copy=False)\n else:\n return x.view(object_type)\n\n\ndef _gatherv(object, comm, root, optimize, k1_val):\n \"\"\"\n Generalized function that automatically uses `comm.Gatherv()` or `comm.gather()` depending\n on the data type. If the array is numeric, it does so. 
If the array is not numeric, or it\n is not an array of the class `numpy.ndarray`, it redirects the scattering to do `comm.gather()`.\n\n :param object: object to be scattered.\n :type object: `gather()` object type.\n :param comm: MPI.COMM_WORLD object\n :type comm: MPI.COMM_WORLD object\n :param root: root process\n type root: int\n :return: Scattered object\n \"\"\"\n optimize_scatter, object_type = (0, None)\n if comm.rank == root:\n if isinstance(object, np.ndarray) & optimize:\n if object.dtype in [np.float64, np.float32, np.float16, np.float,\n np.int, np.int8, np.int16, np.int32, np.int64, int, float,\n bool]:\n optimize_scatter = 1\n object_type = object.dtype\n else:\n optimize_scatter = comm.bcast(optimize_scatter, root=root)\n object_type = comm.bcast(object_type, root=root)\n if optimize_scatter == 1:\n counts = object.size\n lens = object.shape[0]\n shape = list(object.shape)\n if object.ndim > 1:\n object = object.ravel().astype((np.float64), copy=False)\n counts = comm.allgather(counts)\n lens = comm.gather(lens, root=root)\n displs = None\n if comm.rank == root:\n displs = [sum(counts[:i]) for i in range(len(counts))]\n shape[0] = sum(lens)\n shape = tuple(shape)\n if comm.rank == root:\n x = np.zeros((sum(counts)), dtype=(np.float64))\n else:\n x = None\n comm.Gatherv([object, counts[comm.rank]], [x, counts, displs, MPI.DOUBLE], root=root)\n if comm.rank == root:\n if len(shape) > 1:\n return_obj = np.reshape(x, (-1, ) + shape[1:]).astype(object_type, copy=False)\n if k1_val == 1:\n return return_obj\n else:\n lens = [\n 0] + list(np.cumsum(lens))\n return [return_obj[lens[i]:lens[(i + 1)]] for i in range(len(lens) - 1)]\n else:\n return_obj = x.view(object_type)\n if k1_val == 1:\n return return_obj\n else:\n lens = [\n 0] + list(np.cumsum(lens))\n return [return_obj[lens[i]:lens[(i + 1)]] for i in range(len(lens) - 1)]\n else:\n return x\n else:\n return comm.gather(object, root=root)\n\n\ndef _gather_or_allgather(object, comm, root, type_gather='gather', optimize=True, k1_val=1):\n if type_gather == 'gather':\n return _gatherv(object, comm, root, optimize, k1_val)\n if type_gather == 'allgather':\n return comm.allgather(object)\n\n\ndef _general_scatter(scatter_object, comm, by, dest, size_limit, root, optimize, scatter_method):\n \"\"\"\n This function is a more generalised form of the comm.scatter() function, prepared for arrays\n of any size.\n\n :param scatter_object: Object to be divided.\n :type scatter_object: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects. If a list of objects is passed\n objects must all of them be [int, float, str] or [pd.DataFrame, np.array].\n Lists with elements of mixed classes are yet not supported.\n\n :param comm: MPI4PY's MPI.COMM_WORLD object.\n\n :param by: If the table cannot be directly divided by rows and, instead, there are some\n categorical variables that are stored within a column/s in the\n `pd.DataFrame`/`np.ndarray` object, `scatter` can perform the subdivision based\n on this variable. For instance, it the column has 1000 genes (several rows per\n gene), and the # of processors is 10, each processor will have 100 genes. So far,\n only one `by` variable can be attributed. The table must be sorted by this\n :type by: int (`np.ndarray`) or str (`pd.Dataframe`).\n\n :param size_limit: maximum byte size allowed for each divisible object. 
If the size exceeds the\n size limit, the chunk will be divided in `k`subchunks.\n :type size_limit: int\n\n :param root: processor from which the object has been created.\n :type root: int\n\n :param scatter_method: ['scatter' | 'bcast']. If 'scatter', divides de object and distributes it\n into the processors. If 'bcast', sends a copy of the object to all\n processors.\n\n :return: i-th subtable for processor i, already parallelized.\n :type return: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects.\n \"\"\"\n rank = comm.rank\n size = comm.size\n scatter_object_type = type(scatter_object)\n tag = 4568121\n if type(by) != list:\n by = [\n by]\n size_limit = size_limit / size\n if size == 1:\n return scatter_object\n else:\n if rank == root:\n if scatter_method in ('sendrecv', ):\n size = 1\n by = []\n if type(scatter_object) in [pd.DataFrame, np.ndarray, pd.Series]:\n by_col = by if by != [] else []\n scatter_object_nrows = scatter_object.shape[0]\n if len(by_col) > 0:\n idx_list = []\n nested_list_values, list_categories_keys = _generate_index_list(scatter_object, by_col)\n string_cols = _is_istring_cols(scatter_object, by_col)\n for value_comb in nested_list_values:\n for i in range(len(value_comb)):\n if string_cols:\n col_f = by_col[i]\n else:\n col_f = by_col[i] + 1\n bool_array_i = _return_slice(scatter_object, col_0=(by_col[i]), col_f=col_f,\n string_cols=string_cols) == value_comb[i]\n if i == 0:\n if isinstance(bool_array_i, (pd.DataFrame, pd.Series)):\n bool_array = bool_array_i.values\n else:\n bool_array = bool_array_i\n else:\n if isinstance(bool_array_i, (pd.DataFrame, pd.Series)):\n bool_array = bool_array & bool_array_i.values\n else:\n bool_array = bool_array & bool_array_i\n\n bool_to_idx = np.argwhere(bool_array.flatten()).flatten()\n if len(bool_to_idx) > 0:\n idx_list.append(int(min(bool_to_idx)))\n\n idx_list.append(len(scatter_object))\n idx_list = sorted(idx_list)\n lsp = np.linspace(0, len(idx_list) - 1, size + 1)\n idx_list = [idx_list[int(i)] for i in lsp]\n k, idx_list_k = _return_idx_list_k1(idx_list, scatter_object, size, size_limit)\n else:\n lsp = np.linspace(0, scatter_object_nrows, size + 1)\n idx_list = [int(i) for i in lsp]\n k, idx_list_k = _return_idx_list_k1(idx_list, scatter_object, size, size_limit)\n else:\n if _is_vable(optimize, scatter_object) & (k == 1):\n scatter_list_objects = scatter_object\n else:\n scatter_list_objects = [_return_slice(scatter_object, idx_list_k[i], idx_list_k[(i + 1)]) for i in range(len(idx_list_k) - 1)]\n else:\n if type(scatter_object) == list:\n set_type = _return_set_type(scatter_object)\n if set_type == 2:\n raise TypeError('The list to be scattered cannot contain simple types (int, float, str) and complex types (pd.DataFrame, np.ndarray, list) mixed together.')\n else:\n if set_type == 0:\n idx_list = [int(i) for i in np.linspace(0, len(scatter_object), size + 1)]\n k, idx_list_k = _return_idx_list_k1(idx_list, scatter_object, size, size_limit)\n scatter_list_objects = [scatter_object[idx_list_k[i]:idx_list_k[(i + 1)]] for i in range(len(idx_list_k) - 1)]\n else:\n if set_type == 1:\n idx_list = [int(i) for i in np.linspace(0, len(scatter_object), size + 1)]\n k2 = _return_k2(scatter_object, size_limit)\n if k2 == 1:\n k1, idx_list_k1 = _return_idx_list_k1(idx_list, scatter_object, size, size_limit)\n else:\n k1, idx_list_k1 = 1, idx_list\n k = [k1, k2]\n scatter_list_objects = _cut_in_k_parts(scatter_object, k, size, idx_list_k1)\n else:\n raise 
TypeError('The object types ({}) are not allowed so far.'.format(set_types))\n else:\n if type(k) != list:\n k = [\n k, 1]\n else:\n k, scatter_list_objects, idx_list = (None, None, None)\n k = comm.scatter(([k] * comm.size), root=root)\n k1, k2 = k[0], k[1]\n is_vable = _is_vable(optimize, scatter_object)\n is_vable = np.all(comm.allgather(is_vable))\n if k2 > 1:\n if rank == root:\n if is_vable:\n pass\n else:\n table_list_k_i = [scatter_list_objects[i] for i in range(0, k1 * size, k1)]\n else:\n table_list_k_i = None\n merge_table_k_i_j = []\n for k_2 in range(k2):\n if rank == root:\n table_k_i_j = []\n for z in table_list_k_i:\n i_j_z = []\n for l in range(len(z)):\n i_j_z.append(z[l][k_2])\n\n table_k_i_j.append(i_j_z)\n\n else:\n table_k_i_j = None\n if scatter_method == 'scatter':\n table_k_i_j = comm.scatter(table_k_i_j, root=root)\n else:\n if scatter_method == 'sendrecv':\n if rank == root:\n comm.send((table_k_i_j[0]), dest=dest, tag=tag)\n if rank == dest:\n table_k_i_j = comm.recv(source=root, tag=tag)\n merge_table_k_i_j.append(table_k_i_j)\n\n if scatter_method == 'sendrecv':\n object_return = [] if comm.rank == dest else None\n if rank == dest:\n for l in range(len(merge_table_k_i_j[0])):\n object_return.append(_merge_objects([merge_table_k_i_j[k][l] for k in range(len(merge_table_k_i_j))]))\n\n return object_return\n else:\n return\n else:\n if scatter_method == 'scatter':\n object_return_not_merged = [[[] for y in range(0)] for x in range(len(merge_table_k_i_j[0]))]\n for l in range(len(merge_table_k_i_j[0])):\n object_return_not_merged[l] = [\n _merge_objects([merge_table_k_i_j[k_2][l] for k_2 in range(len(merge_table_k_i_j))])]\n\n else:\n if scatter_method == 'scatter':\n object_return_not_merged = [[[] for y in range(0)] for x in range(k1)]\n else:\n if scatter_method == 'sendrecv':\n object_return_not_merged = [[[] for y in range(0)] for x in range(k1)] if rank == dest else None\n for k_i in range(k1):\n if rank == root:\n if is_vable & (k[0] == 1):\n pass\n else:\n table_list_k_i = [scatter_list_objects[i] for i in range(0, k1 * size, k1)]\n else:\n table_list_k_i = None\n if scatter_method == 'scatter':\n if is_vable:\n if k[0] == 1:\n object_return_not_merged[k_i] = _scatterv(scatter_object, comm, root=root)\n else:\n object_return_not_merged[k_i] = _scatterv(table_list_k_i, comm, root=root)\n else:\n object_return_not_merged[k_i] = comm.scatter(table_list_k_i, root=root)\n else:\n if scatter_method == 'sendrecv':\n if rank == root:\n comm.send((table_list_k_i[0]), dest=dest, tag=tag)\n if rank == dest:\n object_return_not_merged[k_i] = comm.recv(source=root, tag=tag)\n if rank == root:\n try:\n for k_del in reversed(sorted(list(range(0, k1 * size, k1)))):\n del scatter_list_objects[k_del]\n\n except:\n del scatter_list_objects\n\n k1 -= 1\n\n if is_vable & (k1 == 1):\n object_return = object_return_not_merged[0]\n else:\n object_return = _merge_objects(object_return_not_merged, delete=True)\n return object_return\n\n\ndef _general_gather(gather_object, comm, size_limit, root, optimize, gather_method):\n \"\"\"\n This function is a more generalised form of the comm.gather() function, prepared for arrays\n of any size.\n\n :param gather_object: Object to be gathered.\n :type gather_object: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects. 
If a list of objects is passed\n objects must all of them be [int, float, str] or [pd.DataFrame, np.array].\n Lists with elements of mixed classes are yet not supported.\n\n :param comm: MPI4PY's MPI.COMM_WORLD object.\n\n :param size_limit: maximum byte size allowed for each divisible object. If the size exceeds the\n size limit, the chunk will be divided in `k`subchunks.\n :type size_limit: int\n\n :gather_method: ['gather' | 'allgather']. If `gather`, returns the object to the \"root\"\n processor. If `allgather`, all processors receive a copy of the gathered object.\n :type gather_method: str\n\n :param root: if `gather`, processor that will receive the gathered object.\n :type root: int\n\n :return: object from gathered subobjects.\n :type return: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects.\n \"\"\"\n rank, size = comm.rank, comm.size\n size_limit = size_limit / size\n if size == 1:\n return gather_object\n else:\n if (type(gather_object) == list) & (_return_set_type(gather_object) == 1):\n k2 = _return_k2(gather_object, size_limit)\n else:\n k2 = 1\n if k2 == 1:\n k1, idx_list_k1 = _return_idx_list_k1([0, len(gather_object)], gather_object, size, size_limit)\n else:\n k1 = 1\n k1 = max(comm.allgather(k1))\n k2 = max(comm.allgather(k2))\n if k2 > 1:\n k1 = 1\n else:\n idx_list_k1 = [int(z) for z in np.linspace(0, len(gather_object), k1 + 1)]\n if (k1 == 1) & (k2 == 1):\n object_return = _gather_or_allgather(gather_object, comm, root, gather_method, optimize=optimize)\n else:\n if k1 > 1:\n object_return = [None] * (k1 * size) if rank == root else None\n for k_i in range(k1):\n object_return_k_i = _gather_or_allgather((gather_object[idx_list_k1[k_i]:idx_list_k1[(k_i + 1)]]), comm,\n root, gather_method, k1_val=k1, optimize=optimize)\n if rank == root:\n for i in range(size):\n object_return[k_i + i * k1] = object_return_k_i[i]\n\n else:\n if k2 > 1:\n max_size_list = max(comm.allgather(len(gather_object)))\n if len(gather_object) == 0:\n gather_object += [[]] * (max_size_list - len(gather_object))\n else:\n gather_object += [dict_emptythings[type(gather_object[0])]] * (max_size_list - len(gather_object))\n object_return = [None] * (max_size_list * size) if rank == root else None\n for size_i in range(max_size_list):\n list_return_object_i = [] if rank == root else None\n object_cut_k2 = [int(x) for x in np.linspace(0, len(gather_object[size_i]), k2 + 1)]\n for k_i in range(k2):\n object_return_k_i = _gather_or_allgather((gather_object[size_i][object_cut_k2[k_i]:object_cut_k2[(k_i + 1)]]),\n comm,\n root, gather_method, optimize=False)\n if rank == root:\n list_return_object_i.append(object_return_k_i)\n\n if rank == root:\n for n_i in range(size):\n merged_i_k = _merge_objects([list_return_object_i[k_i][n_i] for k_i in range(k2)])\n if len(merged_i_k) > 0:\n object_return[size_i + n_i * max_size_list] = merged_i_k\n\n del list_return_object_i\n\n if rank == root:\n object_return = list(filter((None).__ne__, object_return))\n return object_return\n if isinstance(object_return, list):\n object_return = list(filter((None).__ne__, object_return))\n if len(object_return) > 0:\n object_return = _merge_objects(object_return)\n else:\n object_return = None\n return object_return\n\n\ndef bcast(bcast_object, comm, size_limit=50000000, root=0):\n \"\"\"\n This function communicates an object to the rest of cores, but this time it communicates the\n whole object to all cores. 
Thus, at the end of the broadcasting, each core will have an\n exact copy of the object.\n\n :param bcast_object: Object to be communicated.\n :type bcast_object: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects. If a list of objects is passed\n objects must all of them be [int, float, str] or [pd.DataFrame, np.array].\n Lists with elements of mixed classes are yet not supported.\n\n :param comm: MPI4PY's MPI.COMM_WORLD object.\n\n :param size_limit: maximum byte size allowed for each divisible object. If the size exceeds the\n size limit, the chunk will be divided in `k`subchunks.\n :type size_limit: int\n\n :param root: processor from which the object has been created.\n :type root: int\n\n :return: i-th subtable for processor i, already parallelized.\n :type return: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects.\n \"\"\"\n if comm.rank == root:\n mem = virtual_memory().available\n size_scatter_object = _get_size_object(bcast_object)\n else:\n mem, size_scatter_object = (None, None)\n mem = comm.scatter([mem] * comm.size)\n size_scatter_object = comm.scatter([size_scatter_object] * comm.size)\n scatter_object_type = comm.scatter([type(bcast_object)] * comm.size)\n if size_scatter_object * comm.size > mem:\n raise MemoryError('The size of the object is too big to be broadcasted for this numberof processors.')\n return_object = None\n for CPU in range(comm.size):\n if root == CPU:\n return_object = bcast_object\n else:\n if comm.rank == root:\n scatter_list = [\n dict_emptythings[type(bcast_object)]] * comm.size\n scatter_list[CPU] = bcast_object\n else:\n scatter_list = None\n scatter_list_object = _general_scatter(scatter_list, comm, size_limit=size_limit, root=root,\n by=[],\n dest=0,\n optimize=False,\n scatter_method='scatter')\n if comm.rank == CPU:\n return_object = scatter_list_object[0]\n\n return return_object\n\n\ndef scatter(scatter_object, comm, by=[], size_limit=500000000, root=0, optimize=True):\n \"\"\"\n This function divides an object into `n` parts, and distributes it into all the cores.\n\n :param scatter_object: Object to be communicated.\n :type scatter_object: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects. If a list of objects is passed\n objects must all of them be [int, float, str] or [pd.DataFrame, np.array].\n Lists with elements of mixed classes are yet not supported.\n\n :param comm: MPI4PY's MPI.COMM_WORLD object.\n\n :param by: If the table cannot be directly divided by rows and, instead, there are some\n categorical variables that are stored within a column/s in the\n `pd.DataFrame`/`np.ndarray` object, `scatter` can perform the subdivision based\n on this variable. For instance, it the column has 1000 genes (several rows per\n gene), and the # of processors is 10, each processor will have 100 genes. So far,\n only one `by` variable can be attributed. The table must be sorted by this\n :type by: int (`np.ndarray`) or str (`pd.Dataframe`).\n\n :param size_limit: maximum byte size allowed for each divisible object. 
If the size exceeds the\n size limit, the chunk will be divided in `k`subchunks.\n :type size_limit: int\n\n :param root: processor from which the object has been created.\n :type root: int\n\n :param optimize: If `True`, applies a vectorized parallelization of the object, given the object\n supports that parallelization.\n :type optimize: bool\n\n :return: i-th subtable for processor i, already parallelized.\n :type return: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects.\n \"\"\"\n return _general_scatter(scatter_object, comm, by, dest=0, size_limit=size_limit, root=root, optimize=optimize,\n scatter_method='scatter')\n\n\ndef gather(gather_object, comm, optimize=True, size_limit=1500000000, root=0):\n \"\"\"\n This function communicates individual objects, each one in a different core, to a\n destination core.\n\n :param gather_object: Object to be communicated.\n :type gather_object: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects. If a list of objects is passed\n objects must all of them be [int, float, str] or [pd.DataFrame, np.array].\n Lists with elements of mixed classes are yet not supported.\n\n :param comm: MPI4PY's MPI.COMM_WORLD object.\n\n :param size_limit: maximum byte size allowed for each divisible object. If the size exceeds the\n size limit, the chunk will be divided in `k`subchunks.\n :type size_limit: int\n\n :param root: processor from which the object has been created.\n :type root: int\n\n :param optimize: If `True`, applies a vectorized parallelization of the object, given the object\n supports that parallelization.\n :type optimize: bool\n\n :return: i-th subtable for processor i, already parallelized.\n :type return: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects.\n \"\"\"\n return _general_gather(gather_object, comm, size_limit, root, optimize=optimize, gather_method='gather')\n\n\ndef allgather(allgather_object, comm, size_limit=1500000000, root=0):\n \"\"\"\n This function combines the objects from all the cores and distributes copies of the\n combined object to all the cores.\n\n :param gather_object: Object to be communicated.\n :type gather_object: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects. If a list of objects is passed\n objects must all of them be [int, float, str] or [pd.DataFrame, np.array].\n Lists with elements of mixed classes are yet not supported.\n\n :param comm: MPI4PY's MPI.COMM_WORLD object.\n\n :param size_limit: maximum byte size allowed for each divisible object. If the size exceeds the\n size limit, the chunk will be divided in `k`subchunks.\n :type size_limit: int\n\n :param root: processor from which the object has been created.\n :type root: int\n\n :return: i-th subtable for processor i, already parallelized.\n :type return: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects.\n \"\"\"\n return _general_gather(allgather_object, comm, size_limit, root, optimize=False, gather_method='allgather')\n\n\ndef sendrecv(send_object, comm, dest, size_limit=1500000000, root=0):\n \"\"\"\n This function sends an object from a source core to a destination node.\n\n :param send_object: Object to be communicated.\n :type send_object: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects. 
If a list of objects is passed\n objects must all of them be [int, float, str] or [pd.DataFrame, np.array].\n Lists with elements of mixed classes are yet not supported.\n\n :param comm: MPI4PY's MPI.COMM_WORLD object.\n\n :param dest: Destination node where the object will be communicated.\n :type dest: int\n\n :param size_limit: maximum byte size allowed for each divisible object. If the size exceeds the\n size limit, the chunk will be divided in `k`subchunks.\n :type size_limit: int\n\n :param root: processor from which the object has been created.\n :type root: int\n\n :return: i-th subtable for processor i, already parallelized.\n :type return: `pd.DataFrame`, `np.ndarray`, and list of string, ints, floats, or\n `pd.DataFrame` or `np.ndarray` objects.\n \"\"\"\n return _general_scatter(send_object, comm, by=[], dest=dest, size_limit=size_limit, root=root, optimize=False, scatter_method='sendrecv')","sub_path":"pycfiles/bigmpi4py-1.2.4-py3.6/bigmpi4py.cpython-36.py","file_name":"bigmpi4py.cpython-36.py","file_ext":"py","file_size_in_byte":42228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"246541756","text":"from django.db import models\nimport pandas as pd\nfrom tensorflow import keras\n# Create your models here.\nclass Fertility(models.Model):\n season_choices=((1,'spring'),\n (2,'fall'),\n (3,'winter'),\n (4,'summer'),\n )\n season=models.PositiveIntegerField(choices=season_choices)\n age=models.PositiveIntegerField()\n cd_choices=((0,'no'),(1,'yes'))\n childish_diseases=models.PositiveIntegerField(choices=cd_choices)\n accident_or_serious_trauma=models.PositiveIntegerField(choices=cd_choices)\n surgical_intervention=models.PositiveIntegerField(choices=cd_choices)\n high_fevers_choices=((1,'more than 3 months ago'),(2,'less than 3 months ago'),(3,'no'))\n high_fevers_in_last_year=models.PositiveIntegerField(choices=high_fevers_choices)\n frequency_of_alcohol_consumption_choiced=((1,'once a week'),(2,'hardly ever or never'),(3,'several times a week'),(4,'several times a day'),(5,'every day'))\n frequency_of_alcohol_consumption=models.PositiveIntegerField(choices=frequency_of_alcohol_consumption_choiced)\n smoking_habit_choices=((1,'occasional'),(2,'daily'),(3,'never'))\n smoking_habit=models.PositiveIntegerField(choices=smoking_habit_choices)\n no_of_hours_spent_sitting_per_day=models.PositiveIntegerField()\n confidence=models.FloatField()\n\n def save(self, *args, **kwargs):\n df = pd.DataFrame(\n columns=['Season','Age','Childish diseases','Accident or serious trauma','Surgical intervention','High fevers in the last year','Frequency of alcohol consumption','Smoking habit','Number of hours spent sitting per day',], data=[\n [self.season,self.age,self.childish_diseases,self.accident_or_serious_trauma,self.surgical_intervention,self.high_fevers_in_last_year,self.frequency_of_alcohol_consumption,self.smoking_habit,self.no_of_hours_spent_sitting_per_day]])\n NEW_MODEL = keras.models.load_model('my_model_fertility.h5')\n self.confidence=NEW_MODEL.predict(x=df)[0][0]*100\n super().save(*args, **kwargs)","sub_path":"Diagnostics/fertility/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"64381713","text":"from keras.models import Sequential, load_model\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.utils import plot_model\nfrom collections import deque\nimport numpy as np\nimport main\n\nBLANK = 0\nBLACK = 1\nWHITE = 2\nPLACEABLE = 3\nWALL = 4\n\n\nclass QNetwork:\n\n def __init__(self, learning_rate=0.01, state_size=16, action_size=16, hidden_size=10):\n\n self.model = Sequential()\n self.model.add(Dense(hidden_size, activation='relu', input_dim=state_size))\n self.model.add(Dense(hidden_size, activation='relu'))\n self.model.add(Dense(action_size, activation='linear'))\n self.optimizer = Adam(lr=learning_rate) # 誤差を減らす学習方法はAdam\n self.model.compile(loss='mse', optimizer=self.optimizer)\n #self.model.compile(loss=huberloss, optimizer=self.optimizer)\n\n # 重みの学習\n def replay(self, memory, batch_size, gamma, targetQN):\n\n inputs = np.zeros((batch_size, 16))\n targets = np.zeros((batch_size, 16))\n mini_batch = memory.sample(batch_size)\n\n for i, (state_b, action_b, reward_b, next_state_b) in enumerate(mini_batch):\n inputs[i:i + 1] = state_b\n target = reward_b\n\n if not (next_state_b == np.zeros(state_b.shape)).all(axis=1):\n # 価値計算(DDQNにも対応できるように、行動決定のQネットワークと価値観数のQネットワークは分離)\n retmainQs = self.model.predict(next_state_b)[0]\n next_action = np.argmax(retmainQs) # 最大の報酬を返す行動を選択する\n target = reward_b + gamma * targetQN.model.predict(next_state_b)[0][next_action]\n\n targets[i] = self.model.predict(state_b) # Qネットワークの出力\n targets[i][action_b] = target # 教師信号\n\n # shiglayさんよりアドバイスいただき、for文の外へ修正しました\n self.model.fit(inputs, targets, epochs=1, verbose=0) # epochsは訓練データの反復回数、verbose=0は表示なしの設定\n\nclass Actor:\n def get_action(self, state, episode, mainQN): # [C]t+1での行動を返す\n # 徐々に最適行動のみをとる、ε-greedy法\n epsilon = 0.001 + 0.9 / (1.0+episode)\n othello.board_class.find_placeable(BLACK if t%2==1 else WHITE)\n placeable_list = [i for i, stone in enumerate(return_board(othello.board_class.board)) if stone==PLACEABLE]\n othello.board_class.delete_placeable()\n\n if epsilon <= np.random.uniform(0, 1):\n retTargetQs = mainQN.model.predict(state)[0]\n #action = np.argmax(retTargetQs) # 最大の報酬を返す行動を選択する\n sorted_Q = np.argsort(retTargetQs)\n for i in range(16):\n action = sorted_Q[i]\n if action in placeable_list:\n break\n elif i==15:\n action = -1\n else:\n action = -1 # ランダムに行動する\n\n return action\n\nclass Memory:\n def __init__(self, max_size=1000):\n self.buffer = deque(maxlen=max_size)\n\n def add(self, experience):\n self.buffer.append(experience)\n\n def sample(self, batch_size):\n idx = np.random.choice(np.arange(len(self.buffer)), size=batch_size, replace=False)\n return [self.buffer[ii] for ii in idx]\n\n def len(self):\n return len(self.buffer)\n\ndef return_board(board):\n\n return_board = []\n for i in range(36):\n if board[i]!=4:\n return_board.append(board[i])\n\n return return_board\n\ndef convert_state(observation):\n\n return sum([stone * (3 ** i) for i, stone in enumerate(observation)])\n\n\n\nplayer_list = [\"player\", \"random\"]\nothello = main.Othello(player_list[1], player_list[1])\nq_turn = WHITE\n\nmax_num_of_turn = 16\nnum_consecutive_iterations = 100\nnum_episodes = 100\ngoal_average_reward = 90\ntotal_reward_vec = np.zeros(num_consecutive_iterations)\ngamma = 0.99\nislearned = False\n\nhidden_size = 16 # Q-networkの隠れ層のニューロンの数\nlearning_rate = 0.00001 # Q-networkの学習係数\nbatch_size = 16\n\nmainQN = load_model(\"./dqn_model_100_1.h5\")\ntargetQN = QNetwork(hidden_size=hidden_size, learning_rate=learning_rate)\nactor = 
Actor()\nmemory = Memory(10000)\n\nwin = 0\nlose = 0\neven = 0\n\nfor episode in range(num_episodes): # 試行数分繰り返す\n #env.reset() # cartPoleの環境初期化\n othello.reset()\n #state, reward, done, _ = env.step(env.action_space.sample()) # 1step目は適当な行動をとる\n winner = othello.game_loop(4)\n state = return_board(othello.board_class.board)\n state = np.reshape(state, [1, 16]) # list型のstateを、1行4列の行列に変換\n episode_reward = 0\n\n targetQN.model.set_weights(mainQN.model.get_weights())\n\n for t in range(max_num_of_turn + 1): # 1試行のループ\n\n if (t+1)%2==q_turn%2:\n action = actor.get_action(state, episode, mainQN) # 時刻tでの行動を決定する\n #next_state, reward, done, info = env.step(action) # 行動a_tの実行による、s_{t+1}, _R{t}を計算する\n winner = othello.game_loop((action // 4 + 1) * 6 + (action % 4 + 1))\n next_state = return_board(othello.board_class.board)\n next_state = np.reshape(next_state, [1, 16]) # list型のstateを、1行4列の行列に変換\n\n reward = 0\n if winner!=-1:\n next_state = np.zeros(state.shape)\n if winner==q_turn:\n reward = 1\n elif winner==q_turn%2+1:\n reward = -1\n\n episode_reward += reward\n\n memory.add((state, action, reward, next_state)) # メモリの更新する\n state = next_state # 状態更新\n\n\n # Qネットワークの重みを学習・更新する replay\n if (memory.len() > batch_size) and not islearned:\n mainQN.replay(memory, batch_size, gamma, targetQN)\n else:\n winner = othello.game_loop(4)\n\n # 1施行終了時の処理\n if winner!=-1:\n total_reward_vec = np.hstack((total_reward_vec[1:], episode_reward)) # 報酬を記録\n if q_turn%2==(t+1)%2:\n action = -1\n if winner==q_turn:\n win += 1\n elif winner==q_turn%2+1:\n lose += 1\n elif winner==0:\n even += 1\n print('%d Episode finished after %d time steps / mean %f' % (episode, t + 1, total_reward_vec.mean()), end=\" \")\n print(\"win:{}\".format(win), \"lose:{}\".format(lose), \"even:{}\".format(even))\n break\n\n # 複数施行の平均報酬で終了を判断\n if total_reward_vec.mean() >= goal_average_reward:\n print('Episode %d train agent successfuly!' % episode)\n islearned = 1\n\nmainQN.model.save(\"./dqn_model_100_1.h5\")","sub_path":"v4/DQN_predict.py","file_name":"DQN_predict.py","file_ext":"py","file_size_in_byte":6995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"183391137","text":"import os, sys\ntry:\n import pafy # youtube downloader api\nexcept:\n print('pafy not found\\ntype \"pip install pafy\" and \"pip install youtube_dl\" then run again')\n sys.exit()\n\nclass YouTubeDownLoader():\n # making and obj of pafy and starting the bot\n def __init__(self, whole_url):\n self.explicit_url = ''\n if '=' not in whole_url:\n self.explicit_url = whole_url.split('\\\\')[-1]\n else:\n self.explicit_url = whole_url.split('=')[-1]\n self.video = pafy.new(self.explicit_url)\n self.choose_method()\n\n \n # for when the wrong input is entered\n def wrong_input(self):\n print('wrong input!')\n sys.exit()\n\n\n def nothing_found(self):\n print('nothing found')\n sys.exit()\n\n\n # choose the method and doing the stuff\n def choose_method(self):\n text = '''\ndownload:\n1.video-only\n2.audio-only\n3.both (embeded)\n4.both (separately)\nwhich one? (1/2/3/4) '''\n\n try:\n method = int(input(text).strip())\n except:\n self.wrong_input()\n \n if method==1:\n self.parse_video()\n elif method==2:\n self.parse_audio()\n elif method==3:\n self.parse_both_emb()\n else:\n self.parse_both_sep()\n\n\n # chooses one stream and downloads it\n def download(self, all_streams):\n # nothing found\n if len(all_streams)==0:\n self.nothing_found()\n else:\n # for input's text\n i_s = []\n print('\\n\\nall that found:')\n for i in range(len(all_streams)):\n strm = (str(all_streams[i]).split(':')[-1]).split('@')\n print(f'{i+1}. {strm[0]}, {strm[1]}')\n i_s.append(i+1)\n text = '\\nwhich one?('+'/'.join([str(i) for i in i_s])+') '\n # checking if the user didn't enter wrong input\n try:\n which = int(input(text).strip())\n except:\n self.wrong_input()\n \n # downloading the chosen format of video\n try:\n all_streams[which-1].download()\n except:\n print('\\nsomething went wrong downloading this!')\n\n\n # parsing the video-only\n def parse_video(self):\n # all the possible formats\n all_streams = self.video.videostreams\n # download the shit\n self.download(all_streams)\n\n\n # parsing the audio only\n def parse_audio(self):\n all_streams = self.video.audiostreams\n self.download(all_streams)\n\n\n # parsing both video and audio embeded to one single file\n def parse_both_emb(self):\n all_streams = self.video.streams\n self.download(all_streams)\n\n\n # parsing both video and audio seperately\n def parse_both_sep(self):\n print('\\n\\n****Audio Part****')\n self.parse_audio()\n print('\\n\\n****Audio Part****')\n self.parse_video()\n\n\n\n\n\nif __name__ == \"__main__\":\n # make the downloads folder and cd to that\n if not os.path.isdir('downloads'):\n os.system('mkdir downloads')\n os.chdir('downloads')\n\n # if url is passed through argument start the bot\n args = sys.argv\n if len(args)==1:\n print('you have to pass the url via argument')\n elif len(args)==2:\n ytdl = YouTubeDownLoader(args[1])\n else:\n print('too many argument bruh')\n sys.exit()","sub_path":"YouTubeDownLoader/ytdl.py","file_name":"ytdl.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"237930317","text":"# encoding=utf-8\n# https://leetcode.com/problems/majority-element/\nclass Solution:\n # @param num, a list of integers\n # @return an integer\n def majorityElement(self, num):\n # ndict = {}\n # count,candidate = 0,0\n # for i in num:\n # if i not in ndict:\n # ndict[i] = 1\n # else:\n # ndict[i] += 1\n # if max(count,ndict[i]) == ndict[i]:\n # count = ndict[i]\n # candidate = i\n # return candidate\n\n # Better Solution: (算法课都白上了..sigh..)\n count, candidate = 0,0\n for i in num:\n if count == 0:\n candidate = i\n count = 1\n elif i == candidate:\n count += 1\n else:\n count -= 1\n return candidate","sub_path":"majorityElement.py","file_name":"majorityElement.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"613275500","text":"name = {1 : 'George Washington',\r\n 2 : 'Thomas Jefferson',\r\n 5 : 'Abraham Lincoln',\r\n 10 : 'Alexander Hamilton',\r\n 20 : 'Andrew Jackson',\r\n 50 : 'Ulysses S.Grant',\r\n 100 : 'Benjamin Franklin'}\r\na = eval(input('Please enter the denomination of a banknote: '))\r\nif a == 1 or a == 2 or a == 5 or a == 10 or a == 20 or a == 50 or a == 100:\r\n print(f'The name of individual that appears on the bankrote is {name[a]}.')","sub_path":"043.py","file_name":"043.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"479845305","text":"MOD = 1000000007\n\nn, k = map(int, input().split())\nxs = sorted(list(map(int, input().split())))\nminus = []\nplus = []\n\nans = -MOD\n\nfor i in range(n):\n if xs[i] >= 0:\n minus = xs[:i]\n plus = sorted(xs[i:], reverse=True)\n break\n\nfor i in range(len(plus) + 1):\n tmp = 1\n l = k - i\n if l < 0:\n break\n if l > len(minus):\n continue\n\n if l % 2 == 0:\n if i != 0:\n for p in plus[:i]:\n tmp *= p\n if l != 0:\n for m in minus[:l]:\n tmp *= m\n else:\n if i != 0:\n for p in plus[len(plus)-i:]:\n tmp *= p\n if l != 0:\n for m in minus[len(minus) - l:]:\n tmp *= m\n \n ans = max(ans, tmp)\n\nprint(ans)","sub_path":"beginner_contest173/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"79525741","text":"\"\"\"Train Keras CNN on Fashion MNIST on SageMaker.\"\"\"\n\nimport argparse\nimport os\nimport h5py\nimport numpy as np\nimport pandas as pd\nimport boto3\n\nfrom tensorflow.compat.v1.saved_model import simple_save\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers import (Input, Dense, Activation,\n Flatten, BatchNormalization, Conv2D,\n MaxPooling2D, ZeroPadding2D, Dropout)\nfrom keras.callbacks import (Callback, EarlyStopping, ModelCheckpoint,\n ReduceLROnPlateau)\nfrom keras.datasets import fashion_mnist\nfrom keras.utils import to_categorical, multi_gpu_model\n\n\n\n# Include classes from cnn.py to avoid import issues\nclass BestValAcc(Callback):\n\n def on_train_begin(self, logs={}):\n self.val_acc = []\n\n def on_train_end(self, logs={}):\n print(\"best_val_acc:\", max(self.val_acc))\n\n def on_epoch_end(self, batch, logs={}):\n self.val_acc.append(logs.get('val_acc'))\n\n \nclass CNN(Model):\n\n def __init__(self, input_shape, conv_params={}, fc_params={}, drop_rate=0.0):\n # param defaults\n conv0_defaults = {'conv0_pad': 1,\n 'conv0_channels': 32,\n 'conv0_filter': 3,\n 'conv0_stride': 1,\n 'conv0_pool': 1,\n 'conv0_activation': 'relu'}\n conv1_defaults = {'conv1_pad': 1,\n 'conv1_channels': 64,\n 'conv1_filter': 3,\n 'conv1_stride': 1,\n 'conv1_pool': 2,\n 'conv1_activation': 'relu'}\n conv2_defaults = {'conv2_pad': 1,\n 'conv2_channels': 128,\n 'conv2_filter': 3,\n 'conv2_stride': 1,\n 'conv2_pool': 2,\n 'conv2_activation': 'relu'}\n fc0_defaults = {'fc0_neurons': 512,\n 'fc0_activation': 'relu'}\n fc1_defaults = {'fc1_neurons': 256,\n 'fc1_activation': 'relu'}\n fc2_defaults = {'fc2_neurons': 10,\n 'fc2_activation': 'softmax'}\n\n conv_defaults = {'conv0': conv0_defaults,\n 'conv1': conv1_defaults,\n 'conv2': conv2_defaults}\n fc_defaults = {'fc0': fc0_defaults,\n 'fc1': fc1_defaults,\n 'fc2': fc2_defaults}\n\n # set param attributes\n self.conv_params = conv_params\n self.fc_params = fc_params\n\n # merge passed in params with defaults\n for layer in conv_defaults:\n try:\n conv_params[layer] = {**conv_defaults[layer],\n **conv_params[layer]}\n except KeyError:\n conv_params[layer] = conv_defaults[layer]\n for layer in fc_defaults:\n try:\n fc_params[layer] = {**fc_params[layer],\n **fc_defaults[layer]}\n except KeyError:\n fc_params[layer] = fc_defaults[layer]\n\n # Input placeholder\n X_input = Input(input_shape)\n\n # Pad -> Conv -> Act -> BN -> MaxPool blocks\n for (i, conv) in enumerate(conv_params):\n p = conv_params[conv][conv + '_pad']\n c = conv_params[conv][conv + '_channels']\n f = conv_params[conv][conv + '_filter']\n s = conv_params[conv][conv + '_stride']\n o = conv_params[conv][conv + '_pool']\n act = conv_params[conv][conv + '_activation']\n if i == 0:\n X = ZeroPadding2D((p, p), name=conv + '_pad')(X_input)\n X = Conv2D(c, (f, f), strides=(s, s), name=conv)(X)\n X = BatchNormalization(name=conv + '_bn')(X)\n X = Activation(act, name=conv + '_act')(X)\n X = MaxPooling2D((o, o), name=conv + '_pool')(X)\n else:\n X = ZeroPadding2D((p, p), name=conv + '_pad')(X)\n X = Conv2D(c, (f, f), strides=(s, s), name=conv)(X)\n X = BatchNormalization(name=conv + '_bn')(X)\n X = Activation(act, name=conv + '_act')(X)\n X = MaxPooling2D((o, o), name=conv + '_pool')(X)\n\n X = Flatten()(X)\n\n # BN -> FullyConnected blocks\n for (i, fc) in enumerate(fc_params):\n n = fc_params[fc][fc + '_neurons']\n act = fc_params[fc][fc + '_activation']\n X = BatchNormalization(name=fc + '_bn')(X)\n X = Dropout(drop_rate, 
name=fc + '_drop')(X)\n X = Dense(n, activation=act, name=fc + '_act')(X)\n\n # create model\n super().__init__(outputs=X, inputs=X_input)\n\n # set param attributes\n self.conv_params = conv_params\n self.fc_params = fc_params\n\n def compile(self, **kwargs):\n \"\"\"Wrap compile method with defaults.\"\"\"\n defaults = dict(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n new_kwargs = {**defaults, **kwargs}\n super().compile(**new_kwargs)\n\n def fit(self, X_train, Y_train, X_val, Y_val, \n checks_dir='models/keras_checkpoints',\n early_stop_kwargs={}, checkpoint_kwargs={},\n lrreduce_kwargs={}, **kwargs):\n \"\"\"Wrap fit method with defaults.\n\n Parameters\n ----------\n X_train: numpy.ndarray\n Array of training data inputs\n Y_train: numpy.ndarray\n Array of training data outputs\n X_val: numpy.ndarray\n Array of validation data inputs\n Y_val: numpy.ndarray\n Array of validation data outputs\n checks_dir: str, default 'models/'\n Path to directory for saving checkpoints\n early_stop_kwargs: dict, default empty\n Keyword arguments for early stopping callback\n checkpoint_stop_kwargs: dict, default empty\n Keyword arguments for checkpoint callback\n lrreduce_kwargs: dict, default empty\n Keyword arguments for reduce learning rate on plateau callback\n **kwargs:\n Keyword arguments for keras.Model.fit\n\n \"\"\"\n # Stop training if validation accuracy doesn't improve\n early_stop_defaults = dict(monitor='val_acc',\n min_delta=0,\n patience=10,\n verbose=1,\n mode='auto')\n early_stop_kwargs = {**early_stop_defaults, **early_stop_kwargs}\n early_stopping = EarlyStopping(**early_stop_kwargs)\n\n # Save if validation accuracy improves\n checkpoint_defaults = dict(monitor='val_acc',\n verbose=1,\n save_best_only=True,\n save_weights_only=True)\n checkpoint_kwargs = {**checkpoint_defaults, **checkpoint_kwargs}\n checkpoint_model_name = ('FashionMNISTCNN-epoch-{epoch:02d}' +\n '-val_acc-{val_acc:.4f}.hdf5')\n path = os.path.join(checks_dir, checkpoint_model_name)\n checkpointer = ModelCheckpoint(path, **checkpoint_kwargs)\n\n # Reduce learning rate if accuracy plateaus\n lrreduce_defaults = dict(monitor='val_acc',\n factor=0.1,\n patience=10,\n verbose=1)\n lrreduce_kwargs = {**lrreduce_defaults, **lrreduce_kwargs}\n lrreduce = ReduceLROnPlateau(**lrreduce_kwargs)\n\n # Track best validation accuracy\n best_val_acc = BestValAcc()\n\n callbacks = [early_stopping, best_val_acc, checkpointer,\n lrreduce]\n\n fit_defaults = dict(batch_size=128,\n validation_data=(X_val, Y_val),\n epochs=1,\n verbose=1,\n callbacks=callbacks)\n fit_kwargs = {**fit_defaults, **kwargs}\n history = super().fit(X_train, Y_train, **fit_kwargs)\n return history\n\n\nclass FashionMNISTCNN(CNN):\n \n @staticmethod\n def _create_test_set(X_train, Y_train, test_size=10000, seed=27):\n # random seed for reproducibility\n np.random.seed(seed)\n # create dataframe for convenience\n train_df = pd.DataFrame(X_train.reshape(X_train.shape[0], 784))\n train_df['label'] = Y_train\n # store slices for later concatenation \n slices = []\n # get slices for all the classes\n for class_label in train_df['label'].unique():\n # slice all rows for this class\n class_slice = train_df[train_df['label'] == class_label]\n # get indices for test rows\n indices = np.random.choice(class_slice.index.values, \n size=test_size//10, \n replace=False)\n # slice for these indices\n slices += [class_slice.loc[indices, : ]]\n # drop rows for these indices \n train_df = train_df.drop(index=indices)\n # collect slices into a 
dataframe\n test_df = pd.concat(slices, ignore_index=True)\n # convert back to numpy arrays\n X_train = train_df.drop(columns=['label']).values\n Y_train = train_df['label'].values\n X_test = test_df.drop(columns=['label']).values\n Y_test = test_df['label'].values\n # reshape inputs\n X_train = X_train.reshape(X_train.shape[0], 28, 28)\n X_test = X_test.reshape(Y_test.shape[0], 28, 28)\n # return numpy arrays of values\n return X_train, Y_train, X_test, Y_test\n \n @staticmethod\n def load_data(train_path='data/train.hdf5', val_path='data/val.hdf5',\n test_path='data/test.hdf5'):\n \"\"\"Load Fashion MNIST data.\"\"\"\n # check if data files exist locally\n try:\n with h5py.File(train_path) as hf:\n X_train = np.array(hf['X_train'])\n Y_train = np.array(hf['Y_train'])\n with h5py.File(val_path) as hf:\n X_val = np.array(hf['X_val'])\n Y_val = np.array(hf['Y_val'])\n with h5py.File(test_path) as hf:\n X_test = np.array(hf['X_test'])\n Y_test = np.array(hf['Y_test'])\n \n # if not get and save locally\n except:\n (X_train, Y_train), (X_val, Y_val) = fashion_mnist.load_data()\n X_train, Y_train, X_test, Y_test = FashionMNISTCNN._create_test_set(X_train, \n Y_train)\n with h5py.File(train_path, 'w') as hf:\n hf.create_dataset('X_train', data=X_train)\n hf.create_dataset('Y_train', data=Y_train)\n with h5py.File(val_path, 'w') as hf:\n hf.create_dataset('X_val', data=X_val)\n hf.create_dataset('Y_val', data=Y_val)\n with h5py.File(test_path, 'w') as hf:\n hf.create_dataset('X_test', data=X_test)\n hf.create_dataset('Y_test', data=Y_test)\n\n return X_train, Y_train, X_val, Y_val, X_test, Y_test\n \n\n @staticmethod\n def prepare_data(X_train, Y_train, X_val, Y_val, X_test=None, Y_test=None):\n \"\"\"Prepare data for model.\"\"\"\n # reshape for keras\n X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\n X_val = X_val.reshape(X_val.shape[0], 28, 28, 1)\n\n # Normalize pixel values\n X_train = X_train.astype('float32')\n X_val = X_val.astype('float32')\n X_train /= 255\n X_val /= 255\n \n # One-hot encode image classes\n Y_train = to_categorical(Y_train, 10)\n Y_val = to_categorical(Y_val, 10)\n \n if X_test is not None:\n X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\n X_test = X_test.astype('float32')\n X_test /= 255\n \n if Y_test is not None:\n Y_test = to_categorical(Y_test, 10)\n\n return X_train, Y_train, X_val, Y_val, X_test, Y_test\n \n @staticmethod\n def upload_checks_to_s3(checks_output_path, checks_dir):\n \"\"\"Put keras checkpoints in outside s3 bucket\"\"\"\n s3_resource = boto3.resource('s3')\n bucket_name = os.path.dirname(checks_output_path).split('//')[1]\n prefix = os.path.basename(checks_output_path)\n bucket = s3_resource.Bucket(bucket_name)\n\n for _, _, files in os.walk(checks_dir):\n for file in files:\n file_path = os.path.join(checks_dir, file)\n with open(file_path, 'rb') as data:\n bucket.put_object(Key=os.path.join(prefix, file), Body=data)\n\n @staticmethod\n def save_history(history, checks_dir):\n \"\"\"Save keras history in checkpoints directory\"\"\"\n # convert the history.history dict to a pandas DataFrame: \n history_df = pd.DataFrame(history.history) \n history_df['epoch'] = history_df.index + 1\n # or save to csv: \n history_csv_file = 'FashionMNISTCNN-history.csv'\n path = os.path.join(checks_dir, history_csv_file)\n with open(path, mode='w') as f:\n history_df.to_csv(f, index=False)\n \nif __name__ == '__main__':\n\n # parse model parameters from command line\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--epochs', type=int, 
default=1)\n parser.add_argument('--batch-size', type=int, default=100)\n parser.add_argument('--drop-rate', type=float, default=0.0)\n parser.add_argument('--checks-out-path', type=str, \n default=os.environ.get('SM_MODEL_DIR'))\n parser.add_argument('--gpu-count', type=int,\n default=os.environ.get('SM_NUM_GPUS'))\n parser.add_argument('--train', type=str,\n default=os.environ.get('SM_CHANNEL_TRAIN'))\n parser.add_argument('--val', type=str,\n default=os.environ.get('SM_CHANNEL_VAL'))\n parser.add_argument('--test', type=str,\n default=os.environ.get('SM_CHANNEL_TEST'))\n parser.add_argument('--model', type=str,\n default=os.environ.get('SM_MODEL_DIR'))\n parser.add_argument('--checks', type=str,\n default=os.environ.get('SM_CHANNEL_CHECKS'))\n\n # architecture hyperparameters\n parser.add_argument('--conv0_pad', type=int, default=1)\n parser.add_argument('--conv0_channels', type=int, default=32)\n parser.add_argument('--conv0_filter', type=int, default=3)\n parser.add_argument('--conv0_stride', type=int, default=1)\n parser.add_argument('--conv0_pool', type=int, default=1)\n parser.add_argument('--conv0_activation', type=str, default='relu')\n\n parser.add_argument('--conv1_pad', type=int, default=1)\n parser.add_argument('--conv1_channels', type=int, default=64)\n parser.add_argument('--conv1_filter', type=int, default=3)\n parser.add_argument('--conv1_stride', type=int, default=1)\n parser.add_argument('--conv1_pool', type=int, default=2)\n parser.add_argument('--conv1_activation', type=str, default='relu')\n\n parser.add_argument('--conv2_pad', type=int, default=1)\n parser.add_argument('--conv2_channels', type=int, default=128)\n parser.add_argument('--conv2_filter', type=int, default=3)\n parser.add_argument('--conv2_stride', type=int, default=1)\n parser.add_argument('--conv2_pool', type=int, default=2)\n parser.add_argument('--conv2_activation', type=str, default='relu')\n\n parser.add_argument('--fc0_neurons', type=int, default=512)\n parser.add_argument('--fc0_activation', type=str, default='relu')\n parser.add_argument('--fc1_neurons', type=int, default=256)\n parser.add_argument('--fc1_activation', type=str, default='relu')\n\n # store parameters\n args, _ = parser.parse_known_args()\n\n epochs = args.epochs\n batch_size = args.batch_size\n drop_rate = args.drop_rate\n gpu_count = args.gpu_count\n model_dir = args.model\n train_dir = args.train\n val_dir = args.val\n test_dir = args.test\n checks_dir = args.checks\n checks_out_path = args.checks_out_path\n \n conv0_params = {'conv0_pad': args.conv0_pad,\n 'conv0_channels': args.conv0_channels,\n 'conv0_filter': args.conv0_filter,\n 'conv0_stride': args.conv0_stride,\n 'conv0_pool': args.conv0_pool,\n 'conv0_activation': args.conv0_activation}\n conv1_params = {'conv1_pad': args.conv1_pad,\n 'conv1_channels': args.conv1_channels,\n 'conv1_filter': args.conv1_filter,\n 'conv1_stride': args.conv1_stride,\n 'conv1_pool': args.conv1_pool,\n 'conv1_activation': args.conv1_activation}\n conv2_params = {'conv2_pad': args.conv2_pad,\n 'conv2_channels': args.conv2_channels,\n 'conv2_filter': args.conv2_filter,\n 'conv2_stride': args.conv2_stride,\n 'conv2_pool': args.conv2_pool,\n 'conv2_activation': args.conv2_activation}\n fc0_params = {'fc0_neurons': args.fc0_neurons,\n 'fc0_activation': args.fc0_activation}\n fc1_params = {'fc1_neurons': args.fc1_neurons,\n 'fc1_activation': args.fc1_activation}\n fc2_params = {'fc2_neurons': 10,\n 'fc2_activation': 'softmax'}\n\n # collect layer parameters\n conv_params = {'conv0': conv0_params, 
'conv1': conv1_params,\n 'conv2': conv2_params}\n fc_params = {'fc0': fc0_params, 'fc1': fc1_params, 'fc2': fc2_params}\n\n # create model\n input_shape = (28, 28, 1)\n model = FashionMNISTCNN(input_shape, conv_params, fc_params, drop_rate)\n\n print(model.summary())\n\n # load and prepare data\n train_path = os.path.join(train_dir, 'train.hdf5')\n val_path = os.path.join(val_dir, 'val.hdf5')\n test_path = os.path.join(test_dir, 'test.hdf5')\n\n X_train, Y_train, X_val, Y_val, _, _ = model.load_data(train_path=train_path,\n val_path=val_path,\n test_path=test_path)\n X_train, Y_train, X_val, Y_val, _, _ = model.prepare_data(X_train, Y_train,\n X_val, Y_val)\n\n # compile model with defaults\n model.compile()\n\n # use multiple gpus if present\n if gpu_count > 1:\n model = multi_gpu_model(model, gpus=gpu_count)\n\n # fit model\n history = model.fit(X_train, Y_train, X_val, Y_val,\n checks_dir=checks_dir,\n batch_size=batch_size,\n epochs=epochs)\n\n # upload Keras checkpoints and history to s3\n model.save_history(history, checks_dir)\n model.upload_checks_to_s3(checks_out_path, checks_dir)\n \n # save Keras model for Tensorflow Serving\n sess = K.get_session()\n simple_save(sess,\n os.path.join(model_dir, 'model/1'),\n inputs={'inputs': model.input},\n outputs={t.name: t for t in model.outputs})\n","sub_path":"train_script_sagemaker.py","file_name":"train_script_sagemaker.py","file_ext":"py","file_size_in_byte":19645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"444939880","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\n\nfrom tflearnpipe.fitting import _fit_on_batch, _fit_on_single_epoch, _fit_on_generator, _fit\n\nfrom .simple_model import SimpleModelForTest\n\n\nclass FittingTest(tf.test.TestCase, SimpleModelForTest):\n\n def setUp(self):\n self.graph = SimpleModelForTest.build_graph()\n X = np.random.uniform(-1, 1, 20).reshape([-1, 5])\n Y = 3 * X + 2\n\n def generator_test():\n for row_i in range(X.shape[0]):\n yield {\n 'x_place': X[row_i: row_i + 1],\n 'y_place': Y[row_i: row_i + 1],\n }\n self.generator_test = generator_test\n\n self.data = {\n 'x': X,\n 'y': Y,\n }\n self.nodes = [self.graph['loss_tns:0'], self.graph['train_op']]\n self.placeholders = {\n 'x_place': self.graph['x_place:0'],\n 'y_place': self.graph['y_place:0'],\n }\n self.test_session = tf.Session()\n self.test_session.run(tf.global_variables_initializer())\n\n def test_fit_on_batch(self):\n cases = {\n \"1\": {\"valid_data_generator\": None, \"valid\": False},\n \"2\": {\"valid_data_generator\": self.generator_test, \"valid\": False},\n \"3\": {\"valid_data_generator\": self.generator_test, \"valid\": True},\n }\n answers = {\n \"1\": None,\n \"2\": None,\n \"3\": {},\n }\n\n x_train = np.random.uniform(-1, 1, 20).reshape([-1, 5])\n y_train = 3 * x_train + 2\n\n for case_num, case in cases.items():\n result = _fit_on_batch(\n session=self.test_session,\n subtrain_data={\n 'x_place': x_train,\n 'y_place': y_train,\n },\n nodes=self.nodes,\n placeholders=self.placeholders,\n skip_check=False,\n valid_data_generator=case['valid_data_generator'],\n valid=case['valid'],\n )\n self.assertEqual(2, len(result))\n self.assertEqual(type(result[0][0]), np.float32)\n self.assertEqual(type(result[1]), type(answers[case_num]))\n\n self.test_session.close()\n\n def test_fit_on_epoch(self):\n cases = {\n \"1\": {\"valid_data_generator\": None},\n \"2\": {\"valid_data_generator\": self.generator_test},\n }\n\n answers = {\n \"1\": type(None),\n \"2\": dict,\n }\n\n for case_num, case in cases.items():\n result = _fit_on_single_epoch(\n session=self.test_session,\n subtrain_data_generator=self.generator_test,\n nodes=self.nodes,\n placeholders=self.placeholders,\n valid_data_generator=case['valid_data_generator'],\n validate_at_each_batch=False,\n )\n self.assertEqual(2, len(result))\n self.assertIsInstance(result[0], dict)\n self.assertEqual(set(result[0].keys()),\n set(['num_batch', 'result_sum']))\n self.assertIsInstance(result[1], answers[case_num])\n\n self.test_session.close()\n\n def test_fit_on_generator(self):\n _fit_on_generator(\n session=self.test_session,\n subtrain_data_generator=self.generator_test,\n nodes=self.nodes,\n placeholders=self.placeholders,\n num_epochs=10,\n valid_data_generator=None,\n validate_at_each_batch=False,\n callbacks=None,\n )\n _fit_on_generator(\n session=self.test_session,\n subtrain_data_generator=self.generator_test,\n nodes=self.nodes,\n placeholders=self.placeholders,\n num_epochs=10,\n valid_data_generator=self.generator_test,\n validate_at_each_batch=False,\n callbacks=None,\n )\n self.test_session.close()\n\n def test_fit(self):\n _fit(\n session=self.test_session,\n subtrain_data=self.data,\n nodes=self.nodes,\n placeholders=self.placeholders,\n num_epochs=10,\n batch_size=2,\n valid_data=None,\n validate_at_each_batch=False,\n callbacks=None,\n )\n\n _fit(\n session=self.test_session,\n subtrain_data=self.data,\n nodes=self.nodes,\n placeholders=self.placeholders,\n num_epochs=10,\n 
batch_size=2,\n valid_data=self.data,\n validate_at_each_batch=False,\n callbacks=None,\n )\n","sub_path":"test/test_fitting.py","file_name":"test_fitting.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"72429673","text":"import pandas as pd\r\nfrom scipy import stats\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\n##import seaborn as sns\r\nfrom sklearn.decomposition import PCA\r\nimport numpy as np\r\nfrom sklearn.datasets import make_blobs\r\nfrom sklearn.metrics import pairwise_distances\r\nfrom sklearn.cluster import KMeans\r\n\r\ndef compute_inertia(a, X):\r\n W = [np.mean(pairwise_distances(X[a == c, :])) for c in np.unique(a)]\r\n return np.mean(W)\r\n\r\ndef compute_gap(clustering, data, k_max=5, n_references=5):\r\n if len(data.shape) == 1:\r\n data = data.reshape(-1, 1)\r\n reference = np.random.rand(*data.shape)\r\n reference_inertia = []\r\n for k in range(1, k_max+1):\r\n local_inertia = []\r\n for _ in range(n_references):\r\n clustering.n_clusters = k\r\n assignments = clustering.fit_predict(reference)\r\n local_inertia.append(compute_inertia(assignments, reference))\r\n reference_inertia.append(np.mean(local_inertia))\r\n \r\n ondata_inertia = []\r\n for k in range(1, k_max+1):\r\n clustering.n_clusters = k\r\n assignments = clustering.fit_predict(data)\r\n ondata_inertia.append(compute_inertia(assignments, data))\r\n \r\n gap = np.log(reference_inertia)-np.log(ondata_inertia)\r\n return gap, np.log(reference_inertia), np.log(ondata_inertia)\r\n\r\n\r\n\r\ndef main():\r\n\r\n#### Read in data\r\n df=pd.read_csv('attractors_discrete.txt', delim_whitespace=True,index_col = [\"name\"])\r\n #df_tr = df.drop('name',axis=1)\r\n \r\n\r\n\r\n### Plot sum of squares using sklearn command\r\n## Sum_of_squared_distances = [] #list of sum of squares values\r\n## K = range(1,5)\r\n## for k in K:\r\n## \r\n## km = KMeans(n_clusters=k)\r\n## km = km.fit(df)\r\n## Sum_of_squared_distances.append(km.inertia_)\r\n## plt.plot(K, Sum_of_squared_distances, 'bx-')\r\n## plt.xlabel('k')\r\n## plt.ylabel('Sum_of_squared_distances')\r\n## plt.title('Elbow Method For Optimal k')\r\n## plt.show()\r\n\r\n### Plot sum of squares and gap statistic using glowing python\r\n gap, reference_inertia, ondata_inertia = compute_gap(KMeans(), df)\r\n\r\n #inertia\r\n plt.plot(range(1, k_max+1), reference_inertia,'-o', label='reference') \r\n plt.plot(range(1, k_max+1), ondata_inertia,'-o', label='data')\r\n plt.xlabel('k')\r\n plt.ylabel('log(inertia)')\r\n plt.show()\r\n\r\n #gapstat\r\n plt.plot(range(1, k_max+1), gap, '-o')\r\n plt.ylabel('gap')\r\n plt.xlabel('k')\r\n\r\n#### plot kmeans\r\n#### kmeans = KMeans(n_clusters=4, random_state=0).fit(df_tr)\r\n#### labels = kmeans.labels_\r\n#### df_tr['clusters'] = labels\r\n#### print(df_tr)\r\nmain()\r\n\r\n","sub_path":"_site/_projects/project2/OLD/NetworkAnalysis 1/Clustering/kmeans_gapstat_sklearn.py","file_name":"kmeans_gapstat_sklearn.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"114037957","text":"import math,os,sys\n\ndef get_pos(graph):\n parents_node = []\n nodes_sucessor = {}\n new_pos = {}\n new_pos[\"BR\"] = (100,100)\n parents_node.append(\"BR\")\n nodes = graph.nodes()\n edges = graph.edges()\n for edge in edges:\n if edge[0] not in parents_node:\n parents_node.append(edge[0])\n for p_node in parents_node:\n no_of_nodes = len(graph.successors(p_node))\n try:\n angel = 360/no_of_nodes\n except:\n angel = 0\n\n count = 1\n for edge in edges:\n if edge[0] == p_node:\n\n try:\n p_x = new_pos[p_node][0]\n\n p_y = new_pos[p_node][1]\n\n except:\n print(\"Unwanted Routes\")\n\n sys.exit(0)\n try :\n ppx = float(new_pos[graph.predecessors(p_node)[0]][1])\n ppy = float(new_pos[graph.predecessors(p_node)[0]][0])\n d_y = p_y - ppx\n d_x = p_x - ppy\n\n angel = math.degrees(math.atan2(d_y,d_x)) + ((count-1) *15)\n x = p_x + 30 * math.cos(math.radians(angel))\n y = p_y + 30 * math.sin(math.radians(angel))\n\n except IndexError:\n node_angel = angel * count\n node_angel = math.radians(node_angel)\n y = p_y + 30 * math.sin(node_angel)\n x = p_x + 30 * math.cos(node_angel)\n\n new_pos[edge[1]]= (x,y)\n count = count + 1\n\n return new_pos\n","sub_path":"Learn/Testing/Test_Detail/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"66705719","text":"import cx_Oracle\nfrom .json_utils import load_json\nfrom .parameters import ordered_table_names\n\n\n# TABLE NAMES MUST BE IN ORDER TO SATISFY FOREIGN KEY CONSTRAINTS\ndef insert_json_db(json_file, conn, table_names, commit=True):\n tables_dict = json_file[\"tables\"]\n\n cursor = conn.cursor()\n\n for table_name in table_names:\n table_dict_list = tables_dict[table_name]\n\n # Add only if any value exists in the json file\n if len(table_dict_list) > 0:\n \n # List of row tuples ordered by the column name\n row_tuple_list = []\n\n for row_dict in table_dict_list:\n row_tuple = []\n\n for col_name in sorted(row_dict.keys()):\n row_tuple.append(row_dict[col_name])\n row_tuple_list.append(tuple(row_tuple))\n\n # Column names are based on last tuple which should be same for all\n col_names = ','.join(sorted(row_dict.keys()))\n\n # Make the command to execute ready\n command = f\"INSERT INTO {table_name} ({col_names}) VALUES (\"\n for i in range(len(row_tuple)):\n command += \":\" + str(i+1) + \",\"\n command = command[:-1] + \")\"\n cursor.bindarraysize = len(row_tuple_list)\n try:\n cursor.executemany(command, row_tuple_list)\n except cx_Oracle.IntegrityError as exc:\n error, = exc.args\n print('-'*70)\n print(error.code, error.message)\n print(command, row_tuple_list)\n\n\n if commit:\n conn.commit()\n print(f'Insertion commited.')\n\n","sub_path":"Project 2/cs_450_550_ha2_univ_db_template/cs_450_550_ha2_univ_db_template/lib/sql_binding/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"476501112","text":"from flask import request, Blueprint, jsonify\r\nfrom database.database import db\r\nfrom firebase_admin import firestore\r\nimport datetime\r\nimport time\r\nfrom flask_cors import CORS, cross_origin\r\n\r\n\r\ngetUpcomingTodo = Blueprint(\"getUpcomingTodo\", __name__)\r\nCORS(createUser)\r\n\r\n\r\n@getUpcomingTodo.route(\"/api/user//todo/upcoming\", methods=[\"GET\"])\r\n@cross_origin(headers=['Content-Type', 'Authorization'])\r\ndef getUpcomingTodoRoute(userId):\r\n if not userId:\r\n return (jsonify({\"error\": \"Missing userId\"}), 400)\r\n \r\n friends = db.collection(\"Friends\").where(\"userId\", \"==\", userId).stream()\r\n\r\n idDict = {doc.to_dict()[\"friendId\"]:True for doc in friends}\r\n idDict[userId] = True\r\n\r\n currentTime = time.time() * 1000\r\n\r\n docs = db.collection('Todo').where('dueDate', '>=', currentTime).order_by(u'dueDate', direction=firestore.Query.ASCENDING).stream()\r\n \r\n sortedResults = [doc.to_dict() for doc in docs]\r\n \r\n res = []\r\n \r\n users = db.collection('Users').stream()\r\n user_dicts = [user.to_dict() for user in users]\r\n\r\n for todo in sortedResults:\r\n if idDict.get(todo[\"userId\"]) and not todo.get(\"status\"):\r\n for user in user_dicts:\r\n if todo[\"userId\"] == user[\"userId\"]:\r\n todo[\"selectedBadge\"] = user[\"selectedBadge\"]\r\n todo[\"selectedBorder\"] = user[\"selectedBorder\"]\r\n todo[\"selectedCelebration\"] = user[\"selectedCelebration\"]\r\n todo[\"profileUrl\"] = user[\"imageUrl\"]\r\n todo[\"fullName\"] = user[\"fullName\"]\r\n res.append(todo)\r\n \r\n # custom items, full name, and imageUrl\r\n return {\"todos\": res}","sub_path":"routes/GetUpcomingTodo.py","file_name":"GetUpcomingTodo.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"102478318","text":"# Snakemake pipe for RNAseq\n# (c) 2017 Ali Rassolie\n# SINGLE-END\n__version__ = \"0.1.6 GIMLET\"\n\ntry:\n import scripts.art as a\n print(\nf\"\"\"\n***********************************************************\n\nVERSION = {__version__}\n\n***********************************************************\n\n\n\"\"\")\n print(a.Art.name)\n print(a.Art.homer)\n #input()\nexcept ImportError as e:\n print(e)\n \n\nfrom subprocess import call\nfrom collections import OrderedDict\nimport os, re\n\nclass FileExpander:\n def __init__(self):\n\n# This is the config file, necessary effective functioning.\n# Please do note that we are doing this relative to the Snakefile folder, and not the scripts\n# folder. Some strange python thing.\n configfile = \"scripts/config.yaml\"\n paired_clean = lambda i: [ re.findall(r\"(.*)_\\d\\.fastq\\.gz\", z)[0] for z in i if re.findall(r\"(.*)_\\d\\.fastq\\.gz\", z)]\n single_clean = lambda i: [ re.findall(r\"(.*)\\.fastq\\.gz\", z)[0] for z in i if re.findall(r\"(.*)\\.fastq\\.gz\", z)]\n# Here we open the configfile to access the parent directory; we should maybe use JSON formatting,\n# snakemake might not like it alas.\n try:\n PARENT_DIR, SYMLINKED_DIR = self.opener(configfile)\n except FileNotFoundError as e:\n PARENT_DIR, SYMLINKED_DIR = self.opener(\"config.yaml\")\n# Now we are defining the global variable FULL_PATHS\n# The variable is important to produce the file-names\n# the rules require\n\n# Please do make sure that the input PARENT_DIR is a list.\n self.FULL_PATHS = file_expander([PARENT_DIR])\n self.SYMLINKED_PATHS, self.LOG_PATHS = symlink_creator(origin=self.FULL_PATHS, target=SYMLINKED_DIR, n=self.NUMBER_OF_FOLDERS)\n \n\n# This lambda func will replace remove the suffix of the files that we find\n\n if self.run_type == \"single\":\n self.FULL_PATHS = single_clean(self.FULL_PATHS)\n self.SYMLINKED_PATHS = single_clean(self.SYMLINKED_PATHS)\n elif self.run_type == \"paired\":\n self.FULL_PATHS = paired_clean(self.FULL_PATHS)\n self.SYMLINKED_PATHS = paired_clean(self.SYMLINKED_PATHS)\n else: \n return ValueError\n\n self.SYMLINKED_PATHS = list(OrderedDict.fromkeys(self.SYMLINKED_PATHS))\n self.NO_DATA_PATH = [ \"/\".join(i.split(\"/\")[:-2]) for i in self.SYMLINKED_PATHS ]\n self.FILES = [ i.split(\"/\")[-1] for i in self.SYMLINKED_PATHS ]\n self.LOG_PATHS = list(OrderedDict.fromkeys(self.LOG_PATHS))\n\n def opener(self, i):\n with open(i, \"r\") as file:\n text_data = file.read().replace('\"','').replace(\"'\",\"\").split(\"\\n\")\n text_data = { i.split(\": \")[0]:i.split(\": \")[1] for i in text_data if i}\n PARENT_DIR = text_data[\"PARENT_DIR\"]\n SYMLINKED_DIR = text_data[\"SYMLINKED_DIR\"]\n self.NUMBER_OF_FOLDERS = int(text_data[\"NUMBER_OF_FOLDERS\"])\n self.run_type = text_data[\"RUN_TYPE\"]\n return PARENT_DIR, SYMLINKED_DIR\n\n# Please do note that the origin and the target must be a list; perhaps a dictionary\n # This is the complete list of all the files that the parent_dir contained.\n # symlink_creator is a method that will create symbolic links between the origins and the targets.\n\ndef symlink_creator(origin=None, target=None, n=4):\n origins = origin\n target = target\n file_names = [ \"/\".join(i.split(\"/\")[-n:-1]) for i in origins]\n files = [ i.split(\"/\")[-1] for i in origins ]\n uncreated_dirs = [ \"/\".join(i.split(\"/\")[-n:-1]) for i in origins ]\n uncreated_dirs = [ f\"{target}/{i}\" for i in uncreated_dirs ]\n target_paths = [ f\"{target}/{i[0]}/data/{i[1]}\" for i in zip(file_names,files) 
]\n no_data_target = [ f\"{target}/{i[0]}\" for i in file_names ]\n log_paths = [ f\"{target}/{i}/log\" for i in file_names ]\n\n for dir_ in zip(uncreated_dirs, files):\n call([\"mkdir\", \"-p\", f\"{dir_[0]}/data\", f\"{dir_[0]}/log/{dir_[1]}\"])\n\n for each_target in zip(origin, target_paths):\n call([\"ln\", \"-s\", each_target[0], each_target[1]])\n\n return target_paths, log_paths\n\ndef isgz(i):\n param = re.search(r\"\\w\\.gz$\", i)\n try:\n assert param\n return True\n except AssertionError:\n return False\n\n# In this case we have the following file-tree structure:\n# .../Experiment/sample/run/... and we thus provide n=3\n# so as to expand the third dir, and return its contents.\ndef file_expander(parent, n=3, delimiter=\"/\"):\n for i in range(n):\n if i == n-1:\n files = []\n for parent_path in parent:\n\n files.extend([f\"{parent_path}{delimiter}{k}\" for k in os.listdir(parent_path) if not os.path.isdir(f\"{parent_path}{delimiter}{k}\") and isgz(f\"{parent_path}{delimiter}{k}\")])\n return files\n else:\n parent = expand(parent, delimiter)\n\n# expand() method will take an array containing paths, and return a list where the\n# paths have been opened and returned in a list.\ndef expand(f, delimiter=\"/\"):\n l = []\n for each in f:\n # This list compr will expand the path of spec index in the input path list\n # and it will be appending children that are directories themselves.\n files_in_d = [ k for k in os.listdir(each) if os.path.isdir(f\"{each}{delimiter}{k}\")]\n\n for i in files_in_d:\n l.append(f\"{each}{delimiter}{i}\")\n return l\n","sub_path":"single_end/scripts/file_name_producer.py","file_name":"file_name_producer.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"112109999","text":"\"\"\"\n实现打印出10以内的乘法表\n\n实现内容:\n1,手动1~10的整数,打印乘法表\n2,输入错误,让再次输入,输入符合要求后,立即打印乘法表\n3,允许输错的次数是5次,超过错误次数,退出程序。\n\"\"\"\n\ndef multiplication_table(s):\n for i in range(1, s + 1):\n for j in range(1, i + 1):\n print(\"%d x %d = %2d\" % (j, i, i * j), end='; ')\n print('\\n')\n\nif __name__ in \"__main__\":\n i = 0\n while (i<5):\n s = input(\"请输入十以内的整数:\")\n if s.isdigit():\n s = int(s)\n if s in range(1,11):\n multiplication_table(s)\n break\n else:\n i = i+1\n else:\n i = i+1\n if (i==5):\n print(\"超过输入次数,程序退出!\")\n exit()","sub_path":"打印十以内的乘法表.py","file_name":"打印十以内的乘法表.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"488102394","text":"import argparse\nimport datetime\nimport torch\nimport numpy as np\nimport math\nimport random\nimport os\nimport logging\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nfrom models.boosted_vae import BoostedVAE\nfrom models.realnvp import RealNVPFlow\nfrom models.iaf import IAFFlow\nfrom models.planar import PlanarFlow\nfrom models.radial import RadialFlow\nfrom models.liniaf import LinIAFFlow\nfrom models.affine import AffineFlow\nfrom models.nlsq import NLSqFlow\n\nfrom utils.density_plotting import plot\nfrom utils.load_data import make_toy_density, make_toy_sampler\nfrom main_experiment import init_log\nfrom utils.warmup_scheduler import GradualWarmupScheduler\n\n\nlogger = logging.getLogger(__name__)\n\n\nTOY_DATASETS = [\"8gaussians\", \"2gaussians\", \"1gaussian\", \"swissroll\", \"rings\", \"moons\", \"pinwheel\", \"cos\", \"2spirals\", \"checkerboard\", \"line\", \"circles\", \"joint_gaussian\"]\nENERGY_FUNS = ['u0', 'u1', 'u2', 'u3', 'u4', 'u5', 'u6']\nG_MAX_LOSS = -10.0\n\nparser = argparse.ArgumentParser(description='PyTorch Ensemble Normalizing flows')\nparser.add_argument('--experiment_name', type=str, default=\"toy\",\n help=\"A name to help identify the experiment being run when training this model.\")\nparser.add_argument('--dataset', type=str, default='mnist', help='Dataset choice.', choices=TOY_DATASETS + ENERGY_FUNS)\nparser.add_argument('--mog_sigma', type=float, default=1.5, help='Variance in location of mixture of gaussian data.',\n choices=[i / 100.0 for i in range(50, 250)])\nparser.add_argument('--mog_clusters', type=int, default=6, help='Number of clusters to use in the mixture of gaussian data.',\n choices=range(1,13))\n\n# seeds\nparser.add_argument('--manual_seed', type=int, default=123,\n help='manual seed, if not given resorts to random seed.')\n\n# gpu/cpu\nparser.add_argument('--gpu_id', type=int, default=0, metavar='GPU', help='choose GPU to run on.')\nparser.add_argument('--num_workers', type=int, default=1,\n help='How many CPU cores to run on. Setting to 0 uses os.cpu_count() - 1.')\nparser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')\n\n# Reporting\nparser.add_argument('--log_interval', type=int, default=1000,\n help='how many batches to wait before logging training status. Set to <0 to turn off.')\nparser.add_argument('--plot_interval', type=int, default=1000,\n help='how many batches to wait before creating reconstruction plots. 
Set to <0 to turn off.')\nparser.add_argument('--no_tensorboard', dest=\"tensorboard\", action=\"store_false\", help='Turns off saving results to tensorboard.')\nparser.set_defaults(tensorboard=True)\n\nparser.add_argument('--out_dir', type=str, default='./results/snapshots', help='Output directory for model snapshots etc.')\nparser.add_argument('--data_dir', type=str, default='./data/raw/', help=\"Where raw data is saved.\")\nparser.add_argument('--exp_log', type=str, default='./results/toy_experiment_log.txt', help='File to save high-level results from each run of an experiment.')\nparser.add_argument('--print_log', dest=\"save_log\", action=\"store_false\", help='Add this flag to have progress printed to log (rather than saved to a file).')\nparser.set_defaults(save_log=True)\n\nsr = parser.add_mutually_exclusive_group(required=False)\nsr.add_argument('--save_results', action='store_true', dest='save_results', help='Save results from experiments.')\nsr.add_argument('--discard_results', action='store_false', dest='save_results', help='Do NOT save results from experiments.')\nparser.set_defaults(save_results=True)\nparser.add_argument('--plot_resolution', type=int, default=250, help='how many points to plot, higher gives better resolution')\n\n# optimization settings\nparser.add_argument('--num_steps', type=int, default=100000, help='number of training steps to take (default: 100000)')\nparser.add_argument('--batch_size', type=int, default=256, help='input batch size for training (default: 64)')\nparser.add_argument('--learning_rate', type=float, default=0.005, help='learning rate')\nparser.add_argument('--regularization_rate', type=float, default=0.8, help='Regularization penalty for boosting.')\nparser.add_argument('--iters_per_component', type=int, default=10000, help='how often to train each boosted component before changing')\nparser.add_argument('--max_beta', type=float, default=1.0, help='max beta for warm-up')\nparser.add_argument('--min_beta', type=float, default=0.0, help='min beta for warm-up')\nparser.add_argument('--no_annealing', action='store_true', default=False, help='disables annealing while training')\nparser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay parameter in Adamax')\nparser.add_argument('--no_lr_schedule', action='store_true', default=False, help='Disables learning rate scheduler during training')\nparser.add_argument('--lr_schedule', type=str, default=None, help=\"Type of LR schedule to use.\", choices=['plateau', 'cosine', None])\nparser.add_argument('--patience', type=int, default=5000, help='If using LR schedule, number of steps before reducing LR.')\nparser.add_argument(\"--max_grad_clip\", type=float, default=0, help=\"Max gradient value (clip above max_grad_clip, 0 for off)\")\nparser.add_argument(\"--max_grad_norm\", type=float, default=100.0, help=\"Max norm of gradient (clip above max_grad_norm, 0 for off)\")\nparser.add_argument(\"--warmup_iters\", type=int, default=0, help=\"Use this number of iterations to warmup learning rate linearly from zero to learning rate\")\n\n# flow parameters\nparser.add_argument('--flow', type=str, default='planar',\n choices=['planar', 'radial', 'liniaf', 'affine', 'nlsq', 'boosted', 'iaf', 'realnvp'],\n help=\"\"\"Type of flows to use, no flows can also be selected\"\"\")\nparser.add_argument('--num_flows', type=int, default=2, help='Number of flow layers, ignored in absence of flows')\n\nparser.add_argument('--h_size', type=int, default=16, help='Width of layers in base networks of iaf 
and realnvp. Ignored for all other flows.')\nparser.add_argument('--coupling_network_depth', type=int, default=1, help='Number of extra hidden layers in the base network of iaf and realnvp. Ignored for all other flows.')\nparser.add_argument('--coupling_network', type=str, default='tanh', choices=['relu', 'residual', 'tanh', 'random', 'mixed'],\n help='Base network for RealNVP coupling layers. Random chooses between either Tanh or ReLU for every network, whereas mixed uses ReLU for the T network and TanH for the S network.')\nparser.add_argument('--no_batch_norm', dest='batch_norm', action='store_false', help='Disables batch norm in realnvp layers')\nparser.set_defaults(batch_norm=True)\nparser.add_argument('--z_size', type=int, default=2, help='how many stochastic hidden units')\n\n# Boosting parameters\nparser.add_argument('--rho_init', type=str, default='decreasing', choices=['decreasing', 'uniform'],\n help='Initialization scheme for boosted parameter rho')\nparser.add_argument('--rho_iters', type=int, default=100, help='Maximum number of SGD iterations for training boosting weights')\nparser.add_argument('--rho_lr', type=float, default=0.005, help='Initial learning rate used for training boosting weights')\nparser.add_argument('--num_components', type=int, default=4,\n help='How many components are combined to form the flow')\nparser.add_argument('--component_type', type=str, default='affine', choices=['liniaf', 'affine', 'nlsq', 'realnvp'],\n help='When flow is boosted -- what type of flow should each component implement.')\n\n\n\ndef parse_args(main_args=None):\n \"\"\"\n Parse command line arguments and compute number of cores to use\n \"\"\"\n args = parser.parse_args(main_args)\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n args.device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n args.density_matching = args.dataset.startswith('u')\n args.dynamic_binarization = False\n args.input_type = 'binary'\n args.input_size = [2]\n args.density_evaluation = True\n args.shuffle = True\n args.train_size = args.iters_per_component\n\n # Set a random seed if not given one\n if args.manual_seed is None:\n args.manual_seed = random.randint(1, 100000)\n\n random.seed(args.manual_seed)\n torch.manual_seed(args.manual_seed)\n torch.cuda.manual_seed_all(args.manual_seed)\n np.random.seed(args.manual_seed)\n\n # intialize snapshots directory for saving models and results\n args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_').replace(':', '_').replace('-', '_')\n args.experiment_name = args.experiment_name + \"_\" if args.experiment_name is not None else \"\"\n args.snap_dir = os.path.join(args.out_dir, args.experiment_name + args.flow)\n\n lr_schedule = f'_lr{str(args.learning_rate)[2:]}'\n if args.lr_schedule is None or args.no_lr_schedule:\n args.no_lr_schedule = True\n args.lr_schedule = None\n else:\n args.no_lr_schedule = False\n lr_schedule += f'{args.lr_schedule}'\n\n if args.dataset in ['u5', 'mog']:\n dataset = f\"{args.dataset}_s{int(100 * args.mog_sigma)}_c{args.mog_clusters}\"\n else:\n dataset = args.dataset\n\n args.snap_dir += f'_seed{args.manual_seed}' + lr_schedule + '_' + dataset + f\"_bs{args.batch_size}\"\n\n args.boosted = args.flow == \"boosted\"\n if args.flow != 'no_flow':\n args.snap_dir += 'K' + str(args.num_flows)\n\n if args.flow in ['boosted', 'bagged']:\n if args.regularization_rate < 0.0:\n raise ValueError(\"For boosting the regularization rate should be greater than or equal to zero.\")\n args.snap_dir += '_' + 
args.component_type + '_C' + str(args.num_components)\n args.snap_dir += '_reg' + f'{int(100*args.regularization_rate):d}' if args.density_matching else ''\n\n if args.flow == 'iaf':\n args.snap_dir += '_hidden' + str(args.coupling_network_depth) + '_hsize' + str(args.h_size)\n\n if args.flow == \"realnvp\" or args.component_type == \"realnvp\":\n args.snap_dir += '_' + args.coupling_network + str(args.coupling_network_depth) + '_hsize' + str(args.h_size)\n \n is_annealed = \"\"\n if not args.no_annealing and args.min_beta < 1.0:\n is_annealed += \"_annealed\"\n else:\n args.min_beta = 1.0\n \n args.snap_dir += is_annealed + f'_{args.model_signature}/'\n if not os.path.exists(args.snap_dir):\n os.makedirs(args.snap_dir)\n\n init_log(args)\n \n # Set up multiple CPU/GPUs\n logger.info(\"COMPUTATION SETTINGS:\")\n logger.info(f\"Random Seed: {args.manual_seed}\\n\")\n if args.cuda:\n logger.info(\"\\tUsing CUDA GPU\")\n torch.cuda.set_device(args.gpu_id)\n else:\n logger.info(\"\\tUsing CPU\")\n if args.num_workers > 0:\n num_workers = args.num_workers\n else:\n num_workers = max(1, os.cpu_count() - 1)\n\n logger.info(\"\\tCores available: {} (only requesting {})\".format(os.cpu_count(), num_workers))\n torch.set_num_threads(num_workers)\n logger.info(\"\\tConfirmed Number of CPU threads: {}\".format(torch.get_num_threads()))\n\n kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}\n return args, kwargs\n\n\ndef init_model(args):\n if args.flow == 'boosted':\n model = BoostedVAE(args).to(args.device)\n elif args.flow == 'planar':\n model = PlanarFlow(args).to(args.device)\n elif args.flow == 'radial':\n model = RadialFlow(args).to(args.device)\n elif args.flow == 'liniaf':\n model = LinIAFFlow(args).to(args.device)\n elif args.flow == 'affine':\n model = AffineFlow(args).to(args.device)\n elif args.flow == 'nlsq':\n model = NLSqFlow(args).to(args.device)\n elif args.flow == 'iaf':\n model = IAFFlow(args).to(args.device)\n elif args.flow == \"realnvp\":\n model = RealNVPFlow(args).to(args.device)\n else:\n raise ValueError('Invalid flow choice')\n\n return model\n\n\ndef init_optimizer(model, args):\n \"\"\"\n group model parameters to more easily modify learning rates of components (flow parameters)\n \"\"\"\n logger.info('OPTIMIZER:')\n warmup_mult = 1000.0\n base_lr = (args.learning_rate / warmup_mult) if args.warmup_iters > 0 else args.learning_rate\n logger.info(f\"Initializing Adamax optimizer with base learning rate={args.learning_rate}, weight decay={args.weight_decay}.\")\n \n if args.flow == 'boosted':\n logger.info(\"For boosted model, grouping parameters according to Component Id:\")\n flow_params = {f\"{c}\": torch.nn.ParameterList() for c in range(args.num_components)}\n flow_labels = {f\"{c}\": [] for c in range(args.num_components)}\n vae_params = torch.nn.ParameterList()\n vae_labels = []\n for name, param in model.named_parameters():\n if name.startswith(\"flow\"):\n pos = name.find(\".\")\n component_id = name[(pos + 1):(pos + 2)]\n flow_params[component_id].append(param)\n flow_labels[component_id].append(name)\n else:\n vae_labels.append(name)\n vae_params.append(param)\n\n # collect all parameters into a single list\n # the first args.num_components elements in the parameters list correspond boosting parameters\n all_params = []\n for c in range(args.num_components):\n all_params.append(flow_params[f\"{c}\"])\n logger.info(f\"Grouping [{', '.join(flow_labels[str(c)])}] as Component {c}'s parameters.\")\n\n # vae parameters are at the end of the list (may 
not exist if doing density estimation)\n if len(vae_params) > 0:\n all_params.append(vae_params)\n logger.info(f\"Grouping [{', '.join(vae_labels)}] as the VAE parameters.\\n\")\n \n optimizer = optim.Adamax([{'params': param_group} for param_group in all_params], lr=base_lr, weight_decay=args.weight_decay)\n else:\n logger.info(f\"Initializing optimizer for standard models with learning rate={args.learning_rate}.\\n\")\n optimizer = optim.Adamax(model.parameters(), lr=base_lr, weight_decay=args.weight_decay)\n\n if args.no_lr_schedule:\n scheduler = None\n else:\n if args.lr_schedule == \"plateau\":\n logger.info(f\"Using ReduceLROnPlateua as a learning-rate schedule, reducing LR by 0.5 after {args.patience} steps until it reaches 1e-5.\")\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n factor=0.5,\n patience=args.patience,\n min_lr=1e-5,\n verbose=True,\n threshold_mode='abs')\n elif args.lr_schedule == \"cosine\":\n if args.boosted:\n logger.info(f\"Using a Cyclic Cosine Annealing LR as a learning-rate schedule, annealed over {args.iters_per_component} training steps, restarting with each new component.\")\n scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=args.iters_per_component)\n else:\n logger.info(f\"Using CosineAnnealingLR as a learning-rate schedule, annealed over {args.num_steps} training steps.\")\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.num_steps)\n\n if args.warmup_iters > 0:\n logger.info(f\"Gradually warming up learning rate from {base_lr} to {args.learning_rate} over the first {args.warmup_iters} steps.\\n\")\n warmup_scheduler = GradualWarmupScheduler(optimizer, multiplier=warmup_mult, total_epoch=args.warmup_iters, after_scheduler=scheduler)\n return optimizer, warmup_scheduler\n else:\n return optimizer, scheduler\n\n\n\ndef compute_kl_qp_loss(model, target_fn, beta, args):\n \"\"\"\n Compute KL(q_inv || p) where q_inv is the inverse flow transform:\n \n (log_q_inv = log_q_base - logdet),\n\n and p is the target distribution (energy potential)\n \n Returns the minimization objective for density matching.\n\n ADAPTED FROM: https://arxiv.org/pdf/1904.04676.pdf (https://github.com/kamenbliznashki/normalizing_flows/blob/master/bnaf.py)\n \"\"\"\n z0 = model.base_dist.sample((args.batch_size,))\n q_log_prob = model.base_dist.log_prob(z0).sum(1)\n \n if args.boosted:\n if model.component < model.num_components:\n density_from = '-c' if model.all_trained else '1:c-1'\n sample_from = 'c'\n else:\n density_from = '1:c'\n sample_from = '1:c'\n \n z_g, entropy_ldj, z_G, boosted_ldj = model.flow(z0, sample_from=sample_from, density_from=density_from)\n p_log_prob = -1.0 * target_fn(z_g[-1]) * beta # p = exp(-potential) => log_p = - potential\n g_lhood = q_log_prob - entropy_ldj\n \n if model.component == 0 and model.all_trained == False:\n G_lhood = torch.zeros_like(g_lhood)\n loss = g_lhood - p_log_prob\n else:\n G_log_prob = model.base_dist.log_prob(z_G[0]).sum(1)\n G_lhood = torch.max(G_log_prob - boosted_ldj, torch.ones_like(boosted_ldj) * G_MAX_LOSS)\n loss = G_lhood - p_log_prob + g_lhood * args.regularization_rate\n\n return loss.mean(0), (g_lhood.mean().item(), G_lhood.mean().item(), p_log_prob.mean().item())\n\n else:\n zk, logdet = model(z0)\n p_log_prob = -1.0 * target_fn(zk) * beta # p = exp(-potential) => log_p = - potential\n loss = q_log_prob - logdet - p_log_prob\n return loss.mean(0), (q_log_prob.mean().item(), logdet.mean().item(), p_log_prob.mean().item())\n\n\ndef 
compute_kl_pq_loss(model, data_or_sampler, beta, args):\n \"\"\"\n Compute KL(p || q_fwd) where q_fwd is the forward flow transform (log_q_fwd = log_q_base + logdet),\n and p is the target distribution.\n\n Returns the minimization objective for density estimation (NLL under the flow since the\n entropy of the target dist is fixed wrt the optimization)\n\n ADAPTED FROM: https://arxiv.org/pdf/1904.04676.pdf (https://github.com/kamenbliznashki/normalizing_flows/blob/master/bnaf.py)\n \"\"\"\n if callable(data_or_sampler):\n sample = data_or_sampler(args.batch_size).to(args.device)\n else:\n sample = data_or_sampler\n \n if args.boosted:\n Z_g, g_ldj = model.component_forward_flow(sample, component=model.component)\n g_lhood = model.base_dist.log_prob(Z_g[-1]).sum(1) + g_ldj\n\n if model.all_trained or model.component > 0:\n G_component = '-c' if model.all_trained else '1:c-1'\n component = model._sample_component(sampling_components=G_component)\n Z_G, G_ldj = model.component_forward_flow(sample, component=component)\n G_lhood = torch.max(model.base_dist.log_prob(Z_G[-1]).sum(1) + G_ldj, torch.ones_like(G_ldj) * G_MAX_LOSS)\n loss = beta * -1.0 * g_lhood + (1 - beta) * G_lhood\n else:\n G_lhood = torch.zeros_like(g_lhood)\n loss = -1.0 * g_lhood\n\n return loss.mean(0), (g_lhood.mean().item(), G_lhood.mean().item())\n \n else:\n z, logdet = model(sample)\n q_log_prob = model.base_dist.log_prob(z).sum(1)\n loss = -1.0 * (q_log_prob + logdet)\n return loss.mean(0), (q_log_prob.mean().item(), logdet.mean().detach().item())\n\n\n@torch.no_grad()\ndef rho_gradient(model, target_or_sample_fn, args):\n fixed_components = \"-c\" if model.all_trained else \"1:c-1\"\n if args.density_matching:\n # density matching of a target function\n z0 = model.base_dist.sample((args.num_components * args.batch_size * 25,))\n g_zk, g_ldj = [], []\n G_zk, G_ldj = [], []\n for z0_i in z0.split(args.batch_size, dim=0):\n gZ_i, _, _, g_ldj_i = model.flow(z0_i, sample_from=\"c\", density_from=\"1:c\")\n g_zk += [gZ_i[-1]] # grab K-th element\n g_ldj += [g_ldj_i]\n GZ_i, _, _, G_ldj_i = model.flow(z0_i, sample_from=fixed_components, density_from=\"1:c\")\n G_zk += [GZ_i[-1]] # grab K-th element\n G_ldj += [G_ldj_i]\n \n g_zk, g_ldj = torch.cat(g_zk, 0), torch.cat(g_ldj, 0)\n G_zk, G_ldj = torch.cat(G_zk, 0), torch.cat(G_ldj, 0)\n \n q_log_prob = model.base_dist.log_prob(z0).sum(1)\n p_log_prob_g = -1.0 * target_or_sample_fn(g_zk) # p = exp(-potential) => log_p = - potential\n loss_wrt_g = q_log_prob - g_ldj - p_log_prob_g\n p_log_prob_G = -1.0 * target_or_sample_fn(G_zk) # p = exp(-potential) => log_p = - potential\n loss_wrt_G = q_log_prob - G_ldj - p_log_prob_G\n \n else:\n # estimate density from a sampler\n sample = target_or_sample_fn(args.num_components * args.batch_size * 25).to(args.device)\n g_zk, g_ldj = [], []\n G_zk, G_ldj = [], []\n for sample_i in sample.split(args.batch_size, dim=0):\n g_zk_i, _, _, g_ldj_i = model.flow(sample_i, sample_from=\"c\", density_from=\"1:c\")\n g_zk += [g_zk_i[-1]]\n g_ldj += [g_ldj_i]\n G_zk_i, _, _, G_ldj_i = model.flow(sample_i, sample_from=fixed_components, density_from=\"1:c\")\n G_zk += [G_zk_i[-1]]\n G_ldj += [G_ldj_i]\n\n g_zk, g_ldj = torch.cat(g_zk, 0), torch.cat(g_ldj, 0)\n G_zk, G_ldj = torch.cat(G_zk, 0), torch.cat(G_ldj, 0) \n\n loss_wrt_g = -1.0 * (model.base_dist.log_prob(g_zk).sum(1) + g_ldj)\n loss_wrt_G = -1.0 * (model.base_dist.log_prob(G_zk).sum(1) + G_ldj)\n\n return loss_wrt_g.mean(0).detach().item(), loss_wrt_G.mean(0).detach().item()\n\n\ndef 
update_rho(model, target_or_sample_fn, writer, args):\n if model.component == 0 and model.all_trained == False:\n return\n\n if args.rho_iters == 0:\n return\n\n model.eval()\n with torch.no_grad():\n\n rho_log = open(model.args.snap_dir + '/rho.log', 'a')\n print(f\"\\n\\nUpdating weight for component {model.component} (all_trained={str(model.all_trained)})\", file=rho_log)\n print('Initial Rho: ' + ' '.join([f'{val:1.2f}' for val in model.rho.data]), file=rho_log)\n \n tolerance = 0.001\n init_step_size = args.rho_lr\n min_iters = 10\n max_iters = args.rho_iters\n prev_rho = model.rho.data[model.component].item()\n \n for batch_id in range(max_iters):\n\n loss_wrt_g, loss_wrt_G = rho_gradient(model, target_or_sample_fn, args) \n gradient = loss_wrt_g - loss_wrt_G\n\n step_size = init_step_size / (0.05 * batch_id + 1)\n rho = min(max(prev_rho - step_size * gradient, 0.0005), 0.999)\n\n grad_msg = f'{batch_id: >3}. rho = {prev_rho:5.3f} - {gradient:4.2f} * {step_size:5.3f} = {rho:5.3f}'\n loss_msg = f\"\\tg vs G. Loss: ({loss_wrt_g:5.1f}, {loss_wrt_G:5.1f}).\"\n print(grad_msg + loss_msg, file=rho_log)\n \n model.rho[model.component] = rho\n dif = abs(prev_rho - rho)\n prev_rho = rho\n\n writer.add_scalar(f\"rho/rho_{model.component}\", rho, batch_id)\n\n if batch_id > min_iters and (batch_id > max_iters or dif < tolerance):\n break\n\n print('New Rho: ' + ' '.join([f'{val:1.2f}' for val in model.rho.data]), file=rho_log)\n rho_log.close()\n\n\ndef annealing_schedule(i, args):\n if args.density_matching:\n if args.min_beta == 1.0:\n return 1.0\n\n if args.boosted:\n if i >= args.iters_per_component * args.num_components or i == args.iters_per_component:\n rval = 1.0\n else:\n rval = 0.01 + ((i % args.iters_per_component) / args.iters_per_component)\n else:\n rval = 0.01 + i/10000.0\n\n rval = max(args.min_beta, min(args.max_beta, rval))\n else:\n # anneal the push away from G by slowing dropping the weight of g\n # WHAT HAPPENS if we do the reverse of this schedule?\n if args.boosted:\n # starting with a high G weight initially\n rval = 0.01 + 0.98 * (( max(i-1,0) % args.iters_per_component) / args.iters_per_component)\n else:\n rval = 1.0\n return rval\n\n\ndef train(model, target_or_sample_fn, loss_fn, optimizer, scheduler, args):\n if args.tensorboard:\n writer = SummaryWriter(args.snap_dir)\n\n model.train()\n\n if args.boosted:\n model.component = 0\n prev_lr = []\n for c in range(args.num_components):\n if c != model.component:\n optimizer.param_groups[c]['lr'] = 0.0\n \n prev_lr.append(optimizer.param_groups[c]['lr'])\n \n for batch_id in range(args.num_steps+1):\n model.train()\n optimizer.zero_grad()\n beta = annealing_schedule(batch_id, args)\n\n loss, loss_terms = loss_fn(model, target_or_sample_fn, beta, args)\n loss.backward()\n\n if args.max_grad_clip > 0:\n torch.nn.utils.clip_grad_value_(model.parameters(), args.max_grad_clip)\n if args.max_grad_norm > 0:\n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n if args.tensorboard:\n writer.add_scalar(\"grad_norm/grad_norm\", grad_norm, batch_id)\n\n if args.boosted: # freeze all but the new component being trained\n if batch_id > 0:\n for c in range(args.num_components):\n optimizer.param_groups[c]['lr'] = prev_lr[c] if c == model.component else 0.0\n if args.tensorboard:\n for i in range(len(optimizer.param_groups)):\n writer.add_scalar(f'lr/lr_{i}', optimizer.param_groups[i]['lr'], batch_id)\n\n optimizer.step()\n if not args.no_lr_schedule:\n if args.lr_schedule == \"plateau\":\n 
scheduler.step(metrics=loss)\n else:\n scheduler.step(epoch=batch_id)\n\n if args.boosted:\n prev_lr[model.component] = optimizer.param_groups[model.component]['lr']\n\n boosted_component_converged = args.boosted and batch_id % args.iters_per_component == 0 and batch_id > 0\n new_boosted_component = args.boosted and batch_id % args.iters_per_component == 1\n if boosted_component_converged or new_boosted_component or batch_id % args.log_interval == 0:\n msg = f'{args.dataset}: step {batch_id:5d} / {args.num_steps}; loss {loss.item():8.3f} (beta={beta:5.4f})'\n if args.boosted:\n msg += f' | g vs G ({loss_terms[0]:8.3f}, {loss_terms[1]:8.3f})'\n msg += f' | p_log_prob {loss_terms[2]:8.3f}' if args.density_matching else ''\n msg += f' | c={model.component} (all={str(model.all_trained)[0]})'\n msg += f' | Rho=[' + ', '.join([f\"{val:4.2f}\" for val in model.rho.data]) + \"]\"\n else:\n msg += f' | q_log_prob {loss_terms[0]:8.3f}'\n msg += f' | ldj {loss_terms[1]:8.3f}'\n msg += f' | p_log_prob {loss_terms[2]:7.3f}' if args.density_matching else ''\n logger.info(msg)\n\n if args.tensorboard:\n writer.add_scalar('batch/train_loss', loss.item(), batch_id)\n if args.boosted:\n writer.add_scalar('batch/train_g', loss_terms[0], batch_id)\n writer.add_scalar('batch/train_G', loss_terms[1], batch_id)\n else:\n writer.add_scalar('batch/q_log_prob', loss_terms[0], batch_id)\n writer.add_scalar('batch/log_det_jacobian', loss_terms[1], batch_id)\n\n if boosted_component_converged:\n update_rho(model, target_or_sample_fn, writer, args)\n model.increment_component()\n\n if (batch_id > 0 and batch_id % args.plot_interval == 0) or boosted_component_converged:\n with torch.no_grad():\n plot(batch_id, model, target_or_sample_fn, args)\n\n \n\n \ndef main(main_args=None):\n \"\"\"\n use main_args to run this script as function in another script\n \"\"\"\n\n # =========================================================================\n # PARSE EXPERIMENT SETTINGS, SETUP SNAPSHOTS DIRECTORY, LOGGING\n # =========================================================================\n args, kwargs = parse_args(main_args)\n\n # =========================================================================\n # SAVE EXPERIMENT SETTINGS\n # =========================================================================\n logger.info(f'EXPERIMENT SETTINGS:\\n{args}\\n')\n torch.save(args, os.path.join(args.snap_dir, 'config.pt'))\n\n # =========================================================================\n # INITIALIZE MODEL AND OPTIMIZATION\n # =========================================================================\n model = init_model(args)\n optimizer, scheduler = init_optimizer(model, args)\n num_params = sum([param.nelement() for param in model.parameters()]) \n logger.info(f\"MODEL:\\nNumber of model parameters={num_params}\\n{model}\\n\")\n\n # =========================================================================\n # TRAINING\n # =========================================================================\n logger.info('TRAINING:')\n if args.density_matching:\n # target is energy potential to match\n target_or_sample_fn = make_toy_density(args)\n loss_fn = compute_kl_qp_loss\n else:\n # target is density to estimate to sample from\n target_or_sample_fn = make_toy_sampler(args)\n loss_fn = compute_kl_pq_loss\n\n\n train(model, target_or_sample_fn, loss_fn, optimizer, scheduler, args)\n \n\nif __name__ == \"__main__\":\n 
main()\n\n","sub_path":"toy_experiment.py","file_name":"toy_experiment.py","file_ext":"py","file_size_in_byte":29958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"525358950","text":"#-*- coding:utf-8 -*-\nimport os\nimport json\n\n# Resources\nresources_dir = \"resources/\"\ncategories_map = os.path.join(resources_dir, \"categories_map.json\")\n\n# Cache\ncache_categories_list = \"cache/categories.json\"\ncache_category_models = \"cache/category_models/\"\ncache_bad_categories = \"cache/bad_categories\"\ncache_brands_list = \"cache/brands.json\"\ncache_images_dir = \"cache/imgs/\"\n\n# Temporary files\ntmp_upload_dir = \"tmp/uploads\"\ntmp_upload_csv = tmp_upload_dir+\"/csv\"\n\n# Verbose of the logging manager\nverbose_lvl = 5\n\nsite_url = \"https://database.ubital.com\"\nbasedir_url = site_url + \"/database2cdiscount/converter/\"\nresults_path = \"results\"\nresults_url = basedir_url + results_path\ncache_images_dir_url = basedir_url + cache_images_dir\n\npublic_folder_path = \"/home/eyal/documents/work/freelance/conversion_amazon_to_cdiscount/AmazonConverter/results\"\npublic_folder_uri = \"\"\n\n# DELETE below\ntestpublic_folder_uri = results_url\n\n\nrequests_history_file = \"cache/request.hist\"\n\n# init empty dirs\nfor directory in ('tmp', 'cache', results_path, cache_category_models, cache_images_dir, tmp_upload_dir,\n resources_dir, ):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n# init empty files\nfor filename in [requests_history_file,cache_bad_categories ]:\n if not os.path.exists(filename):\n open(filename, 'w', encoding='utf-8').close()\n\n# init jsons\nfor jsonfile in [categories_map]:\n if not os.path.exists(jsonfile):\n open(jsonfile, 'w', encoding='utf-8').write(\"{}\")\n\ncache_model_list = lambda code: os.path.join(cache_category_models, str(code)+'.json')\n\n\n","sub_path":"converter/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"201869255","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 15 14:58:39 2019\n\n@author: Joseph\n\"\"\"\n\n\nimport pandas as pd \nimport matplotlib.pyplot as plt\n\nfilename = \"Revenue4.csv\"\ndataframe = pd.read_csv(filename)\n\nno = dataframe.No\nchoices = dataframe.choice\nrev = dataframe.revenue\nchoices = choices == \"stay\"\nprint(rev.var())\nplt.scatter(no, rev,s = 1, c = choices)\nplt.savefig(\"rev.png\")\nplt.show()","sub_path":"rev.py","file_name":"rev.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"22183077","text":"x = int(input())\nif x > 0:\n print(x)\nelse:\n print(-x)\n\nx = int(input())\ny = int(input())\nif x > 0:\n if y > 0: # x > 0, y > 0\n print(\"Первая четверть\")\n else: # x > 0, y < 0\n print(\"Четвертая четверть\")\nelse:\n if y > 0: # x < 0, y > 0\n print(\"Вторая четверть\")\n else: # x < 0, y < 0\n print(\"Третья четверть\")","sub_path":"usloviya.py","file_name":"usloviya.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"510072995","text":"import selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport time\nfrom queue import Queue\nfrom selenium.webdriver.chrome.options import Options\nimport json\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--window-size=1920x1080\")\n\ndriver = webdriver.Chrome(options=chrome_options)\nlst = []\ndriver.get('https://duke-sp.blackboard.com/eaccounts/AccountTransaction.aspx')\ntime.sleep(1)\nusr = driver.find_element_by_id(\"j_username\")\nusr.send_keys(\"\")\npword = driver.find_element_by_id(\"j_password\")\npword.send_keys(\"\")\ndriver.find_element_by_id(\"Submit\").click()\ntime.sleep(1)\nselect = Select(driver.find_element_by_id('MainContent_Accounts'))\nselect.select_by_value(\"50f415b5-2d1b-4e12-8b3c-92c312fe2c29\")\nstart = driver.find_element_by_id(\"ctl00_MainContent_BeginRadDateTimePicker_dateInput\")\nstart.click()\nstart.send_keys(\"08/01/19 12:00 AM\")\ntime.sleep(1)\ndriver.find_element_by_id(\"MainContent_ContinueButton\").click()\ntime.sleep(2)\nx = driver.find_element_by_class_name(\"rgPager\")\nlinks = x.find_elements_by_tag_name(\"a\")\ntest = driver.find_element_by_id('ctl00_MainContent_ResultRadGrid_ctl00')\nrows = test.find_elements_by_tag_name(\"tr\") # get all of the rows in the table\nfor i in range(4, len(rows)):\n lst.append(rows[i].text)\n#link_set = set(links)\ni = 1\nwhile i < num:\n l = links[i]\n l.click()\n time.sleep(2)\n test = driver.find_element_by_id('ctl00_MainContent_ResultRadGrid_ctl00')\n rows = test.find_elements_by_tag_name(\"tr\") # get all of the rows in the table\n for k in range(4, len(rows)):\n lst.append(rows[k].text)\n x = driver.find_element_by_class_name(\"rgPager\")\n links = x.find_elements_by_tag_name(\"a\") \n i += 1\n\ndata = {}\nmonthD = {\"11\": \"November\", \"10\": \"October\", \"9\": \"September\"}\nfor txt in lst:\n txt = txt.split()\n date = txt[0]\n m = date.split(\"/\")[0]\n month = monthD[m]\n day = date.split(\"/\")[1]\n if month not in data:\n data[month] = {}\n if day not in data[month]:\n data[month][day] = []\n \n \n hour = \" \".join([txt[1], txt[2]])\n price = txt[-2][1:-1]\n location = \" \".join([txt[5], txt[-5]])\n #print(location)\n if location == \"Au Reg\":\n location = \"Au Bon Pain\"\n if location.startswith(\"Lobby\"):\n location = \"Lobby Shop\"\n if location.startswith(\"Vending\"):\n location = \"Vending Machine\"\n if location.split()[0] == location.split()[1]:\n location = location.split()[0]\n smallD = {\"Time\": hour, \"Used\": price, \"Location\": location}\n data[month][day].append(smallD)\nprint(data)\n\nwith open(\"final.json\", \"w\") as write_file:\n json.dump(data, write_file)\n","sub_path":"DataPrep.py","file_name":"DataPrep.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"219394019","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n\n # init 8 registers\n self.reg = [0] * 8\n\n # register index\n self.pc = 0\n\n # 256 bit memory\n self.ram = [0b0] * 256\n\n # reg 7 = 0xF4\n self.reg[7] = 0xF4\n\n # flag\n self.flag = 0b00000000\n\n def load(self, filename=None):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n # For now, we've just hardcoded a program:\n if filename:\n with open(\n filename\n ) as f: # could be changed to \"with open('./examples/' + filename) as f:\" to address filename directly, but I prefer to specify file path.\n address = 0\n for line in f:\n value = line.split(\"#\")[0].strip()\n if value == \"\":\n continue\n\n else:\n instruction = int(value, 2)\n self.ram[address] = instruction\n address += 1\n\n else:\n program = [\n # From print8.ls8\n 0b10000010, # LDI R0,8\n 0b00000000,\n 0b00001000,\n 0b01000111, # PRN R0\n 0b00000000,\n 0b00000001, # HLT\n ]\n\n for address, instruction in enumerate(program):\n self.ram[address] = instruction\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n # `AND` `OR` `XOR` `NOT` `SHL` `SHR` `MOD`\n \n # Bitwise ALU\n AND = 0b10101000\n OR = 0b10101010\n XOR = 0b10101011\n NOT = 0b01101001\n SHL = 0b10101100\n SHR = 0b10101101\n MOD = 0b10100100\n \n\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n\n elif op == \"SUB\":\n self.reg[reg_a] -= self.reg[reg_b]\n\n elif op == \"MUL\":\n self.reg[reg_a] *= self.reg[reg_b]\n\n elif op == \"CMP\":\n if self.reg[reg_a] == self.reg[reg_b]:\n self.flag = 0b00000001\n\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.flag = 0b10000010\n\n else:\n self.flag = 0b00000100\n \n ### Sprint Challenge Stretch ###\n \n elif op == \"AND\":\n self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]\n \n elif op == \"OR\":\n self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]\n \n elif op == \"XOR\":\n self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]\n \n elif op == \"NOT\":\n self.reg[reg_a] -= 0b11111111\n \n elif op == \"SHL\":\n self.reg[reg_a] << self.reg[reg_b]\n \n elif op == \"SHR\":\n self.reg[reg_a] >> self.reg[reg_b]\n \n elif op == \"MOD\":\n if self.reg[reg_b] == 0:\n print(\"Cannot mod by values of 0\")\n self.running = False # HLT\n \n else:\n self.reg[reg_a] %= self.reg[reg_b]\n \n ################################\n\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def ram_read(self, address):\n \"\"\"prints content in specified address in RAM\"\"\"\n return self.ram[address]\n\n def ram_write(self, value, address):\n \"\"\"Overwrites ram with the value at specified address\"\"\"\n self.ram[address] = value\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(\n f\"TRACE: %02X | %02X %02X %02X |\"\n % (\n self.pc,\n # self.fl,\n # self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2),\n ),\n end=\"\",\n )\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end=\"\")\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n\n # Instructions\n HLT = 0b00000001\n LDI = 0b10000010\n PRN = 0b01000111\n MUL = 0b10100010\n PUSH = 0b01000101\n POP = 0b01000110\n ADD = 0b10100000\n CALL = 0b01010000\n RET = 0b00010001\n NOP = 0b00000000\n\n SP = 7\n\n ### Sprint Challenge ###\n CMP = 0b10100111\n JMP = 0b01010100\n JEQ = 0b01010101\n JNE = 0b01010110\n ########################\n\n self.running = True\n\n while self.running:\n # instructions register\n ir = self.ram[self.pc]\n\n # operands\n operand_a = self.ram[self.pc + 1] # register 1\n operand_b = self.ram[self.pc + 2] # register 2\n\n # HLT\n if ir == HLT:\n self.running = False\n self.pc += 1\n\n # LDI\n elif ir == LDI:\n self.reg[operand_a] = operand_b\n # increment program counter by 3 steps in RAM\n self.pc += 3\n\n # PRN\n elif ir == PRN:\n print(self.reg[operand_a])\n self.pc += 2\n\n # MUL\n elif ir == MUL:\n product = self.reg[operand_a] * self.reg[operand_b]\n self.reg[operand_a] = product\n self.pc += 3\n\n # PUSH\n elif ir == PUSH:\n # decrement the stack pointer\n self.reg[SP] -= 1\n # store value from reg to ram\n self.ram_write(self.reg[operand_a], self.reg[SP])\n self.pc += 2\n\n # POP\n elif ir == POP:\n # read value of SP and overwrite next register\n value = self.ram_read(self.reg[SP])\n self.reg[operand_a] = value\n # increment SP\n self.reg[SP] += 1\n self.pc += 2\n\n # ADD\n elif ir == ADD:\n add = self.reg[operand_a] + self.reg[operand_b]\n self.reg[operand_a] = add\n self.pc += 3\n\n # NOP\n elif ir == NOP:\n # Do nothing and move on to next instruction\n self.pc += 1\n continue\n\n # CALL\n elif ir == CALL:\n self.reg[SP] -= 1\n self.ram_write(self.pc + 2, self.reg[SP])\n self.pc = self.reg[operand_a]\n\n # RET\n elif ir == RET:\n self.pc = self.ram[self.reg[SP]]\n self.reg[SP] += 1\n\n ### Sprint Challenge ###\n\n # CMP\n elif ir == CMP:\n self.alu(\"CMP\", operand_a, operand_b)\n self.pc += 3\n\n # JMP\n elif ir == JMP:\n # jump to the address held in the given register\n self.pc = self.reg[operand_a]\n\n # JEQ\n elif ir == JEQ:\n # bit 0 of the flag register is the equal flag set by CMP\n if (self.flag & HLT) == 1:\n self.pc = self.reg[operand_a]\n\n else:\n self.pc += 2\n\n # JNE\n elif ir == JNE:\n if (self.flag & HLT) == 0:\n self.pc = self.reg[operand_a]\n\n else:\n self.pc += 2\n\n ########################\n\n # Unknown instructions\n else:\n print(f\"Unknown instruction {ir} at address {self.pc}\")\n self.pc += 1\n\n\n# Test\nif __name__ == \"__main__\":\n LS8 = CPU()\n LS8.load()\n for i in range(9):\n print(LS8.ram_read(i))\n\n LS8.ram_write(0, 15)\n\n print(\"==============\")\n print(LS8.ram_read(0))\n print(\"==============\")\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":7887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"289014885","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport math\nimport random\nimport argparse\nimport numpy as np\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torchvision import transforms\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom loader.load_vaihingen import vaihingenloader\n# from network.unet import UNet\n# from network.efficientnet.Efficientnet_DAN import EfficientNet_1_Nof\nimport sys\nsys.path.append('./')\n\ndef get_Hrrs_label():\n return np.asarray(\n [\n [255, 255, 255], # 不透水面\n [ 0, 0, 255], # 建筑物\n [ 0, 255, 255], # 低植被\n [ 0, 255, 0], # 树\n [255, 255, 0], # 车\n [255, 0, 0], # Clutter/background\n [ 0, 0, 0] # ignore\n ])\n\ndef decode_segmap(label_mask, n_classes = 6):\n \"\"\"Decode segmentation class labels into a color image\n Args:\n label_mask (np.ndarray): an (M,N) array of integer values denoting\n the class label at each spatial location.\n plot (bool, optional): whether to show the resulting color image\n in a figure.\n Returns:\n (np.ndarray, optional): the resulting decoded color image.\n \"\"\"\n label_colours = get_Hrrs_label()\n\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3), dtype=np.uint8)\n rgb[:, :, 0] = r\n rgb[:, :, 1] = g\n rgb[:, :, 2] = b\n return rgb\n\ndef tta_inference(inp, model, num_classes=6, scales=[1.0], flip=True):\n b, _, h, w = inp.size()\n preds = inp.new().resize_(b, num_classes, h, w).zero_().to(inp.device)\n for scale in scales:\n size = (int(scale*h), int(scale*w))\n resized_img = F.interpolate(inp, size=size, mode='bilinear', align_corners=True,)\n pred = model_inference(model, resized_img.to(inp.device), flip)\n pred = F.interpolate(pred, size=(h, w), mode='bilinear', align_corners=True,)\n preds += pred\n\n return preds/(len(scales))\n\ndef model_inference(model, image, flip=True):\n output = model(image)\n if aux:\n output =output[0]\n if flip:\n fimg = image.flip(2)\n if aux:\n output += model(fimg)[0].flip(2)\n else:\n output += model(fimg).flip(2)\n fimg = image.flip(3)\n if aux:\n output += model(fimg)[0].flip(3)\n else:\n output += model(fimg).flip(3)\n return output/3\n return output\ndef slide(model, scale_image, num_classes=6, crop_size=512, overlap=1/3, scales=[1.0], flip=True):\n\n N, C, H_, W_ = scale_image.shape\n # print(f\"Height: {H_} Width: {W_}\")\n \n full_probs = torch.zeros((N, num_classes, H_, W_), device=scale_image.device) #\n count_predictions = torch.zeros((N, num_classes, H_, W_), device=scale_image.device) #\n\n h_overlap_length = int((1-overlap)*crop_size) #\n w_overlap_length = int((1-overlap)*crop_size) # \n\n h = 0\n slide_finish = False\n while not slide_finish:\n\n if h + crop_size <= H_:\n # print(f\"h: {h}\")\n # set row flag\n slide_row = True\n # initial row start\n w = 0\n while slide_row:\n if w + crop_size <= W_:\n # print(f\" h={h} w={w} -> h'={h+crop_size} w'={w+crop_size}\")\n patch_image = scale_image[:, :, h:h+crop_size, w:w+crop_size]\n #\n patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip)\n count_predictions[:,:,h:h+crop_size, w:w+crop_size] += 1\n full_probs[:,:,h:h+crop_size, 
w:w+crop_size] += patch_pred_image\n\n else:\n # print(f\" h={h} w={W_-crop_size} -> h'={h+crop_size} w'={W_}\")\n patch_image = scale_image[:, :, h:h+crop_size, W_-crop_size:W_]\n #\n patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip)\n count_predictions[:,:,h:h+crop_size, W_-crop_size:W_] += 1\n full_probs[:,:,h:h+crop_size, W_-crop_size:W_] += patch_pred_image\n slide_row = False\n\n w += w_overlap_length\n\n else:\n # print(f\"h: {h}\")\n # set last row flag\n slide_last_row = True\n # initial row start\n w = 0\n while slide_last_row:\n if w + crop_size <= W_:\n # print(f\"h={H_-crop_size} w={w} -> h'={H_} w'={w+crop_size}\")\n patch_image = scale_image[:,:,H_-crop_size:H_, w:w+crop_size]\n #\n patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip)\n count_predictions[:,:,H_-crop_size:H_, w:w+crop_size] += 1\n full_probs[:,:,H_-crop_size:H_, w:w+crop_size] += patch_pred_image\n\n else:\n # print(f\"h={H_-crop_size} w={W_-crop_size} -> h'={H_} w'={W_}\")\n patch_image = scale_image[:,:,H_-crop_size:H_, W_-crop_size:W_]\n #\n patch_pred_image = tta_inference(patch_image, model, num_classes=num_classes, scales=scales, flip=flip)\n count_predictions[:,:,H_-crop_size:H_, W_-crop_size:W_] += 1\n full_probs[:,:,H_-crop_size:H_, W_-crop_size:W_] += patch_pred_image\n\n slide_last_row = False\n slide_finish = True\n\n w += w_overlap_length\n\n h += h_overlap_length\n\n full_probs /= count_predictions\n\n return full_probs\n \ndef predict_sliding(model, image, num_classes=6, crop_size=512, overlap=1/3, scales=[1.0], flip=True):\n\n N, C, H, W = image.shape\n # scale_image = checksize(image, crop_size=crop_size)\n # N, C, H_, W_ = scale_image.shape\n \n # if H_ == W_ and H_ == 512:\n # full_probs = tta_inference(\n # inp=scale_image,\n # model=model,\n # num_classes=num_classes,\n # scales=scales,\n # flip=flip)\n # else:\n # full_probs = slide(\n # model, \n # scale_image, \n # num_classes=num_classes, \n # crop_size=crop_size, \n # overlap=overlap, \n # scales=scales, \n # flip=flip)\n full_probs = slide(\n model, \n image, \n num_classes=num_classes, \n crop_size=crop_size, \n overlap=overlap, \n scales=scales, \n flip=flip)\n\n full_probs = F.interpolate(full_probs, size=(H, W), mode='bilinear', align_corners=True)\n\n return full_probs\n\ndef test(testloader, model, savedir, device):\n '''\n args:\n test_loaded for test dataset\n model: model\n return:\n mean,Iou,IoU class\n '''\n if not os.path.exists(savedir):\n os.mkdir(savedir)\n\n model.eval()\n total_batches = len(testloader)\n with torch.no_grad():\n for idx, batch in enumerate(testloader):\n \n # load data\n # print(batch)\n image, _, name = batch\n print(name)\n image = image.to(device)\n N, C, H, W = image.shape\n\n # if H == W and H == 512:\n # # H == W == 512, directly tta inference.\n # print(f\"H={H} and W={W} using tta.\")\n # output = tta_inference(\n # inp=image,\n # model=model,\n # num_classes=8, \n # scales=[0.75, 1.0, 1.25],\n # flip=True)\n # else:\n # slide.\n # print(f\"H={H} and W={W} using slide.\")\n output = predict_sliding(\n model=model,\n image=image,\n num_classes=6,\n crop_size=512,\n overlap=1/4,\n scales=[0.75, 1.0, 1.25],\n flip=True)\n\n _, output = torch.max(output, 1)\n \n\n assert len(output.shape) == 3, f\"Wrong shape!\"\n # convert torch to array\n output = np.asarray(output.permute(1,2,0).data.cpu().numpy(), dtype=np.uint8)\n\n # input: [H, W, 3]\n imageout = decode_segmap(output.squeeze())\n\n # 
std output\n img_save_name = os.path.basename(name[0])\n\n img_save_name = os.path.splitext(img_save_name)[0]\n\n # infile = open(\"/workspace/code/sample.xml\", \"r\",encoding='utf-8') #打开文件\n # outfile = open(os.path.join(savedir, img_save_name+'.xml'), \"w\", encoding='utf-8') # 内容输出\n\n # for line in infile:\n # outfile.write(line.replace('sample', img_save_name))\n # infile.close()\n # outfile.close()\n\n img_save_path = os.path.join(savedir, img_save_name+'.png')\n if not os.path.exists(os.path.join(savedir)):\n os.makedirs(os.path.join(savedir))\n imageout = Image.fromarray(imageout)\n # print(img_save_path)\n imageout.save(img_save_path)\n\ndef main(input_path_testA, output_path_testA, model_path):\n\n cudnn.enabled = True # Enables bencnmark mode in cudnn, to enable the inbuilt\n cudnn.benchmark = True # cudnn auto-tuner to find the best algorithm to use for\n # our hardware\n # Setup device\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # torch.cuda.set_device(0)\n\n T = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.3918, 0.4114, 0.3726], [0.1553, 0.1528, 0.1456]),\n ])\n\n # testA_set = onlinezkxt(root=input_path_testA, transform=T)\n # testA_set = vaihingenloader(root=args.root, split='test')\n testA_set = vaihingenloader(root=input_path_testA, split='test')\n testA_loader = DataLoader(testA_set, batch_size=1, shuffle=False, num_workers=2, pin_memory=True)\n\n # testB_set = onlinezkxt(root=input_path_testB, transform=T)\n # testB_loader = DataLoader(testB_set, batch_size=1, shuffle=False, num_workers=1, pin_memory=True)\n\n # deeplab = encoding.models.get_model('gffnet_ResNeSt101_ADE', pretrained=False)\n # from network.fcn import VGGNet, FCN32s, FCN16s, FCN8s, FCNs\n # vgg_model = VGGNet(requires_grad=True, remove_fc=False).cuda()\n # model = FCN8s(pretrained_net=vgg_model, n_class=8).cuda()\n # from network.segnet import SegNet\n \n # model = model_now.from_name('efficientnet-b1',override_params={'num_classes' : 6}).cuda()\n model = build_network(model_init, num_classes)\n\n # model = deeplab_resnet50.DeepLabv3_plus(\n # nInputChannels=3,\n # n_classes=8,\n # os=8,\n # pretrained=True\n # ).cuda()\n\n # model = torch.nn.DataParallel(model, device_ids=[0])\n # deeplab.gffhead.cls[6] = nn.Conv2d(256, 9, kernel_size=(1, 1), stride=(1, 1))\n # deeplab.auxlayer.conv5[4] = nn.Conv2d(256, 9, kernel_size=(1, 1), stride=(1, 1))\n # print(checkpoint)\n # model = EfficientNet_1_Nof.from_name('efficientnet-b1').cuda()\n\n\n checkpoint = torch.load(model_path ,map_location=\"cuda:0\")\n # new_state_dict = OrderedDict()\n # for k, v in checkpoint.items():\n # name = k[7:] # remove 'module.'\n # new_state_dict[name] = v\n # model.load_state_dict(new_state_dict)\n model.load_state_dict(checkpoint) \n\n \n \n \n deeplab = model.to(device)\n\n start = time.time()\n test(testA_loader, deeplab, output_path_testA, device)\n print(f\"testA finish\")\n runtime1 = time.time() - start\n print(f\"Spend Time: {math.floor(runtime1//3600):2d}h:\"\n f\"{math.floor(runtime1%3600//60):2d}m:{math.floor(runtime1%60):2d}s\")\n # test(testB_loader, deeplab, output_path_testB, device)\n # print(f\"testB finish\")\n # runtime = time.time() - start\n # print(f\"Spend Time: {math.floor(runtime//3600):2d}h:\"\n # f\"{math.floor(runtime%3600//60):2d}m:{math.floor(runtime%60):2d}s\")\n\nif __name__ == '__main__':\n\n import os\n import torch\n from torch.utils.data import DataLoader\n from torchvision import transforms\n from os.path import basename\n from PIL 
import Image\n import sys\n # from network.efficientnet.Efficientnet_DAN import EfficientNet_1_PAM as model_now\n from network import build_network\n model_init = \"erfnet\"\n num_classes = 6\n aux = 0\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\n\n input_path_testA = '/media/hdd1/IGARSS_2020_LiuSiyu/igarss/data/vaismall'\n\n output_path_testA = './data/data_paper/vai/erf'\n\n model_path = 'runs_vai/erfnet_500_0/erfnetbs8gpu2/model.pth'\n\n cudnn.benchmark = True\n cudnn.enabled = True\n\n main(input_path_testA, output_path_testA, model_path)\n\n \n","sub_path":"slide_predict_vai.py","file_name":"slide_predict_vai.py","file_ext":"py","file_size_in_byte":13399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"494108463","text":"from gevent import monkey\n\nmonkey.patch_all()\n\nimport pytest\nimport mock\nfrom datetime import datetime, timedelta\nfrom gevent.queue import Queue, Empty\nfrom pyramid import testing\nimport channelstream\nimport channelstream.gc\nfrom channelstream.channel import Channel\nfrom channelstream.connection import Connection\nfrom channelstream.user import User\n\n\n@pytest.fixture\ndef cleanup_globals():\n channelstream.CHANNELS = {}\n channelstream.CONNECTIONS = {}\n channelstream.USERS = {}\n\n\n@pytest.mark.usefixtures(\"cleanup_globals\")\nclass TestChannel(object):\n def test_create_defaults(self):\n channel = Channel('test', long_name='long name')\n assert channel.name == 'test'\n assert channel.long_name == 'long name'\n assert channel.connections == {}\n assert channel.notify_presence is False\n assert channel.broadcast_presence_with_user_lists is False\n assert channel.salvageable is False\n assert channel.store_history is False\n assert channel.history_size == 10\n assert channel.history == []\n\n def test_repr(self):\n channel = Channel('test', long_name='long name')\n assert repr(channel) == ''\n\n @pytest.mark.parametrize('prop, value', [\n ('notify_presence', True),\n ('store_history', 6),\n ('history_size', 42),\n ('broadcast_presence_with_user_lists', True)\n ])\n def test_create_set_config(self, prop, value):\n channel_configs = {'test': {prop: value}}\n channel = Channel('test', channel_configs=channel_configs)\n\n assert getattr(channel, prop) == value\n\n def test_create_set_config_diff_name(self):\n channel_configs = {'test2': {'notify_presence': True}}\n channel = Channel('test', channel_configs=channel_configs)\n assert channel.notify_presence is False\n\n def test_add_connection(self):\n connection = Connection('test_user',\n conn_id='A')\n channel = Channel('test')\n channel.add_connection(connection)\n assert len(channel.connections['test_user']) == 1\n assert 'test_user' in channel.connections\n assert connection in channel.connections['test_user']\n assert repr(channel) == ''\n\n def test_remove_connection(self):\n connection = Connection('test_user', conn_id='A')\n connection2 = Connection('test_user2', conn_id='B')\n connection3 = Connection('test_user', conn_id='C')\n channel = Channel('test')\n channel.add_connection(connection)\n channel.add_connection(connection2)\n channel.remove_connection(connection)\n assert 'test_user' not in channel.connections\n assert len(channel.connections['test_user2']) == 1\n channel.add_connection(connection)\n channel.add_connection(connection3)\n channel.remove_connection(connection)\n assert len(channel.connections['test_user']) == 1\n\n def test_remove_non_existant_connection(self):\n channel = Channel('test')\n connection = Connection('test_user', conn_id='A')\n channel.remove_connection(connection)\n assert 'test_user' not in channel.connections\n\n def test_remove_connection_w_presence(self):\n user = User('test_user')\n channelstream.USERS[user.username] = user\n connection = Connection('test_user', conn_id='A')\n user.add_connection(connection)\n config = {'test': {'notify_presence': True,\n 'broadcast_presence_with_user_lists': True}}\n channel = Channel('test', channel_configs=config)\n channel.add_connection(connection)\n channel.remove_connection(connection)\n\n def test_add_connection_w_presence(self):\n user = User('test_user')\n channelstream.USERS[user.username] = user\n connection = Connection('test_user', conn_id='A')\n user.add_connection(connection)\n config = {'test': 
{'notify_presence': True,\n 'broadcast_presence_with_user_lists': True}}\n channel = Channel('test', channel_configs=config)\n channel.add_connection(connection)\n assert len(channel.connections['test_user']) == 1\n assert 'test_user' in channel.connections\n assert connection in channel.connections['test_user']\n\n def test_presence_message(self):\n user = User('test_user')\n connection = Connection('test_user', conn_id='A')\n user.add_connection(connection)\n channel = Channel('test')\n channel.add_connection(connection)\n payload = channel.send_notify_presence_info('test_user', 'join')\n assert payload['user'] == 'test_user'\n assert payload['message']['action'] == 'join'\n assert payload['type'] == 'presence'\n assert payload['channel'] == 'test'\n assert len(payload['users']) == 0\n\n def test_presence_message_w_users(self):\n user = User('test_user')\n user.state_from_dict({'key': '1', 'key2': '2'})\n user.state_public_keys = ['key2']\n channelstream.USERS[user.username] = user\n connection = Connection('test_user', conn_id='A')\n user.add_connection(connection)\n user2 = User('test_user2')\n user2.state_from_dict({'key': '1', 'key2': '2'})\n channelstream.USERS[user2.username] = user2\n connection2 = Connection('test_user2', conn_id='A')\n user2.add_connection(connection2)\n config = {'test': {'notify_presence': True,\n 'broadcast_presence_with_user_lists': True}}\n channel = Channel('test', channel_configs=config)\n channel.add_connection(connection)\n channel.add_connection(connection2)\n payload = channel.send_notify_presence_info('test_user', 'join')\n assert len(payload['users']) == 2\n sorted_users = sorted(payload['users'], key=lambda x: x['user'])\n assert sorted_users == [\n {'state': {'key2': '2'}, 'user': 'test_user'},\n {'state': {}, 'user': 'test_user2'}\n ]\n\n def test_history(self):\n config = {'test': {'store_history': True,\n 'history_size': 3}}\n channel = Channel('test', long_name='long name',\n channel_configs=config)\n channel.add_message({'message': 'test1', 'type': 'message'})\n channel.add_message({'message': 'test2', 'type': 'message'})\n channel.add_message({'message': 'test3', 'type': 'message'})\n channel.add_message({'message': 'test4', 'type': 'message'})\n\n assert len(channel.history) == 3\n assert channel.history == [\n {'channel': 'test', 'message': 'test2', 'type': 'message'},\n {'channel': 'test', 'message': 'test3', 'type': 'message'},\n {'channel': 'test', 'message': 'test4', 'type': 'message'}\n ]\n\n def test_user_state(self):\n user = User('test_user')\n changed = user.state_from_dict({'key': '1', 'key2': '2'})\n user.state_public_keys = ['key2']\n connection = Connection('test_user', conn_id='A')\n user.add_connection(connection)\n channel = Channel('test')\n channel.add_connection(connection)\n payload = channel.send_user_state(user, changed)\n assert payload['user'] == 'test_user'\n assert payload['message']['state'] == {'key2': '2'}\n assert payload['message']['changed'] == [{'key': 'key2', 'value': '2'}]\n assert payload['type'] == 'user_state_change'\n assert payload['channel'] == 'test'\n\n def test_user_single_assignment(self):\n user = User('test_user')\n connection = Connection('test_user', conn_id='A')\n user.add_connection(connection)\n channel = Channel('test')\n channel.add_connection(connection)\n [channel] == user.get_channels()\n\n def test_user_multi_assignment(self):\n user = User('test_user')\n connection = Connection('test_user', conn_id='A')\n connection2 = Connection('test_user', conn_id='A2')\n connection3 = 
Connection('test_user', conn_id='A3')\n user.add_connection(connection)\n user.add_connection(connection2)\n user.add_connection(connection3)\n channel = Channel('test')\n channel2 = Channel('test2')\n channelstream.CHANNELS[channel.name] = channel\n channelstream.CHANNELS[channel2.name] = channel2\n channel.add_connection(connection)\n channel.add_connection(connection2)\n channel2.add_connection(connection3)\n assert ['test', 'test2'] == sorted([c.name for c in user.get_channels()])\n\n\n@pytest.mark.usefixtures(\"cleanup_globals\")\nclass TestConnection(object):\n def test_create_defaults(self):\n now = datetime.utcnow()\n connection = Connection('test', 'X')\n assert connection.username == 'test'\n assert now <= connection.last_active\n assert connection.socket is None\n assert connection.queue is None\n assert connection.id == 'X'\n\n def test_mark_for_gc(self):\n long_time_ago = datetime.utcnow() - timedelta(days=50)\n connection = Connection('test', 'X')\n connection.mark_for_gc()\n assert connection.last_active < long_time_ago\n\n def test_message(self):\n connection = Connection('test', 'X')\n connection.queue = Queue()\n connection.add_message({'message': 'test'})\n assert connection.queue.get() == [{'message': 'test'}]\n\n def test_heartbeat(self):\n connection = Connection('test', 'X')\n connection.queue = Queue()\n connection.heartbeat()\n assert connection.queue.get() == []\n\n\nclass TestUser(object):\n def test_create_defaults(self):\n user = User('test_user')\n user.state_from_dict({'key': '1', 'key2': '2'})\n user.state_public_keys = ['key2']\n assert repr(user) == ''\n assert sorted(user.state.items()) == sorted({'key': '1',\n 'key2': '2'}.items())\n assert user.public_state == {'key2': '2'}\n\n def test_messages(self):\n user = User('test_user')\n connection = Connection('test_user', conn_id='A')\n connection.queue = Queue()\n connection2 = Connection('test_user', conn_id='B')\n connection2.queue = Queue()\n user.add_connection(connection)\n user.add_connection(connection2)\n user.add_message({'type': 'message'})\n assert len(user.connections) == 2\n assert len(user.connections[0].queue.get()) == 1\n assert len(user.connections[1].queue.get()) == 1\n\n\n@pytest.mark.usefixtures(\"cleanup_globals\")\nclass TestGC(object):\n def test_gc_connections_active(self):\n channel = Channel('test')\n channelstream.CHANNELS[channel.name] = channel\n channel2 = Channel('test2')\n channelstream.CHANNELS[channel2.name] = channel2\n user = User('test_user')\n channelstream.USERS[user.username] = user\n user2 = User('test_user2')\n channelstream.USERS[user2.username] = user2\n connection = Connection('test_user', '1')\n channelstream.CONNECTIONS[connection.id] = connection\n connection2 = Connection('test_user', '2')\n channelstream.CONNECTIONS[connection2.id] = connection2\n connection3 = Connection('test_user2', '3')\n channelstream.CONNECTIONS[connection3.id] = connection3\n connection4 = Connection('test_user2', '4')\n channelstream.CONNECTIONS[connection4.id] = connection4\n user.add_connection(connection)\n user.add_connection(connection2)\n channel.add_connection(connection)\n channel.add_connection(connection2)\n user2.add_connection(connection3)\n user2.add_connection(connection4)\n channel2.add_connection(connection3)\n channel2.add_connection(connection4)\n channelstream.gc.gc_conns()\n conns = channelstream.CHANNELS['test'].connections['test_user']\n assert len(conns) == 2\n assert len(channelstream.CONNECTIONS.items()) == 4\n conns = 
channelstream.CHANNELS['test2'].connections['test_user2']\n assert len(conns) == 2\n assert len(user.connections) == 2\n assert len(user2.connections) == 2\n assert sorted(channel.connections.keys()) == ['test_user']\n assert sorted(channel2.connections.keys()) == ['test_user2']\n\n def test_gc_connections_collecting(self):\n channel = Channel('test')\n channelstream.CHANNELS[channel.name] = channel\n channel2 = Channel('test2')\n channelstream.CHANNELS[channel2.name] = channel2\n user = User('test_user')\n channelstream.USERS[user.username] = user\n user2 = User('test_user2')\n channelstream.USERS[user2.username] = user2\n connection = Connection('test_user', '1')\n channelstream.CONNECTIONS[connection.id] = connection\n connection2 = Connection('test_user', '2')\n connection2.mark_for_gc()\n channelstream.CONNECTIONS[connection2.id] = connection2\n connection3 = Connection('test_user2', '3')\n connection3.mark_for_gc()\n channelstream.CONNECTIONS[connection3.id] = connection3\n connection4 = Connection('test_user2', '4')\n channelstream.CONNECTIONS[connection4.id] = connection4\n user.add_connection(connection)\n user.add_connection(connection2)\n channel.add_connection(connection)\n channel.add_connection(connection2)\n user2.add_connection(connection3)\n user2.add_connection(connection4)\n channel2.add_connection(connection3)\n channel2.add_connection(connection4)\n channelstream.gc.gc_conns()\n assert len(channelstream.CONNECTIONS.items()) == 2\n conns = channelstream.CHANNELS['test'].connections['test_user']\n assert len(conns) == 1\n assert conns == [connection]\n conns = channelstream.CHANNELS['test2'].connections['test_user2']\n assert len(conns) == 1\n assert conns == [connection4]\n assert len(user.connections) == 1\n assert len(user2.connections) == 1\n connection.mark_for_gc()\n connection4.mark_for_gc()\n channelstream.gc.gc_conns()\n assert 'test_user' not in channelstream.CHANNELS['test'].connections\n assert 'test_user2' not in channelstream.CHANNELS['test2'].connections\n assert len(channelstream.CHANNELS['test'].connections.items()) == 0\n assert len(channelstream.CHANNELS['test2'].connections.items()) == 0\n\n def test_users_active(self):\n user = User('test_user')\n channelstream.USERS[user.username] = user\n user2 = User('test_user2')\n channelstream.USERS[user2.username] = user2\n channelstream.gc.gc_users()\n assert len(channelstream.USERS.items()) == 2\n user.last_active -= timedelta(days=2)\n channelstream.gc.gc_users()\n assert len(channelstream.USERS.items()) == 1\n\n\n@pytest.fixture\ndef pyramid_config():\n # from pyramid.request import Request\n # request = Request.blank('/', base_url='http://foo.com')\n config = testing.setUp(settings={})\n settings = config.get_settings()\n return config, settings\n\n\n@pytest.fixture\ndef dummy_request():\n app_request = testing.DummyRequest()\n app_request.handle_cors = mock.Mock()\n return app_request\n\n\n@pytest.mark.usefixtures('cleanup_globals', 'pyramid_config')\nclass TestConnectViews(object):\n def test_bad_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {}\n view_cls = ServerViews(dummy_request)\n result = view_cls.connect()\n assert result == {'error': 'No username specified'}\n\n def test_good_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {'username': 'username',\n 'conn_id': 'X',\n 'fresh_user_state': {'key': 'foo'},\n 'user_state': {'bar': 'baz'},\n 'state_public_keys': 'bar',\n 
'channels': ['a', 'aB'],\n 'channel_configs': {\n 'a': {'store_history': True,\n 'history_size': 2}}}\n view_cls = ServerViews(dummy_request)\n assert channelstream.CHANNELS == {}\n result = view_cls.connect()\n assert len(channelstream.CHANNELS.keys()) == 2\n assert 'username' in channelstream.USERS\n assert 'X' in channelstream.CONNECTIONS\n assert result['channels'] == ['a', 'aB']\n assert result['state'] == {'bar': 'baz', 'key': 'foo'}\n assert result['conn_id'] == 'X'\n channels_info = result['channels_info']['channels']\n assert len(channels_info.keys()) == 2\n assert channels_info['a']['total_users'] == 1\n assert channels_info['a']['total_connections'] == 1\n assert channels_info['a']['users'] == ['username']\n assert channels_info['a']['history'] == []\n assert result['channels_info']['users'] == [\n {'state': {'bar': 'baz', 'key': 'foo'}, 'user': 'username'}\n ]\n\n@pytest.mark.usefixtures('cleanup_globals', 'pyramid_config')\nclass TestUserStateViews(object):\n def test_bad_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {}\n view_cls = ServerViews(dummy_request)\n result = view_cls.user_state()\n assert result == {'error': 'No username specified'}\n\n def test_not_found_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {'user': 'blabla'}\n view_cls = ServerViews(dummy_request)\n result = view_cls.user_state()\n assert result == {'error': 'User not found'}\n\n def test_good_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {'username': 'test',\n 'conn_id': 'x',\n 'fresh_user_state': {'key': 'foo'},\n 'user_state': {'bar': 'baz'},\n 'state_public_keys': 'bar',\n 'channels': ['a', 'aB'],\n 'channel_configs': {\n 'a': {'store_history': True,\n 'history_size': 2}}}\n view_cls = ServerViews(dummy_request)\n view_cls.connect()\n dummy_request.json_body = {\n \"user\": 'test',\n \"user_state\": {\"bar\": 2, 'private': 'im_private'},\n \"state_public_keys\": [\"avatar\", \"bar\"]\n }\n view_cls = ServerViews(dummy_request)\n result = view_cls.user_state()\n sorted_keys = sorted(['bar', 'key', 'private'])\n assert sorted_keys == sorted(result['user_state'].keys())\n assert result['user_state']['private'] == 'im_private'\n sorted_changed = sorted([x['key'] for x in result['changed_state']])\n assert sorted_changed == sorted(['bar', 'private'])\n\n@pytest.mark.usefixtures('cleanup_globals', 'pyramid_config')\nclass TestSubscribeViews(object):\n def test_bad_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {}\n view_cls = ServerViews(dummy_request)\n result = view_cls.subscribe()\n assert result == {'error': 'Unknown connection'}\n\n def test_good_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {'username': 'test',\n 'conn_id': 'x',\n 'fresh_user_state': {'key': 'foo'},\n 'user_state': {'bar': 'baz'},\n 'state_public_keys': 'bar',\n 'channels': ['a', 'aB'],\n 'channel_configs': {\n 'a': {'store_history': True,\n 'history_size': 2}}}\n view_cls = ServerViews(dummy_request)\n view_cls.connect()\n dummy_request.json_body = {\"conn_id\": 'x',\n \"channels\": ['b'],\n \"channel_configs\": {\n \"a\": {\"notify_presence\": True},\n \"b\": {\"notify_presence\": True}}\n }\n view_cls = ServerViews(dummy_request)\n result = view_cls.subscribe()\n assert sorted(result['channels']) == 
sorted(['a', 'aB', 'b'])\n assert result['channels_info']['users'] == [\n {'state': {'bar': 'baz', 'key': 'foo'}, 'user': 'test'}]\n assert 'a' in result['channels_info']['channels']\n assert 'b' in result['channels_info']['channels']\n assert result['channels_info']['channels']['a'][\n 'total_connections'] == 1\n assert result['channels_info']['channels']['a']['total_users'] == 1\n assert result['channels_info']['channels']['a']['history'] == []\n assert result['channels_info']['channels']['a']['users'] == ['test']\n\n\n@pytest.mark.usefixtures('cleanup_globals', 'pyramid_config')\nclass TestUnsubscribeViews(object):\n def test_bad_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {}\n view_cls = ServerViews(dummy_request)\n result = view_cls.unsubscribe()\n assert result == {'error': 'Unknown connection'}\n\n def test_good_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {'username': 'test',\n 'conn_id': 'x',\n 'fresh_user_state': {'key': 'foo'},\n 'user_state': {'bar': 'baz'},\n 'state_public_keys': 'bar',\n 'channels': ['a', 'aB', 'aC'],\n 'channel_configs': {\n 'a': {'store_history': True,\n 'history_size': 2}}}\n view_cls = ServerViews(dummy_request)\n view_cls.connect()\n dummy_request.json_body = {\"conn_id\": 'x',\n \"channels\": ['aC', 'a']\n }\n view_cls = ServerViews(dummy_request)\n result = view_cls.unsubscribe()\n assert sorted(result['channels']) == sorted(['aB'])\n\n def test_non_existing_channel(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {'username': 'test',\n 'conn_id': 'x',\n 'fresh_user_state': {'key': 'foo'},\n 'user_state': {'bar': 'baz'},\n 'state_public_keys': 'bar',\n 'channels': ['a', 'aB', 'aC'],\n 'channel_configs': {\n 'a': {'store_history': True,\n 'history_size': 2}}}\n view_cls = ServerViews(dummy_request)\n view_cls.connect()\n dummy_request.json_body = {\"conn_id\": 'x',\n \"channels\": ['d']\n }\n view_cls = ServerViews(dummy_request)\n result = view_cls.unsubscribe()\n assert sorted(result['channels']) == sorted(['a', 'aB', 'aC'])\n\n\n@pytest.mark.usefixtures('cleanup_globals', 'pyramid_config')\nclass TestInfoView(object):\n def test_empty_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {}\n view_cls = ServerViews(dummy_request)\n result = view_cls.info()\n assert result['channels'] == {}\n assert result['users'] == []\n\n def test_subscribed_json(self, dummy_request):\n from channelstream.wsgi_views.server import ServerViews\n dummy_request.json_body = {'username': 'test1',\n 'conn_id': 'x',\n 'fresh_user_state': {'key': 'foo'},\n 'user_state': {'bar': 'baz'},\n 'state_public_keys': 'bar',\n 'channels': ['a', 'aB'],\n 'channel_configs': {\n 'a': {'store_history': True,\n 'history_size': 2}}}\n view_cls = ServerViews(dummy_request)\n view_cls.connect()\n dummy_request.json_body = {'username': 'test2',\n 'conn_id': 'y',\n 'fresh_user_state': {'key': 'foo1'},\n 'user_state': {'bar': 'baz1'},\n 'state_public_keys': 'key',\n 'channels': ['a', 'c'],\n 'channel_configs': {\n 'c': {'store_history': True,\n 'history_size': 2}}}\n view_cls = ServerViews(dummy_request)\n view_cls.connect()\n result = view_cls.info()\n assert sorted(('a', 'aB', 'c')) == sorted(result['channels'].keys())\n assert result['users']\n compA = sorted(result['channels']['a']['users'])\n compB = sorted(['test1', 'test2'])\n assert 
compA == compB\n assert result['channels']['a']['total_users'] == 2\n assert result['channels']['a']['total_connections'] == 2\n assert result['channels']['c']['users'] == ['test2']\n assert result['channels']['c']['total_users'] == 1\n assert result['channels']['c']['total_connections'] == 1\n assert result['channels']['aB']['users'] == ['test1']\n compA = sorted(result['users'],\n key=lambda x: x['user'])\n compB = sorted([\n {'state': {'bar': 'baz', 'key': 'foo'}, 'user': 'test1'},\n {'state': {'bar': 'baz1', 'key': 'foo1'}, 'user': 'test2'}],\n key=lambda x: x['user'])\n assert compA == compB\n dummy_request.body = 'NOTEMPTY'\n dummy_request.json_body = {'info': {'channels': ['a']}}\n view_cls = ServerViews(dummy_request)\n result = view_cls.info()\n assert 'a' in result['channels']\n assert 'aB' not in result['channels']\n","sub_path":"channelstream/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":27049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"378709860","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport awkward\n\nfrom geeksw.utils.core import concatenate\n\n\nimport geeksw.framework as fwk\nfrom geeksw.plotting import Plot\nfrom geeksw.utils.awkward_utils import selection2mask\n\n\ndef iterate_by_descending_pt(particles):\n\n pt = particles.pt\n mask = selection2mask(pt.argmax(), pt)\n for i in range(4):\n yield particles[mask]\n\n pt = pt[~mask]\n particles = particles[~mask]\n\n mask = selection2mask(pt.argmax(), pt)\n\n\n###\n# Leptons pt\n###\n\n\n@fwk.one_producer(\"/plots/fullLeptonsPt\")\n@fwk.consumes(leptons=\"/full/leptons\")\ndef gen_pt_plot(leptons):\n\n pt_bins = np.linspace(0, 200, 100)\n leptons = leptons[leptons.pt.counts > 4]\n\n plot = Plot()\n\n labels = [\"1st lepton\", \"2nd lepton\", \"3rd lepton\", \"4th lepton\"]\n for lbl, particles in zip(labels, iterate_by_descending_pt(leptons)):\n plt.hist(particles.pt.flatten(), bins=pt_bins, histtype=\"step\", label=lbl)\n\n plt.ylabel(\"Events\")\n plt.xlabel(\"pT [GeV]\")\n plt.legend(loc=\"upper right\")\n plt.xlim(pt_bins[0], pt_bins[-1])\n plot.commit()\n return plot\n\n\n###\n# Leptons eta\n###\n\n\n@fwk.one_producer(\"/plots/fullLeptonsEta\")\n@fwk.consumes(leptons=\"/full/leptons\")\ndef gen_eta_plot(leptons):\n\n eta_bins = np.linspace(-5.0, 5.0, 100)\n leptons = leptons[leptons.pt.counts > 4]\n\n plot = Plot()\n\n labels = [\"1st lepton\", \"2nd lepton\", \"3rd lepton\", \"4th lepton\"]\n for lbl, particles in zip(labels, iterate_by_descending_pt(leptons)):\n plt.hist(particles.eta.flatten(), bins=eta_bins, histtype=\"step\", label=lbl)\n\n plt.ylabel(\"Events\")\n plt.xlabel(\"eta\")\n plt.legend(loc=\"upper right\")\n plt.xlim(eta_bins[0], eta_bins[-1])\n plot.commit()\n return plot\n\n\n@fwk.one_producer(\"plots/selectedNJetOverThresh\")\n@fwk.consumes(jets=\"*/selected/jets\")\ndef plot_jets_over_thresh(jets, thresh=30.0):\n\n bins = np.linspace(0, 10, 11)\n\n plot = Plot()\n plt.title(\"Jets over Pt {} GeV\".format(thresh))\n for k, v in jets.items():\n # v = concatenate([x.pt for x in v])\n nJets = (v.pt > thresh).sum()\n plt.hist(nJets, bins=bins, histtype=\"step\", label=k, density=True)\n plt.legend(loc=\"upper right\")\n plt.ylabel(\"Events\")\n plt.xlabel(\"n jets\")\n plt.xlim(bins[0], bins[-1])\n plot.commit()\n return plot\n\n\n@fwk.one_producer(\"plots/selectedBmax\", merged=False)\n@fwk.consumes(jets=\"*/selected/jets\")\ndef plot_jets_bness(jets, thresh=30.0):\n\n bins = np.linspace(0, 1, 100)\n\n plot = Plot()\n plt.title(\"Max b-tag of jets over Pt {} GeV\".format(thresh))\n\n for k, v in jets.items():\n\n pt = concatenate([x.pt for x in v])\n v = concatenate([x[\"btag\"] for x in v])\n v = v * (pt > thresh)\n bmax = v.max()\n bmax = np.clip(bmax, 0.0, 1.0)\n\n plt.hist(bmax, bins=bins, histtype=\"step\", label=k, density=True)\n plt.gca().set_yscale(\"log\")\n\n plt.legend(loc=\"upper right\")\n plt.ylabel(\"Events\")\n plt.xlabel(\"max b-tag\")\n plt.xlim(bins[0], bins[-1])\n plot.commit()\n return plot\n\n\n###\n# For gen MET and m4l\n##\n\n\n@fwk.one_producer(\"plots/\")\n@fwk.consumes(variable=\"*/selected/\")\ndef plot_multi_sample_variable(variable):\n\n name = variable.subs[\"\"]\n\n all_bins = {\n \"MET_pt\": np.linspace(0, 200, 50),\n # \"m4l\": np.linspace(0, 800, 100),\n }\n\n bins = all_bins[name]\n\n plot = Plot()\n for k, v in variable.items():\n plt.hist(v, bins=bins, label=k, histtype=\"step\", density=True)\n\n plt.legend(loc=\"upper right\")\n plt.xlabel(name + \" 
[GeV]\")\n plt.xlim(bins[0], bins[-1])\n ax = plt.gca()\n ax.set_yscale(\"log\", nonposy=\"clip\")\n plot.commit()\n return plot\n\n\n# @fwk.one_producer(\"/plots/eventSelection\")\n# @fwk.consumes(selection=\"/eventSelection\")\n# def plot_event_selection(gen_failed_cut):\n\n\n@fwk.one_producer(\"gen/plots/genFailedCut\")\n@fwk.consumes(gen_failed_cut=\"gen/tree/genFailedCut\")\ndef plot_gen_failed_cut(gen_failed_cut):\n\n df = pd.DataFrame(data={\"genFailedCut\": gen_failed_cut})\n n_gen = len(df)\n\n plot = Plot()\n counts = df[\"genFailedCut\"].value_counts()\n counts.loc[5] = counts.loc[0]\n counts = counts.drop(0).sort_index(ascending=False)\n counts = counts.cumsum().iloc[::-1] / 1000000\n d = {5: \"arbitration\", 4: \"reconstruction\", 3: \"pT > 5 GeV\", 2: \"|eta| < 2.5\", 1: \"total\"}\n counts = counts.rename(d, axis=\"index\")\n counts.plot(kind=\"barh\")\n plt.xlabel(\"Million events\")\n ax = plt.gca()\n xlim = plt.xlim()\n plt.xlim(0, 1.15 * xlim[1])\n d = xlim[1] - xlim[0]\n for i, v in enumerate(counts.values):\n ax.text(v + d * 0.02, i - 0.05, \"{0:.1f} %\".format((1e8 * v) / n_gen), fontweight=\"bold\")\n plt.grid(False)\n plot.commit(bbox_inches=\"tight\")\n return plot\n\n\n###\n# Gen Flavor config\n###\n\n\n@fwk.one_producer(\"plots/gen/genFlavorConfig\")\n@fwk.consumes(gen_flv_config=\"gen/tree/genFlavorConfig\")\ndef plot_gen_flavor_config(gen_flv_config):\n\n df = pd.DataFrame(data={\"genFlavorConfig\": gen_flv_config})\n n_gen = len(df)\n\n plot = Plot()\n counts = df[\"genFlavorConfig\"].value_counts() / 1000000\n for i in range(5):\n if not i in counts.index:\n counts.loc[i] = 0\n counts = counts.sort_index(ascending=False)\n d = {0: \"impossible\", 1: \"all same\", 2: \"2 vs 2 SS\", 3: \"2 vs 2 OS\", 4: \"3 vs 1\"}\n counts = counts.rename(d, axis=\"index\")\n counts.plot(kind=\"barh\")\n ax = plt.gca()\n xlim = plt.xlim()\n plt.xlim(0, 1.15 * xlim[1])\n d = xlim[1] - xlim[0]\n for i, v in enumerate(counts.values):\n ax.text(v + d * 0.02, i - 0.05, \"{0:.1f} %\".format((1e8 * v) / n_gen), fontweight=\"bold\")\n plt.xlabel(\"Million events\")\n plt.grid(False)\n plot.commit(bbox_inches=\"tight\")\n return plot\n","sub_path":"producers/ValidationPlotter.py","file_name":"ValidationPlotter.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"42995766","text":"import pyaudio\nimport numpy as np\nimport wave\nimport sys\nimport time\nimport pylab\nimport graphics\nimport audio_math\nimport audio_visuals\nimport random\nfrom matplotlib import pyplot as plt\n\nif( len(sys.argv) < 2 ):\n\tprint(\"Plays a wave file.\\n\\n\")\n\tsys.exit(-1)\n#def callback function\ndef callback(in_data, frame_count, time_info, status):\n\tglobal np_data, bool\n\tdata = wf.readframes(frame_count)\n\tnp_data = data\n\tbool = True\n\treturn (data, pyaudio.paContinue)\nDEBUG = False\nbool = False\nlast_update = time.time()\nlast_color = time.time()\n#wave file\nwf = wave.open(sys.argv[1], 'rb')\n#init PyAudio, NumPy, graphics\npa = pyaudio.PyAudio()\nwin = graphics.GraphWin(\"Cheap Audio Visuals\", 1280, 240)\nfinal_data = np.array([])\nnp_data = np.array([])\n\nif(DEBUG):\n\tprint(\"width:\",wf.getsampwidth())\n\tprint(\"format:\", pa.get_format_from_width(wf.getsampwidth()))\n\tprint(\"channels:\", wf.getnchannels())\n\tprint(\"rate:\",wf.getframerate())\n\n#open a audio stream on desired output\n#Wav file\naudio_stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True,\n stream_callback=callback)\n'''\n#input\naudio_stream = pa.open(format=16,\n channels=2,\n rate=44100,\n input=True,\n stream_callback=callback)\n'''\n#setup display\n#audio_visuals.load_circles(64, 6, win)\naudio_stream.start_stream()\nwin.setCoords(0, 0, 1280, 240)\naudio_visuals.load_circles(num_cir=64, cir_radius=6, window=win)\nwin.redraw()\n\nwhile audio_stream.is_active():\n\t#if(time.time() - last_color > 1/30):\n\t#\taudio_visuals.re_color(64, win)\n\t#\tlast_color = time.time()\n\tif(bool):\n\t\tbool = False\n\t\tif(time.time() - last_update > 1/30):\n\t\t\tfinal_stream = audio_math.make_audio_tuple(np_data, 512)\n\t\t\tfinal_data = audio_math.exaggerated_process(final_stream, 64, 512, 44100)\n\t\t\tfor data in final_data:\n\t\t\t\taudio_visuals.circle_sound(data, win)\n\t\t\t\t#audio_visuals.plot_sound(data, win, audio_visuals.random_color())\n\t\t\t\tlast_update = time.time()\nprint(\"Done\")\n\t\n#gracefully end stream\naudio_stream.stop_stream()\naudio_stream.close()\nwf.close()\n\n#gracefully end PyAudio\npa.terminate()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"476068605","text":"#!/usr/bin/env python3\n\nimport sys\nimport xmltodict\nimport psycopg2\n\n\n\ntoursfile = sys.argv[1]\n\n\ndsn = 'dbname=ght user=pgsql password=xyz host=localhost'\n\ndef main():\n conn = _db_new_connection(dsn)\n toursdata = xmltodict.parse(open(toursfile, mode='r', encoding=\"utf-8\").read(),\n force_list={'tours': True,\n 'stops': True,\n 'vehicle': True,\n 'address': True\n }\n )\n #tours\n tnum = 0\n for t in toursdata['toursdata']['tours']: #yeah, that is a hideous name\n tnum += 1\n #print(str(t))\n \n insert_sql = \"\"\" INSERT INTO tours\n (id, status, missing_vehicle_data, license_plate, planned_begin,\n planned_end, haulier, transport_company, customer, vehicle_owner_code)\n VALUES ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s );\"\"\"\n\n update_sql = \"\"\"UPDATE tours SET (id, status, missing_vehicle_data, license_plate, planned_begin,\n planned_end, haulier, transport_company, customer, vehicle_owner_code) = (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n WHERE id = %s ;\"\"\"\n\n #t\n values = [\n t['id'], t['status'], t['missing_vehicle_data'], t['license_plate'], t['planned_begin'],\n t['planned_end'], t['haulier'], t['transport_company'], t['customer'], t['vehicle_owner_code']\n ]\n print(\"t\", end='')\n _insert_or_update(conn, insert_sql, update_sql, values)\n\n #stops\n #sometimes there is a tour with no stops, that's weird. set empty list\n if not 'stops' in t: t['stops'] = []\n for s in t['stops']:\n\n insert_sql = \"\"\" INSERT INTO stops\n (id, type, time_window_begin, time_window_end, rta, tours_id)\n VALUES ( %s, %s, %s, %s, %s, %s );\"\"\"\n\n update_sql = \"\"\"UPDATE stops SET (id, type, time_window_begin, time_window_end, rta, tours_id) = (%s, %s, %s, %s, %s, %s )\n WHERE id = %s ;\"\"\"\n\n values = [ s['id'], s['type'], s['time_window_begin'], s['time_window_end'],\n s['rta'], t['id']\n ] #t['id'] is deliberatly a foreign key\n print(\"s\", end='')\n _insert_or_update(conn, insert_sql, update_sql, values)\n \n #address(s) yeah I know we should decide whether or not we are pluralizing, that's just the ghtrack way.\n for a in s['address']:\n\n \n #there should really only be one address per stop, it's just the\n #way the xml is organized. \n insert_sql = \"\"\" INSERT INTO address\n (location_id, company, country, city, zipcode, street, score, latitude, longitude)\n VALUES ( %s, %s, %s, %s, %s, %s, %s, %s, %s );\"\"\"\n update_sql = \"\"\"UPDATE address SET (location_id, company, country, city, zipcode, street, score, latitude, longitude) =\n ( %s, %s, %s, %s, %s, %s, %s, %s, %s )\n WHERE location_id = %s ;\"\"\"\n values = [ a['location_id'], a['company'], a['country'], a['city'], a['zipcode'],\n a['street'], a['score'], a['latitude'], a['longitude']\n ]\n _insert_or_update(conn, insert_sql, update_sql, values)\n\n #join table stops_address (because addresses probably are used by multiple stops but I am not sure if stops always have just one)\n insert_sql = \"\"\" INSERT INTO stops_address\n (stops_id, address_location_id)\n VALUES ( %s, %s );\"\"\"\n #there is no point in updating since it will be the same as what is there.\n update_sql = \"\" \n\n print(\"a\", end='')\n _insert_or_update(conn, insert_sql, update_sql, [ s['id'], a['location_id'] ])\n\n \n #vehicles in tours\n vnum = 1\n if not 'vehicle' in t: t['vehicle'] = []\n for v in t['vehicle']:\n\n #vehicles do not seem to always have a longitude and latitude and timestamp, but may. 
null them if they do not exist\n if not 'latitude' in v.keys() : v['latitude'] = None\n if not 'longitude' in v.keys(): v['longitude'] = None\n if not 'timestamp' in v.keys(): v['timestamp'] = None\n if not 'name' in v.keys(): v['name'] = None\n\n\n #vehicles have to have a uuid don't they?\n if not 'uuid' in v.keys():\n print(\"no uuid for vehicle in tour: \"+t['id'])\n v['uuid']=v['license_plate']\n \n\n insert_sql = \"\"\" INSERT INTO vehicle\n (uuid, data_gate_open, latitude, longitude, timestamp, license_plate, name)\n VALUES ( %s, %s, %s, %s, %s, %s, %s );\"\"\"\n update_sql = \"\"\"UPDATE vehicle SET (uuid, data_gate_open, latitude, longitude, timestamp, license_plate, name) =\n ( %s, %s, %s, %s, %s, %s, %s )\n WHERE uuid = %s ;\"\"\"\n\n values = [ v['uuid'], v['data_gate_open'], v['latitude'], v['longitude'], v['timestamp'], v['license_plate'], v['name'] ]\n\n print()\n print('tour: ' + t['id'] + ' vehicle: ' +str(vnum))\n vnum += 1\n print(\"v\", end='')\n _insert_or_update(conn, insert_sql, update_sql, values)\n\n\n #join table tours_vehicle (are vehicles in more than one tour at a time? possibly!)\n insert_sql = \"\"\" INSERT INTO tours_vehicle\n (tours_id, vehicle_uuid)\n VALUES ( %s, %s );\"\"\"\n #there is no point in updating since it will be the same as what is there.\n update_sql = ''\n _insert_or_update(conn, insert_sql, update_sql, [ t['id'], v['uuid'] ])\n\n\n #just do one tour for debug\n #exit()\n\ndef _insert_or_update(conn, insert_sql, update_sql, values):\n try:\n cur = conn.cursor()\n res = cur.execute(insert_sql, values)\n conn.commit()\n print(\"i\", end=' ')\n\n return()\n print(\"should not get here\")\n except psycopg2.IntegrityError as e:\n #update existing row instead\n #extra id/primary key on the end so we can fill the \"where\" clause parameter\n print(\"u\", end=' ')\n conn.commit()\n\n if update_sql == \"\":\n #don't bother if we have no update_sql\n return()\n\n #try updating existing row\n try:\n values.append(values[1])\n cur = conn.cursor()\n res = cur.execute(update_sql, values)\n conn.commit()\n except Exception as e:\n print(\"error updating: \"+str(e))\n\n#new db connection \ndef _db_new_connection(dsn):\n print(\"try to connect..\")\n try:\n myConn = psycopg2.connect(dsn)\n print(\"DB connection established\")\n except Exception as e:\n print(\"Can\\'t connect to the database: \"+str(e))\n #raise IOError\n exit()\n \n print(\"OK new connection\")\n return myConn\n\n\nif __name__ == '__main__':\n main()\n \n","sub_path":"ghtrack/insert_tours.py","file_name":"insert_tours.py","file_ext":"py","file_size_in_byte":7324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"644647769","text":"import pysupercluster\nimport pandas as pd\nimport hashlib\nimport json\nfrom utils.redis import cache\nfrom .dataService import DataService\n\n\nclass PinClusterService(object):\n def __init__(self, config=None):\n self.config = config\n\n def pins_key(self, filters):\n filters_json = json.dumps(filters, sort_keys=True).encode('utf-8')\n hashed_json = hashlib.md5(filters_json).hexdigest()\n return 'filters:{}:pins'.format(hashed_json)\n\n def get_pins(self, filters):\n key = self.pins_key(filters)\n pins = cache.get(key)\n\n if pins is None:\n dataAccess = DataService()\n\n fields = [\n 'srnumber',\n 'requesttype',\n 'latitude',\n 'longitude']\n\n filters = dataAccess.standardFilters(\n filters['startDate'],\n filters['endDate'],\n filters['requestTypes'],\n filters['ncList'])\n\n pins = dataAccess.query(fields, filters, table='map')\n pins = pd.DataFrame(pins, columns=fields)\n\n cache.set(key, pins)\n\n return pins\n\n def pin_clusters(self, pins, zoom, bounds, options={}):\n if len(pins) == 0:\n return []\n\n min_zoom = options.get('min_zoom', 0)\n max_zoom = options.get('max_zoom', 17)\n radius = options.get('radius', 200)\n extent = options.get('extent', 512)\n\n index = pysupercluster.SuperCluster(\n pins[['longitude', 'latitude']].to_numpy(),\n min_zoom=min_zoom,\n max_zoom=max_zoom,\n radius=radius,\n extent=extent)\n\n north = bounds.get('north', 90)\n south = bounds.get('south', -90)\n west = bounds.get('west', -180)\n east = bounds.get('east', 180)\n\n clusters = index.getClusters(\n top_left=(west, north),\n bottom_right=(east, south),\n zoom=zoom)\n\n for cluster in clusters:\n if cluster['count'] == 1:\n pin = pins.iloc[cluster['id']]\n cluster['srnumber'] = pin['srnumber']\n cluster['requesttype'] = pin['requesttype']\n del cluster['expansion_zoom']\n\n return clusters\n\n async def get_pin_clusters(self, filters, zoom, bounds, options):\n pins = self.get_pins(filters)\n return self.pin_clusters(pins, zoom, bounds, options)\n","sub_path":"server/src/services/pinClusterService.py","file_name":"pinClusterService.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"641431340","text":"#!/usr/bin/env python3\n\n#=========================================================================================================================\n# title : variational_univariate_gaussian.py\n# description : Infer posterior of univariate Gaussian mean and precision for a dataset, given a normal-gamma prior.\n# author : Charles Olivier (https://c-olivier.github.io/)\n# date : 28.03.2018\n# usage : chmod +x variational_univariate_gaussian.py; ./variational_univariate_gaussian.py\n# notes : Compare closed form posterior and mean-field approximated posterior.\n# python_version : 3.6.4\n#=========================================================================================================================\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy.special import gamma as gam_fun\n\n\nN = 10\nmu_0 = a_0 = b_0 = k_0 = 0\ndata_dist = norm()\ndata = data_dist.rvs(N) # standard normal\ndata_min, data_max = np.min(data), np.max(data)\nmu_ml = (1/N) * np.sum(data)\nva_ml = (1/N) * np.sum(np.square((data-mu_ml)))\n\nplt.figure(figsize=(10,3))\nplt.subplot(121)\nplt.title('Data Distribution, N={}'.format(N))\ny_bin, x_bin = np.histogram(data, bins=10)\ninterval = x_bin[1]-x_bin[0]\ny_bin = y_bin / N\nx_bin = (x_bin[1:]+x_bin[:-1])/2\nx = np.linspace(data_min, data_max, N, endpoint=True)\ny = data_dist.pdf(x)\nplt.bar(x_bin, y_bin)\nplt.plot(x,y, 'r')\n\ndef gamma(lam, a, b):\n \"\"\"The gamma distribution evaluated at lam with parameters a, b\"\"\"\n return (1/gam_fun(a))*(b**a)*(lam**(a-1))*np.exp(-b*lam)\n\ndef normal_gamma(mu, lam, m, k, a, b):\n \"\"\"Evaluates probability of normal-gamma NG(mu,lam|m,k,a,b)\"\"\"\n return norm(m, 1/(k*lam)).pdf(mu)*gamma(lam, a, b)\n\ndef normal_gamma_uncoupled(mu, lam, m, k, a, b):\n \"\"\"Evaluates probability of factorised posterior distributions: N(mu|m,k**-1)*Ga(lam|a,b)\"\"\"\n return norm(m, 1/k).pdf(mu)*gamma(lam, a, b)\n\n\ng1, g2 = np.meshgrid(np.arange(-1,1, 0.05), np.arange(0.01,2, 0.05))\ng = np.c_[g1.ravel(), g2.ravel()]\n# true posterior\nmu_n = (k_0*mu_0 + N*mu_ml)/(k_0+N)\nk_n = k_0 + N\na_n = a_0 + N/2\nb_n = b_0 + 0.5*N*va_ml + (k_0*N*(mu_ml -mu_0)**2)/(2*(k_0+N))\ng = normal_gamma(g[:,0], g[:,1], mu_n, k_n, a_n, b_n).reshape(*g1.shape)\n\nplt.subplot(122)\nplt.title('True and Approximate posteriors')\nl1 = plt.contour(g1, g2, g, 3, colors='r')\nplt.plot(0,1, 'r+', ms=10, label='True value')\nplt.xlabel('mu')\nplt.ylabel('lambda')\n\n# approximate posterior\nmu_n = (k_0*mu_0 + N*mu_ml)/(k_0+N)\nk_n = k_0 + N * 1/(va_ml)\na_n = a_0 + (N+1)/2\nb_n = a_n * va_ml\ng1, g2 = np.meshgrid(np.arange(-1,1, 0.05), np.arange(0.01,2, 0.05))\ng = np.c_[g1.ravel(), g2.ravel()]\ng = normal_gamma_uncoupled(g[:,0], g[:,1], mu_n, k_n, a_n, b_n).reshape(*g1.shape)\nl2 = plt.contour(g1, g2, g, 3, colors='b')\nplt.legend(loc=0)\nplt.show();","sub_path":"scripts/variational_univariate_gaussian.py","file_name":"variational_univariate_gaussian.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"89219489","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport numpy as np\nfrom unittest.mock import patch\n\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.sites import Element\nfrom pymatgen.analysis.phase_diagram import (\n PDEntry, PhaseDiagram, CompoundPhaseDiagram, PDPlotter)\n\nfrom vise.chempotdiag.chem_pot_diag import ChemPotDiag, sort_coords\nfrom vise.util.testing import ViseTest\n\n\nclass TestChemPotDiag(ViseTest):\n\n def setUp(self) -> None:\n\n mg = PDEntry(Composition(\"Mg\"), -1.0)\n mg2 = PDEntry(Composition(\"Mg\"), -0.5)\n ca = PDEntry(Composition(\"Ca\"), -2.0)\n sr = PDEntry(Composition(\"Sr\"), -3.0)\n o = PDEntry(Composition(\"O\"), -4.0)\n\n camg = PDEntry(Composition(\"Ca2Mg2\"), -16.0) # rel -10.0\n camgo = PDEntry(Composition(\"CaMgO\"), -17.0) # rel -10.0\n camgo2 = PDEntry(Composition(\"CaMgO2\"), -11.0) # rel 0.0\n camgsro4 = PDEntry(Composition(\"CaMgSrO4\"), -32.0) # rel -10\n\n self.pd_1d = PhaseDiagram([mg, mg2])\n self.pd_2d = PhaseDiagram([mg, ca, camg])\n self.pd_3d = PhaseDiagram([mg, ca, camg, o, camgo, camgo2])\n self.pd_4d = PhaseDiagram([mg, ca, camg, o, camgo, sr, camgsro4])\n\n self.cpd_1d = ChemPotDiag.from_phase_diagram(pd=self.pd_1d,\n target_comp=\"Mg\")\n self.cpd_2d = ChemPotDiag.from_phase_diagram(pd=self.pd_2d,\n target_comp=\"CaMg\")\n self.cpd_3d = ChemPotDiag.from_phase_diagram(pd=self.pd_3d,\n target_comp=\"CaMgO\")\n self.cpd_3d_unstable = \\\n ChemPotDiag.from_phase_diagram(pd=self.pd_3d,\n target_comp=\"CaMgO2\",\n allow_unstable_target_chempot=True)\n self.cpd_4d = ChemPotDiag.from_phase_diagram(pd=self.pd_4d,\n target_comp=\"SrCaMgO4\")\n\n self.comp_pd = CompoundPhaseDiagram(\n entries=[mg, ca, o, camg, camgo, camgo2],\n terminal_compositions=[Composition(\"CaMg\"),\n Composition(\"Ca\"),\n Composition(\"O\")])\n\n self.comp_cpd = ChemPotDiag.from_phase_diagram(\n pd=self.comp_pd,\n target_comp=\"MgCaO2\",\n allow_unstable_target_chempot=True)\n\n def test_cpd_1d(self):\n self.assertEqual([Element.Mg], self.cpd_1d.elements)\n self.assertEqual(1, self.cpd_1d.dim)\n self.assertEqual([[0.0]], self.cpd_1d.vertices)\n self.assertEqual(\"Mg\", self.cpd_1d.target_comp)\n self.assertEqual({'A': [0.0]}, self.cpd_1d.target_comp_chempot)\n self.assertEqual({'A': [-1.0]}, self.cpd_1d.target_comp_abs_chempot)\n\n # def test_pd_1d_draw(self):\n # pdp = PDPlotter(self.pd_1d, show_unstable=10)\n # pdp.show()\n\n def test_cpd_1d_draw(self):\n with self.assertRaises(NotImplementedError):\n self.cpd_1d.draw_diagram()\n\n def test_cpd_2d(self):\n self.assertEqual([Element.Ca, Element.Mg], self.cpd_2d.elements)\n self.assertEqual(2, self.cpd_2d.dim)\n self.assertEqual([[0.0, -5.0], [-5.0, 0.0]], self.cpd_2d.vertices)\n# self.assertEqual([[-5.0, 0.0], [0.0, -5.0]], self.cpd_2d.vertices)\n self.assertEqual(\"CaMg\", self.cpd_2d.target_comp)\n self.assertEqual({'A': [0.0, -5.0], 'B': [-5.0, 0.0]},\n self.cpd_2d.target_comp_chempot)\n self.assertEqual({'A': [-2.0, -6.0], 'B': [-7.0, -1.0]},\n self.cpd_2d.target_comp_abs_chempot)\n\n @unittest.skipIf(not ViseTest.DISPLAY_DIAGRAM, ViseTest.no_display_reason)\n def test_cpd_2d_draw(self):\n self.cpd_2d.draw_diagram()\n\n @patch(\"vise.chempotdiag.chem_pot_diag.plt.show\")\n def test_cpd_2d_draw_mock(self, mock):\n self.cpd_2d.draw_diagram()\n mock.assert_called_once_with()\n\n @patch(\"vise.chempotdiag.chem_pot_diag.plt.savefig\")\n def test_cpd_2d_draw_savefig_mock(self, mock):\n self.cpd_2d.draw_diagram(filename=\"a.pdf\")\n 
mock.assert_called_once_with(\"a.pdf\")\n\n def test_cpd_3d(self):\n self.assertEqual([Element.Ca, Element.Mg, Element.O],\n self.cpd_3d.elements)\n self.assertEqual(3, self.cpd_3d.dim)\n self.assertEqual([[-10.0, 0.0, 0.0], [-5.0, 0.0, -5.0],\n [0.0, -10.0, 0.0], [0.0, -5.0, -5.0]],\n self.cpd_3d.vertices)\n self.assertEqual(\"CaMgO\", self.cpd_3d.target_comp)\n self.assertEqual({'A': [-10.0, 0.0, 0.0], 'B': [-5.0, 0.0, -5.0],\n 'C': [0.0, -10.0, 0.0], 'D': [0.0, -5.0, -5.0]},\n self.cpd_3d.target_comp_chempot)\n\n @unittest.skipIf(not ViseTest.DISPLAY_DIAGRAM, ViseTest.no_display_reason)\n def test_cpd_3d_draw(self):\n self.cpd_3d.draw_diagram()\n\n @patch(\"vise.chempotdiag.chem_pot_diag.plt.show\")\n def test_cpd_3d_draw_mock(self, mock):\n self.cpd_3d.draw_diagram()\n mock.assert_called_once_with()\n\n def test_cpd_3d_unstable(self):\n self.assertEqual({'A': [-10.0, 0.0, 0.0], 'B': [0.0, -10.0, 0.0]},\n self.cpd_3d_unstable.target_comp_chempot)\n\n @unittest.skipIf(not ViseTest.DISPLAY_DIAGRAM, ViseTest.no_display_reason)\n def test_cpd_3d_unstable_draw(self):\n self.cpd_3d_unstable.draw_diagram()\n\n @patch(\"vise.chempotdiag.chem_pot_diag.plt.show\")\n def test_cpd_3d_unstable_draw_mock(self, mock):\n self.cpd_3d_unstable.draw_diagram()\n mock.assert_called_once_with()\n\n def test_cpd_4d(self):\n self.assertEqual([Element.Sr, Element.Ca, Element.Mg, Element.O],\n self.cpd_4d.elements)\n for i in self.cpd_4d.vertices:\n self.assertTrue(i in [[0.0, -10.0, 0.0, 0.0],\n [0.0, 0.0, -10.0, 0.0],\n [0.0, -5.0, 0.0, -5.0],\n [0.0, 0.0, -5.0, -5.0]])\n self.cpd_4d.vertices.remove(i)\n\n @unittest.skipIf(not ViseTest.DISPLAY_DIAGRAM, ViseTest.no_display_reason)\n def test_pd_4d_plot(self):\n pdp = PDPlotter(self.pd_4d)\n pdp.show()\n\n @patch(\"pymatgen.analysis.phase_diagram.PDPlotter.show\")\n def test_pd_4d_plot_mock(self, mock):\n pdp = PDPlotter(self.pd_4d)\n pdp.show()\n mock.assert_called_once_with()\n\n @unittest.skipIf(not ViseTest.DISPLAY_DIAGRAM, ViseTest.no_display_reason)\n def test_comp_pd(self):\n # print(self.comp_cpd.elements)\n # print(self.comp_cpd.el_ref_list)\n # print(self.comp_cpd.dim)\n # print(self.comp_cpd.vertices)\n # print(self.comp_cpd.qhull_entries)\n # print(self.comp_cpd.comp_facets)\n # print(self.comp_cpd.target_comp)\n pdp = PDPlotter(self.comp_pd)\n pdp.show()\n\n @patch(\"pymatgen.analysis.phase_diagram.PDPlotter.show\")\n def test_comp_pd_mock(self, mock):\n pdp = PDPlotter(self.comp_pd)\n pdp.show()\n mock.assert_called_once_with()\n\n @unittest.skipIf(not ViseTest.DISPLAY_DIAGRAM, ViseTest.no_display_reason)\n def test_comp_cpd(self):\n print(self.comp_cpd.target_comp_abs_chempot)\n self.comp_cpd.draw_diagram()\n\n @patch(\"vise.chempotdiag.chem_pot_diag.plt.show\")\n def test_comp_cpd(self, mock):\n self.comp_cpd.draw_diagram()\n mock.assert_called_once_with()\n\n\nclass TestSortCoords(ViseTest):\n def setUp(self) -> None:\n # x + 2y + 3z = 4\n self.coords = \\\n np.array([[3, 2, -1], [-1, 2, 0], [-6, -1, 4], [1, -3, 3]])\n\n def test_sort_coords(self):\n print(sort_coords(self.coords))\n\n","sub_path":"vise/chempotdiag/tests/test_chem_pot_diag.py","file_name":"test_chem_pot_diag.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"232663410","text":"from __future__ import unicode_literals\n\nimport os\nimport re\n\nimport dvc.logger as logger\nfrom dvc.command.base import fix_subparsers\nfrom dvc.config import Config\nfrom dvc.remote import _get, RemoteLOCAL\nfrom dvc.command.config import CmdConfig\n\n\nclass CmdRemoteAdd(CmdConfig):\n @staticmethod\n def resolve_path(path, config_file):\n \"\"\"Resolve path relative to config file location.\n\n Args:\n path: Path to be resolved.\n config_file: Path to config file, which `path` is specified\n relative to.\n\n Returns:\n Path relative to the `config_file` location. If `path` is an\n absolute path then it will be returned without change.\n\n \"\"\"\n if os.path.isabs(path):\n return path\n return os.path.relpath(path, os.path.dirname(config_file))\n\n def run(self):\n remote = _get({Config.SECTION_REMOTE_URL: self.args.url})\n if remote == RemoteLOCAL:\n self.args.url = self.resolve_path(\n self.args.url, self.configobj.filename\n )\n\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n ret = self._set(section, Config.SECTION_REMOTE_URL, self.args.url)\n if ret != 0:\n return ret\n\n if self.args.default:\n msg = \"Setting '{}' as a default remote.\".format(self.args.name)\n logger.info(msg)\n ret = self._set(\n Config.SECTION_CORE, Config.SECTION_CORE_REMOTE, self.args.name\n )\n\n return ret\n\n\nclass CmdRemoteRemove(CmdConfig):\n def _remove_default(self, config):\n core = config.get(Config.SECTION_CORE, None)\n if core is None:\n return 0\n\n default = core.get(Config.SECTION_CORE_REMOTE, None)\n if default is None:\n return 0\n\n if default == self.args.name:\n return self._unset(\n Config.SECTION_CORE,\n opt=Config.SECTION_CORE_REMOTE,\n configobj=config,\n )\n\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n ret = self._unset(section)\n if ret != 0:\n return ret\n\n for configobj in [\n self.config._local_config,\n self.config._project_config,\n self.config._global_config,\n self.config._system_config,\n ]:\n self._remove_default(configobj)\n self.config.save(configobj)\n if configobj == self.configobj:\n break\n\n return 0\n\n\nclass CmdRemoteModify(CmdConfig):\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n self.args.name = \"{}.{}\".format(section, self.args.option)\n return super(CmdRemoteModify, self).run()\n\n\nclass CmdRemoteDefault(CmdConfig):\n def run(self):\n self.args.value = self.args.name\n self.args.name = \"core.remote\"\n return super(CmdRemoteDefault, self).run()\n\n\nclass CmdRemoteList(CmdConfig):\n def run(self):\n for section in self.configobj.keys():\n r = re.match(Config.SECTION_REMOTE_REGEX, section)\n if r:\n name = r.group(\"name\")\n url = self.configobj[section].get(\n Config.SECTION_REMOTE_URL, \"\"\n )\n logger.info(\"{}\\t{}\".format(name, url))\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n from dvc.command.config import parent_config_parser\n\n REMOTE_HELP = \"Manage set of tracked repositories.\"\n remote_parser = subparsers.add_parser(\n \"remote\",\n parents=[parent_parser],\n description=REMOTE_HELP,\n help=REMOTE_HELP,\n )\n\n remote_subparsers = remote_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use dvc remote CMD --help for \" \"command-specific help.\",\n )\n\n fix_subparsers(remote_subparsers)\n\n REMOTE_ADD_HELP = \"Add remote.\"\n remote_add_parser = remote_subparsers.add_parser(\n \"add\",\n parents=[parent_config_parser, parent_parser],\n description=REMOTE_ADD_HELP,\n help=REMOTE_ADD_HELP,\n )\n 
remote_add_parser.add_argument(\"name\", help=\"Name.\")\n remote_add_parser.add_argument(\n \"url\",\n help=\"URL. See full list of supported urls at \"\n \"https://dvc.org/doc/commands-reference/remote\",\n )\n remote_add_parser.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n default=False,\n help=\"Set as default remote.\",\n )\n remote_add_parser.set_defaults(func=CmdRemoteAdd)\n\n REMOTE_DEFAULT_HELP = \"Set/unset default remote.\"\n remote_default_parser = remote_subparsers.add_parser(\n \"default\",\n parents=[parent_config_parser, parent_parser],\n description=REMOTE_DEFAULT_HELP,\n help=REMOTE_DEFAULT_HELP,\n )\n remote_default_parser.add_argument(\n \"name\", nargs=\"?\", help=\"Name of the remote.\"\n )\n remote_default_parser.add_argument(\n \"-u\",\n \"--unset\",\n action=\"store_true\",\n default=False,\n help=\"Unset default remote.\",\n )\n remote_default_parser.set_defaults(func=CmdRemoteDefault)\n\n REMOTE_REMOVE_HELP = \"Remove remote.\"\n remote_remove_parser = remote_subparsers.add_parser(\n \"remove\",\n parents=[parent_config_parser, parent_parser],\n description=REMOTE_REMOVE_HELP,\n help=REMOTE_REMOVE_HELP,\n )\n remote_remove_parser.add_argument(\"name\", help=\"Name\")\n remote_remove_parser.set_defaults(func=CmdRemoteRemove)\n\n REMOTE_MODIFY_HELP = \"Modify remote.\"\n remote_modify_parser = remote_subparsers.add_parser(\n \"modify\",\n parents=[parent_config_parser, parent_parser],\n description=REMOTE_MODIFY_HELP,\n help=REMOTE_MODIFY_HELP,\n )\n remote_modify_parser.add_argument(\"name\", help=\"Name.\")\n remote_modify_parser.add_argument(\"option\", help=\"Option.\")\n remote_modify_parser.add_argument(\"value\", nargs=\"?\", help=\"Value.\")\n remote_modify_parser.add_argument(\n \"-u\",\n \"--unset\",\n default=False,\n action=\"store_true\",\n help=\"Unset option.\",\n )\n remote_modify_parser.set_defaults(func=CmdRemoteModify)\n\n REMOTE_LIST_HELP = \"List remotes.\"\n remote_list_parser = remote_subparsers.add_parser(\n \"list\",\n parents=[parent_config_parser, parent_parser],\n description=REMOTE_LIST_HELP,\n help=REMOTE_LIST_HELP,\n )\n remote_list_parser.set_defaults(func=CmdRemoteList)\n","sub_path":"dvc/command/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"62550875","text":"\nimport numpy as np\nfrom numpy import linalg as LA\nfrom numpy import inf\nimport math\n#from math import inf\nfrom math import log\nimport itertools\nfrom numpy.linalg import inv\n\ndef delta (num,K,w,d):\n num = num - 1\n num = num % K**(w-d+1)\n answer = int(((num//K**(w-d)) + 1))\n #answer = answer.astype(int)\n return (answer);\n\n#print(\"Delta Trials:\")\n#print(delta(14,10,3,1)) #should be 1\n#print(delta(14,10,3,2)) #should be 2\n#print(delta(256,2,8,1)) #should be 2\n\ndef indexconverter(arrayinput):\n return(np.array([q-1 for q in arrayinput]).astype(np.int));\n#print(indexconverter(np.array([3,4,2])))\n\ndef delta_Set (num_Set,K,w,d):\n ans=np.zeros(len(num_Set))\n for i in range(0, len(num_Set)):\n ans[i] = delta(num_Set[i],K,w,d)\n ans = ans.astype(int)\n return (ans);\n\n#print(\"Delta set trials:\")\n#print(delta_Set([1,7,28],6,2,2)) #should be (1,1,4)\n#print(delta_Set([1,64,187],3,5,1)) #should be (1,1,3)\n \ndef intToBase (w,num,K):\n\n #s = np.zeros(w)\n #for d in range(0, w):\n #s[d] = delta(num,K,w,(d+1))\n return (np.array([delta(num,K,w,(d+1)) for d in range(w)]));\n\n#print(\"Int to base trials:\")\n#print(intToBase(3,22,3)) #should be [3,2,1]\n#print(intToBase(2,15,10)) #should be [2,5]\n\ndef baseToInt (w,digits,K):\n ans = 1\n for d in range(0, w):\n ans = ans + (digits[d]-1) * K**(w-d-1)\n return (ans);\n\n#print(\"Base to int trials:\")\n#print(baseToInt(4,[1,3,3,7],8)) #should be 151\n#print(baseToInt(3,[3,2,1],3)) #should be 22\n\ndef V (S,K,w,v):\n S = intToBase(w,S,K)\n F = np.zeros(K)\n for d in range(0,w):\n F[(S[d]-1)] = F[(S[d]-1)] + 1\n return (np.dot(v,F));\n\n#print(\"V Trials:\")\n#print(V(baseToInt(4,[1,3,3,7],8),8,4,[1,2,3,4,5,6,7,8])) #should be 14\n#print(V(78,8,4,[1,2,3,4,5,6,7,8])) #should be 11\n#print(V(14,2,4,[3,8])) #should be 27\n#print(V(3,2,4,[3,8])) #should be 17\n\ndef V_Set (S_Set,K,w,v):\n ans = np.zeros(len(S_Set))\n for i in range(0,len(S_Set)):\n ans[i] = V(S_Set[i],K,w,v)\n return(ans);\n\n#print(\"V_Set Trials:\")\n#print(V_Set([151,78],8,4,[1,2,3,4,5,6,7,8])) #should be [14,11]\n#print(V_Set([14,3],2,4,[3,8])) #should be [27,17]\n\ndef eta (noise,X,S,v,K,w):\n power = (-noise/2) * (X-V(S,K,w,v))**2\n ans = ((noise/(2*math.pi))**(1/2)) * math.exp(power)\n return(ans)\n\n#print(\"Eta Trials:\")\n#print(eta(1,14,baseToInt(4,[1,3,3,7],8),[1,2,3,4,5,6,7,8],8,4))\n\ndef eta_Set (S_Set,noise,X,v,K,w):\n ans = np.zeros(len(S_Set))\n power = noise/2 * np.square(X - V_Set(S_Set,K,w,v))\n power_min = min(power)\n power = power-power_min\n ans = np.exp(-power) * (noise/(2*math.pi))**(1/2) \n\n list1 = [ans,power_min]\n return(list1);\n\n#print(\"Eta Set Trials:\")\n#print(eta_Set([32,46],1,14,[1,2,3,4,5,6,7,8],8,4))\n\ndef eta_Log (noise,X,S,v,K,w):\n power = np.square(X-V(S,K,w,v))*(-noise)/2\n ans = 0.5 * np.log(noise/(2*math.pi)) + power\n return (ans)\n\n#print(\"Eta Log Trials:\")\n#print(eta_Log(1,14,baseToInt(4,[1,3,3,7],8),[1,2,3,4,5,6,7,8],8,4))\n#print(eta_Log(1,3.8,1,[0,4],2,2))\n\ndef eta_SetLog (S_Set,noise,X,v,K,w):\n ans = np.zeros(len(S_Set))\n power = np.square(X-V_Set(S_Set,K,w,v))*noise/2\n ans = 0.5*math.log(noise/(2*math.pi)) - power\n return (ans);\n\n#print(\"Eta Set Log Trials:\")\n#print(eta_SetLog([151,78],1,[14,17],[1,2,3,4,5,6,7,8],8,4))\n#print(eta_SetLog([1,3],0.5,[2,4.2,3.91,4.40,2.04,-0.4],[3,7],2,2))\n#print(eta_SetLog([1,3],1,3,[3,7],2,2))\n\ndef AllowedTo(w,S,K):\n if w > 1:\n extra = S[1:w]\n allowedStates = [extra,]*K\n newcolumn = np.arange(1,(K+1),1).reshape(K,1)\n 
allowedStates = np.hstack((allowedStates,newcolumn))\n else:\n allowedStates = np.arange(1,(K+1)).reshape(K,1)\n return(allowedStates);\n\n#print(\"AllowedTo Trials: \")\n#print(AllowedTo(4,[1,3,3,7],8))\n#print(AllowedTo(1,[1],8))\n\ndef AllowedFrom(w,S,K):\n if w>1:\n extra=S[0:(w-1)]\n allowedStates = [extra,]*K\n newcolumn = np.arange(1,(K+1),1).reshape(K,1)\n allowedStates = np.hstack((newcolumn,allowedStates))\n else:\n allowedStates = np.arange(1,(K+1)).reshape(K,1)\n return(allowedStates);\n#print(\"AllowedFrom Trials: \")\n#print(AllowedFrom(4,[1,3,3,7],8))\n#print(AllowedFrom(1,[1],8))\n\ndef possibleSt(t,w,K):\n if t>=w:\n return(np.arange(1,((K**w)+1)));\n else:\n ans = []\n ans = np.array(ans)\n q = np.arange(1,(K+1))\n zPositive = [p for p in itertools.product(q,repeat=t)]\n zPositive = np.array(zPositive).reshape(K**t,t)\n for i in range(0,zPositive.shape[0]):\n zAll = np.concatenate([zPositive[i,:],np.ones(w-t)])\n ans = np.append(ans,baseToInt(w,zAll,K))\n ans = np.array(ans)\n return(ans);\n\n#print(\"PossibleSt Trials:\")\n#print(possibleSt(1,2,2))\n#print(possibleSt(2, 3, 5))\n#should be: [1,6,11,16,21,26,31,36,41,46,51,56,61,66,71,76,81,86,91,96,101,106,\n#111,116,121]\n#print(possibleSt(2,3,2))\n#should be: [1,3,5,7]\n \ndef matrix_power(A,n):\n return(LA.matrix_power(A,n));\n\n#print(\"Matrix power Trials:\")\n#print(matrix_power([[1,3],[2,4]],4)) #should be [[199,435],[290,634]]\n#print(matrix_power([[1,4,7],[2,5,8],[3,6,9]],3)) #should be [[468,1062,1656],\n#[576,1305,2034],[684,1548,2412]]\n\ndef logm (M):\n M = np.array(M)\n K = M.shape[0]\n M_log = np.log(M)\n return(M_log)\n\n#print(\"logm trials:\")\n#print(logm([[1,3],[2,4]])) #should be [[0,1.0986],[0.693147,1.38629]] (rounded)\n#print(logm([[14,17,20],[15,18,21],[16,19,22]]))\n#should be [[2,64,2.83,2.996],[2.708,2.89,3.04],[2,77,2.94,3.09]] (rounded)\n\ndef expm (M):\n M = np.array(M)\n K = M.shape[0]\n M_exp = np.exp(M)\n return(M_exp)\n\n#print(\"expm trials:\")\n#print (expm([[1,3],[2,4]])) #should be [[2.72,20.09],[7.39,54.6]] (rounded)\n#print(expm([[1,4,7],[2,5,8],[3,6,9]]))\n#should be [[2.72,54.6,1097],[7.39,148.4,2981],[20.09,403.4,8103]] (rounded)\n\ndef sign(a):\n if a>0:\n outputofsignfunction = 1\n if a==0:\n outputofsignfunction = 0\n if a<0:\n outputofsignfunction = -1\n return(outputofsignfunction);\n\n#print(sign(-34))\n\ndef expArg (arg1,arg2,s1,s2):\n randomarray = np.append(arg1,arg2)\n if s1==0:\n list2 = [s2,arg2]\n return(list2)\n if s2==0:\n list3 = [s1,arg1]\n return(list3)\n if s1==1 and s2==1:\n ans = np.amax(randomarray)\n ans = ans + np.log(1 + math.exp(np.sum(randomarray - ans)))\n list4 = [1,ans]\n return(list4)\n if s1== -1 and s2== -1:\n ans = np.amax(randomarray)\n ans = ans + np.log(1 + math.exp(np.sum(randomarray - ans)))\n list5 = [-1,ans]\n return(list5)\n if s1==1 and s2== -1:\n ans = np.amax(randomarray)\n ans = ans + np.log(1 - math.exp(np.amin(randomarray - ans)))\n list6 = [sign(arg1-arg2),ans]\n return(list6)\n if s1== -1 and s2 ==1:\n ans = np.amax(randomarray)\n ans = ans + np.log(1 - math.exp(np.amin(randomarray - ans)))\n list7 = [sign(arg2-arg1),ans]\n return(list7);\n\n#print(\"ExpArg Trials:\")\n#print(expArg(37,51.51,1,-1)) #should be [-1,51.51]\n#print(expArg(37,51.51,-1,1)) #should be [1,51.51]\n#print(expArg(37,51.51,1,1)) #should be [1,51.51]\n#print(expArg(37,51.51,-1,-1)) #should be [-1,51.51]\n#print(expArg(37,51.51,0,1)) #should be [1,51.51]\n#print(expArg(37,51.51,1,0)) #should be [1,37]\n\n#to calculate log of the inverse of the matrix given in 
log form\n\ndef matrixInverseLog (M_log):\n M_log = np.asarray(M_log)\n K = M_log.shape[0]\n\n M_log_sign = np.zeros((K,K))\n M_log_sign[:] = 1 \n N = np.zeros((K,K))\n N_log = np.zeros((K,K))\n N_log[:] = -inf\n N_log_sign = np.zeros((K,K))\n for i in range(0,K):\n N[i,i] = 1\n N_log[i,i] = 0\n N_log_sign[i,i] = 1\n\n MN_log = np.column_stack((M_log,N_log))\n MN_log_sign = np.column_stack((M_log_sign,N_log_sign))\n\n for i in range(0,K):\n for j in range((i+1),2*K):\n MN_log_sign[i,j] = MN_log_sign[i,j]*MN_log_sign[i,i]\n MN_log[i,j] = MN_log[i,j] - MN_log[i,i]\n MN_log[i,i] = 0\n MN_log_sign[i,i] = 1\n\n if (i+2) <= K:\n for k in range((i+1),K):\n for j in range ((i+1),2*K):\n sign1 = MN_log_sign[k,j]\n sign2 = -MN_log_sign[k,i] * MN_log_sign[i,j]\n arg1 = MN_log[k,j]\n arg2 = MN_log[k,i] + MN_log[i,j]\n MN_log_sign[k,j] = expArg(arg1,arg2,sign1,sign2)[0]\n MN_log[k,j] = expArg(arg1,arg2,sign1,sign2)[1]\n MN_log[k,i] = -inf\n MN_log_sign[k,i] = 0\n for k in range((K-1),0,-1):\n for i in range((k-1),-1,-1):\n for j in range((k+1),2*K):\n sign1 = MN_log_sign[i,j]\n sign2 = -MN_log_sign[i,k] * MN_log_sign[k,j]\n arg1 = MN_log[i,j]\n arg2 = MN_log[i,k] + MN_log[k,j]\n MN_log_sign[i,j] = expArg(arg1,arg2,sign1,sign2)[0]\n MN_log[i,j] = expArg(arg1,arg2,sign1,sign2)[1]\n MN_log[i,k] = -inf\n MN_log_sign[i,k] = 0\n InverseM_log = MN_log[:,K:(2*K)]\n InverseM_sign = MN_log_sign[:,K:(2*K)]\n list8 = [InverseM_log, InverseM_sign]\n return(list8);\n\n#print(\"Matrix inverse log trials:\")\n#print(matrixInverseLog([[0.4,0.4],[0.5,0.6]]))\n\ndef v_sequence(compoundX,w):\n Time = len(compoundX)\n v_est = np.zeros(Time)\n\n v_est[0] = compoundX[0]\n if w>1:\n for i in range(1,w):\n v_est[i] = compoundX[i] - np.sum(v_est[0:i])\n for i in range(w,Time):\n v_est[i] = compoundX[i] - np.sum(v_est[(i-w+1):i])\n return(v_est);\n\n#print(\"v sequence trials:\")\n#print(v_sequence([6,8,7,6,6,7],4)) #should be [6,2,-1,-1,6,3]\n\n\n\n","sub_path":"Pythoncode2.7/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":9645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"199977419","text":"\"\"\"\nThis file is part of the openPMD-viewer.\n\nIt defines a number of helper functions that are used in main.py\n\nCopyright 2015-2017, openPMD-viewer contributors\nAuthors: Remi Lehe, Richard Pausch\nLicense: 3-Clause-BSD-LBNL\n\"\"\"\n\nimport os\nimport numpy as np\nimport h5py\nfrom .data_reader.particle_reader import read_species_data\n\n\ndef list_h5_files(path_to_dir):\n \"\"\"\n Return a list of the hdf5 files in this directory,\n and a list of the corresponding iterations\n\n Parameter\n ---------\n path_to_dir : string\n The path to the directory where the hdf5 files are.\n\n Returns\n -------\n A tuple with:\n - a list of strings which correspond to the absolute path of each file\n - an array of integers which correspond to the iteration of each file\n \"\"\"\n # Find all the files in the provided directory\n all_files = os.listdir(path_to_dir)\n\n # Select the hdf5 files\n iters_and_names = []\n for filename in all_files:\n # Use only the name that end with .h5 or .hdf5\n if filename[-3:] == '.h5' or filename[-5:] == '.hdf5':\n full_name = os.path.join(\n os.path.abspath(path_to_dir), filename)\n # extract all iterations from hdf5 file\n f = h5py.File(full_name, 'r')\n iterations = list(f['/data'].keys())\n f.close()\n # for each found iteration create list of tuples\n # (which can be sorted together)\n for key_iteration in iterations:\n iters_and_names.append((int(key_iteration), full_name))\n\n # Sort the list of tuples according to the iteration\n iters_and_names.sort()\n # Extract the list of filenames and iterations\n filenames = [name for (it, name) in iters_and_names]\n iterations = np.array([it for (it, name) in iters_and_names])\n\n return(filenames, iterations)\n\n\ndef apply_selection(file_handle, data_list, select, species, extensions):\n \"\"\"\n Select the elements of each particle quantities in data_list,\n based on the selection rules in `select`\n\n Parameters\n ----------\n file_handle: h5py.File object\n The HDF5 file from which to extract data\n\n data_list: list of 1darrays\n A list of arrays with one element per macroparticle, that represent\n different particle quantities\n\n select: dict\n A dictionary of rules to select the particles\n 'x' : [-4., 10.] 
(Particles having x between -4 and 10 microns)\n 'ux' : [-0.1, 0.1] (Particles having ux between -0.1 and 0.1 mc)\n 'uz' : [5., None] (Particles with uz above 5 mc)\n\n species: string\n Name of the species being requested\n\n extensions: list of strings\n The extensions that the current OpenPMDTimeSeries complies with\n\n Returns\n -------\n A list of 1darrays that correspond to data_list, but were only the\n macroparticles that meet the selection rules are kept\n \"\"\"\n # Create the array that determines whether the particle\n # should be selected or not.\n Ntot = len(data_list[0])\n select_array = np.ones(Ntot, dtype='bool')\n\n # Loop through the selection rules, and aggregate results in select_array\n for quantity in select.keys():\n q = read_species_data(file_handle, species, quantity, extensions)\n # Check lower bound\n if select[quantity][0] is not None:\n select_array = np.logical_and(\n select_array,\n q > select[quantity][0])\n # Check upper bound\n if select[quantity][1] is not None:\n select_array = np.logical_and(\n select_array,\n q < select[quantity][1])\n\n # Use select_array to reduce each quantity\n for i in range(len(data_list)):\n if len(data_list[i]) > 1: # Do not apply selection on scalar records\n data_list[i] = data_list[i][select_array]\n\n return(data_list)\n\n\ndef fit_bins_to_grid( hist_size, grid_size, grid_range ):\n \"\"\"\n Given a tentative number of bins `hist_size` for a histogram over\n the range `grid_range`, return a modified number of bins `hist_size`\n and a modified range `hist_range` so that the spacing of the histogram\n bins is an integer multiple (or integer divisor) of the grid spacing.\n\n Parameters:\n ----------\n hist_size: integer\n The number of bins in the histogram along the considered direction\n\n grid_size: integer\n The number of cells in the grid\n\n grid_range: list of floats (in meters)\n The extent of the grid\n\n Returns:\n --------\n hist_size: integer\n The new number of bins\n\n hist_range: list of floats (in microns)\n The new range of the histogram\n \"\"\"\n # The new histogram range is the same as the grid range\n hist_range = grid_range\n\n # Calculate histogram tentative spacing, and grid spacing\n hist_spacing = ( hist_range[1] - hist_range[0] ) * 1. / hist_size\n grid_spacing = ( grid_range[1] - grid_range[0] ) * 1. 
/ grid_size\n\n # Modify the histogram spacing, so that either:\n if hist_spacing >= grid_spacing:\n # - The histogram spacing is an integer multiple of the grid spacing\n hist_spacing = int( hist_spacing / grid_spacing ) * grid_spacing\n else:\n # - The histogram spacing is an integer divisor of the grid spacing\n hist_spacing = grid_spacing / int( grid_spacing / hist_spacing )\n\n # Get the corresponding new number of bins, and the new range\n hist_size = int( ( hist_range[1] - hist_range[0] ) / hist_spacing )\n hist_range[1] = hist_range[0] + hist_size * hist_spacing\n\n # Convert the range to microns (since this is how particle positions\n # are returned in the openPMD-viewer)\n hist_range = [ 1.e6 * hist_range[0], 1.e6 * hist_range[1] ]\n\n return( hist_size, hist_range )\n\n\ndef combine_cylindrical_components( Fr, Ft, theta, coord, info ):\n \"\"\"\n Calculate the catesian field Fx or Fy,\n from the cylindrical components Fr and Ft.\n\n Parameters:\n -----------\n Fr, Ft: 3darrays or 2Darrays (depending on whether `theta` is None)\n Contains the value of the fields\n theta: float or None\n Indicates the angle of the plane in which Fr and Ft where taken\n coord: string\n Either 'x' or 'y' ; indicates which component to calculate\n info: FieldMetaInformation object\n Contains info on the coordinate system\n \"\"\"\n if theta is not None:\n # Fr and Fr are 2Darrays\n assert (Fr.ndim == 2) and (Ft.ndim == 2)\n\n if coord == 'x':\n F = np.cos(theta) * Fr - np.sin(theta) * Ft\n elif coord == 'y':\n F = np.sin(theta) * Fr + np.cos(theta) * Ft\n # Revert the sign below the axis\n F[: int(F.shape[0] / 2)] *= -1\n\n else:\n # Fr, Ft are 3Darrays, info corresponds to Cartesian data\n assert (Fr.ndim == 3) and (Ft.ndim == 3)\n\n # Calculate cos(theta) and sin(theta) in the transverse Cartesian plane\n # while avoiding divisions by 0\n r = np.sqrt( info.x[:,np.newaxis]**2 + info.y[np.newaxis,:]**2 )\n inv_r = 1./np.where( r!=0, r, 1. )\n # The value `1.`` is a placeholder in the above (to avoid division by 0)\n # The lines below replace this placeholder value.\n cos = np.where( r!=0, info.x[:,np.newaxis]*inv_r, 1. )\n sin = np.where( r!=0, info.y[np.newaxis,:]*inv_r, 0. )\n if coord == 'x':\n F = cos[:,:,np.newaxis] * Fr - sin[:,:,np.newaxis] * Ft\n elif coord == 'y':\n F = sin[:,:,np.newaxis] * Fr + cos[:,:,np.newaxis] * Ft\n\n return F\n\n\ndef construct_3d_from_circ( F3d, Fcirc, x_array, y_array, modes,\n nx, ny, nz, nr, nmodes, inv_dr, rmax ):\n \"\"\"\n Reconstruct the field from a quasi-cylindrical simulation (`Fcirc`), as\n a 3D cartesian array (`F3d`).\n \"\"\"\n for ix in range(nx):\n x = x_array[ix]\n for iy in range(ny):\n y = y_array[iy]\n r = np.sqrt( x**2 + y**2 )\n ir = nr - 1 - int( (rmax - r) * inv_dr + 0.5 )\n # Handle out-of-bounds\n if ir < 0:\n ir = 0\n if ir >= nr:\n ir = nr-1\n # Loop over all modes and recontruct data\n if r == 0:\n expItheta = 1. + 0.j\n else:\n expItheta = (x+1.j*y)/r\n for im in range(nmodes):\n mode = modes[im]\n if mode==0:\n F3d[ix, iy, :] += Fcirc[0, ir, :]\n else:\n cos = (expItheta**mode).real\n sin = (expItheta**mode).imag\n F3d[ix, iy, :] += Fcirc[2*mode-1, ir, :]*cos \\\n + Fcirc[2*mode, ir, :]*sin\n","sub_path":"opmd_viewer/openpmd_timeseries/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"402368321","text":"\"\"\"\nLED that gets toggled when a button is pressed.\n\"\"\"\n\nfrom RPi import GPIO\nimport time\n\ndef turn_on_light(light_pin):\n print('Turning light on')\n GPIO.output(light_pin, GPIO.HIGH)\n\ndef turn_off_light(light_pin):\n print('Turning light off')\n GPIO.output(light_pin, GPIO.LOW)\n\ndef toggle_light(light_pin):\n if GPIO.input(light_pin) == GPIO.HIGH:\n turn_off_light(light_pin)\n else:\n turn_on_light(light_pin)\n\nif __name__ == '__main__':\n GPIO.setmode(GPIO.BOARD)\n\n button_pin = 7\n light_pin = 11\n\n GPIO.setup(button_pin, GPIO.IN)\n GPIO.setup(light_pin, GPIO.OUT)\n\n try:\n # front-end debounce\n GPIO.add_event_detect(button_pin, GPIO.FALLING)\n detected_time = time.time()\n while True:\n if GPIO.event_detected(button_pin):\n if detected_time is None:\n print('detected_time is none')\n toggle_light(light_pin)\n detected_time = time.time()\n\n if detected_time and time.time() - detected_time > 0.5:\n detected_time = None\n\n time.sleep(0.01)\n\n except KeyboardInterrupt:\n pass\n finally:\n GPIO.cleanup()\n","sub_path":"button_light_3_manual_debounce_early.py","file_name":"button_light_3_manual_debounce_early.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"371755663","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CTCampGroup',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('score', models.FloatField(default=0)),\n ('defaultGroup', models.ForeignKey(to='auth.Group')),\n ],\n ),\n ]\n","sub_path":"main/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"593099254","text":"'''\n基于灰度图像的图像指纹的算法\n负责此算法中的计算图像指纹的操作\n'''\n\nfrom base import image_base\nfrom base import tool_base\n\n# 获取灰度值列表的平均值\ndef get_avgGray(list_gray):\n sum=0\n for g in list_gray:\n sum+=g\n return int(sum/len(list_gray))\n\n# 根据灰度值列表、灰度平均值来计算灰度图像指纹\ndef count_grayID(list_gray,avg_gray):\n grayID = \"\"\n for gray in list_gray:\n if gray >= avg_gray:\n grayID = grayID + \"1\"\n else:\n grayID = grayID + \"0\"\n return grayID\n\n# 提取分块1的2*8的灰度列表\ndef get_block1_grayList(img):\n list_gray = []\n for i in range(0, 2):\n for j in range(2, 10):\n pixel = image_base.get_pixel_byPercent(img, [i * 8.3, j * 8.3])\n list_gray.append(pixel)\n return list_gray\n\n# 提取分块2的8*2的灰度列表\ndef get_block2_grayList(img):\n list_gray = []\n for i in range(2, 10):\n for j in range(10, 12):\n pixel = image_base.get_pixel_byPercent(img, [i * 8.3, j * 8.3])\n list_gray.append(pixel)\n return list_gray\n\n# 提取分块3的2*8的灰度列表\ndef get_block3_grayList(img):\n list_gray = []\n for i in range(10, 12):\n for j in range(2, 10):\n pixel = image_base.get_pixel_byPercent(img, [i * 8.3, j * 8.3])\n list_gray.append(pixel)\n return list_gray\n\n# 提取分块4的8*2的灰度列表\ndef get_block4_grayList(img):\n list_gray = []\n for i in range(2, 10):\n for j in range(0, 2):\n pixel = image_base.get_pixel_byPercent(img, [i * 8.3, j * 8.3])\n list_gray.append(pixel)\n return list_gray\n\n# 提取分块5的8*8的灰度列表\ndef get_block5_grayList(img):\n list_gray = []\n for i in range(2, 10):\n for j in range(2, 10):\n pixel = image_base.get_pixel_byPercent(img, [i * 8.3, j * 8.3])\n list_gray.append(pixel)\n return list_gray\n\n# 获取灰度图像指纹\n# 输入img,是由OpenCV读取,并已经进行灰度转换的灰度图像\n# 输出list_ID,是一个列表,包含标识图像的2个图像指纹\ndef get_grayID(img):\n # 方法get_blockx_grayList(img)是获取从分块x中提取的像素的灰度值\n # 下面获取从分块①②③④⑤提取的像素得灰度值\n list_gray1=get_block1_grayList(img)\n list_gray2=get_block2_grayList(img)\n list_gray3=get_block3_grayList(img)\n list_gray4=get_block4_grayList(img)\n list_gray5=get_block5_grayList(img)\n # 方法get_avgGray(list_gray)是计算list_gray平均值\n # 下面计算分块①②③④⑤的灰度平均值\n avg_gary1=get_avgGray(list_gray1)\n avg_gary2=get_avgGray(list_gray2)\n avg_gary3=get_avgGray(list_gray3)\n avg_gary4=get_avgGray(list_gray4)\n avg_gary5=get_avgGray(list_gray5)\n # 方法count_grayID(list_gray,avg_gary)是对比list_gray和avg_gary得到图像指纹\n # 下面计算分块①②③④⑤的图像指纹\n ID1=count_grayID(list_gray1,avg_gary1)\n ID2=count_grayID(list_gray2,avg_gary2)\n ID3=count_grayID(list_gray3,avg_gary3)\n ID4=count_grayID(list_gray4,avg_gary4)\n ID5=count_grayID(list_gray5,avg_gary5)\n # 合并分块①②③④的图像指纹\n ID1=ID1+ID2+ID3+ID4\n # 得到标识图像的2个图像指纹,放入列表中\n list_ID=[ID1,ID5]\n return list_ID\n\n# 从两个图像对象中,计算加权汉明指纹,ID1占0.4,ID2占0.6\ndef calc_distance(image_object1,image_object2):\n hamL1=tool_base.calc_hammingDistance(image_object1.grayID_1,image_object2.grayID_1)\n hamL2=tool_base.calc_hammingDistance(image_object1.grayID_2,image_object2.grayID_2)\n hamL=hamL1*0.4+hamL2*0.6\n return hamL","sub_path":"action/grayID/grayID_action.py","file_name":"grayID_action.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"119769296","text":"import os\nimport shutil\nimport logging\nfrom libs.read import load_data\nfrom libs.report import make_profile\nfrom libs.utils import safe_dirs\nimport templates\n\nlogger = logging.getLogger('report')\n\n\ndef report(args):\n \"\"\"\n Create report in html format\n \"\"\"\n logger.info(\"reading sequeces\")\n data = load_data(args.json)\n out_dir = os.path.join(args.out, \"html\")\n safe_dirs(out_dir)\n\n logger.info(\"create profile\")\n make_profile(data, out_dir, args)\n\n path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))\n css_template = os.path.join(path_template, \"info.css\")\n js_template = os.path.join(path_template, \"jquery.tablesorter.min.js\")\n css = os.path.join(out_dir, \"info.css\")\n js = os.path.join(out_dir, \"jquery.tablesorter.min.js\")\n if not os.path.exists(css):\n shutil.copy(css_template, css)\n shutil.copy(js_template, js)\n logger.info(\"Done\")\n","sub_path":"seqcluster/create_report.py","file_name":"create_report.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"307407018","text":"from scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.conf import settings\n\nfrom dirbot.items import Website\n\nclass LocalhostSpider(CrawlSpider):\n name = \"localhost\"\n allowed_domains = [\"localhost\"]\n start_urls = [\n \"http://localhost/drupal6/\",\n ]\n\n rules = (\n Rule(SgmlLinkExtractor(allow=('drupal', ), unique=True, ), callback='parse_item'),\n )\n\n def parse_item(self, response):\n self.log('A response from %s just arrived!' % response.url)\n hxs = HtmlXPathSelector(response)\n attributeSelectors = ['//@id', '//@class']\n items = []\n for selector in attributeSelectors:\n sites = hxs.select(selector)\n for site in sites:\n item = Website()\n item['name'] = settings['CURRENT_MODULE']\n item['description'] = 'mandatory field'\n item['selector'] = selector\n item['value'] = site.extract()\n item['url'] = response.url\n items.append(item)\n return items\n","sub_path":"ml-first-project/scrapy-dirbot/dirbot/spiders/localhost.py","file_name":"localhost.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"202237097","text":"#import config\nimport sys\nfrom . import log\nimport MySQLdb\nimport MySQLdb.cursors\n\n\ndef connect(host,user,password,db):\n try:\n con = MySQLdb.connect(host,user,password,db,cursorclass=MySQLdb.cursors.DictCursor)\n log.logger.debug('Succesfully connected to MySQL database')\n return con\n except Exception as e:\n log.logger.critical('Error with connecting to MySQL database')\n log.logger.debug(e)\n raise Exception\n\ndef update(db,q):\n\n try:\n cur = db.cursor()\n log.logger.debug('Succesfully set cursor to: ' + str(db))\n except Exception as e:\n log.logger.critical('Could not setup cursor to ' + str(db))\n log.logger.debug(e)\n raise Exception\n\n try:\n cur.execute(q)\n log.logger.debug('Succesfully send query to: ' + str(db))\n except Exception as e:\n log.logger.error('Error setting query: ' + q)\n log.logger.debug(e)\n raise Exception\n\n try:\n db.commit()\n log.logger.debug('Succcesfully commited the query to: ' + str(db))\n except Exception as e:\n log.logger.error('Error commiting query to ' + str(db))\n log.logger.debug(e)\n raise Exception\n\n\ndef query(db,q):\n try:\n cur = db.cursor()\n log.logger.debug('Succesfully set cursor to: ' + str(db))\n except Exception as e:\n log.logger.critical('Could not setup cursor to ' + str(db))\n log.logger.debug(e)\n raise Exception\n\n try:\n cur.execute(q)\n log.logger.debug('Succesfully send and returnd query to: ' + str(db))\n return cur.fetchall()\n except Exception as e:\n log.logger.critical('Error setting query: ' + q)\n log.logger.debug(e)\n raise Exception\n","sub_path":"lib/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"317882888","text":"import sys\nclass MaxPriorityQ(object):\n def __init__(self, cap):\n self.size = 0\n self.q = [sys.maxsize]\n self.cap = cap\n\n def findMax(self):\n return self.q[1]\n\n def insert(self, element):\n self.q.append(element)\n self.size += 1\n if self.size is 1:\n print(\"inserted element\", element)\n return\n idx = self.size\n parent = int(idx/2)\n while self.q[idx] > self.q[parent]:\n self.swap(idx, parent)\n idx = parent\n parent = int(idx/2)\n if idx == 1:\n break\n\n print(\"inserted element\", element)\n\n if self.size > self.cap:\n print(\"capacity is reached! Deleting max \", self.findMax())\n self.deleteMax()\n\n def deleteMax(self):\n self.swap(1, self.size)\n element = self.q[self.size]\n print(\"deleting max element \", element)\n self.size -= 1\n if self.size < 2:\n self.print()\n return\n # bubble up largest element to the top\n # the element who is at q[1] may not be the largest any more\n parent = 1\n left = self.getLeftidx(parent)\n right = self.getRightidx(parent)\n leftsmaller = self.q[parent] > self.q[left]\n if self.size == 2:\n if leftsmaller:\n pass\n else:\n self.swap(1, 2)\n self.print()\n return\n\n rightsmaller = self.q[parent] > self.q[right]\n while leftsmaller is False or rightsmaller is False:\n leftbiggest = True\n if (leftsmaller or rightsmaller) is False:\n # both are bigger than parent\n leftbiggest = self.q[left] > self.q[right]\n\n if leftbiggest and leftsmaller is False:\n self.swap(parent, left)\n parent = left\n left = self.getLeftidx(parent)\n if left > self.size:\n break\n leftsmaller = self.q[parent] > self.q[left]\n else:\n self.swap(parent, right)\n parent = right\n right = self.getRightidx(parent)\n if right > self.size:\n break\n rightsmaller = self.q[parent] > self.q[right]\n\n self.print()\n\n def getLeftidx(self, parent):\n return 2 * parent\n\n def getRightidx(self, parent):\n return 2 * parent + 1\n\n def size(self):\n return self.size\n\n def swap(self, i, j):\n temp = self.q[i]\n self.q[i] = self.q[j]\n self.q[j] = temp\n\n def print(self):\n print(\"Heap is\")\n for i in range(1, self.size + 1):\n print(self.q[i])\n\ns = MaxPriorityQ(5)\ns.insert(4)\ns.insert(4)\ns.insert(8)\ns.print()\ns.insert(9)\ns.print()\ns.insert(19)\ns.print()\ns.insert(119)\ns.deleteMax()\ns.deleteMax()\ns.deleteMax()\ns.deleteMax()","sub_path":"Test/NotCSharp/Python/MEDIUM-MAXPriorityQ.py","file_name":"MEDIUM-MAXPriorityQ.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"318280021","text":"import os\nimport requests\nimport unittest\nimport pytest\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"nostradamus.settings\"\n\nimport django\n\ndjango.setup()\n\nfrom apps.authentication.models import User\n\n\n@pytest.mark.usefixtures(\n \"sql_conn\", \"test_user_1\", \"host\", \"signin_url\", \"register_url\"\n)\nclass TestRegister(unittest.TestCase):\n def teardown_method(self, method):\n User.objects.filter(name=self.test_user_1[\"name\"]).delete()\n\n def test_auth_by_username(self):\n requests.post(\n self.host + self.register_url, data=self.test_user_1\n ).json()\n\n test_user = {\n \"credentials\": self.test_user_1[\"name\"],\n \"password\": self.test_user_1[\"password\"],\n }\n\n request = requests.post(self.host + self.signin_url, data=test_user)\n data = request.json()\n\n assert data is not None and \"exception\" not in data\n\n def test_auth_by_email(self):\n requests.post(\n self.host + self.register_url, data=self.test_user_1\n ).json()\n\n test_user = {\n \"credentials\": self.test_user_1[\"email\"],\n \"password\": self.test_user_1[\"password\"],\n }\n\n request = requests.post(self.host + self.signin_url, data=test_user)\n data = request.json()\n\n assert data is not None and \"exception\" not in data\n\n def test_auth_error(self):\n requests.post(\n self.host + self.register_url, data=self.test_user_1\n ).json()\n\n test_user = {\n \"credentials\": self.test_user_1[\"email\"],\n \"password\": \"1234Pass\",\n }\n\n request = requests.post(self.host + self.signin_url, data=test_user)\n data = request.json()\n\n assert data is not None and \"exception\" in data\n\n def test_auth_data(self):\n requests.post(\n self.host + self.register_url, data=self.test_user_1\n ).json()\n\n test_user = {\n \"credentials\": self.test_user_1[\"name\"],\n \"password\": self.test_user_1[\"password\"],\n }\n\n self.conn.execute(\n f\"SELECT name FROM authentication_team WHERE id=%s\",\n (self.test_user_1[\"team\"],),\n )\n team = self.conn.fetchone()[0]\n\n self.conn.execute(\n f\"SELECT id FROM authentication_user WHERE email=%s\",\n (self.test_user_1[\"email\"],),\n )\n user_id = self.conn.fetchone()[0]\n\n self.conn.execute(\n f\"\"\"\n SELECT\n name\n FROM\n authentication_role AS r\n INNER JOIN\n authentication_teammember AS tm\n ON r.id = tm.role_id\n WHERE\n tm.user_id = '{user_id}'\n \"\"\",\n )\n role = self.conn.fetchone()[0]\n\n request = requests.post(self.host + self.signin_url, data=test_user)\n data = request.json()\n\n assert all(\n [\n data[\"id\"] == user_id,\n data[\"name\"] == self.test_user_1[\"name\"],\n data[\"email\"] == self.test_user_1[\"email\"],\n data[\"team\"] == team,\n data[\"role\"] == role,\n ]\n )\n\n def test_auth_token(self):\n requests.post(\n self.host + self.register_url, data=self.test_user_1\n ).json()\n\n filter_route = \"analysis_and_training/\"\n test_user = {\n \"credentials\": self.test_user_1[\"name\"],\n \"password\": self.test_user_1[\"password\"],\n }\n\n request = requests.post(self.host + self.signin_url, data=test_user)\n token = \"JWT \" + request.json()[\"token\"]\n headers = {\"Authorization\": token}\n\n request = requests.get(self.host + filter_route, headers=headers)\n\n assert request.status_code == 200\n","sub_path":"nostradamus/tests/auth_tests/test_signin.py","file_name":"test_signin.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"516694789","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport cv2\nimport open3d\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nimport utility\nfrom skimage.feature import peak_local_max\nfrom skimage.measure import shannon_entropy\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser()\nparser.add_argument('data')\nparser.add_argument(\"--entropy_model\")\nparser.add_argument(\"--classifier_model\")\nargs = parser.parse_args()\nTMP_DIR = os.path.join(sys.path[0], \"tmp\")\n\n\nclass ViewData:\n obj_label = ''\n obj_index = 1\n view_index = 0\n phi = 0\n theta = 0\n voxel_size = float(1 / 50)\n n_voxel = 50\n\n\nidx2rot = {}\ncount = 0\nfor _phi in range(30, 151, 30):\n for _theta in range(0, 331, 30):\n idx2rot[count] = (_theta, _phi)\n count += 1\n\n\ndef normalize3d(vector):\n np_arr = np.asarray(vector)\n max_val = np.max(np_arr)\n np_normalized = np_arr / max_val\n return open3d.utility.Vector3dVector(np_normalized)\n\n\ndef custom_parser(string):\n number = int(string.split(\"_\")[0])\n return number\n\n\ndef nonblocking_custom_capture(mesh, rot_xyz, last_rot):\n ViewData.phi = -round(np.rad2deg(rot_xyz[0]))\n ViewData.theta = round(np.rad2deg(rot_xyz[2]))\n vis = open3d.visualization.Visualizer()\n vis.create_window(width=224, height=224, visible=False)\n # Rotate back from last rotation\n R_0 = mesh.get_rotation_matrix_from_xyz(last_rot)\n mesh.rotate(np.linalg.inv(R_0), center=mesh.get_center())\n # Then rotate to the next rotation\n R = mesh.get_rotation_matrix_from_xyz(rot_xyz)\n mesh.rotate(R, center=mesh.get_center())\n vis.add_geometry(mesh)\n vis.poll_events()\n path = f\"{TMP_DIR}/view_theta_{int(ViewData.theta)}_phi_{int(ViewData.phi)}.png\"\n vis.capture_screen_image(path)\n vis.destroy_window()\n\n\ndef classify(off_file, entropy_model, classifier):\n os.mkdir(TMP_DIR)\n FILENAME = os.path.join(sys.path[0], off_file)\n mesh = open3d.io.read_triangle_mesh(FILENAME)\n mesh.vertices = normalize3d(mesh.vertices)\n mesh.scale(1 / np.max(mesh.get_max_bound() - mesh.get_min_bound()), center=mesh.get_center())\n center = (mesh.get_max_bound() + mesh.get_min_bound()) / 2\n mesh = mesh.translate((-center[0], -center[1], -center[2]))\n voxel_grid = open3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds(input=mesh,\n voxel_size=1 / 50,\n min_bound=np.array(\n [-0.5, -0.5, -0.5]),\n max_bound=np.array([0.5, 0.5, 0.5]))\n voxels = voxel_grid.get_voxels()\n grid_size = 50\n mask = np.zeros((grid_size, grid_size, grid_size))\n for vox in voxels:\n mask[vox.grid_index[0], vox.grid_index[1], vox.grid_index[2]] = 1\n mask = np.pad(mask, 3, 'constant')\n mask = np.resize(mask, (1, mask.shape[0], mask.shape[1], mask.shape[2], 1))\n pred_entropies = entropy_model.predict(mask)\n pred_entropies = np.resize(pred_entropies, (5, 12))\n coords = peak_local_max(pred_entropies, min_distance=1, exclude_border=False)\n peak_views = []\n for (y, x) in coords:\n peak_views.append((y * 12) + x)\n peak_views = sorted(peak_views)\n fig, ax = plt.subplots(1)\n image = ax.imshow(pred_entropies, cmap='rainbow')\n fig.colorbar(image, orientation='horizontal')\n for i in range(len(coords)):\n circle = plt.Circle((coords[i][1], coords[i][0]), radius=0.2, color='black')\n ax.add_patch(circle)\n\n plt.xticks([i for i in range(12)], [i * 30 for i in range(12)])\n plt.yticks([i for i in range(5)], [(i + 1) * 30 for i in range(5)])\n plt.show()\n\n # print(f\"[DEBUG] peak_views : {np.shape(peak_views)}\")\n print(f\"[DEBUG] 
peak_views : {peak_views}\")\n # print(f\"[DEBUG] _views-argwhere : {_views}\")\n\n mesh = open3d.io.read_triangle_mesh(FILENAME)\n mesh.vertices = normalize3d(mesh.vertices)\n mesh.compute_vertex_normals()\n rotations = []\n for j in range(5):\n for i in range(12):\n if ((j * 12) + i) in peak_views:\n rotations.append((-(j + 1) * np.pi / 6, 0, i * 2 * np.pi / 12))\n last_rotation = (0, 0, 0)\n for rot in rotations:\n nonblocking_custom_capture(mesh, rot, last_rotation)\n last_rotation = rot\n views = []\n views_images = []\n views_images_dir = os.listdir(TMP_DIR)\n i = 0\n for file in views_images_dir:\n if '.png' in file:\n i = i + 1\n plt.subplot(int(np.ceil(len(rotations) / 3)), 3, i)\n im = plt.imread(os.path.join(TMP_DIR, file))\n views_images.append(im)\n phi = int(file.split(\".\")[0].split(\"_\")[-1])\n theta = int(file.split(\".\")[0].split(\"_\")[-3])\n plt.imshow(im, cmap='gray')\n plt.title(label=f'({theta:.2f}, {phi:.2f})')\n plt.xticks([])\n plt.yticks([])\n\n views.append((theta, phi))\n\n views_images = np.array(views_images)\n plt.show()\n\n results = classifier.predict(views_images)\n labels = results[0]\n pred_views = results[1]\n for im in os.listdir(TMP_DIR):\n os.remove(os.path.join(TMP_DIR, im))\n os.rmdir(TMP_DIR)\n return labels, pred_views, views\n\n\ndef most_common(lst):\n return max(set(lst), key=lst.count)\n\n\ndef mode_rows(a):\n a = np.ascontiguousarray(a)\n void_dt = np.dtype((np.void, a.dtype.itemsize * np.prod(a.shape[1:])))\n _, ids, _count = np.unique(a.view(void_dt).ravel(), return_index=True, return_counts=True)\n largest_count_id = ids[_count.argmax()]\n most_frequent_row = a[largest_count_id]\n return most_frequent_row\n\n\ndef main():\n print(f\"[INFO] Loading models...\")\n entropy_model = keras.models.load_model(args.entropy_model)\n classifier = keras.models.load_model(args.classifier_model)\n print(f\"[INFO] Models loaded.\")\n x = args.data\n labels, pred_views, views = classify(x, entropy_model, classifier)\n vec2lab = utility.get_label_dict(inverse=True)\n for i in range(len(labels)):\n cl = vec2lab[np.argmax(labels[i])]\n pv = idx2rot[int(np.argmax(pred_views[i]))]\n tv = views[i]\n print(\n f\"[INFO] Predicted: {cl:<11} - {str(pv):<10} from {str(tv):<10} --> Offset: ({(np.array(pv) - np.array(tv))[0]}, \"\n f\"{(np.array(pv) - np.array(tv))[1]})\")\n print(f\"[INFO] Majority vote:\")\n labint = []\n for el in labels:\n labint.append(np.argmax(el))\n print(f\" class: {vec2lab[most_common(labint)]}\")\n angles = []\n pred_angles = []\n for i in range(len(labels)):\n angles.append(views[i])\n pred_angles.append(idx2rot[int(np.argmax(pred_views[i]))])\n angles = np.array(angles)\n pred_angles = np.array(pred_angles)\n offset = mode_rows(pred_angles - angles)\n print(f\" offset: theta={offset[0]} phi={offset[1]}\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"34928323","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom custom.enikshay.management.commands.utils import BaseEnikshayCaseMigration\n\nTEST_TYPE_VALUE = 'test_type_value'\nDATAMIGRATION_CASE_PROPERTY = 'datamigration_pmdt_test_type_value'\n\n\nclass Command(BaseEnikshayCaseMigration):\n case_type = 'test'\n case_properties_to_update = [\n TEST_TYPE_VALUE,\n ]\n datamigration_case_property = DATAMIGRATION_CASE_PROPERTY\n include_public_cases = True\n include_private_cases = False\n\n @staticmethod\n def get_case_property_updates(test, domain):\n if (\n test.get_case_property(DATAMIGRATION_CASE_PROPERTY) != 'yes'\n and test.get_case_property('migration_type') == 'pmdt_excel'\n and test.get_case_property('test_type')\n ):\n return {\n TEST_TYPE_VALUE: test.get_case_property('test_type')\n }\n else:\n return {}\n","sub_path":"custom/enikshay/management/commands/run_pmdt_test_type_value_fix.py","file_name":"run_pmdt_test_type_value_fix.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"493155734","text":"from twilio.rest import Client\n\n\ndef notify(code):\n # Dictionary of name/phone number pairs (redacted for obvious reasons)\n phone_numbers = {\"NAME\" : \"NUMBER\"}\n\n # Account SID, authentication token, and Twilio phone number (also redacted)\n account_sid = \"\"\n auth_token = \"\"\n twilio_number = \"\"\n\n client = Client(account_sid, auth_token)\n for name in phone_numbers:\n message = client.messages \\\n .create(\n body=code,\n from_=twilio_number,\n to=phone_numbers[name]\n )\n print(\"Notified \" + name + \"!\")\n\nnotify(\"Your Overwatch queue has popped!\")","sub_path":"src/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"186526489","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('landingPage.html')\n\n@app.route('/data/')\ndef return_fixture(name):\n response_data = open(\"data/%s\" % name).read()\n return response_data\n\n\n\nif __name__ == '__main__':\n # app.run()\n app.run(debug=True) #only use in development!","sub_path":"app/testProj.py","file_name":"testProj.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"269265612","text":"import codecs\nimport os\nimport re\nimport sys\nfrom random import randint, shuffle\n\nfamlist1 = [\n [\"FARFETCHD\"],\n [\"ONIX\"],\n [\"HITMONLEE\"],\n [\"HITMONCHAN\"],\n [\"LICKITUNG\"],\n [\"CHANSEY\"],\n [\"TANGELA\"],\n [\"KANGASKHAN\"],\n [\"SCYTHER\"],\n [\"ELECTABUZZ\"],\n [\"MAGMAR\"],\n [\"JYNX\"],\n [\"MR_MIME\"],\n [\"PINSIR\"],\n [\"TAUROS\"],\n [\"EEVEE\"],\n [\"DITTO\"],\n [\"LAPRAS\"],\n [\"PORYGON\"],\n [\"SNORLAX\"],\n [\"AERODACTYL\"]\n]\nfamlist2 = [\n [\"RATTATA\", \"RATICATE\"],\n [\"SPEAROW\", \"FEAROW\"],\n [\"EKANS\", \"ARBOK\"],\n [\"PIKACHU\", \"RAICHU\"],\n [\"SANDSHREW\", \"SANDSLASH\"],\n [\"CLEFAIRY\", \"CLEFABLE\"],\n [\"VULPIX\", \"NINETALES\"],\n [\"JIGGLYPUFF\", \"WIGGLYTUFF\"],\n [\"ZUBAT\", \"GOLBAT\"],\n [\"PARAS\", \"PARASECT\"],\n [\"VENONAT\", \"VENOMOTH\"],\n [\"DIGLETT\", \"DUGTRIO\"],\n [\"MEOWTH\", \"PERSIAN\"],\n [\"PSYDUCK\", \"GOLDUCK\"],\n [\"MANKEY\", \"PRIMEAPE\"],\n [\"GROWLITHE\", \"ARCANINE\"],\n [\"PONYTA\", \"RAPIDASH\"],\n [\"SLOWPOKE\", \"SLOWBRO\"],\n [\"MAGNEMITE\", \"MAGNETON\"],\n [\"TENTACOOL\", \"TENTACRUEL\"], \n [\"DODUO\", \"DODRIO\"],\n [\"SEEL\", \"DEWGONG\"],\n [\"GRIMER\", \"MUK\"],\n [\"SHELLDER\", \"CLOYSTER\"],\n [\"DROWZEE\", \"HYPNO\"],\n [\"KRABBY\", \"KINGLER\"],\n [\"VOLTORB\", \"ELECTRODE\"],\n [\"EXEGGCUTE\", \"EXEGGUTOR\"],\n [\"CUBONE\", \"MAROWAK\"],\n [\"KOFFING\", \"WEEZING\"],\n [\"RHYHORN\", \"RHYDON\"],\n [\"HORSEA\", \"SEADRA\"],\n [\"GOLDEEN\", \"SEAKING\"],\n [\"STARYU\", \"STARMIE\"],\n [\"MAGIKARP\", \"GYARADOS\"],\n [\"OMANYTE\", \"OMASTAR\"],\n [\"KABUTO\", \"KABUTOPS\"]\n]\nfamlist3 = [\n [\"BULBASAUR\", \"IVYSAUR\", \"VENUSAUR\"],\n [\"CHARMANDER\", \"CHARMELEON\", \"CHARIZARD\"],\n [\"SQUIRTLE\", \"WARTORTLE\", \"BLASTOISE\"],\n [\"CATERPIE\", \"METAPOD\", \"BUTTERFREE\"],\n [\"WEEDLE\", \"KAKUNA\", \"BEEDRILL\"],\n [\"PIDGEY\", \"PIDGEOTTO\", \"PIDGEOT\"],\n [\"NIDORAN_F\", \"NIDORINA\", \"NIDOQUEEN\"],\n [\"NIDORAN_M\", \"NIDORINO\", \"NIDOKING\"],\n [\"ODDISH\", \"GLOOM\", \"VILEPLUME\"],\n [\"POLIWAG\", \"POLIWHIRL\", \"POLIWRATH\"],\n [\"ABRA\", \"KADABRA\", \"ALAKAZAM\"],\n [\"MACHOP\", \"MACHOKE\", \"MACHAMP\"],\n [\"BELLSPROUT\", \"WEEPINBELL\", \"VICTREEBEL\"],\n [\"GEODUDE\", \"GRAVELER\", \"GOLEM\"],\n [\"GASTLY\", \"HAUNTER\", \"GENGAR\"],\n [\"DRATINI\", \"DRAGONAIR\", \"DRAGONITE\"]\n]\n\nbase_dex = {\n \"BULBASAUR\" : [\"BULBASAUR\", \"IVYSAUR\", \"VENUSAUR\"],\n \"CHARMANDER\" : [\"CHARMANDER\", \"CHARMELEON\", \"CHARIZARD\"],\n \"SQUIRTLE\" : [\"SQUIRTLE\", \"WARTORTLE\", \"BLASTOISE\"],\n \"CATERPIE\" : [\"CATERPIE\", \"METAPOD\", \"BUTTERFREE\"],\n \"WEEDLE\" : [\"WEEDLE\", \"KAKUNA\", \"BEEDRILL\"],\n \"PIDGEY\" : [\"PIDGEY\", \"PIDGEOTTO\", \"PIDGEOT\"],\n \"RATTATA\" : [\"RATTATA\", \"RATICATE\"],\n \"SPEAROW\" : [\"SPEAROW\", \"FEAROW\"],\n \"EKANS\" : [\"EKANS\", \"ARBOK\"],\n \"PIKACHU\" : [\"PIKACHU\", \"RAICHU\"],\n \"SANDSHREW\" : [\"SANDSHREW\", \"SANDSLASH\"],\n \"NIDORAN_F\" : [\"NIDORAN_F\", \"NIDORINA\", \"NIDOQUEEN\"],\n \"NIDORAN_M\" : [\"NIDORAN_M\", \"NIDORINO\", \"NIDOKING\"],\n \"CLEFAIRY\" : [\"CLEFAIRY\", \"CLEFABLE\"],\n \"VULPIX\" : [\"VULPIX\", \"NINETALES\"],\n \"JIGGLYPUFF\" : [\"JIGGLYPUFF\", \"WIGGLYTUFF\"],\n \"ZUBAT\" : [\"ZUBAT\", \"GOLBAT\"],\n \"ODDISH\" : [\"ODDISH\", \"GLOOM\", \"VILEPLUME\"],\n \"PARAS\" : [\"PARAS\", \"PARASECT\"],\n \"VENONAT\" : [\"VENONAT\", \"VENOMOTH\"],\n \"DIGLETT\" : [\"DIGLETT\", \"DUGTRIO\"],\n \"MEOWTH\" : [\"MEOWTH\", \"PERSIAN\"],\n \"PSYDUCK\" : [\"PSYDUCK\", 
\"GOLDUCK\"],\n \"MANKEY\" : [\"MANKEY\", \"PRIMEAPE\"],\n \"GROWLITHE\" : [\"GROWLITHE\", \"ARCANINE\"],\n \"POLIWAG\" : [\"POLIWAG\", \"POLIWHIRL\", \"POLIWRATH\"],\n \"ABRA\" : [\"ABRA\", \"KADABRA\", \"ALAKAZAM\"],\n \"MACHOP\" : [\"MACHOP\", \"MACHOKE\", \"MACHAMP\"],\n \"BELLSPROUT\" : [\"BELLSPROUT\", \"WEEPINBELL\", \"VICTREEBEL\"],\n \"TENTACOOL\" : [\"TENTACOOL\", \"TENTACRUEL\"], \n \"GEODUDE\" : [\"GEODUDE\", \"GRAVELER\", \"GOLEM\"],\n \"PONYTA\" : [\"PONYTA\", \"RAPIDASH\"],\n \"SLOWPOKE\" : [\"SLOWPOKE\", \"SLOWBRO\"],\n \"MAGNEMITE\" : [\"MAGNEMITE\", \"MAGNETON\"],\n \"FARFETCHD\" : [\"FARFETCHD\"],\n \"DODUO\" : [\"DODUO\", \"DODRIO\"],\n \"SEEL\" : [\"SEEL\", \"DEWGONG\"],\n \"GRIMER\" : [\"GRIMER\", \"MUK\"],\n \"SHELLDER\" : [\"SHELLDER\", \"CLOYSTER\"],\n \"GASTLY\" : [\"GASTLY\", \"HAUNTER\", \"GENGAR\"],\n \"ONIX\" : [\"ONIX\"],\n \"DROWZEE\" : [\"DROWZEE\", \"HYPNO\"],\n \"KRABBY\" : [\"KRABBY\", \"KINGLER\"],\n \"VOLTORB\" : [\"VOLTORB\", \"ELECTRODE\"], \n \"EXEGGCUTE\" : [\"EXEGGCUTE\", \"EXEGGUTOR\"],\n \"CUBONE\" : [\"CUBONE\", \"MAROWAK\"],\n \"HITMONLEE\" : [\"HITMONLEE\"],\n \"HITMONCHAN\" : [\"HITMONCHAN\"],\n \"LICKITUNG\" : [\"LICKITUNG\"],\n \"KOFFING\" : [\"KOFFING\", \"WEEZING\"],\n \"RHYHORN\" : [\"RHYHORN\", \"RHYDON\"],\n \"CHANSEY\" : [\"CHANSEY\"],\n \"TANGELA\" : [\"TANGELA\"],\n \"KANGASKHAN\" : [\"KANGASKHAN\"],\n \"HORSEA\" : [\"HORSEA\", \"SEADRA\"],\n \"GOLDEEN\" : [\"GOLDEEN\", \"SEAKING\"],\n \"STARYU\" : [\"STARYU\", \"STARMIE\"],\n \"SCYTHER\" : [\"SCYTHER\"],\n \"ELECTABUZZ\" : [\"ELECTABUZZ\"],\n \"MAGMAR\" : [\"MAGMAR\"],\n \"JYNX\" : [\"JYNX\"],\n \"MR_MIME\" : [\"MR_MIME\"],\n \"PINSIR\" : [\"PINSIR\"],\n \"TAUROS\" : [\"TAUROS\"],\n \"MAGIKARP\" : [\"MAGIKARP\", \"GYARADOS\"],\n \"EEVEE\" : [\"EEVEE\"],\n \"DITTO\" : [\"DITTO\"],\n \"LAPRAS\" : [\"LAPRAS\"],\n \"PORYGON\" : [\"PORYGON\"],\n \"SNORLAX\" : [\"SNORLAX\"],\n \"OMANYTE\" : [\"OMANYTE\", \"OMASTAR\"],\n \"KABUTO\" : [\"KABUTO\", \"KABUTOPS\"],\n \"AERODACTYL\" : [\"AERODACTYL\"],\n \"DRATINI\" : [\"DRATINI\", \"DRAGONAIR\", \"DRAGONITE\"]\n}\n\nnew_moves_dex = {\n \"EXEGGCUTE\" : ''' db 20,PSYWAVE\n db 25,REFLECT\n db 28,LEECH_SEED\n db 32,STUN_SPORE\n db 37,POISONPOWDER\n db 42,SOLARBEAM\n db 48,SLEEP_POWDER\n db 0''',\n \"GRIMER\" : ''' db 15,SLUDGE\n db 30,POISON_GAS\n db 33,MINIMIZE\n db 37,TOXIC\n db 42,HARDEN\n db 48,SCREECH\n db 55,ACID_ARMOR\n db 0''',\n \"RHYHORN\" :''' db 15,ROCK_SLIDE\n db 30,STOMP\n db 35,TAIL_WHIP\n db 40,FURY_ATTACK\n db 45,HORN_DRILL\n db 50,LEER\n db 55,TAKE_DOWN\n db 0''',\n \"SHELLDER\" :''' db 15,WATER_GUN\n db 18,SUPERSONIC\n db 23,CLAMP\n db 30,AURORA_BEAM\n db 39,LEER\n db 50,ICE_BEAM\n db 0''',\n \"TANGELA\" :''' db 15,VINE_WHIP\n db 29,ABSORB\n db 32,POISONPOWDER\n db 36,STUN_SPORE\n db 39,SLEEP_POWDER\n db 45,SLAM\n db 49,GROWTH\n db 0''',\n \"PSYDUCK\" :''' db 15,WATER_GUN\n db 28,TAIL_WHIP\n db 31,DISABLE\n db 36,CONFUSION\n db 43,FURY_SWIPES\n db 52,HYDRO_PUMP\n db 0''',\n \"MAGMAR\" :''' db 15,RAGE\n db 36,LEER\n db 39,CONFUSE_RAY\n db 43,FIRE_PUNCH\n db 48,SMOKESCREEN\n db 52,SMOG\n db 55,FLAMETHROWER\n db 0''',\n \"ELECTABUZZ\" :''' db 15,THUNDERSHOCK\n db 34,SWIFT\n db 37,SCREECH\n db 42,THUNDERPUNCH\n db 49,LIGHT_SCREEN\n db 54,THUNDER\n db 0''',\n \"KOFFING\" :''' db 24,SLUDGE\n db 37,SMOKESCREEN\n db 40,SELFDESTRUCT\n db 45,HAZE\n db 48,EXPLOSION\n db 0''',\n \"SEEL\" :''' db 15,WATER_GUN\n db 30,GROWL\n db 35,AURORA_BEAM\n db 40,REST\n db 45,TAKE_DOWN\n db 50,ICE_BEAM\n db 0''',\n \"VENONAT\" :''' db 15,CONFUSION\n db 
24,POISONPOWDER\n db 27,LEECH_LIFE\n db 30,STUN_SPORE\n db 35,PSYBEAM\n db 38,SLEEP_POWDER\n db 43,PSYCHIC_M\n db 0''',\n \"DRATINI\" :''' db 10,THUNDER_WAVE\n db 15,WATER_GUN\n db 20,AGILITY\n db 30,SLAM\n db 40,DRAGON_RAGE\n db 50,HYPER_BEAM\n db 0''',\n \"KABUTO\" :''' db 15,WATER_GUN\n db 34,ABSORB\n db 39,SLASH\n db 44,LEER\n db 49,HYDRO_PUMP\n db 0''',\n \"GOLDEEN\" :''' db 15,WATER_GUN\n db 19,SUPERSONIC\n db 24,HORN_ATTACK\n db 30,FURY_ATTACK\n db 37,WATERFALL\n db 45,HORN_DRILL\n db 54,AGILITY\n db 0''',\n \"AERODACTYL\" :''' db 20,ROCK_THROW\n db 33,SUPERSONIC\n db 38,BITE\n db 45,TAKE_DOWN\n db 54,HYPER_BEAM\n db 0''',\n \"MAGNEMITE\" :''' db 15,THUNDERSHOCK\n db 21,SONICBOOM\n db 29,SUPERSONIC\n db 35,THUNDER_WAVE\n db 41,SWIFT\n db 47,SCREECH\n db 0'''\n}\n\n\nend_dex = {}\n\n# this block is in charge of determining how the pokemon get switched around\nfor pokemon in base_dex:\n ogfam = base_dex[pokemon]\n famsize = len(ogfam)\n \n if (famsize == 1):\n famlist = famlist1\n elif (famsize == 2):\n famlist = famlist2\n elif (famsize == 3):\n famlist = famlist3\n \n newfam = famlist[randint(0, len(famlist)-1)]\n for i in range(famsize):\n end_dex[ogfam[i]] = (newfam[i])\n famlist.remove(newfam)\n\n# this block builds out the list of files that need editing\nfiles_list = []\n# wild locations\ndir = os.path.join(os.getcwd(), 'pokered', 'data', 'wildPokemon')\nfor f in os.listdir(dir):\n if f.endswith('.asm'):\n files_list.append(dir + \"/\" + f)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'engine', 'items') # old rod encounters and the ghost Marowak\ndir += '/items.asm'\nfiles_list.append(dir)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'data') # good rod encounters\ndir += '/good_rod.asm'\nfiles_list.append(dir)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'data') # super rod encounters\ndir += '/super_rod.asm'\nfiles_list.append(dir)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'data') # casino prize pokemon\ndir += '/prizes.asm'\nfiles_list.append(dir)\ndir = os.path.join(os.getcwd(), 'pokered', 'data')\ndir += '/prize_mon_levels.asm'\nfiles_list.append(dir)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'data') # trade pokemon\ndir += '/trades.asm'\nfiles_list.append(dir)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'scripts') # Magikarp salesman\ndir += '/mtmoonpokecenter.asm'\nfiles_list.append(dir)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'constants') # starters\ndir += '/starter_mons.asm'\nfiles_list.append(dir)\n\ndir = os.path.join(os.getcwd(), 'pokered', 'engine', 'overworld') # fossils\ndir += '/cinnabar_lab.asm'\nfiles_list.append(dir)\n\n# gift pokemon\ndir = os.path.join(os.getcwd(), 'pokered', 'scripts') # Eevee\ndir += '/celadonmansion5.asm'\nfiles_list.append(dir)\ndir = os.path.join(os.getcwd(), 'pokered', 'scripts') # Hitmonlee / Hitmonchan\ndir += '/fightingdojo.asm'\nfiles_list.append(dir)\ndir = os.path.join(os.getcwd(), 'pokered', 'scripts') # Snorlax\ndir += '/route12.asm'\nfiles_list.append(dir)\ndir = os.path.join(os.getcwd(), 'pokered', 'scripts') # Lapras\ndir += '/silphco7.asm'\nfiles_list.append(dir)\n\n# overworld pokemon\ndir = os.path.join(os.getcwd(), 'pokered', 'scripts') # Snorlax\ndir += '/route16.asm'\nfiles_list.append(dir)\ndir = os.path.join(os.getcwd(), 'pokered', 'data', 'mapObjects') # powerplant Voltorbs / Electrodes\ndir += '/powerplant.asm'\nfiles_list.append(dir)\n\n# A E S T H E T I C S\ndir = os.path.join(os.getcwd(), 'pokered', 'data') # title screen pokemon\ndir += '/title_mons.asm'\nfiles_list.append(dir)\ndir = 
os.path.join(os.getcwd(), 'pokered', 'data') # credits screen pokemon\ndir += '/credit_mons.asm'\nfiles_list.append(dir)\n\n# this block does the actual data replacement in the asm files\nfor fl in files_list:\n with open(fl) as f:\n content = [x.strip('\\n') for x in f.readlines()]\n out_lines = []\n for line in content:\n new_line = ''\n for word in line.split():\n if word in end_dex:\n line = line.replace(word, end_dex[word])\n for word in line.split(\",\"):\n if word in end_dex:\n line = line.replace(word, end_dex[word])\n out_lines.append(line)\n out = open(fl, 'w')\n out_content = \"\\n\".join(out_lines)\n out.write(out_content)\n out.close()\n\n# this block edits the movesets of certain pokemon\ndir = os.path.join(os.getcwd(), 'pokered', 'data')\ndir += '/evos_moves.asm'\nwith open(dir) as f:\n content = f.read()\n out = open(dir, 'w')\n out_content = content\n for pogey in new_moves_dex:\n srx = re.search(';' + pogey + '\\n(.*\\n)*?;Learnset\\n((\\s*.*\\n)*?\\s*db 0)', out_content)\n out_content = re.sub( srx.group(2), new_moves_dex[pogey], out_content)\n out.write(out_content)\n out.close()\n\n# gym leader shuffle starts here\nclass Brock(object):\n def __init__ (self, ordinal):\n self.name = \"Brock\"\n self.sprite = 'SPRITE_BLACK_HAIR_BOY_2'\n self.opp = 'BROCK'\n self.ordinal = ordinal\n self.ai_line = '\\tdb 1,0 ; BROCK'\n self.move_list = [ \"db 1,BIDE\",\n \"db 1,ROCK_THROW\",\n \"db 2,ROCK_SLIDE\",\n \"db 2,ROCK_SLIDE\",\n \"db 3,ROCK_SLIDE\",\n \"db 3,ROCK_SLIDE\",\n \"db 3,ROCK_SLIDE\"]\n self.team = \"BrockData:\\n\\tdb $FF,\" \n self.team_list = [\"12,GEODUDE,14,ONIX,0\",\n \"18,GRAVELER,21,ONIX,0\",\n \"21,GRAVELER,18,AERODACTYL,24,ONIX,0\",\n \"29,GOLEM,24,AERODACTYL,29,ONIX,0\",\n \"37,OMANYTE,39,GOLEM,37,AERODACTYL,43,ONIX,0\",\n \"38,OMASTAR,37,GOLEM,38,AERODACTYL,43,ONIX,0\",\n \"42,OMASTAR,40,GOLEM,42,AERODACTYL,47,ONIX,0\"]\n self.team += self.team_list[self.ordinal]\n\nclass Misty(object):\n def __init__ (self, ordinal):\n self.name = \"Misty\"\n self.sprite = 'SPRITE_BRUNETTE_GIRL'\n self.opp = 'MISTY'\n self.ordinal = ordinal\n self.ai_line = '\\tdb 1,3,0 ; MISTY'\n self.move_list = [ \"db 1,BUBBLE\",\n \"db 1,BUBBLEBEAM\",\n \"db 2,BUBBLEBEAM\",\n \"db 2,BUBBLEBEAM\",\n \"db 3,BUBBLEBEAM\",\n \"db 3,BUBBLEBEAM\",\n \"db 3,BUBBLEBEAM\"]\n self.team = \"MistyData:\\n db $FF,\"\n self.team_list = [\"12,PSYDUCK,14,STARYU,0\",\n \"18,PSYDUCK,21,STARMIE,0\",\n \"21,PSYDUCK,18,LAPRAS,24,STARMIE,0\",\n \"29,GOLDUCK,24,LAPRAS,29,STARMIE,0\",\n \"37,TENTACOOL,39,GOLDUCK,37,LAPRAS,43,STARMIE,0\",\n \"38,TENTACRUEL,37,GOLDUCK,38,LAPRAS,43,STARMIE,0\",\n \"42,TENTACRUEL,40,GOLDUCK,42,LAPRAS,47,STARMIE,0\"]\n self.team += self.team_list[self.ordinal]\n\nclass Surge(object):\n def __init__ (self, ordinal):\n self.name = \"LtSurge\"\n self.sprite = 'SPRITE_ROCKER'\n self.opp = 'LT_SURGE'\n self.ordinal = ordinal\n self.ai_line = '\\tdb 1,3,0 ; LT_SURGE'\n self.move_list = [ \"db 1,THUNDERSHOCK\",\n \"db 1,THUNDERBOLT\",\n \"db 2,THUNDERBOLT\",\n \"db 2,THUNDERBOLT\",\n \"db 3,THUNDERBOLT\",\n \"db 3,THUNDERBOLT\",\n \"db 3,THUNDERBOLT\"]\n self.team = \"LtSurgeData:\\n\\tdb $FF,\"\n self.team_list = [\"12,VOLTORB,14,PIKACHU,0\",\n \"18,VOLTORB,21,RAICHU,0\",\n \"21,VOLTORB,18,ELECTABUZZ,24,RAICHU,0\",\n \"29,ELECTRODE,24,ELECTABUZZ,29,RAICHU,0\",\n \"37,MAGNEMITE,39,ELECTRODE,37,ELECTABUZZ,43,RAICHU,0\",\n \"38,MAGNETON,37,ELECTRODE,38,ELECTABUZZ,43,RAICHU,0\",\n \"42,MAGNETON,40,ELECTRODE,42,ELECTABUZZ,47,RAICHU,0\"]\n self.team += self.team_list[self.ordinal]\n\nclass 
Erika(object):\n def __init__ (self, ordinal):\n self.name = \"Erika\"\n self.sprite = 'SPRITE_ERIKA'\n self.opp = 'ERIKA'\n self.ordinal = ordinal\n self.ai_line = '\\tdb 1,3,0 ; ERIKA'\n self.move_list = [ \"db 1,MEGA_DRAIN\",\n \"db 1,MEGA_DRAIN\",\n \"db 2,MEGA_DRAIN\",\n \"db 2,MEGA_DRAIN\",\n \"db 3,MEGA_DRAIN\",\n \"db 3,MEGA_DRAIN\",\n \"db 3,MEGA_DRAIN\"]\n self.team = \"ErikaData:\\n db $FF,\"\n self.team_list = [\"12,BELLSPROUT,14,ODDISH,0\",\n \"18,WEEPINBELL,21,GLOOM,0\",\n \"21,WEEPINBELL,18,TANGELA,24,VILEPLUME,0\",\n \"29,VICTREEBEL,24,TANGELA,29,VILEPLUME,0\",\n \"37,EXEGGCUTE,39,VICTREEBEL,37,TANGELA,43,VILEPLUME,0\",\n \"38,EXEGGUTOR,37,VICTREEBEL,38,TANGELA,43,VILEPLUME,0\",\n \"42,EXEGGUTOR,40,VICTREEBEL,42,TANGELA,47,VILEPLUME,0\"]\n self.team += self.team_list[self.ordinal]\n\nclass Koga(object):\n def __init__ (self, ordinal):\n self.name = \"Koga\"\n self.sprite = 'SPRITE_BLACKBELT'\n self.opp = 'KOGA'\n self.ordinal = ordinal\n self.ai_line = '\\tdb 1,3,0 ; KOGA'\n self.move_list = [ \"db 1,TOXIC\",\n \"db 1,TOXIC\",\n \"db 2,TOXIC\",\n \"db 2,TOXIC\",\n \"db 3,TOXIC\",\n \"db 3,TOXIC\",\n \"db 3,TOXIC\"]\n self.team = \"KogaData:\\n db $FF,\"\n self.team_list = [\"12,GRIMER,14,KOFFING,0\",\n \"18,GRIMER,21,WEEZING,0\",\n \"21,GRIMER,18,GOLBAT,24,WEEZING,0\",\n \"29,MUK,24,GOLBAT,29,WEEZING,0\",\n \"37,NIDORINO,39,MUK,37,GOLBAT,43,WEEZING,0\",\n \"38,NIDOKING,37,MUK,38,GOLBAT,43,WEEZING,0\",\n \"42,NIDOKING,40,MUK,42,GOLBAT,47,WEEZING,0\"]\n self.team += self.team_list[self.ordinal]\n\nclass Sabrina(object):\n def __init__ (self, ordinal):\n self.name = \"Sabrina\"\n self.sprite = 'SPRITE_GIRL'\n self.opp = 'SABRINA'\n self.ordinal = ordinal\n self.ai_line = '\\tdb 1,3,0 ; BLAINE'\n self.move_list = [ \"db 1,PSYWAVE\",\n \"db 1,PSYWAVE\",\n \"db 2,PSYWAVE\",\n \"db 2,PSYWAVE\",\n \"db 3,PSYWAVE\",\n \"db 3,PSYWAVE\",\n \"db 3,PSYWAVE\"]\n self.team = \"SabrinaData:\\n db $FF,\"\n self.team_list = [\"12,VENONAT,14,KADABRA,0\",\n \"18,VENONAT,21,KADABRA,0\",\n \"21,VENONAT,18,MR_MIME,24,KADABRA,0\",\n \"29,VENOMOTH,24,MR_MIME,29,ALAKAZAM,0\",\n \"37,SLOWPOKE,39,MR_MIME,37,VENOMOTH,43,ALAKAZAM,0\",\n \"38,SLOWBRO,37,MR_MIME,38,VENOMOTH,43,ALAKAZAM,0\",\n \"42,SLOWBRO,40,MR_MIME,42,VENOMOTH,47,ALAKAZAM,0\"]\n self.team += self.team_list[self.ordinal]\n\nclass Blaine(object):\n def __init__ (self, ordinal):\n self.name = \"Blaine\"\n self.sprite = 'SPRITE_FAT_BALD_GUY'\n self.opp = 'BLAINE'\n self.ordinal = ordinal\n self.ai_line = '\\tdb 1,3,0 ; SABRINA'\n self.move_list = [ \"db 1,EMBER\",\n \"db 1,FLAMETHROWER\",\n \"db 2,FLAMETHROWER\",\n \"db 2,FIRE_BLAST\",\n \"db 3,FIRE_BLAST\",\n \"db 3,FIRE_BLAST\",\n \"db 3,FIRE_BLAST\"]\n self.team = \"BlaineData:\\n db $FF,\"\n self.team_list = [\"12,PONYTA,14,GROWLITHE,0\",\n \"18,PONYTA,21,ARCANINE,0\",\n \"21,PONYTA,18,MAGMAR,24,ARCANINE,0\",\n \"29,RAPIDASH,24,MAGMAR,29,ARCANINE,0\",\n \"37,VULPIX,39,RAPIDASH,37,MAGMAR,43,ARCANINE,0\",\n \"38,NINETALES,37,RAPIDASH,38,MAGMAR,43,ARCANINE,0\",\n \"42,NINETALES,40,MAGMAR,42,RAPIDASH,47,ARCANINE,0\"]\n self.team += self.team_list[self.ordinal]\n\n# shuffles the gym leader order\ngyms = [0, 1, 2, 3, 4, 5, 6]\nshuffle(gyms)\nleaders = [Brock(gyms[0]), Misty(gyms[1]), Surge(gyms[2]), Erika(gyms[3]), Koga(gyms[4]), Sabrina(gyms[5]), Blaine(gyms[6])]\nleaders.sort(key=lambda x: x.ordinal)\n\n# rewrites the party data for each leader\ndir = os.path.join(os.getcwd(), 'pokered', 'data')\ndir += '/trainer_parties.asm'\nfor leader in leaders:\n with open(dir) as f:\n content = 
f.read()\n out = open(dir, 'w')\n out_content = re.sub( leader.name+'Data.*\\n.*0', leader.team, content)\n out.write(out_content)\n out.close()\n\n# swaps out the map sprites and encounter data for each gym\ncity_list = ['/pewtergym.asm', '/ceruleangym.asm', '/vermiliongym.asm', \n '/celadongym.asm', '/fuchsiagym.asm', '/saffrongym.asm', '/cinnabargym.asm']\ndir = os.path.join(os.getcwd(), 'pokered', 'data', 'mapObjects')\nfor city in range(len(city_list)):\n temp_dir = dir + city_list[city]\n leader = leaders[city]\n with open(temp_dir) as f:\n content = f.read()\n out = open(temp_dir, 'w')\n out_content = re.sub( 'object .*?\\,', 'object ' + leader.sprite + ',', content, 1)\n out_content = re.sub( 'OPP_.*,', 'OPP_' + leader.opp + ',', out_content, 1)\n out.write(out_content)\n out.close()\n\n# swaps around the special trainer moves\nnew_moves = ''\nfor leader in leaders:\n new_moves += ('\\t' + leader.move_list[leader.ordinal] + '\\n')\ndir = os.path.join(os.getcwd(), 'pokered', 'data')\ndir += '/trainer_moves.asm'\nwith open(dir) as f:\n content = f.read()\n out = open(dir, 'w')\n out_content = re.sub('(\\tdb \\d\\,.*\\n){7}', new_moves, content)\n out.write(out_content)\n out.close()\n\n# swaps around the ai\ndir = os.path.join(os.getcwd(), 'pokered', 'engine', 'battle')\ndir += '/trainer_ai.asm'\nnew_ais = '' # gotta build out the new_ais string\nfor leader in leaders:\n new_ais += (leader.ai_line + '\\n')\nog_ais = ''\nwith open(dir) as f: # gotta build out the og_ais string\n content = f.read()\n srx = re.search('db.*BRUNO.*\\n(.*\\n)(.*\\n)(.*\\n)(.*\\n)(.*\\n)(.*\\n)(.*\\n)', content)\n for g in srx.groups():\n og_ais += g\nwith open(dir) as f:\n content = f.read()\n out = open(dir, 'w')\n out_content = re.sub( og_ais, new_ais, content)\n out.write(out_content)\n out.close()\n\n\n\n# # gym leader stuff to consider:\n# # text stuff:\n# # pokered/scripts/pewtergym.asm\n# # 'Gym1LeaderName:\\n\\tdb \"BROCK@\"'\n# # in the same vein, probably pokered/scripts/pewtercity.asm for the sign outside the gym\n# # TM stuff (for all gyms, not just pewter):\n# # pokered/scripts/pewtergym\n# # etc\n# # dialog:\n# # pokered/text/maps/pewter_gym_1.asm\n# # pokered/text/maps/pewter_gym_2.asm\n# # pokered/text/maps/cerulean_gym.asm\n# # etc\n\n","sub_path":"RedBlue/randomize.py","file_name":"randomize.py","file_ext":"py","file_size_in_byte":22700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"70702196","text":"def solution(play_time, adv_time, logs):\n # 1. 문자열 입력값을 초로 변환\n play_time = str_to_int(play_time)\n adv_time = str_to_int(adv_time)\n\n\n # 2. 구간별 배열\n all_time = [0 for i in range(play_time + 1)]\n\n for log in logs:\n start, end = log.split('-')\n start = str_to_int(start)\n end = str_to_int(end)\n all_time[start] += 1\n all_time[end] -= 1\n\n # 3. 구간별 시청자 수 구하기\n for i in range(1, len(all_time)):\n all_time[i] = all_time[i] + all_time[i - 1]\n\n # 4. 구간별 시청자 수의 누적합 구하기\n for i in range(1, len(all_time)):\n all_time[i] = all_time[i] + all_time[i - 1]\n\n # 5. 플레이 타임동안 누적 시청자 수의 최대값인 구간 구하기 \n most_view = 0\n max_time = 0\n for start_time in range(1, play_time):\n if start_time + adv_time < play_time:\n end_time = start_time + adv_time\n else:\n end_time = play_time\n sum_played = all_time[end_time] - all_time[start_time]\n if most_view < sum_played:\n most_view = sum_played\n max_time = start_time + 1\n # 최대값 반환 \n return int_to_str(max_time)\n\n\n# 1-1. 문자열을 숫자로 변환하는 함수\ndef str_to_int(time):\n h, m, s = time.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)\n\n# 5-1. 숫자를 문자열로 변환하는 함수 \ndef int_to_str(time):\n h = time // 3600\n h = '0' + str(h) if h < 10 else str(h)\n time = time % 3600\n m = time // 60\n m = '0' + str(m) if m < 10 else str(m)\n time = time % 60\n s = '0' + str(time) if time < 10 else str(time)\n return h + ':' + m + ':' + s","sub_path":"Wonyoung/Programmers/kakao_광고삽입.py","file_name":"kakao_광고삽입.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"637055135","text":"import pandas as pd\nimport numpy as np\nimport random\nimport sys\n# sys.path.append('/home/maciej/Documents/gazspot-prediction/preprocessing_danych')\nsys.path.append('/data/mkonieczka/gazspot-prediction/')\n\nimport time\n\nfrom preprocessing_danych.dataset_config import *\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.metrics import mean_squared_error as mse\nimport matplotlib.pyplot as plt\n\n# !pip install tensorflow==1.6\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization, SimpleRNN\nfrom tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint\n\ndef build_model(input_shape,\n rnn_size, \n dropout_level, \n batch_normalization, \n hidden_dense_layer_size,\n optimazer,\n learning_rate, \n batch_size):\n model = Sequential()\n\n\n model.add(LSTM(rnn_size, input_shape=(input_shape)))\n if (dropout_level > 0): model.add(Dropout(dropout_level))\n if batch_normalization: model.add(BatchNormalization())\n if (hidden_dense_layer_size > 0):\n model.add(Dense(hidden_dense_layer_size, activation='relu'))\n if (dropout_level > 0): model.add(Dropout(0.2))\n\n model.add(Dense(1))\n opts = {'rmsprop': tf.keras.optimizers.RMSprop(learning_rate=learning_rate, decay=1e-6),\n 'adam': tf.keras.optimizers.Adam(lr=learning_rate, decay=1e-6)}\n opt = opts[optimazer]\n \n # Compile model\n model.compile(\n loss='mse',\n optimizer=opt\n )\n return model\n\ndef preprocesing_data(series, time_window_len):\n \n sequential_data = []\n for idx in range(len(series) - time_window_len):\n sequential_data.append([np.array(series.values[idx : idx+time_window_len]), series.values[idx+time_window_len]])\n \n X = []\n y = []\n for seq, target in sequential_data: \n X.append(seq) \n y.append(target) \n X = np.array(X)\n X = np.reshape(X, (X.shape[0], X.shape[1], 1))\n indexes = series.index[time_window_len:]\n return X, np.array(y), indexes\n\ndef shuffle_dataset(X, y, rseed=0):\n random.seed(rseed)\n a = np.arange(len(y))\n random.shuffle(a)\n return X[a], y[a]\n\n\n# preprocessing configs\n\nPRED_LEN = 1\n\n\ndiff_functions = ['pct_change', 'diff']\nscalers = ['MiMax', 'STD']\nsequence_sizes = [7, 14, 21, 28, 49]\n\n\n# LSTM configs \n\nMODEL_TYPE = 'LSTM1'\nEPOCHS = 30\n\nrnn_sizes = [64, 256]\ndropout_levels = [0.1]\nbatch_normalizations = [True]\nhidden_dense_layer_sizes = [0, 32]\noptimazers = ['adam']\nlearning_rates = [0.001]\nbatch_sizes = [8]\n\n\n#TODO: wykorzystanie analizy z ARIMY odnośnie wpływających danych - sezonowość\n\n# Wczytanie danych\n# df = pd.read_csv(\"/content/drive/My Drive/Mgr_gas_transaction/tge_spot_preprocessed.csv\",index_col=['TransactionDate'], parse_dates=['TransactionDate'])\n# df = pd.read_pickle(\"data/tge_spot_preprocessed.p\")\ndf = pd.read_csv(\"data/tge_spot_preprocessed.csv\", index_col=['TransactionDate'], parse_dates=['TransactionDate'])\nfor diff_function in diff_functions:\n if (diff_function == 'pct_change'):\n df_new = df.pct_change().dropna()\n if (diff_function == 'diff'):\n df_new = df.diff().dropna()\n \n for scaler_name in scalers:\n if (scaler_name == 'STD'):\n scaler = StandardScaler()\n if (scaler_name == 'MiMax'):\n scaler = MinMaxScaler(feature_range=(0,1))\n \n df_scalled = pd.DataFrame(scaler.fit_transform(df_new), index=df_new.index, columns=['TGEgasDA'])\n df_scalled.dropna(inplace=True)\n\n for sequence_size in sequence_sizes:\n\n X, y, 
idxes = preprocesing_data(df_scalled, sequence_size)\n\n split_test_idx = idxes.astype(str).to_list().index(test_index[0])\n split_idx = idxes.astype(str).to_list().index(val_index[0])\n\n\n train_X, train_y = X[:split_idx], y[:split_idx]\n val_X, val_y = X[split_idx : split_test_idx], y[split_idx : split_test_idx]\n test_X, test_y = X[split_test_idx:], y[split_test_idx:]\n\n # randomize batch before train network\n train_X, train_y = shuffle_dataset(train_X, train_y)\n\n for rnn_size in rnn_sizes:\n for dropout_level in dropout_levels:\n for batch_normalization in batch_normalizations:\n for hidden_dense_layer_size in hidden_dense_layer_sizes:\n for optimazer in optimazers:\n for learning_rate in learning_rates:\n for batch_size in batch_sizes:\n \n model = build_model(train_X.shape[1:],\n rnn_size, \n dropout_level, \n batch_normalization, \n hidden_dense_layer_size,\n optimazer,\n learning_rate, \n batch_size)\n \n NAME = f\"{MODEL_TYPE}-{sequence_size}-W_LEN-{scaler_name}-SCL-{rnn_size}-RNN_S-{int(time.time())}\"\n tensorboard = TensorBoard(log_dir=f\"logs_{diff_function}/{NAME}\")\n \n model.fit(train_X, train_y, \n epochs=EPOCHS, batch_size=batch_size, \n validation_data=(val_X, val_y), \n callbacks=[tensorboard])\n model.save(f\"models_{diff_function}/{NAME}\")\n\n # score model\n pred_y = scaler.inverse_transform(model.predict(train_X))\n true_y = scaler.inverse_transform(train_y)\n print(f\"RMSE_train = {mse(true_y, pred_y) ** (1/2)}\")\n\n\n pred_y = scaler.inverse_transform(model.predict(val_X))\n true_y = scaler.inverse_transform(val_y)\n print(f\"RMSE_val = {mse(true_y, pred_y) ** (1/2)}\")\n\n\n\n\n\n\n\n\n\n# # scaler.inverse_transform(train_y.reshape(-1,1))\n# # scaler.inverse_transform(model.predict(train_X))\n\n# data = \n# # df_true = df[train_index[0]: val_index[0]][:-1]\n# # df_true = df[test_index[0]: test_index[1]]\n# df_true = df[val_index[0]: val_index[1]]\n\n# df_true['pct_change'] = df_true['TGEgasDA'].pct_change()\n# df_true['TGEgasDA_shift'] = df_true['TGEgasDA'].shift()\n# df_true.dropna(inplace=True)\n# df_true = df_true[TIME_WINDOW_LEN:]\n# print(len(df_true))\n# print(len(data))\n# df_true['pred'] = data[1:]\n# df_true['TGEgasDA_pred'] = df_true['TGEgasDA_shift'] + df_true['TGEgasDA_shift'] * df_true['pred']\n# df_true[-30:]\n\n# mse(df_true['pct_change'], df_true['pred']) ** (1/2)\n\n# df_true[['pct_change','pred']].plot(figsize=(20,4))\n\n# mse(df_true['TGEgasDA'][1:],df_true['TGEgasDA_pred'].shift().dropna()) ** (1/2)\n\n# mse(df_true['TGEgasDA'],df_true['TGEgasDA_pred']) ** (1/2)\n\n# df_true[['TGEgasDA','TGEgasDA_pred']].plot(figsize=(20,4))\n\n\n# # TODO: print RMSE on val_dataset _ invert on scaler","sub_path":"LSTM/train_lstm/lstm_1warstwa.py","file_name":"lstm_1warstwa.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"613317863","text":"str = input()\nsandro = ['D', 'o', 'n', 'a', 'l', 'd']\n\ndef calCost(char, expectedChar):\n\tif char == expectedChar:\n\t\treturn 0\n\n\tif (\n\t\t(char.islower() and expectedChar.islower()) or\n\t\t(char.isupper() and expectedChar.isupper()) or\n\t\t(char.lower() == expectedChar.lower())\n\t\t):\n\t\treturn 5\n\n\treturn 10\n\nmin = 100\nfor i in range(0, len(str) - len(sandro) + 1):\n\tcost = 0\n\tfor j in range(0, len(sandro)):\n\t\tcost += calCost(str[i+j], sandro[j])\n\n\tif cost < min:\n\t\tmin = cost\n\nprint(min)\n\n","sub_path":"Timus/1786.py","file_name":"1786.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"558004387","text":"'''Machine Leaning Model Class'''\n\n# Importing Libraries\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import timedelta\nfrom sklearn.preprocessing import RobustScaler\n\n# Neural Network library\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Dropout\nfrom tensorflow.keras.models import load_model\n\n\nclass MachineLearningModel:\n '''\n Class used to handle the following LSTM tasks:\n 1. Data pre-processing : Scaling Data\n 2. Model building : LSTM hidden layer structuring\n 3. Model training : Training model on technical indicator data\n 4. Model validation : Validating model\n 5. Exporting and saving the model : Saving as .h5 file recommended\n \n Functions\n ---------\n \n print_df() : prints the dataframe used to instantiate the class\n \n split_sequence() : splits the multivariate time sequence\n \n '''\n \n def __init__(self, \n data : pd.DataFrame,\n n_in : int = 100,\n n_out : int = 14,\n n_layers : int = 1,\n n_nodes : int = 30,\n epochs : int = 16,\n batch_size : int = 128,\n validation_split : float = 0.1,\n activation : str = \"tanh\",\n optimizer : str ='adam', \n loss : str ='mse'):\n '''\n Parameters\n ----------\n data : DataFrame\n data consisting of technical indicators and market data\n \n n_in : int\n number of periods looking back to learn\n default = 100\n \n n_out : int\n number of periods to predict\n default = 30\n \n n_layers : int\n number of hidden layers in add_layer() class method\n will build n number of hidden layers for the model\n default = 1\n \n n_nodes : int\n number of nodes in each layer built by the add_layer() method\n each layer built by the above-mentioned method will contain n nodes\n default = 30\n \n epochs : int\n number of epochs for LSTM training\n default = 50\n \n batch_size : int\n batch size for LSTM trainig, number of data-items per epoch\n default = 128\n \n validation_split : float\n amount of data to be used for model validation during training\n default = 0.1\n \n activateion : str\n activation method used by the LSTM model\n default = \"tanh\" \n tanh : Sigmoid specifically, is used as the gating function for the three gates (in, out, and forget) in LSTM, since it outputs a value between 0 and 1, and it can either let no flow or complete flow of information throughout the gates.\n Full list of all activation functions: \n https://www.tensorflow.org/api_docs/python/tf/keras/activations\n \n optimizer : str\n optimzer used by LSTM model\n default = \"adam\"\n adam: Acronynm for \"adaptive moment estimation\". \n Adam is an optimization algorithm that can be used instead of the classical stochastic gradient descent procedure to update network weights iterative based in training data. Adam was presented by Diederik Kingma from OpenAI and Jimmy Ba from the University of Toronto in their 2015 ICLR paper (poster) titled “Adam: A Method for Stochastic Optimization“. I will quote liberally from their paper in this post, unless stated otherwise.\n Full list of all optimizers:\n https://www.tensorflow.org/api_docs/python/tf/keras/optimizers\n \n loss : str\n loss function used by the LSTM model\n default = mse\n mse : Acronym for \"mean squared error\".\n MSE is sensitive towards outliers and given several examples with the same input feature values, the optimal prediction will be their mean target value. 
\n '''\n \n self.df = data\n self.n_in = n_in\n self.n_out = n_out\n self.n_layers = n_layers\n self.n_nodes = n_nodes\n self.epochs = epochs\n self.batch_size = batch_size\n self.validation_split = validation_split\n self.activation = activation\n self.optimizer = optimizer\n self.loss = loss\n \n # LSTM class variables\n self.n_features = None\n self.close_scaler = None\n self.model = None\n self.train_df = None\n self.predictions = None\n self.rmse_value = None\n \n\n ########################\n ### Helper Functions ###\n ########################\n def print_df(self):\n '''Prints DataFrame head'''\n print(self.df.head())\n \n def get_model_summary(self):\n '''Prints Model Summary'''\n try:\n self.model.summary()\n except:\n print(\"Model is not built.\")\n \n def get_model(self):\n '''Returns Model Object'''\n return self.model\n \n def drop_columns(self, cols : list = ['open', 'high', 'low', 'volume']):\n '''Drops pd.DataFrame colums'''\n try:\n self.df.drop(columns=cols, inplace=True)\n except:\n print(\"Dataframe un-used columns alread dropped\")\n \n def set_model_shape(self):\n '''Sets model shape by passing shape to n_features'''\n self.n_features = self.df.shape[1]\n \n \n \n #################################\n ### LSTM Model Data Functions ###\n #################################\n def split_sequence(self, sequence):\n '''\n Splits the multivariate time sequence\n \n Parameters\n ----------\n sequence : np.array\n numpy array of the dataframe used to train the model\n \n Returns\n -------\n X, y : np.array\n Time sequence values for X and y portions of the dataset\n '''\n\n # creating a list for both variables\n X, y = [], []\n\n for i in range(len(sequence)):\n\n # finding the end of the current sequence\n end = i + self.n_in\n out_end = end + self.n_out\n\n # breaking out of the loop if we have exceeded the dataset length\n if out_end > len(sequence):\n break\n\n # splitting the sequences into: x = past prices and indicators, y = prices ahead\n sequence_x, sequence_y = sequence[i:end, :], sequence[end:out_end, 0]\n\n X.append(sequence_x)\n y.append(sequence_y)\n\n return np.array(X), np.array(y)\n\n \n def add_hidden_layers(self, \n n_layers : int, \n n_nodes : int, \n activation : int, \n drop : int = None, \n drop_rate : float = 0.5):\n '''\n Creates a specific amount of hidden layers for the model\n \n Parameters\n ----------\n n_layers : int\n number of layers to be added to the model\n \n n_nodes : int\n number of nodes to be added to each layer\n \n activation : str\n activation function used by each layers in the model\n Full list of all activation functions: \n https://www.tensorflow.org/api_docs/python/tf/keras/activations\n \n drop : int\n every n-th hidden layer after which a Dropout layer to be added\n \n drop_rate : float\n rate for each Dropout layer\n default = 0.5\n \n '''\n\n # creating the specified number of hidden layers with the specified number of nodes\n for x in range(1,n_layers+1):\n self.model.add(LSTM(n_nodes, activation=activation, return_sequences=True))\n\n # adds a Dropout layer after every n-th hidden layer\n try:\n if x % drop == 0:\n self.model.add(Dropout(drop_rate))\n except:\n pass\n \n def add_dense_layers(self, n_layers : int, n_out : int):\n '''\n Creates a specific amount of Dense layers for the model\n \n Parameters\n ----------\n n_layers : int\n number of layers to be added to the model \n '''\n\n # creating the specified number of hidden layers with the specified number of nodes\n for x in range(1,n_layers+1):\n 
self.model.add(Dense(n_out))\n \n def validate(self):\n '''Vaildates predictions'''\n self.predictions = self.validater()\n self.rmse()\n\n \n def validater(self):\n '''\n Creates predicted values.\n \n Returns\n -------\n predictions : pd.DataFrame\n Predicted values for the model\n '''\n \n # create empty pd.DataFrame to store predictions\n predictions = pd.DataFrame(index=self.train_df.index, columns=[self.train_df.columns[0]])\n\n for i in range(self.n_in, len(self.train_df)-self.n_in, self.n_out):\n # create data time windows\n x = self.train_df[-i - self.n_in:-i]\n # predict using the time window\n y_pred = self.model.predict(np.array(x).reshape(1, self.n_in, self.n_features))\n \n # inverse the close scaler to return 'close' values\n y_pred = self.close_scaler.inverse_transform(y_pred)[0]\n \n # store values and append using business-days as frequency\n pred_df = pd.DataFrame(y_pred, \n index=pd.date_range(start=x.index[-1], \n periods=len(y_pred), \n freq=\"B\"),\n columns=[x.columns[0]])\n \n # Updating the predictions DF\n predictions.update(pred_df)\n \n predictions = predictions.fillna(method='bfill')\n\n return predictions\n\n\n def rmse(self):\n '''\n Calculates the RMS (root mean square) error between the two pd.Dataframes\n '''\n df = pd.DataFrame(self.df['close'].copy())\n df['close_pred'] = self.predictions\n df.dropna(inplace=True)\n df['diff'] = df['close'] - df['close_pred']\n rms = (df[['diff']]**2).mean()\n error = float(np.sqrt(rms))\n self.rmse_value = error\n\n \n \n ######################################\n ### LSTM Model Builder and Trainer ###\n ######################################\n def build_model(self, summary : int = 1, verbose : int = 0):\n '''\n Trains LSTM model : \n 1. Scales the data using RobustScaler()\n Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the quantile range. Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the transform method. \n Ref : https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html\n \n 2. Splits the sequence into X and y : self.split_sequence()\n \n 3. Builds LSTM model : hard-coded layers and self.add_hidden_layers()\n \n 4. 
Trains LSTM Model\n \n Returns\n -------\n trained_model : tf.model\n Trained LSTM model history\n '''\n # drop un-used columns from pd.DataFrame\n self.drop_columns()\n \n # set self.n_features parameter\n self.set_model_shape()\n \n # deep copy the pd.DataFrame containing technical indicators\n self.train_df = self.df.copy(deep=True)\n\n # declare a scaler using RobustScaler() for 'close' data\n self.close_scaler = RobustScaler()\n self.close_scaler.fit(self.train_df[['close']])\n \n # declare a scaler using RobustScaler() for technical indicator data\n scaler = RobustScaler()\n \n # scale the data\n self.train_df = pd.DataFrame(scaler.fit_transform(self.train_df), columns=self.train_df.columns, index=self.train_df.index)\n \n # split data into appropriate sequences\n X, y = self.split_sequence(self.train_df.to_numpy())\n \n # instatiate the TensorFlow model\n self.model = Sequential()\n\n # create an input layer\n self.model.add(LSTM(90, \n activation=self.activation, \n return_sequences=True, \n input_shape=(self.n_in, self.n_features)))\n\n # add hidden layers\n self.add_hidden_layers(n_layers=self.n_layers, \n n_nodes=self.n_nodes, \n activation=self.activation)\n\n # add the last hidden layer\n self.model.add(LSTM(60, activation=self.activation))\n\n # add output layers\n self.add_dense_layers(n_layers=1, n_out=30)\n self.add_dense_layers(n_layers=1, n_out=self.n_out)\n\n # compile the data\n self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=['accuracy'])\n \n if summary == 1:\n self.model.summary()\n\n hist = self.model.fit(X, y, \n epochs=self.epochs, \n batch_size=self.batch_size,\n validation_split=self.validation_split, \n verbose=verbose)\n\n return hist\n\n \n #####################################\n ### Model export/import functions ###\n #####################################\n def save_model(self, filename : str, filetype : str = 'h5'):\n '''Saves model'''\n \n if filetype == 'h5':\n '''Saves the entire model'''\n self.model.save(filename+'.h5')\n \n elif filetype == 'json':\n '''Saves only model architecture'''\n string = self.model.to_json()\n return string\n \n elif filetype == 'weights':\n '''Saves model weights'''\n self.model.save_weights(filepath+'.h5')\n \n else:\n print(\"Incorrect model file type.\")\n \n \n def load_model(self, filename : str):\n '''Loads model'''\n self.model = load_model(filename)\n \n ###############################\n ### Visualization Functions ###\n ###############################\n def visualize_training_results(self, hist):\n '''\n Visualizes the training results \n '''\n \n # plot\n history = hist.history\n plt.figure(figsize=(16,5))\n plt.plot(history['val_loss'])\n plt.plot(history['loss'])\n plt.legend(['val_loss', 'loss'])\n plt.title('Loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.show()\n \n plt.figure(figsize=(16,5))\n plt.plot(history['val_accuracy'])\n plt.plot(history['accuracy'])\n plt.legend(['val_accuracy', 'accuracy'])\n plt.title('Accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.show()\n\n \n def visualize_training_price(self):\n '''\n Visualizes Actual vs. Predicted stock price\n '''\n\n # plot\n plt.figure(figsize=(16,6))\n plt.plot(self.predictions, label='Predicted')\n plt.plot(self.df[\"close\"], label='Actual')\n plt.title(f\"Predicted vs. 
Actual Closing Prices\")\n plt.ylabel(\"Price, $USD\")\n plt.legend()\n plt.show()\n \n \nclass ForecastPrice:\n '''Class is used to forecast Closing price of stock based on pre-trained LSTM model'''\n def __init__(self,\n data : pd.DataFrame,\n n_in : int = 100,\n n_out : int = 14):\n \n self.df = data\n self.n_in = n_in\n self.n_out = n_out\n \n \n # Model class parameters\n self.model = None\n self.scaler = None\n self.close_scaler = None\n self.n_features = None\n self.forecasted_price = None\n \n \n \n ########################\n ### Helper Functions ###\n ########################\n def print_df(self):\n '''Prints DataFrame head'''\n print(self.df.head())\n \n def get_model_summary(self):\n '''Prints Model Summary'''\n try:\n self.model.summary()\n except:\n print(\"Model is not built.\")\n \n def get_model(self):\n '''Returns Model Object'''\n return self.model\n \n def drop_columns(self, cols : list = ['open', 'high', 'low', 'volume']):\n '''Drops pd.DataFrame colums'''\n try:\n self.df.drop(columns=cols, inplace=True)\n except:\n print(\"Dataframe un-used columns alread dropped\")\n \n def set_model_shape(self):\n '''Sets model shape by passing shape to n_features'''\n self.n_features = self.df.shape[1]\n \n def load_model(self, filename : str):\n '''Loads model'''\n self.model = load_model(filename)\n \n def forecast(self):\n '''Forecasts stock price based on pre-trained LSTM model'''\n \n # drop un-used columns from pd.DataFrame\n self.drop_columns()\n \n # set self.n_features parameter\n self.set_model_shape()\n \n # deep copy the pd.DataFrame containing technical indicators\n forecast_df = self.df.copy(deep=True)\n \n self.close_scaler = RobustScaler()\n self.close_scaler.fit(forecast_df[['close']])\n \n self.scaler = RobustScaler()\n transformed_forecast_df = pd.DataFrame(self.scaler.fit_transform(forecast_df), \n columns=forecast_df.columns, \n index=forecast_df.index).tail(self.n_in)\n \n \n # transform technical analysis data to np.array\n forecast_arr = np.array(transformed_forecast_df).reshape(1, \n self.n_in, \n self.n_features)\n \n # predicting off of the new data\n pred_y = self.model.predict(forecast_arr)\n \n # inverse_transform the predicted values back to original scale\n pred_y = self.close_scaler.inverse_transform(pred_y)[0]\n \n # parse perdicted values to pd.DataFrame, adjust date scale (index)\n preds = pd.DataFrame(pred_y, \n index=pd.date_range(start=forecast_df.index[-1]+timedelta(days=1), \n periods=len(pred_y)), \n columns=[forecast_df.columns[0]])\n \n # set class variable\n self.forecasted_price = preds\n \n return preds\n \n \n","sub_path":"code/ml/lstm_model.py","file_name":"lstm_model.py","file_ext":"py","file_size_in_byte":19317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"48803842","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport bz2\nimport os\n\noutput = bz2.BZ2File('esempio.txt.bz2', 'wb')\ntry:\n output.write('Il contenuto del file di esempio va qui..\\n')\nfinally:\n output.close()\n\nos.system('file esempio.txt.bz2')\n","sub_path":"dumpscripts/bz2_file_write.py","file_name":"bz2_file_write.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"86332170","text":"import bpy\nimport copy\nfrom copy import deepcopy\n\n#\n# export lamps from the scene\n#\ndef export_point_lamp(object):\n global text_offset\n old = text_offset \n text_offset = 0\n file = object.data.name + \".point_lamp\"\n print(file)\n f = open(file, \"w\")\n f.write(\"POINTLAMPTEXT\\n\") \n lamp = object.data \n start_block(f, lamp.name)\n export_vec3(f, \"*color\", lamp.color)\n export_float(f, \"*distance\", lamp.distance)\n export_float(f, \"*energy\", lamp.energy)\n export_float(f, \"*linear_attenuation\", lamp.linear_attenuation)\n export_float(f, \"*quadratic_attenuation\", lamp.quadratic_attenuation)\n end_block(f);\n f.close() \n text_offset = old\n return\n \n \ndef export_point_lamps(f):\n if not (\"*point_lamp\" in used_entities.keys()):\n return\n for object in used_entities[\"*point_lamp\"]:\n data = object.data\n if data != None:\n export_point_lamp(object)\n return\n\ndef export_directional_light(object):\n global text_offset\n old = text_offset \n text_offset = 0\n file = object.data.name + \".dir_lamp\"\n print(file)\n f = open(file, \"w\")\n f.write(\"DIRLAMPTEXT\\n\") \n lamp = object.data \n start_block(f, lamp.name)\n export_vec3(f, \"*color\", lamp.color)\n export_float(f, \"*distance\", lamp.distance)\n export_float(f, \"*energy\", lamp.energy)\n export_vec3(f, \"*direction\", [0, 0, 1])\n end_block(f);\n f.close() \n text_offset = old\n return\n\ndef export_dir_lamps(f):\n if not (\"*directional_lamp\" in used_entities.keys()):\n return\n for object in used_entities[\"*directional_lamp\"]:\n data = object.data\n if data != None:\n export_directional_light(object)\n return","sub_path":"source/audio/blender/punk_exporter/punk_export_light.py","file_name":"punk_export_light.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"629542411","text":"from pathlib import Path\nfrom requests_pkcs12 import post\nimport re\nimport asyncio\nfrom app.services import Sanitize_Calls\nimport urllib3\n\nurl = \"https://connect.dcbankapi.ca:35345/integrationapi/v1.0/Atm/SearchAtmLocator\"\nheader = {'Content-Type': 'application/json'}\nlocations = dict()\n\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # to remove warning message\nasync def get_atm_locations(latitude: float, longitude: float):\n\n data = {\"latitude\": latitude, \"longitude\": longitude}\n\n data_check = await Sanitize_Calls.get_atm_locations_check(latitude, longitude)\n\n if True in data_check:\n r = post('{}'.format(url), json=data, headers=header, verify=False)\n\n items = r.text.split('{')\n counter = 0\n\n while counter < 30:\n fields = items[counter + 3].split(\":\")\n split_lat = fields[7].split(\",\")\n split_long = fields[8].split(\",\")\n locations[split_lat[0]] = split_long[0]\n counter += 1\n else: \n return False, data_check\n\n return True, locations\n","sub_path":"main-service-master/mount/app/services/DC_Bank_Calls.py","file_name":"DC_Bank_Calls.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"368804830","text":"from flask import Flask, request, redirect\r\nfrom twilio.twiml.messaging_response import MessagingResponse\r\n\r\n@app.route(\"/sms\", methods=['GET', 'POST'])\r\ndef sms_reply():\r\n \"\"\"Respond to incoming calls with a simple text message.\"\"\"\r\n # Start our TwiML response\r\n resp = MessagingResponse()\r\n\r\n # Add a message\r\n resp.message(\"Welcome everyone to my presentation!!\")\r\n\r\n return str(resp)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n\t\r\n\t\r\n\t\r\n\r\n\r\n\r\n\r\n","sub_path":"SMS_PPT.py","file_name":"SMS_PPT.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"259956280","text":"from genericpath import isfile\nimport PySimpleGUI as sg\nfrom instapy.util import smart_run\nimport connector as con\nimport threading\n\nsg.theme('Reddit')\n\ndef janela_login():\n #LAYOUT DEFINITION\n layout = [\n [sg.Text('Usuário : ',size=(10,0)),sg.Input(size=(15,0),key='user')],\n [sg.Text('Senha : ',size=(10,0)),sg.InputText(size=(15,0), key='password', password_char='*')],\n [sg.Button('Login'),sg.Button('Exit',key='Exit')],\n #[sg.Output(size=(60,15))],\n [sg.Image(r'robot.png')]\n ]\n # DEFINIR JANELA E PASSAR O LAYOUT COMO PARAMETRO\n return sg.Window(\"Bot for Instagram\",layout=layout,size=(400,200),element_justification='c',resizable=True,finalize=True)\n\n\ndef janela_principal():\n layout = [\n [sg.Text('Selecionar uma opção abaixo:')],\n [sg.Button('Auto-Follow'),sg.Button('Auto-Like'),sg.Button('Auto-Comment')]\n ]\n return sg.Window('Menu Principal',layout=layout,size=(300,300),finalize=True,element_justification='c',resizable=True,)\n\ndef janela_auto_follow():\n layout = [\n [sg.Text('Configurações')],\n [sg.Text('Tags a serem seguidas : ',size=(15,0)),sg.Input(size=(15,0),key='tags')],\n [sg.Text('Quantidade de seguidores : ',size=(15,0)),sg.Input(size=(15,0),key='qntdFollowers')],\n [sg.Button('Iniciar Follow'),sg.Button('Voltar')],\n [sg.Output(size=(60,15))]\n ]\n return sg.Window('Auto-Follow Config and Start',layout=layout,finalize=True,size=(400,250),element_justification='c',resizable=True,)\n\n\ndef janela_auto_like():\n layout = [\n [sg.Text('Tag para curtir : ',size=(15,0)),sg.Input(size=(15,0),key='tagsToLike')],\n [sg.Text('Quantidade de Likes : ',size=(15,0)),sg.Input(size=(15,0),key='qntdLikes')],\n [sg.Button('Iniciar Likes'),sg.Button('Voltar')],\n [sg.Output(size=(60,15))]\n\n ]\n return sg.Window('Auto-Like Config and Start',layout=layout,finalize=True,size=(400,250),element_justification='c',resizable=True,)\n\ndef janela_auto_comment():\n layout = [\n [sg.Text('Informe a url no qual você quer comentar: ',size=(15,0)),sg.Input(size=(15,0),key='urlToComment')],\n [sg.Text('Quantidade de comentários : ',size=(15,0)),sg.Input(size=(15,0),key='qntdComment')],\n [sg.Text('Quantidade de Usuários para Marcar por comentário : ',size=(15,0)),sg.Input(size=(15,0),key='qntdUserPerComment')],\n [sg.Button('Iniciar Auto-Comment'),sg.Button('Voltar')],\n [sg.Button('Salvar Nova Lista de Usuarios'),sg.Button('Listar Usuarios Salvos')],\n # [sg.Output(size=(60,15))]\n\n\n ]\n return sg.Window('Auto-Comment Config and Start',layout=layout,finalize=True,size=(400,600),element_justification='c',resizable=True,)\n\n\n\ntela_login = janela_login()\ntela_principal = None\ntela_auto_follow = None\ntela_auto_like = None\ntela_auto_comment = None\nglobal connection\n\n\nwhile True:\n window,event,values = sg.read_all_windows()\n # BLOCK OF CLOSE ACTIONS - START\n if window == tela_login and event == sg.WIN_CLOSED:\n break\n if window == tela_principal and event == sg.WIN_CLOSED:\n break\n if window == tela_auto_follow and event == sg.WIN_CLOSED:\n break\n if window == tela_auto_like and event == sg.WIN_CLOSED:\n break\n # BLOCK OF CLOSE ACTIONS - END\n\n # EVENTS SCREEN - LOGIN\n if window == tela_login:\n #print('Clicou no Login')\n user = values['user']\n password = values['password']\n \"\"\" print(f'User : {user}')\n print(f'Password : {password}') \"\"\"\n if (password == '' or user == '') and event == 'Login':\n sg.popup('Necessário Preencher Usuário e Senha!')\n elif (password != '' and user != '') and event == 'Login':\n 
sg.popup('Iniciando processo de Login - Aguarde até ser redirecionado para a pagina do seu Perfil!')\n print('Iniciando processo de Login - Aguarde até ser redirecionado para a pagina do seu Perfil!')\n connection = con.Connector(user,password)\n #connection.makeConnection()\n #con.Connector.\n #resultado = connection.makeConnection(user,password)\n sg.popup('Login realizado com sucesso!')\n #print(vars(resultado))\n #print(dir(resultado))\n tela_principal = janela_principal()\n tela_login.hide()\n \n if event == 'Exit':\n break\n # EVENTS SCREEN - MAIN - START\n if window == tela_principal:\n if event == 'Auto-Follow':\n tela_auto_follow = janela_auto_follow()\n tela_principal.hide()\n elif event == 'Auto-Like':\n tela_auto_like = janela_auto_like()\n tela_principal.hide()\n elif event == 'Auto-Comment':\n tela_auto_comment = janela_auto_comment()\n tela_principal.hide()\n # EVENTS SCREEN - MAIN - END\n\n # EVENTS SCREEN - AUTO-FOLLOW - START \n if window == tela_auto_follow:\n if event == 'Voltar':\n tela_principal = janela_principal()\n tela_auto_follow.hide()\n if event == 'Iniciar Follow':\n tags = values['tags']\n quantidadeSeguidores = int(values['qntdFollowers'])\n if tags == '' or quantidadeSeguidores == '':\n sg.popup('Necessário informar a quantidade de seguidores e as tags')\n else:\n #try:\n threading.Thread(target=connection.autoFollow,args=(tags,quantidadeSeguidores)).start()\n #except:\n # print('An exception ocurred')\n # EVENTS SCREEN - AUTO-LIKE - START \n if window == tela_auto_like:\n if event == 'Voltar':\n tela_principal = janela_principal()\n tela_auto_like.hide()\n if event == 'Iniciar Likes':\n tags = values['tagsToLike']\n quantidadeLikes = int(values['qntdLikes'])\n if tags == '' or quantidadeLikes == '':\n sg.popup('Necessário informar a quantidade de seguidores e as tags')\n else:\n try:\n threading.Thread(target=connection.autoLike,args=(tags,quantidadeLikes)).start()\n #connection.autoLike(tags,quantidadeLikes)\n except:\n print('An exception ocurred')\n\n # EVENTS SCREEN - AUTO-FOLLOW - END\n # EVENTS SCREEN - AUTO-COMMENT - START \n if window == tela_auto_comment:\n if event == 'Voltar':\n tela_principal = janela_principal()\n tela_auto_comment.hide()\n if event == 'Iniciar Auto-Comment':\n urlToLike = values['urlToComment']\n quantidadeComment = int(values['qntdComment'])\n qntdUserPerComment = int(values['qntdUserPerComment'])\n if urlToLike == '' or quantidadeComment == '' or qntdUserPerComment == '':\n sg.popup('Necessário informar a quantidade de comentáros, a quantidade de usuarios por comentários e a url para comentar')\n else:\n #try:\n print(urlToLike)\n threading.Thread(target=connection.autoComment,args=(urlToLike,quantidadeComment,qntdUserPerComment)).start()\n #except:\n #print('An exception ocurred')\n if event == 'Salvar Nova Lista de Usuarios':\n threading.Thread(target=connection.saveListFollowing).start()\n if event == 'Listar Usuarios Salvos':\n threading.Thread(target=connection.getListFollowing).start()\n # EVENTS SCREEN - AUTO-COMMENT - END\n","sub_path":"newLayout.py","file_name":"newLayout.py","file_ext":"py","file_size_in_byte":8153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"568441779","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2018 jem@seethis.link\n# Licensed under the MIT license (http://opensource.org/licenses/MIT)\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom enum import Enum\n\n###############################################################################\n# classes #\n###############################################################################\n\nclass USBDeviceInfo(object):\n def __init__(self, vid, pid, description):\n self.vid = vid\n self.pid = pid\n self.description = description\n\n def is_prototype(self):\n return self.vid == 0x6666\n\nclass USBKeyplusKeyboardInfo(USBDeviceInfo):\n def __init__(self, vid, pid, description, interface=3):\n super(USBKeyplusKeyboardInfo, self).__init__(\n vid, pid, description,\n )\n self.interface = interface\n\nclass USBBootloaderInfo(USBDeviceInfo):\n def __init__(self, vid, pid, bootloader, description):\n super(USBBootloaderInfo, self).__init__(\n vid, pid, description\n )\n self.bootloader = bootloader\n\n\n###############################################################################\n# constants #\n###############################################################################\n\nclass BootloaderType(Enum):\n XUSB_BOOT = 0\n NRF24LU1P_512 = 1\n NRF24LU1P_FACTORY = 2\n\n\nKEYPLUS_USB_IDS = {\n (0x6666, 0x1111): USBKeyplusKeyboardInfo(\n vid = 0x6666,\n pid = 0x1111,\n description = \"keyplus keyboard xmega (prototype)\",\n ),\n\n (0x6666, 0x3333): USBKeyplusKeyboardInfo(\n vid = 0x6666,\n pid = 0x3333,\n description = \"keyplus nRF24 wireless keyboard dongle (prototype)\",\n ),\n\n (0x1209, 0xBB00): USBKeyplusKeyboardInfo(\n vid = 0x1209,\n pid = 0xBB00,\n description = \"keyplus keyboard xmega\",\n ),\n\n (0x1209, 0xBB02): USBKeyplusKeyboardInfo(\n vid = 0x1209,\n pid = 0xBB02,\n description = \"keyplus nRF24 wireless keyboard dongle\",\n ),\n}\n\nBOOTLOADER_USB_IDS = {\n (0x6666, 0xB007): USBBootloaderInfo(\n vid = 0x6666,\n pid = 0xB007,\n bootloader = BootloaderType.XUSB_BOOT,\n description = \"xusb boot (prototype id)\",\n ),\n\n (0x1209, 0xBB01): USBBootloaderInfo(\n vid = 0x1209,\n pid = 0xBB01,\n bootloader = BootloaderType.XUSB_BOOT,\n description = \"keyplus xusb boot bootloader\",\n ),\n\n (0x1209, 0xBB03): USBBootloaderInfo(\n vid = 0x1209,\n pid = 0xBB03,\n bootloader = BootloaderType.NRF24LU1P_512,\n description = \"keyplus nrf24lu1p-512 bootloader\",\n ),\n\n (0x1915, 0x0101): USBBootloaderInfo(\n vid = 0x1915,\n pid = 0x0101,\n bootloader = BootloaderType.NRF24LU1P_FACTORY,\n description = \"Nordic nRF24LU1+ factory bootloader\",\n ),\n}\n\n###############################################################################\n# functions #\n###############################################################################\n\ndef is_keyplus_usb_id(vendor_id, product_id):\n return (vendor_id, product_id) in KEYPLUS_USB_IDS\n\ndef is_bootloader_usb_id(vendor_id, product_id):\n return (vendor_id, product_id) in BOOTLOADER_USB_IDS\n","sub_path":"host-software/keyplus/usb_ids.py","file_name":"usb_ids.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"353270046","text":"import requests\nimport math\nimport sys\nimport argparse\n \n# create parser\nparser = argparse.ArgumentParser()\n \n###\n## needs python3\n## run as \n## python3 case.py